drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
1 /******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 Intel Deutschland GmbH
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
25 *
26 * The full GNU General Public License is included in this distribution
27 * in the file called COPYING.
28 *
29 * Contact Information:
30 * Intel Linux Wireless <linuxwifi@intel.com>
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32 *
33 * BSD LICENSE
34 *
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
37 * All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 *
43 * * Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * * Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in
47 * the documentation and/or other materials provided with the
48 * distribution.
49 * * Neither the name Intel Corporation nor the names of its
50 * contributors may be used to endorse or promote products derived
51 * from this software without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
54 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
56 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
57 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
58 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
59 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
63 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 *
65 *****************************************************************************/
66 #include <linux/kernel.h>
67 #include <linux/slab.h>
68 #include <linux/skbuff.h>
69 #include <linux/netdevice.h>
70 #include <linux/etherdevice.h>
71 #include <linux/ip.h>
72 #include <linux/if_arp.h>
73 #include <linux/time.h>
74 #include <net/mac80211.h>
75 #include <net/ieee80211_radiotap.h>
76 #include <net/tcp.h>
77
78 #include "iwl-op-mode.h"
79 #include "iwl-io.h"
80 #include "mvm.h"
81 #include "sta.h"
82 #include "time-event.h"
83 #include "iwl-eeprom-parse.h"
84 #include "iwl-phy-db.h"
85 #include "testmode.h"
86 #include "iwl-fw-error-dump.h"
87 #include "iwl-prph.h"
88 #include "iwl-nvm-parse.h"
89 #include "fw-dbg.h"
90
91 static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
92 {
93 .max = 1,
94 .types = BIT(NL80211_IFTYPE_STATION),
95 },
96 {
97 .max = 1,
98 .types = BIT(NL80211_IFTYPE_AP) |
99 BIT(NL80211_IFTYPE_P2P_CLIENT) |
100 BIT(NL80211_IFTYPE_P2P_GO),
101 },
102 {
103 .max = 1,
104 .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
105 },
106 };
107
108 static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
109 {
110 .num_different_channels = 2,
111 .max_interfaces = 3,
112 .limits = iwl_mvm_limits,
113 .n_limits = ARRAY_SIZE(iwl_mvm_limits),
114 },
115 };
116
117 #ifdef CONFIG_PM_SLEEP
118 static const struct nl80211_wowlan_tcp_data_token_feature
119 iwl_mvm_wowlan_tcp_token_feature = {
120 .min_len = 0,
121 .max_len = 255,
122 .bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS,
123 };
124
125 static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
126 .tok = &iwl_mvm_wowlan_tcp_token_feature,
127 .data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN -
128 sizeof(struct ethhdr) -
129 sizeof(struct iphdr) -
130 sizeof(struct tcphdr),
131 .data_interval_max = 65535, /* __le16 in API */
132 .wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN -
133 sizeof(struct ethhdr) -
134 sizeof(struct iphdr) -
135 sizeof(struct tcphdr),
136 .seq = true,
137 };
138 #endif
139
140 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
141 /*
142 * Use the reserved field to indicate magic values.
 143  * These values are only used internally by the driver,
 144  * and will not make it to the fw (reserved will be 0).
 145  * BC_FILTER_MAGIC_IP - configure the val of this attribute to
 146  * be the vif's IP address. If the vif doesn't have exactly one
 147  * IP address (none, or more than one), this attribute will
 148  * be skipped.
 149  * BC_FILTER_MAGIC_MAC - set the val of this attribute to
 150  * the LSB bytes of the vif's MAC address.
151 */
152 enum {
153 BC_FILTER_MAGIC_NONE = 0,
154 BC_FILTER_MAGIC_IP,
155 BC_FILTER_MAGIC_MAC,
156 };
157
158 static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
159 {
160 /* arp */
161 .discard = 0,
162 .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
163 .attrs = {
164 {
165 /* frame type - arp, hw type - ethernet */
166 .offset_type =
167 BCAST_FILTER_OFFSET_PAYLOAD_START,
168 .offset = sizeof(rfc1042_header),
169 .val = cpu_to_be32(0x08060001),
170 .mask = cpu_to_be32(0xffffffff),
171 },
172 {
173 /* arp dest ip */
174 .offset_type =
175 BCAST_FILTER_OFFSET_PAYLOAD_START,
176 .offset = sizeof(rfc1042_header) + 2 +
177 sizeof(struct arphdr) +
178 ETH_ALEN + sizeof(__be32) +
179 ETH_ALEN,
180 .mask = cpu_to_be32(0xffffffff),
181 /* mark it as special field */
182 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
183 },
184 },
185 },
186 {
187 /* dhcp offer bcast */
188 .discard = 0,
189 .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
190 .attrs = {
191 {
 192 /* udp dest port - 68 (bootp client) */
193 .offset_type = BCAST_FILTER_OFFSET_IP_END,
194 .offset = offsetof(struct udphdr, dest),
195 .val = cpu_to_be32(0x00440000),
196 .mask = cpu_to_be32(0xffff0000),
197 },
198 {
199 /* dhcp - lsb bytes of client hw address */
200 .offset_type = BCAST_FILTER_OFFSET_IP_END,
201 .offset = 38,
202 .mask = cpu_to_be32(0xffffffff),
203 /* mark it as special field */
204 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
205 },
206 },
207 },
208 /* last filter must be empty */
209 {},
210 };
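/*
 * Note on the table above: the BC_FILTER_MAGIC_* markers stored in
 * .reserved1 are resolved per-vif at runtime by iwl_mvm_set_bcast_filter()
 * further down in this file, which fills .val from the vif's single ARP
 * (IPv4) address or the last four bytes of its MAC address and clears
 * .reserved1 before the command is sent to the firmware.
 */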
211 #endif
212
213 void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
214 {
215 if (!iwl_mvm_is_d0i3_supported(mvm))
216 return;
217
218 IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
219 spin_lock_bh(&mvm->refs_lock);
220 mvm->refs[ref_type]++;
221 spin_unlock_bh(&mvm->refs_lock);
222 iwl_trans_ref(mvm->trans);
223 }
224
225 void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
226 {
227 if (!iwl_mvm_is_d0i3_supported(mvm))
228 return;
229
230 IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
231 spin_lock_bh(&mvm->refs_lock);
232 if (WARN_ON(!mvm->refs[ref_type])) {
233 spin_unlock_bh(&mvm->refs_lock);
234 return;
235 }
236 mvm->refs[ref_type]--;
237 spin_unlock_bh(&mvm->refs_lock);
238 iwl_trans_unref(mvm->trans);
239 }
240
241 static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
242 enum iwl_mvm_ref_type except_ref)
243 {
244 int i, j;
245
246 if (!iwl_mvm_is_d0i3_supported(mvm))
247 return;
248
249 spin_lock_bh(&mvm->refs_lock);
250 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
251 if (except_ref == i || !mvm->refs[i])
252 continue;
253
254 IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n",
255 i, mvm->refs[i]);
256 for (j = 0; j < mvm->refs[i]; j++)
257 iwl_trans_unref(mvm->trans);
258 mvm->refs[i] = 0;
259 }
260 spin_unlock_bh(&mvm->refs_lock);
261 }
262
263 bool iwl_mvm_ref_taken(struct iwl_mvm *mvm)
264 {
265 int i;
266 bool taken = false;
267
268 if (!iwl_mvm_is_d0i3_supported(mvm))
269 return true;
270
271 spin_lock_bh(&mvm->refs_lock);
272 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
273 if (mvm->refs[i]) {
274 taken = true;
275 break;
276 }
277 }
278 spin_unlock_bh(&mvm->refs_lock);
279
280 return taken;
281 }
282
283 int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
284 {
285 iwl_mvm_ref(mvm, ref_type);
286
287 if (!wait_event_timeout(mvm->d0i3_exit_waitq,
288 !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status),
289 HZ)) {
290 WARN_ON_ONCE(1);
291 iwl_mvm_unref(mvm, ref_type);
292 return -EIO;
293 }
294
295 return 0;
296 }
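/*
 * Usage sketch (based on the callers in this file): flows that need the
 * device fully out of D0i3 bracket the work with iwl_mvm_ref_sync() and
 * iwl_mvm_unref(), for example:
 *
 *	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG);
 *	if (ret)
 *		return ret;
 *	... work that needs direct target access ...
 *	iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
 *
 * iwl_mvm_mac_ampdu_action() below follows exactly this pattern.
 */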
297
298 static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
299 {
300 int i;
301
302 memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
303 for (i = 0; i < NUM_PHY_CTX; i++) {
304 mvm->phy_ctxts[i].id = i;
305 mvm->phy_ctxts[i].ref = 0;
306 }
307 }
308
309 struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
310 const char *alpha2,
311 enum iwl_mcc_source src_id,
312 bool *changed)
313 {
314 struct ieee80211_regdomain *regd = NULL;
315 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
316 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
317 struct iwl_mcc_update_resp *resp;
318
319 IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
320
321 lockdep_assert_held(&mvm->mutex);
322
323 resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
324 if (IS_ERR_OR_NULL(resp)) {
325 IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
326 PTR_ERR_OR_ZERO(resp));
327 goto out;
328 }
329
330 if (changed)
331 *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);
332
333 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
334 __le32_to_cpu(resp->n_channels),
335 resp->channels,
336 __le16_to_cpu(resp->mcc));
 337 /* Store the returned source id */
338 src_id = resp->source_id;
339 kfree(resp);
340 if (IS_ERR_OR_NULL(regd)) {
 341 IWL_DEBUG_LAR(mvm, "Could not parse update from FW %d\n",
342 PTR_ERR_OR_ZERO(regd));
343 goto out;
344 }
345
346 IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
347 regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
348 mvm->lar_regdom_set = true;
349 mvm->mcc_src = src_id;
350
351 out:
352 return regd;
353 }
354
355 void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
356 {
357 bool changed;
358 struct ieee80211_regdomain *regd;
359
360 if (!iwl_mvm_is_lar_supported(mvm))
361 return;
362
363 regd = iwl_mvm_get_current_regdomain(mvm, &changed);
364 if (!IS_ERR_OR_NULL(regd)) {
365 /* only update the regulatory core if changed */
366 if (changed)
367 regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
368
369 kfree(regd);
370 }
371 }
372
373 struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
374 bool *changed)
375 {
376 return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
377 iwl_mvm_is_wifi_mcc_supported(mvm) ?
378 MCC_SOURCE_GET_CURRENT :
379 MCC_SOURCE_OLD_FW, changed);
380 }
381
382 int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
383 {
384 enum iwl_mcc_source used_src;
385 struct ieee80211_regdomain *regd;
386 int ret;
387 bool changed;
388 const struct ieee80211_regdomain *r =
389 rtnl_dereference(mvm->hw->wiphy->regd);
390
391 if (!r)
392 return -ENOENT;
393
394 /* save the last source in case we overwrite it below */
395 used_src = mvm->mcc_src;
396 if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
397 /* Notify the firmware we support wifi location updates */
398 regd = iwl_mvm_get_current_regdomain(mvm, NULL);
399 if (!IS_ERR_OR_NULL(regd))
400 kfree(regd);
401 }
402
403 /* Now set our last stored MCC and source */
404 regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
405 &changed);
406 if (IS_ERR_OR_NULL(regd))
407 return -EIO;
408
409 /* update cfg80211 if the regdomain was changed */
410 if (changed)
411 ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
412 else
413 ret = 0;
414
415 kfree(regd);
416 return ret;
417 }
418
419 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
420 {
421 struct ieee80211_hw *hw = mvm->hw;
422 int num_mac, ret, i;
423 static const u32 mvm_ciphers[] = {
424 WLAN_CIPHER_SUITE_WEP40,
425 WLAN_CIPHER_SUITE_WEP104,
426 WLAN_CIPHER_SUITE_TKIP,
427 WLAN_CIPHER_SUITE_CCMP,
428 };
429
430 /* Tell mac80211 our characteristics */
431 ieee80211_hw_set(hw, SIGNAL_DBM);
432 ieee80211_hw_set(hw, SPECTRUM_MGMT);
433 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
434 ieee80211_hw_set(hw, QUEUE_CONTROL);
435 ieee80211_hw_set(hw, WANT_MONITOR_VIF);
436 ieee80211_hw_set(hw, SUPPORTS_PS);
437 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
438 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
439 ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
440 ieee80211_hw_set(hw, CONNECTION_MONITOR);
441 ieee80211_hw_set(hw, CHANCTX_STA_CSA);
442 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
443 ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
444 ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
445 ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
446 if (iwl_mvm_has_new_rx_api(mvm))
447 ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
448
449 if (mvm->trans->num_rx_queues > 1)
450 ieee80211_hw_set(hw, USES_RSS);
451
452 if (mvm->trans->max_skb_frags)
453 hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
454
455 if (!iwl_mvm_is_dqa_supported(mvm))
456 hw->queues = mvm->first_agg_queue;
457 else
458 hw->queues = IEEE80211_MAX_QUEUES;
459 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
460 hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
461 IEEE80211_RADIOTAP_MCS_HAVE_STBC;
462 hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
463 IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
464 hw->rate_control_algorithm = "iwl-mvm-rs";
465 hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
466 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
467
468 BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 2);
469 memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
470 hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
471 hw->wiphy->cipher_suites = mvm->ciphers;
472
473 /*
474 * Enable 11w if advertised by firmware and software crypto
475 * is not enabled (as the firmware will interpret some mgmt
476 * packets, so enabling it with software crypto isn't safe)
477 */
478 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
479 !iwlwifi_mod_params.sw_crypto) {
480 ieee80211_hw_set(hw, MFP_CAPABLE);
481 mvm->ciphers[hw->wiphy->n_cipher_suites] =
482 WLAN_CIPHER_SUITE_AES_CMAC;
483 hw->wiphy->n_cipher_suites++;
484 }
485
486 /* currently FW API supports only one optional cipher scheme */
487 if (mvm->fw->cs[0].cipher) {
488 mvm->hw->n_cipher_schemes = 1;
489 mvm->hw->cipher_schemes = &mvm->fw->cs[0];
490 mvm->ciphers[hw->wiphy->n_cipher_suites] =
491 mvm->fw->cs[0].cipher;
492 hw->wiphy->n_cipher_suites++;
493 }
494
495 ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
496 hw->wiphy->features |=
497 NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
498 NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
499 NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
500
501 hw->sta_data_size = sizeof(struct iwl_mvm_sta);
502 hw->vif_data_size = sizeof(struct iwl_mvm_vif);
503 hw->chanctx_data_size = sizeof(u16);
504
505 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
506 BIT(NL80211_IFTYPE_P2P_CLIENT) |
507 BIT(NL80211_IFTYPE_AP) |
508 BIT(NL80211_IFTYPE_P2P_GO) |
509 BIT(NL80211_IFTYPE_P2P_DEVICE) |
510 BIT(NL80211_IFTYPE_ADHOC);
511
512 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
513 hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
514 if (iwl_mvm_is_lar_supported(mvm))
515 hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
516 else
517 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
518 REGULATORY_DISABLE_BEACON_HINTS;
519
520 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
521 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
522
523 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
524
525 hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
526 hw->wiphy->n_iface_combinations =
527 ARRAY_SIZE(iwl_mvm_iface_combinations);
528
529 hw->wiphy->max_remain_on_channel_duration = 10000;
530 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
531 /* we can compensate an offset of up to 3 channels = 15 MHz */
532 hw->wiphy->max_adj_channel_rssi_comp = 3 * 5;
533
534 /* Extract MAC address */
535 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
536 hw->wiphy->addresses = mvm->addresses;
537 hw->wiphy->n_addresses = 1;
538
539 /* Extract additional MAC addresses if available */
540 num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
541 min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;
542
543 for (i = 1; i < num_mac; i++) {
544 memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
545 ETH_ALEN);
546 mvm->addresses[i].addr[5]++;
547 hw->wiphy->n_addresses++;
548 }
549
550 iwl_mvm_reset_phy_ctxts(mvm);
551
552 hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
553
554 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
555
556 BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
557 BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
558 IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
559
560 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
561 mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
562 else
563 mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
564
565 if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
566 hw->wiphy->bands[NL80211_BAND_2GHZ] =
567 &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
568 if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) {
569 hw->wiphy->bands[NL80211_BAND_5GHZ] =
570 &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
571
572 if (fw_has_capa(&mvm->fw->ucode_capa,
573 IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
574 fw_has_api(&mvm->fw->ucode_capa,
575 IWL_UCODE_TLV_API_LQ_SS_PARAMS))
576 hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |=
577 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
578 }
579
580 hw->wiphy->hw_version = mvm->trans->hw_id;
581
582 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
583 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
584 else
585 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
586
587 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
588 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
589 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
590 /* we create the 802.11 header and zero length SSID IE. */
591 hw->wiphy->max_sched_scan_ie_len =
592 SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
593 hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
594 hw->wiphy->max_sched_scan_plan_interval = U16_MAX;
595
596 /*
 597  * the firmware uses a u8 for the number of iterations, but 0xff is
 598  * reserved to mean an infinite loop, so the maximum is actually 254.
599 */
600 hw->wiphy->max_sched_scan_plan_iterations = 254;
601
602 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
603 NL80211_FEATURE_LOW_PRIORITY_SCAN |
604 NL80211_FEATURE_P2P_GO_OPPPS |
605 NL80211_FEATURE_DYNAMIC_SMPS |
606 NL80211_FEATURE_STATIC_SMPS |
607 NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
608
609 if (fw_has_capa(&mvm->fw->ucode_capa,
610 IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
611 hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
612 if (fw_has_capa(&mvm->fw->ucode_capa,
613 IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
614 hw->wiphy->features |= NL80211_FEATURE_QUIET;
615
616 if (fw_has_capa(&mvm->fw->ucode_capa,
617 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
618 hw->wiphy->features |=
619 NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
620
621 if (fw_has_capa(&mvm->fw->ucode_capa,
622 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
623 hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
624
625 mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
626
627 #ifdef CONFIG_PM_SLEEP
628 if (iwl_mvm_is_d0i3_supported(mvm) &&
629 device_can_wakeup(mvm->trans->dev)) {
630 mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
631 hw->wiphy->wowlan = &mvm->wowlan;
632 }
633
634 if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
635 mvm->trans->ops->d3_suspend &&
636 mvm->trans->ops->d3_resume &&
637 device_can_wakeup(mvm->trans->dev)) {
638 mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
639 WIPHY_WOWLAN_DISCONNECT |
640 WIPHY_WOWLAN_EAP_IDENTITY_REQ |
641 WIPHY_WOWLAN_RFKILL_RELEASE |
642 WIPHY_WOWLAN_NET_DETECT;
643 if (!iwlwifi_mod_params.sw_crypto)
644 mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
645 WIPHY_WOWLAN_GTK_REKEY_FAILURE |
646 WIPHY_WOWLAN_4WAY_HANDSHAKE;
647
648 mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
649 mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
650 mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
651 mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
652 mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
653 hw->wiphy->wowlan = &mvm->wowlan;
654 }
655 #endif
656
657 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
658 /* assign default bcast filtering configuration */
659 mvm->bcast_filters = iwl_mvm_default_bcast_filters;
660 #endif
661
662 ret = iwl_mvm_leds_init(mvm);
663 if (ret)
664 return ret;
665
666 if (fw_has_capa(&mvm->fw->ucode_capa,
667 IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
668 IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
669 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
670 ieee80211_hw_set(hw, TDLS_WIDER_BW);
671 }
672
673 if (fw_has_capa(&mvm->fw->ucode_capa,
674 IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
675 IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
676 hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
677 }
678
679 hw->netdev_features |= mvm->cfg->features;
680 if (!iwl_mvm_is_csum_supported(mvm)) {
681 hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS |
682 NETIF_F_RXCSUM);
683 /* We may support SW TX CSUM */
684 if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
685 hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS;
686 }
687
688 ret = ieee80211_register_hw(mvm->hw);
689 if (ret)
690 iwl_mvm_leds_exit(mvm);
691
692 return ret;
693 }
694
695 static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
696 struct ieee80211_sta *sta,
697 struct sk_buff *skb)
698 {
699 struct iwl_mvm_sta *mvmsta;
700 bool defer = false;
701
702 /*
 703  * Check the IN_D0I3 flag both before and after taking the
 704  * spinlock: the early check avoids taking the lock when it is
 705  * not needed, and re-checking under the lock closes the race.
706 */
707 if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
708 return false;
709
710 spin_lock(&mvm->d0i3_tx_lock);
711 /*
712 * testing the flag again ensures the skb dequeue
713 * loop (on d0i3 exit) hasn't run yet.
714 */
715 if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
716 goto out;
717
718 mvmsta = iwl_mvm_sta_from_mac80211(sta);
719 if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
720 mvmsta->sta_id != mvm->d0i3_ap_sta_id)
721 goto out;
722
723 __skb_queue_tail(&mvm->d0i3_tx, skb);
724 ieee80211_stop_queues(mvm->hw);
725
726 /* trigger wakeup */
727 iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
728 iwl_mvm_unref(mvm, IWL_MVM_REF_TX);
729
730 defer = true;
731 out:
732 spin_unlock(&mvm->d0i3_tx_lock);
733 return defer;
734 }
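/*
 * A note on the "trigger wakeup" step in iwl_mvm_defer_tx() above: the
 * back-to-back iwl_mvm_ref()/iwl_mvm_unref() pair is not a no-op. Taking
 * the reference kicks the transport into starting D0i3 exit, so the frame
 * queued on d0i3_tx just before it is sent by the d0i3 exit work once the
 * device is awake again.
 */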
735
736 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
737 struct ieee80211_tx_control *control,
738 struct sk_buff *skb)
739 {
740 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
741 struct ieee80211_sta *sta = control->sta;
742 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
743 struct ieee80211_hdr *hdr = (void *)skb->data;
744
745 if (iwl_mvm_is_radio_killed(mvm)) {
746 IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
747 goto drop;
748 }
749
750 if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
751 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
752 !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
753 goto drop;
754
755 /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
756 if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
757 ieee80211_is_mgmt(hdr->frame_control) &&
758 !ieee80211_is_deauth(hdr->frame_control) &&
759 !ieee80211_is_disassoc(hdr->frame_control) &&
760 !ieee80211_is_action(hdr->frame_control)))
761 sta = NULL;
762
763 if (sta) {
764 if (iwl_mvm_defer_tx(mvm, sta, skb))
765 return;
766 if (iwl_mvm_tx_skb(mvm, skb, sta))
767 goto drop;
768 return;
769 }
770
771 if (iwl_mvm_tx_skb_non_sta(mvm, skb))
772 goto drop;
773 return;
774 drop:
775 ieee80211_free_txskb(hw, skb);
776 }
777
778 static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
779 {
780 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
781 return false;
782 return true;
783 }
784
785 static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
786 {
787 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
788 return false;
789 if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
790 return true;
791
792 /* enabled by default */
793 return true;
794 }
795
796 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
797 do { \
798 if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \
799 break; \
800 iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \
801 } while (0)
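/*
 * CHECK_BA_TRIGGER() only collects the firmware debug trigger when the
 * trigger's TID bitmap (_tid_bm) has the bit for this TID set; otherwise
 * the block-ack event is ignored.
 */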
802
803 static void
804 iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
805 struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
806 enum ieee80211_ampdu_mlme_action action)
807 {
808 struct iwl_fw_dbg_trigger_tlv *trig;
809 struct iwl_fw_dbg_trigger_ba *ba_trig;
810
811 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
812 return;
813
814 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
815 ba_trig = (void *)trig->data;
816
817 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
818 return;
819
820 switch (action) {
821 case IEEE80211_AMPDU_TX_OPERATIONAL: {
822 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
823 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
824
825 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
826 "TX AGG START: MAC %pM tid %d ssn %d\n",
827 sta->addr, tid, tid_data->ssn);
828 break;
829 }
830 case IEEE80211_AMPDU_TX_STOP_CONT:
831 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
832 "TX AGG STOP: MAC %pM tid %d\n",
833 sta->addr, tid);
834 break;
835 case IEEE80211_AMPDU_RX_START:
836 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
837 "RX AGG START: MAC %pM tid %d ssn %d\n",
838 sta->addr, tid, rx_ba_ssn);
839 break;
840 case IEEE80211_AMPDU_RX_STOP:
841 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
842 "RX AGG STOP: MAC %pM tid %d\n",
843 sta->addr, tid);
844 break;
845 default:
846 break;
847 }
848 }
849
850 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
851 struct ieee80211_vif *vif,
852 struct ieee80211_ampdu_params *params)
853 {
854 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
855 int ret;
856 bool tx_agg_ref = false;
857 struct ieee80211_sta *sta = params->sta;
858 enum ieee80211_ampdu_mlme_action action = params->action;
859 u16 tid = params->tid;
860 u16 *ssn = &params->ssn;
861 u8 buf_size = params->buf_size;
862 bool amsdu = params->amsdu;
863 u16 timeout = params->timeout;
864
865 IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
866 sta->addr, tid, action);
867
868 if (!(mvm->nvm_data->sku_cap_11n_enable))
869 return -EACCES;
870
871 /* return from D0i3 before starting a new Tx aggregation */
872 switch (action) {
873 case IEEE80211_AMPDU_TX_START:
874 case IEEE80211_AMPDU_TX_STOP_CONT:
875 case IEEE80211_AMPDU_TX_STOP_FLUSH:
876 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
877 case IEEE80211_AMPDU_TX_OPERATIONAL:
878 /*
879 * for tx start, wait synchronously until D0i3 exit to
880 * get the correct sequence number for the tid.
881 * additionally, some other ampdu actions use direct
882 * target access, which is not handled automatically
883 * by the trans layer (unlike commands), so wait for
884 * d0i3 exit in these cases as well.
885 */
886 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG);
887 if (ret)
888 return ret;
889
890 tx_agg_ref = true;
891 break;
892 default:
893 break;
894 }
895
896 mutex_lock(&mvm->mutex);
897
898 switch (action) {
899 case IEEE80211_AMPDU_RX_START:
900 if (!iwl_enable_rx_ampdu(mvm->cfg)) {
901 ret = -EINVAL;
902 break;
903 }
904 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size,
905 timeout);
906 break;
907 case IEEE80211_AMPDU_RX_STOP:
908 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size,
909 timeout);
910 break;
911 case IEEE80211_AMPDU_TX_START:
912 if (!iwl_enable_tx_ampdu(mvm->cfg)) {
913 ret = -EINVAL;
914 break;
915 }
916 ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
917 break;
918 case IEEE80211_AMPDU_TX_STOP_CONT:
919 ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
920 break;
921 case IEEE80211_AMPDU_TX_STOP_FLUSH:
922 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
923 ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
924 break;
925 case IEEE80211_AMPDU_TX_OPERATIONAL:
926 ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid,
927 buf_size, amsdu);
928 break;
929 default:
930 WARN_ON_ONCE(1);
931 ret = -EINVAL;
932 break;
933 }
934
935 if (!ret) {
936 u16 rx_ba_ssn = 0;
937
938 if (action == IEEE80211_AMPDU_RX_START)
939 rx_ba_ssn = *ssn;
940
941 iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
942 rx_ba_ssn, action);
943 }
944 mutex_unlock(&mvm->mutex);
945
946 /*
947 * If the tid is marked as started, we won't use it for offloaded
948 * traffic on the next D0i3 entry. It's safe to unref.
949 */
950 if (tx_agg_ref)
951 iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
952
953 return ret;
954 }
955
956 static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
957 struct ieee80211_vif *vif)
958 {
959 struct iwl_mvm *mvm = data;
960 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
961
962 mvmvif->uploaded = false;
963 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
964
965 spin_lock_bh(&mvm->time_event_lock);
966 iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
967 spin_unlock_bh(&mvm->time_event_lock);
968
969 mvmvif->phy_ctxt = NULL;
970 memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
971 }
972
973 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
974 {
 975 /* clear the D3 reconfig flag; we only need it to avoid dumping a
 976  * firmware coredump on reconfiguration, since we shouldn't do that
 977  * on a D3->D0 transition
978 */
979 if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
980 mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert;
981 iwl_mvm_fw_error_dump(mvm);
982 }
983
984 /* cleanup all stale references (scan, roc), but keep the
985 * ucode_down ref until reconfig is complete
986 */
987 iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
988
989 iwl_mvm_stop_device(mvm);
990
991 mvm->scan_status = 0;
992 mvm->ps_disabled = false;
993 mvm->calibrating = false;
994
995 /* just in case one was running */
996 iwl_mvm_cleanup_roc_te(mvm);
997 ieee80211_remain_on_channel_expired(mvm->hw);
998
999 /*
1000 * cleanup all interfaces, even inactive ones, as some might have
1001 * gone down during the HW restart
1002 */
1003 ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
1004
1005 mvm->p2p_device_vif = NULL;
1006 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1007
1008 iwl_mvm_reset_phy_ctxts(mvm);
1009 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
1010 memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
1011 memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
1012 memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
1013 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
1014 memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
1015 memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
1016 memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old));
1017 memset(&mvm->bt_ack_kill_msk, 0, sizeof(mvm->bt_ack_kill_msk));
1018 memset(&mvm->bt_cts_kill_msk, 0, sizeof(mvm->bt_cts_kill_msk));
1019
1020 ieee80211_wake_queues(mvm->hw);
1021
1022 /* clear any stale d0i3 state */
1023 clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1024
1025 mvm->vif_count = 0;
1026 mvm->rx_ba_sessions = 0;
1027 mvm->fw_dbg_conf = FW_DBG_INVALID;
1028
1029 /* keep statistics ticking */
1030 iwl_mvm_accu_radio_stats(mvm);
1031 }
1032
1033 int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
1034 {
1035 int ret;
1036
1037 lockdep_assert_held(&mvm->mutex);
1038
1039 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1040 /* Clean up some internal and mac80211 state on restart */
1041 iwl_mvm_restart_cleanup(mvm);
1042 } else {
1043 /* Hold the reference to prevent runtime suspend while
1044 * the start procedure runs. It's a bit confusing
1045 * that the UCODE_DOWN reference is taken, but it just
1046  * means "UCODE is not UP yet". (TODO: rename this
1047 * reference).
1048 */
1049 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1050 }
1051 ret = iwl_mvm_up(mvm);
1052
1053 if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1054 /* Something went wrong - we need to finish some cleanup
1055 * that normally iwl_mvm_mac_restart_complete() below
1056 * would do.
1057 */
1058 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1059 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1060 }
1061
1062 return ret;
1063 }
1064
1065 static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
1066 {
1067 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1068 int ret;
1069
1070 /* Some hw restart cleanups must not hold the mutex */
1071 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1072 /*
1073 * Make sure we are out of d0i3. This is needed
1074 * to make sure the reference accounting is correct
1075 * (and there is no stale d0i3_exit_work).
1076 */
1077 wait_event_timeout(mvm->d0i3_exit_waitq,
1078 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1079 &mvm->status),
1080 HZ);
1081 }
1082
1083 mutex_lock(&mvm->mutex);
1084 ret = __iwl_mvm_mac_start(mvm);
1085 mutex_unlock(&mvm->mutex);
1086
1087 return ret;
1088 }
1089
1090 static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
1091 {
1092 int ret;
1093
1094 mutex_lock(&mvm->mutex);
1095
1096 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1097 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1098 ret = iwl_mvm_update_quotas(mvm, true, NULL);
1099 if (ret)
1100 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
1101 ret);
1102
1103 /* allow transport/FW low power modes */
1104 iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
1105
1106 /*
1107 * If we have TDLS peers, remove them. We don't know the last seqno/PN
1108 * of packets the FW sent out, so we must reconnect.
1109 */
1110 iwl_mvm_teardown_tdls_peers(mvm);
1111
1112 mutex_unlock(&mvm->mutex);
1113 }
1114
1115 static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
1116 {
1117 if (iwl_mvm_is_d0i3_supported(mvm) &&
1118 iwl_mvm_enter_d0i3_on_suspend(mvm))
1119 WARN_ONCE(!wait_event_timeout(mvm->d0i3_exit_waitq,
1120 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1121 &mvm->status),
1122 HZ),
1123 "D0i3 exit on resume timed out\n");
1124 }
1125
1126 static void
1127 iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
1128 enum ieee80211_reconfig_type reconfig_type)
1129 {
1130 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1131
1132 switch (reconfig_type) {
1133 case IEEE80211_RECONFIG_TYPE_RESTART:
1134 iwl_mvm_restart_complete(mvm);
1135 break;
1136 case IEEE80211_RECONFIG_TYPE_SUSPEND:
1137 iwl_mvm_resume_complete(mvm);
1138 break;
1139 }
1140 }
1141
1142 void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
1143 {
1144 lockdep_assert_held(&mvm->mutex);
1145
1146 /* firmware counters are obviously reset now, but we shouldn't
1147  * track them partially, so also clear the accumulated radio stats.
1148 */
1149 memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));
1150
1151 /* async_handlers_wk is now blocked */
1152
1153 /*
1154 * The work item could be running or queued if the
1155 * ROC time event stops just as we get here.
1156 */
1157 flush_work(&mvm->roc_done_wk);
1158
1159 iwl_mvm_stop_device(mvm);
1160
1161 iwl_mvm_async_handlers_purge(mvm);
1162 /* async_handlers_list is empty and will stay empty: HW is stopped */
1163
1164 /* the fw is stopped, the aux sta is dead: clean up driver state */
1165 iwl_mvm_del_aux_sta(mvm);
1166
1167 iwl_free_fw_paging(mvm);
1168
1169 /*
1170 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
1171 * won't be called in this case).
1172 * But make sure to cleanup interfaces that have gone down before/during
1173 * HW restart was requested.
1174 */
1175 if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1176 ieee80211_iterate_interfaces(mvm->hw, 0,
1177 iwl_mvm_cleanup_iterator, mvm);
1178
1179 /* We shouldn't have any UIDs still set. Loop over all the UIDs to
1180 * make sure there's nothing left there and warn if any is found.
1181 */
1182 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1183 int i;
1184
1185 for (i = 0; i < mvm->max_scans; i++) {
1186 if (WARN_ONCE(mvm->scan_uid_status[i],
1187 "UMAC scan UID %d status was not cleaned\n",
1188 i))
1189 mvm->scan_uid_status[i] = 0;
1190 }
1191 }
1192 }
1193
1194 static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
1195 {
1196 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1197
1198 flush_work(&mvm->d0i3_exit_work);
1199 flush_work(&mvm->async_handlers_wk);
1200 flush_work(&mvm->add_stream_wk);
1201 cancel_delayed_work_sync(&mvm->fw_dump_wk);
1202 iwl_mvm_free_fw_dump_desc(mvm);
1203
1204 mutex_lock(&mvm->mutex);
1205 __iwl_mvm_mac_stop(mvm);
1206 mutex_unlock(&mvm->mutex);
1207
1208 /*
1209 * The worker might have been waiting for the mutex, let it run and
1210 * discover that its list is now empty.
1211 */
1212 cancel_work_sync(&mvm->async_handlers_wk);
1213 }
1214
1215 static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
1216 {
1217 u16 i;
1218
1219 lockdep_assert_held(&mvm->mutex);
1220
1221 for (i = 0; i < NUM_PHY_CTX; i++)
1222 if (!mvm->phy_ctxts[i].ref)
1223 return &mvm->phy_ctxts[i];
1224
1225 IWL_ERR(mvm, "No available PHY context\n");
1226 return NULL;
1227 }
1228
1229 static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1230 s16 tx_power)
1231 {
1232 struct iwl_dev_tx_power_cmd cmd = {
1233 .v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
1234 .v2.mac_context_id =
1235 cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
1236 .v2.pwr_restriction = cpu_to_le16(8 * tx_power),
1237 };
1238 int len = sizeof(cmd);
1239
1240 if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
1241 cmd.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
1242
1243 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_CHAIN))
1244 len = sizeof(cmd.v2);
1245
1246 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
1247 }
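/*
 * Note on units (an assumption inferred from the scaling above, not stated
 * in this file): pwr_restriction appears to be encoded in 1/8 dBm steps,
 * which is why the dBm value handed in by mac80211 is multiplied by 8.
 */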
1248
1249 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
1250 struct ieee80211_vif *vif)
1251 {
1252 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1253 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1254 int ret;
1255
1256 mvmvif->mvm = mvm;
1257
1258 /*
1259 * make sure D0i3 exit is completed, otherwise a target access
1260 * during tx queue configuration could be done when still in
1261 * D0i3 state.
1262 */
1263 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF);
1264 if (ret)
1265 return ret;
1266
1267 /*
1268 * Not much to do here. The stack will not allow interface
1269 * types or combinations that we didn't advertise, so we
1270 * don't really have to check the types.
1271 */
1272
1273 mutex_lock(&mvm->mutex);
1274
1275 /* make sure that beacon statistics don't go backwards with FW reset */
1276 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1277 mvmvif->beacon_stats.accu_num_beacons +=
1278 mvmvif->beacon_stats.num_beacons;
1279
1280 /* Allocate resources for the MAC context, and add it to the fw */
1281 ret = iwl_mvm_mac_ctxt_init(mvm, vif);
1282 if (ret)
1283 goto out_unlock;
1284
1285 /* Counting number of interfaces is needed for legacy PM */
1286 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1287 mvm->vif_count++;
1288
1289 /*
1290 * The AP binding flow can be done only after the beacon
1291 * template is configured (which happens only in the mac80211
1292 * start_ap() flow), and adding the broadcast station can happen
1293 * only after the binding.
1294 * In addition, since modifying the MAC before adding a bcast
1295 * station is not allowed by the FW, delay the adding of MAC context to
1296 * the point where we can also add the bcast station.
1297 * In short: there's not much we can do at this point, other than
1298 * allocating resources :)
1299 */
1300 if (vif->type == NL80211_IFTYPE_AP ||
1301 vif->type == NL80211_IFTYPE_ADHOC) {
1302 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
1303 if (ret) {
1304 IWL_ERR(mvm, "Failed to allocate bcast sta\n");
1305 goto out_release;
1306 }
1307
1308 iwl_mvm_vif_dbgfs_register(mvm, vif);
1309 goto out_unlock;
1310 }
1311
1312 mvmvif->features |= hw->netdev_features;
1313
1314 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
1315 if (ret)
1316 goto out_release;
1317
1318 ret = iwl_mvm_power_update_mac(mvm);
1319 if (ret)
1320 goto out_remove_mac;
1321
1322 /* beacon filtering */
1323 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
1324 if (ret)
1325 goto out_remove_mac;
1326
1327 if (!mvm->bf_allowed_vif &&
1328 vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
1329 mvm->bf_allowed_vif = mvmvif;
1330 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
1331 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
1332 }
1333
1334 /*
1335 * P2P_DEVICE interface does not have a channel context assigned to it,
1336 * so a dedicated PHY context is allocated to it and the corresponding
1337 * MAC context is bound to it at this stage.
1338 */
1339 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1340
1341 mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
1342 if (!mvmvif->phy_ctxt) {
1343 ret = -ENOSPC;
1344 goto out_free_bf;
1345 }
1346
1347 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
1348 ret = iwl_mvm_binding_add_vif(mvm, vif);
1349 if (ret)
1350 goto out_unref_phy;
1351
1352 ret = iwl_mvm_add_bcast_sta(mvm, vif);
1353 if (ret)
1354 goto out_unbind;
1355
1356 /* Save a pointer to p2p device vif, so it can later be used to
1357 * update the p2p device MAC when a GO is started/stopped */
1358 mvm->p2p_device_vif = vif;
1359 }
1360
1361 iwl_mvm_vif_dbgfs_register(mvm, vif);
1362 goto out_unlock;
1363
1364 out_unbind:
1365 iwl_mvm_binding_remove_vif(mvm, vif);
1366 out_unref_phy:
1367 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1368 out_free_bf:
1369 if (mvm->bf_allowed_vif == mvmvif) {
1370 mvm->bf_allowed_vif = NULL;
1371 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1372 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1373 }
1374 out_remove_mac:
1375 mvmvif->phy_ctxt = NULL;
1376 iwl_mvm_mac_ctxt_remove(mvm, vif);
1377 out_release:
1378 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1379 mvm->vif_count--;
1380
1381 iwl_mvm_mac_ctxt_release(mvm, vif);
1382 out_unlock:
1383 mutex_unlock(&mvm->mutex);
1384
1385 iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF);
1386
1387 return ret;
1388 }
1389
1390 static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
1391 struct ieee80211_vif *vif)
1392 {
1393 u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);
1394
1395 if (tfd_msk) {
1396 /*
1397 * mac80211 first removes all the stations of the vif and
1398 * then removes the vif. When it removes a station it also
1399 * flushes the AMPDU session. So by now, all the AMPDU sessions
1400 * of all the stations of this vif are closed, and the queues
1401 * of these AMPDU sessions are properly closed.
1402 * We still need to take care of the shared queues of the vif.
1403 * Flush them here.
1404 */
1405 mutex_lock(&mvm->mutex);
1406 iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
1407 mutex_unlock(&mvm->mutex);
1408
1409 /*
1410 * There are transports that buffer a few frames in the host.
1411 * For these, the flush above isn't enough since while we were
1412 * flushing, the transport might have sent more frames to the
1413 * device. To solve this, wait here until the transport is
1414 * empty. Technically, this could have replaced the flush
1415 * above, but flush is much faster than draining. So flush
1416 * first, and drain to make sure we have no frames in the
1417 * transport anymore.
1418 * If a station still had frames on the shared queues, it is
1419 * already marked as draining, so to complete the draining, we
1420 * just need to wait until the transport is empty.
1421 */
1422 iwl_trans_wait_tx_queue_empty(mvm->trans, tfd_msk);
1423 }
1424
1425 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1426 /*
1427 * Flush the ROC worker which will flush the OFFCHANNEL queue.
1428 * We assume here that all the packets sent to the OFFCHANNEL
1429 * queue are sent in ROC session.
1430 */
1431 flush_work(&mvm->roc_done_wk);
1432 } else {
1433 /*
1434 * By now, all the AC queues are empty. The AGG queues are
1435 * empty too. We already got all the Tx responses for all the
1436  * packets in the queues. The drain work may have been
1437 * triggered. Flush it.
1438 */
1439 flush_work(&mvm->sta_drained_wk);
1440 }
1441 }
1442
1443 static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
1444 struct ieee80211_vif *vif)
1445 {
1446 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1447 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1448
1449 iwl_mvm_prepare_mac_removal(mvm, vif);
1450
1451 mutex_lock(&mvm->mutex);
1452
1453 if (mvm->bf_allowed_vif == mvmvif) {
1454 mvm->bf_allowed_vif = NULL;
1455 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1456 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1457 }
1458
1459 iwl_mvm_vif_dbgfs_clean(mvm, vif);
1460
1461 /*
1462 * For AP/GO interface, the tear down of the resources allocated to the
1463  * interface is handled as part of the stop_ap flow.
1464 */
1465 if (vif->type == NL80211_IFTYPE_AP ||
1466 vif->type == NL80211_IFTYPE_ADHOC) {
1467 #ifdef CONFIG_NL80211_TESTMODE
1468 if (vif == mvm->noa_vif) {
1469 mvm->noa_vif = NULL;
1470 mvm->noa_duration = 0;
1471 }
1472 #endif
1473 iwl_mvm_dealloc_bcast_sta(mvm, vif);
1474 goto out_release;
1475 }
1476
1477 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1478 mvm->p2p_device_vif = NULL;
1479 iwl_mvm_rm_bcast_sta(mvm, vif);
1480 iwl_mvm_binding_remove_vif(mvm, vif);
1481 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1482 mvmvif->phy_ctxt = NULL;
1483 }
1484
1485 if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
1486 mvm->vif_count--;
1487
1488 iwl_mvm_power_update_mac(mvm);
1489 iwl_mvm_mac_ctxt_remove(mvm, vif);
1490
1491 out_release:
1492 iwl_mvm_mac_ctxt_release(mvm, vif);
1493 mutex_unlock(&mvm->mutex);
1494 }
1495
1496 static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
1497 {
1498 return 0;
1499 }
1500
1501 struct iwl_mvm_mc_iter_data {
1502 struct iwl_mvm *mvm;
1503 int port_id;
1504 };
1505
1506 static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
1507 struct ieee80211_vif *vif)
1508 {
1509 struct iwl_mvm_mc_iter_data *data = _data;
1510 struct iwl_mvm *mvm = data->mvm;
1511 struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
1512 int ret, len;
1513
1514 /* if we don't have free ports, mcast frames will be dropped */
1515 if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
1516 return;
1517
1518 if (vif->type != NL80211_IFTYPE_STATION ||
1519 !vif->bss_conf.assoc)
1520 return;
1521
1522 cmd->port_id = data->port_id++;
1523 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
1524 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
1525
1526 ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
1527 if (ret)
1528 IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
1529 }
1530
1531 static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
1532 {
1533 struct iwl_mvm_mc_iter_data iter_data = {
1534 .mvm = mvm,
1535 };
1536
1537 lockdep_assert_held(&mvm->mutex);
1538
1539 if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
1540 return;
1541
1542 ieee80211_iterate_active_interfaces_atomic(
1543 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1544 iwl_mvm_mc_iface_iterator, &iter_data);
1545 }
1546
1547 static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
1548 struct netdev_hw_addr_list *mc_list)
1549 {
1550 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1551 struct iwl_mcast_filter_cmd *cmd;
1552 struct netdev_hw_addr *addr;
1553 int addr_count;
1554 bool pass_all;
1555 int len;
1556
1557 addr_count = netdev_hw_addr_list_count(mc_list);
1558 pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES ||
1559 IWL_MVM_FW_MCAST_FILTER_PASS_ALL;
1560 if (pass_all)
1561 addr_count = 0;
1562
1563 len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
1564 cmd = kzalloc(len, GFP_ATOMIC);
1565 if (!cmd)
1566 return 0;
1567
1568 if (pass_all) {
1569 cmd->pass_all = 1;
1570 return (u64)(unsigned long)cmd;
1571 }
1572
1573 netdev_hw_addr_list_for_each(addr, mc_list) {
1574 IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
1575 cmd->count, addr->addr);
1576 memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
1577 addr->addr, ETH_ALEN);
1578 cmd->count++;
1579 }
1580
1581 return (u64)(unsigned long)cmd;
1582 }
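/*
 * The returned u64 is simply the allocated command pointer cast to an
 * opaque cookie; mac80211 hands it back as the 'multicast' argument of
 * iwl_mvm_configure_filter() below, which takes ownership of the
 * allocation and frees any previously stored filter command.
 */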
1583
1584 static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
1585 unsigned int changed_flags,
1586 unsigned int *total_flags,
1587 u64 multicast)
1588 {
1589 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1590 struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
1591
1592 mutex_lock(&mvm->mutex);
1593
1594 /* replace previous configuration */
1595 kfree(mvm->mcast_filter_cmd);
1596 mvm->mcast_filter_cmd = cmd;
1597
1598 if (!cmd)
1599 goto out;
1600
1601 iwl_mvm_recalc_multicast(mvm);
1602 out:
1603 mutex_unlock(&mvm->mutex);
1604 *total_flags = 0;
1605 }
1606
1607 static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
1608 struct ieee80211_vif *vif,
1609 unsigned int filter_flags,
1610 unsigned int changed_flags)
1611 {
1612 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1613
1614 /* We only support filtering of probe requests */
1615 if (!(changed_flags & FIF_PROBE_REQ))
1616 return;
1617
1618 /* Supported only for p2p client interfaces */
1619 if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
1620 !vif->p2p)
1621 return;
1622
1623 mutex_lock(&mvm->mutex);
1624 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
1625 mutex_unlock(&mvm->mutex);
1626 }
1627
1628 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
1629 struct iwl_bcast_iter_data {
1630 struct iwl_mvm *mvm;
1631 struct iwl_bcast_filter_cmd *cmd;
1632 u8 current_filter;
1633 };
1634
1635 static void
1636 iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
1637 const struct iwl_fw_bcast_filter *in_filter,
1638 struct iwl_fw_bcast_filter *out_filter)
1639 {
1640 struct iwl_fw_bcast_filter_attr *attr;
1641 int i;
1642
1643 memcpy(out_filter, in_filter, sizeof(*out_filter));
1644
1645 for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
1646 attr = &out_filter->attrs[i];
1647
1648 if (!attr->mask)
1649 break;
1650
1651 switch (attr->reserved1) {
1652 case cpu_to_le16(BC_FILTER_MAGIC_IP):
1653 if (vif->bss_conf.arp_addr_cnt != 1) {
1654 attr->mask = 0;
1655 continue;
1656 }
1657
1658 attr->val = vif->bss_conf.arp_addr_list[0];
1659 break;
1660 case cpu_to_le16(BC_FILTER_MAGIC_MAC):
1661 attr->val = *(__be32 *)&vif->addr[2];
1662 break;
1663 default:
1664 break;
1665 }
1666 attr->reserved1 = 0;
1667 out_filter->num_attrs++;
1668 }
1669 }
1670
1671 static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
1672 struct ieee80211_vif *vif)
1673 {
1674 struct iwl_bcast_iter_data *data = _data;
1675 struct iwl_mvm *mvm = data->mvm;
1676 struct iwl_bcast_filter_cmd *cmd = data->cmd;
1677 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1678 struct iwl_fw_bcast_mac *bcast_mac;
1679 int i;
1680
1681 if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
1682 return;
1683
1684 bcast_mac = &cmd->macs[mvmvif->id];
1685
1686 /*
1687 * enable filtering only for associated stations, but not for P2P
1688 * Clients
1689 */
1690 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
1691 !vif->bss_conf.assoc)
1692 return;
1693
1694 bcast_mac->default_discard = 1;
1695
1696 /* copy all configured filters */
1697 for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
1698 /*
1699  * Make sure we don't exceed our filter limit.
1700  * If there is still a valid filter to be configured,
1701  * be on the safe side and just allow bcast for this mac.
1702 */
1703 if (WARN_ON_ONCE(data->current_filter >=
1704 ARRAY_SIZE(cmd->filters))) {
1705 bcast_mac->default_discard = 0;
1706 bcast_mac->attached_filters = 0;
1707 break;
1708 }
1709
1710 iwl_mvm_set_bcast_filter(vif,
1711 &mvm->bcast_filters[i],
1712 &cmd->filters[data->current_filter]);
1713
1714 /* skip current filter if it contains no attributes */
1715 if (!cmd->filters[data->current_filter].num_attrs)
1716 continue;
1717
1718 /* attach the filter to current mac */
1719 bcast_mac->attached_filters |=
1720 cpu_to_le16(BIT(data->current_filter));
1721
1722 data->current_filter++;
1723 }
1724 }
1725
1726 bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
1727 struct iwl_bcast_filter_cmd *cmd)
1728 {
1729 struct iwl_bcast_iter_data iter_data = {
1730 .mvm = mvm,
1731 .cmd = cmd,
1732 };
1733
1734 if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
1735 return false;
1736
1737 memset(cmd, 0, sizeof(*cmd));
1738 cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
1739 cmd->max_macs = ARRAY_SIZE(cmd->macs);
1740
1741 #ifdef CONFIG_IWLWIFI_DEBUGFS
1742 /* use debugfs filters/macs if override is configured */
1743 if (mvm->dbgfs_bcast_filtering.override) {
1744 memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
1745 sizeof(cmd->filters));
1746 memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
1747 sizeof(cmd->macs));
1748 return true;
1749 }
1750 #endif
1751
1752 /* if no filters are configured, do nothing */
1753 if (!mvm->bcast_filters)
1754 return false;
1755
1756 /* configure and attach these filters for each associated sta vif */
1757 ieee80211_iterate_active_interfaces(
1758 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1759 iwl_mvm_bcast_filter_iterator, &iter_data);
1760
1761 return true;
1762 }
1763
1764 static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
1765 {
1766 struct iwl_bcast_filter_cmd cmd;
1767
1768 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
1769 return 0;
1770
1771 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
1772 return 0;
1773
1774 return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
1775 sizeof(cmd), &cmd);
1776 }
1777 #else
1778 static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
1779 {
1780 return 0;
1781 }
1782 #endif
1783
1784 static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
1785 struct ieee80211_vif *vif)
1786 {
1787 struct iwl_mu_group_mgmt_cmd cmd = {};
1788
1789 memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership,
1790 WLAN_MEMBERSHIP_LEN);
1791 memcpy(cmd.user_position, vif->bss_conf.mu_group.position,
1792 WLAN_USER_POSITION_LEN);
1793
1794 return iwl_mvm_send_cmd_pdu(mvm,
1795 WIDE_ID(DATA_PATH_GROUP,
1796 UPDATE_MU_GROUPS_CMD),
1797 0, sizeof(cmd), &cmd);
1798 }
1799
1800 static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac,
1801 struct ieee80211_vif *vif)
1802 {
1803 if (vif->mu_mimo_owner) {
1804 struct iwl_mu_group_mgmt_notif *notif = _data;
1805
1806 /*
1807 * MU-MIMO Group Id action frame is little endian. We treat
1808 * the data received from firmware as if it came from the
1809 * action frame, so no conversion is needed.
1810 */
1811 ieee80211_update_mu_groups(vif,
1812 (u8 *)&notif->membership_status,
1813 (u8 *)&notif->user_position);
1814 }
1815 }
1816
1817 void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
1818 struct iwl_rx_cmd_buffer *rxb)
1819 {
1820 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1821 struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;
1822
1823 ieee80211_iterate_active_interfaces_atomic(
1824 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1825 iwl_mvm_mu_mimo_iface_iterator, notif);
1826 }
1827
1828 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
1829 struct ieee80211_vif *vif,
1830 struct ieee80211_bss_conf *bss_conf,
1831 u32 changes)
1832 {
1833 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1834 int ret;
1835
1836 /*
1837 * Re-calculate the tsf id, as the master-slave relations depend on the
1838 * beacon interval, which was not known when the station interface was
1839 * added.
1840 */
1841 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
1842 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
1843
1844 if (changes & BSS_CHANGED_ASSOC && !bss_conf->assoc &&
1845 mvmvif->lqm_active)
1846 iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_STOP_MEASUREMENT,
1847 0, 0);
1848
1849 /*
1850 * If we're not associated yet, take the (new) BSSID before associating
1851 * so the firmware knows. If we're already associated, then use the old
1852 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC
1853 * branch for disassociation below.
1854 */
1855 if (changes & BSS_CHANGED_BSSID && !mvmvif->associated)
1856 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
1857
1858 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid);
1859 if (ret)
1860 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
1861
1862 /* after sending it once, adopt mac80211 data */
1863 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
1864 mvmvif->associated = bss_conf->assoc;
1865
1866 if (changes & BSS_CHANGED_ASSOC) {
1867 if (bss_conf->assoc) {
1868 /* clear statistics to get clean beacon counter */
1869 iwl_mvm_request_statistics(mvm, true);
1870 memset(&mvmvif->beacon_stats, 0,
1871 sizeof(mvmvif->beacon_stats));
1872
1873 /* add quota for this interface */
1874 ret = iwl_mvm_update_quotas(mvm, true, NULL);
1875 if (ret) {
1876 IWL_ERR(mvm, "failed to update quotas\n");
1877 return;
1878 }
1879
1880 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
1881 &mvm->status)) {
1882 /*
1883 * If we're restarting then the firmware will
1884 * obviously have lost synchronisation with
1885 * the AP. It will attempt to synchronise by
1886 * itself, but we can make it more reliable by
1887 * scheduling a session protection time event.
1888 *
1889 * The firmware needs to receive a beacon to
1890 * catch up with synchronisation, so use 110% of
1891 * the beacon interval.
1892 *
1893 * Set a large maximum delay to allow for more
1894 * than a single interface.
1895 */
1896 u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
1897 iwl_mvm_protect_session(mvm, vif, dur, dur,
1898 5 * dur, false);
1899 }
1900
1901 iwl_mvm_sf_update(mvm, vif, false);
1902 iwl_mvm_power_vif_assoc(mvm, vif);
1903 if (vif->p2p) {
1904 iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
1905 iwl_mvm_update_smps(mvm, vif,
1906 IWL_MVM_SMPS_REQ_PROT,
1907 IEEE80211_SMPS_DYNAMIC);
1908 }
1909 } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
1910 /*
1911 * If the update fails, SF might be running in associated
1912 * mode while disassociated, which is forbidden.
1913 */
1914 WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
1915 "Failed to update SF upon disassociation\n");
1916
1917 /* remove AP station now that the MAC is unassoc */
1918 ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
1919 if (ret)
1920 IWL_ERR(mvm, "failed to remove AP station\n");
1921
1922 if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
1923 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1924 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
1925 /* remove quota for this interface */
1926 ret = iwl_mvm_update_quotas(mvm, false, NULL);
1927 if (ret)
1928 IWL_ERR(mvm, "failed to update quotas\n");
1929
1930 if (vif->p2p)
1931 iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
1932
1933 /* this will take the cleared BSSID from bss_conf */
1934 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
1935 if (ret)
1936 IWL_ERR(mvm,
1937 "failed to update MAC %pM (clear after unassoc)\n",
1938 vif->addr);
1939 }
1940
1941 /*
1942 * The firmware tracks the MU-MIMO group on its own.
1943 * However, on HW restart we should restore this data.
1944 */
1945 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
1946 (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) {
1947 ret = iwl_mvm_update_mu_groups(mvm, vif);
1948 if (ret)
1949 IWL_ERR(mvm,
1950 "failed to update VHT MU_MIMO groups\n");
1951 }
1952
1953 iwl_mvm_recalc_multicast(mvm);
1954 iwl_mvm_configure_bcast_filter(mvm);
1955
1956 /* reset rssi values */
1957 mvmvif->bf_data.ave_beacon_signal = 0;
1958
1959 iwl_mvm_bt_coex_vif_change(mvm);
1960 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
1961 IEEE80211_SMPS_AUTOMATIC);
1962 if (fw_has_capa(&mvm->fw->ucode_capa,
1963 IWL_UCODE_TLV_CAPA_UMAC_SCAN))
1964 iwl_mvm_config_scan(mvm);
1965 } else if (changes & BSS_CHANGED_BEACON_INFO) {
1966 /*
1967 * We received a beacon _after_ association so
1968 * remove the session protection.
1969 */
1970 iwl_mvm_remove_time_event(mvm, mvmvif,
1971 &mvmvif->time_event_data);
1972 }
1973
1974 if (changes & BSS_CHANGED_BEACON_INFO) {
1975 iwl_mvm_sf_update(mvm, vif, false);
1976 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
1977 }
1978
1979 if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS |
1980 /*
1981 * Send power command on every beacon change,
1982 * because we may have not enabled beacon abort yet.
1983 */
1984 BSS_CHANGED_BEACON_INFO)) {
1985 ret = iwl_mvm_power_update_mac(mvm);
1986 if (ret)
1987 IWL_ERR(mvm, "failed to update power mode\n");
1988 }
1989
1990 if (changes & BSS_CHANGED_TXPOWER) {
1991 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
1992 bss_conf->txpower);
1993 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
1994 }
1995
1996 if (changes & BSS_CHANGED_CQM) {
1997 IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
1998 /* reset cqm events tracking */
1999 mvmvif->bf_data.last_cqm_event = 0;
2000 if (mvmvif->bf_data.bf_enabled) {
2001 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
2002 if (ret)
2003 IWL_ERR(mvm,
2004 "failed to update CQM thresholds\n");
2005 }
2006 }
2007
2008 if (changes & BSS_CHANGED_ARP_FILTER) {
2009 IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
2010 iwl_mvm_configure_bcast_filter(mvm);
2011 }
2012 }
2013
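/*
 * Start AP/IBSS operation: send the beacon template, add the MAC
 * context, bind it to a PHY context, add the broadcast station and
 * update power and quota settings. Errors unwind in reverse order.
 */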
2014 static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
2015 struct ieee80211_vif *vif)
2016 {
2017 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2018 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2019 int ret;
2020
2021 /*
2022 * iwl_mvm_mac_ctxt_add() might read directly from the device
2023 * (the system time), so make sure it is available.
2024 */
2025 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP);
2026 if (ret)
2027 return ret;
2028
2029 mutex_lock(&mvm->mutex);
2030
2031 /* Send the beacon template */
2032 ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif);
2033 if (ret)
2034 goto out_unlock;
2035
2036 /*
2037 * Re-calculate the tsf id, as the master-slave relations depend on the
2038 * beacon interval, which was not known when the AP interface was added.
2039 */
2040 if (vif->type == NL80211_IFTYPE_AP)
2041 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2042
2043 mvmvif->ap_assoc_sta_count = 0;
2044
2045 /* Add the mac context */
2046 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
2047 if (ret)
2048 goto out_unlock;
2049
2050 /* Perform the binding */
2051 ret = iwl_mvm_binding_add_vif(mvm, vif);
2052 if (ret)
2053 goto out_remove;
2054
2055 /* Send the bcast station. At this stage the TBTT and DTIM time events
2056 * are added and applied to the scheduler */
2057 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2058 if (ret)
2059 goto out_unbind;
2060
2061 /* must be set before quota calculations */
2062 mvmvif->ap_ibss_active = true;
2063
2064 /* power update needs to be done before quotas */
2065 iwl_mvm_power_update_mac(mvm);
2066
2067 ret = iwl_mvm_update_quotas(mvm, false, NULL);
2068 if (ret)
2069 goto out_quota_failed;
2070
2071 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2072 if (vif->p2p && mvm->p2p_device_vif)
2073 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2074
2075 iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
2076
2077 iwl_mvm_bt_coex_vif_change(mvm);
2078
2079 /* we don't support TDLS during DCM */
2080 if (iwl_mvm_phy_ctx_count(mvm) > 1)
2081 iwl_mvm_teardown_tdls_peers(mvm);
2082
2083 goto out_unlock;
2084
2085 out_quota_failed:
2086 iwl_mvm_power_update_mac(mvm);
2087 mvmvif->ap_ibss_active = false;
2088 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2089 out_unbind:
2090 iwl_mvm_binding_remove_vif(mvm, vif);
2091 out_remove:
2092 iwl_mvm_mac_ctxt_remove(mvm, vif);
2093 out_unlock:
2094 mutex_unlock(&mvm->mutex);
2095 iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP);
2096 return ret;
2097 }
2098
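/* Stop AP/IBSS operation and tear down what iwl_mvm_start_ap_ibss()
 * set up, including any channel-switch state that may still be active.
 */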
2099 static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
2100 struct ieee80211_vif *vif)
2101 {
2102 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2103 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2104
2105 iwl_mvm_prepare_mac_removal(mvm, vif);
2106
2107 mutex_lock(&mvm->mutex);
2108
2109 /* Handle AP stop while in CSA */
2110 if (rcu_access_pointer(mvm->csa_vif) == vif) {
2111 iwl_mvm_remove_time_event(mvm, mvmvif,
2112 &mvmvif->time_event_data);
2113 RCU_INIT_POINTER(mvm->csa_vif, NULL);
2114 mvmvif->csa_countdown = false;
2115 }
2116
2117 if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
2118 RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
2119 mvm->csa_tx_block_bcn_timeout = 0;
2120 }
2121
2122 mvmvif->ap_ibss_active = false;
2123 mvm->ap_last_beacon_gp2 = 0;
2124
2125 iwl_mvm_bt_coex_vif_change(mvm);
2126
2127 iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
2128
2129 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2130 if (vif->p2p && mvm->p2p_device_vif)
2131 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2132
2133 iwl_mvm_update_quotas(mvm, false, NULL);
2134 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2135 iwl_mvm_binding_remove_vif(mvm, vif);
2136
2137 iwl_mvm_power_update_mac(mvm);
2138
2139 iwl_mvm_mac_ctxt_remove(mvm, vif);
2140
2141 mutex_unlock(&mvm->mutex);
2142 }
2143
2144 static void
2145 iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
2146 struct ieee80211_vif *vif,
2147 struct ieee80211_bss_conf *bss_conf,
2148 u32 changes)
2149 {
2150 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2151
2152 /* Changes will be applied when the AP/IBSS is started */
2153 if (!mvmvif->ap_ibss_active)
2154 return;
2155
2156 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
2157 BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) &&
2158 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL))
2159 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2160
2161 /* Need to send a new beacon template to the FW */
2162 if (changes & BSS_CHANGED_BEACON &&
2163 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
2164 IWL_WARN(mvm, "Failed updating beacon data\n");
2165
2166 if (changes & BSS_CHANGED_TXPOWER) {
2167 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2168 bss_conf->txpower);
2169 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2170 }
2171 }
2172
2173 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
2174 struct ieee80211_vif *vif,
2175 struct ieee80211_bss_conf *bss_conf,
2176 u32 changes)
2177 {
2178 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2179
2180 /*
2181 * iwl_mvm_bss_info_changed_station() might call
2182 * iwl_mvm_protect_session(), which reads directly from
2183 * the device (the system time), so make sure it is available.
2184 */
2185 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED))
2186 return;
2187
2188 mutex_lock(&mvm->mutex);
2189
2190 if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
2191 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
2192
2193 switch (vif->type) {
2194 case NL80211_IFTYPE_STATION:
2195 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
2196 break;
2197 case NL80211_IFTYPE_AP:
2198 case NL80211_IFTYPE_ADHOC:
2199 iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
2200 break;
2201 default:
2202 /* shouldn't happen */
2203 WARN_ON_ONCE(1);
2204 }
2205
2206 mutex_unlock(&mvm->mutex);
2207 iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
2208 }
2209
2210 static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
2211 struct ieee80211_vif *vif,
2212 struct ieee80211_scan_request *hw_req)
2213 {
2214 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2215 int ret;
2216
2217 if (hw_req->req.n_channels == 0 ||
2218 hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
2219 return -EINVAL;
2220
2221 mutex_lock(&mvm->mutex);
2222 ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
2223 mutex_unlock(&mvm->mutex);
2224
2225 return ret;
2226 }
2227
2228 static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
2229 struct ieee80211_vif *vif)
2230 {
2231 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2232
2233 mutex_lock(&mvm->mutex);
2234
2235 /* Due to a race condition, it's possible that mac80211 asks
2236 * us to stop a hw_scan when it's already stopped. This can
2237 * happen, for instance, if we stopped the scan ourselves,
2238 * called ieee80211_scan_completed() and the userspace called
2239 * cancel scan before ieee80211_scan_work() could run.
2240 * To handle that, simply return if the scan is not running.
2241 */
2242 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
2243 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
2244
2245 mutex_unlock(&mvm->mutex);
2246 }
2247
2248 static void
2249 iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
2250 struct ieee80211_sta *sta, u16 tids,
2251 int num_frames,
2252 enum ieee80211_frame_release_type reason,
2253 bool more_data)
2254 {
2255 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2256
2257 /* Called when we need to transmit (a) frame(s) from mac80211 */
2258
2259 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2260 tids, more_data, false);
2261 }
2262
2263 static void
2264 iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
2265 struct ieee80211_sta *sta, u16 tids,
2266 int num_frames,
2267 enum ieee80211_frame_release_type reason,
2268 bool more_data)
2269 {
2270 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2271
2272 /* Called when we need to transmit (a) frame(s) from agg queue */
2273
2274 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2275 tids, more_data, true);
2276 }
2277
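/* Handle PS notifications from mac80211: when a station goes to sleep,
 * mark its TIDs with queued frames as buffered and freeze the TX queue
 * timers; when it wakes up, unfreeze them and update the firmware.
 */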
2278 static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
2279 struct ieee80211_vif *vif,
2280 enum sta_notify_cmd cmd,
2281 struct ieee80211_sta *sta)
2282 {
2283 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2284 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2285 unsigned long txqs = 0, tids = 0;
2286 int tid;
2287
2288 spin_lock_bh(&mvmsta->lock);
2289 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
2290 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2291
2292 if (tid_data->state != IWL_AGG_ON &&
2293 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
2294 continue;
2295
2296 __set_bit(tid_data->txq_id, &txqs);
2297
2298 if (iwl_mvm_tid_queued(tid_data) == 0)
2299 continue;
2300
2301 __set_bit(tid, &tids);
2302 }
2303
2304 switch (cmd) {
2305 case STA_NOTIFY_SLEEP:
2306 if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
2307 ieee80211_sta_block_awake(hw, sta, true);
2308
2309 for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
2310 ieee80211_sta_set_buffered(sta, tid, true);
2311
2312 if (txqs)
2313 iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
2314 /*
2315 * The fw updates the STA to be asleep. Tx packets on the Tx
2316 * queues to this station will not be transmitted. The fw will
2317 * send a Tx response with TX_STATUS_FAIL_DEST_PS.
2318 */
2319 break;
2320 case STA_NOTIFY_AWAKE:
2321 if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
2322 break;
2323
2324 if (txqs)
2325 iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
2326 iwl_mvm_sta_modify_ps_wake(mvm, sta);
2327 break;
2328 default:
2329 break;
2330 }
2331 spin_unlock_bh(&mvmsta->lock);
2332 }
2333
2334 static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
2335 struct ieee80211_vif *vif,
2336 struct ieee80211_sta *sta)
2337 {
2338 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2339 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2340
2341 /*
2342 * This is called before mac80211 does RCU synchronisation,
2343 * so here we already invalidate our internal RCU-protected
2344 * station pointer. The rest of the code will thus no longer
2345 * be able to find the station this way, and we don't rely
2346 * on further RCU synchronisation after the sta_state()
2347 * callback deleted the station.
2348 */
2349 mutex_lock(&mvm->mutex);
2350 if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
2351 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
2352 ERR_PTR(-ENOENT));
2353
2354 mutex_unlock(&mvm->mutex);
2355 }
2356
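/* Decide whether U-APSD may be advertised for this vif, based on
 * firmware support, P2P constraints and module parameters.
 */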
2357 static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2358 const u8 *bssid)
2359 {
2360 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
2361 return;
2362
2363 if (vif->p2p && !iwl_mvm_is_p2p_standalone_uapsd_supported(mvm)) {
2364 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2365 return;
2366 }
2367
2368 if (!vif->p2p &&
2369 (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) {
2370 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2371 return;
2372 }
2373
2374 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
2375 }
2376
2377 static void
2378 iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
2379 struct ieee80211_vif *vif, u8 *peer_addr,
2380 enum nl80211_tdls_operation action)
2381 {
2382 struct iwl_fw_dbg_trigger_tlv *trig;
2383 struct iwl_fw_dbg_trigger_tdls *tdls_trig;
2384
2385 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TDLS))
2386 return;
2387
2388 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS);
2389 tdls_trig = (void *)trig->data;
2390 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
2391 return;
2392
2393 if (!(tdls_trig->action_bitmap & BIT(action)))
2394 return;
2395
2396 if (tdls_trig->peer_mode &&
2397 memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0)
2398 return;
2399
2400 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
2401 "TDLS event occurred, peer %pM, action %d",
2402 peer_addr, action);
2403 }
2404
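/* Free any TX frames still sitting on the per-TID deferred queues of a
 * station; used when removing a station in DQA mode.
 */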
2405 static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
2406 struct iwl_mvm_sta *mvm_sta)
2407 {
2408 struct iwl_mvm_tid_data *tid_data;
2409 struct sk_buff *skb;
2410 int i;
2411
2412 spin_lock_bh(&mvm_sta->lock);
2413 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
2414 tid_data = &mvm_sta->tid_data[i];
2415 while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames)))
2416 ieee80211_free_txskb(mvm->hw, skb);
2417 }
2418 spin_unlock_bh(&mvm_sta->lock);
2419 }
2420
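/*
 * mac80211 station state machine callback. Adds, updates and removes
 * firmware stations as the station moves between NOTEXIST, NONE, AUTH,
 * ASSOC and AUTHORIZED, and handles the TDLS and beacon filtering side
 * effects of each transition.
 */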
2421 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2422 struct ieee80211_vif *vif,
2423 struct ieee80211_sta *sta,
2424 enum ieee80211_sta_state old_state,
2425 enum ieee80211_sta_state new_state)
2426 {
2427 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2428 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2429 int ret;
2430
2431 IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
2432 sta->addr, old_state, new_state);
2433
2434 /* this would be a mac80211 bug ... but don't crash */
2435 if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
2436 return -EINVAL;
2437
2438 /* if a STA is being removed, reuse its ID */
2439 flush_work(&mvm->sta_drained_wk);
2440
2441 /*
2442 * If we are in a STA removal flow and in DQA mode:
2443 *
2444 * This is after the sync_rcu part, so the queues have already been
2445 * flushed. No more TXs on their way in mac80211's path, and no more in
2446 * the queues.
2447 * Also, we won't be getting any new TX frames for this station.
2448 * What we might have are deferred TX frames that need to be taken care
2449 * of.
2450 *
2451 * Drop any still-queued deferred-frame before removing the STA, and
2452 * make sure the worker is no longer handling frames for this STA.
2453 */
2454 if (old_state == IEEE80211_STA_NONE &&
2455 new_state == IEEE80211_STA_NOTEXIST &&
2456 iwl_mvm_is_dqa_supported(mvm)) {
2457 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2458
2459 iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
2460 flush_work(&mvm->add_stream_wk);
2461
2462 /*
2463 * No need to make sure deferred TX indication is off since the
2464 * worker will already remove it if it was on
2465 */
2466 }
2467
2468 mutex_lock(&mvm->mutex);
2469 if (old_state == IEEE80211_STA_NOTEXIST &&
2470 new_state == IEEE80211_STA_NONE) {
2471 /*
2472 * Firmware bug - it'll crash if the beacon interval is less
2473 * than 16. We can't avoid connecting at all, so refuse the
2474 * station state change; this will cause mac80211 to abandon
2475 * attempts to connect to this AP, and eventually wpa_s will
2476 * blacklist the AP...
2477 */
2478 if (vif->type == NL80211_IFTYPE_STATION &&
2479 vif->bss_conf.beacon_int < 16) {
2480 IWL_ERR(mvm,
2481 "AP %pM beacon interval is %d, refusing due to firmware bug!\n",
2482 sta->addr, vif->bss_conf.beacon_int);
2483 ret = -EINVAL;
2484 goto out_unlock;
2485 }
2486
2487 if (sta->tdls &&
2488 (vif->p2p ||
2489 iwl_mvm_tdls_sta_count(mvm, NULL) ==
2490 IWL_MVM_TDLS_STA_COUNT ||
2491 iwl_mvm_phy_ctx_count(mvm) > 1)) {
2492 IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n");
2493 ret = -EBUSY;
2494 goto out_unlock;
2495 }
2496
2497 ret = iwl_mvm_add_sta(mvm, vif, sta);
2498 if (sta->tdls && ret == 0) {
2499 iwl_mvm_recalc_tdls_state(mvm, vif, true);
2500 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
2501 NL80211_TDLS_SETUP);
2502 }
2503 } else if (old_state == IEEE80211_STA_NONE &&
2504 new_state == IEEE80211_STA_AUTH) {
2505 /*
2506 * EBS may be disabled due to previous failures reported by FW.
2507 * Reset EBS status here, assuming the environment has changed.
2508 */
2509 mvm->last_ebs_successful = true;
2510 iwl_mvm_check_uapsd(mvm, vif, sta->addr);
2511 ret = 0;
2512 } else if (old_state == IEEE80211_STA_AUTH &&
2513 new_state == IEEE80211_STA_ASSOC) {
2514 if (vif->type == NL80211_IFTYPE_AP) {
2515 mvmvif->ap_assoc_sta_count++;
2516 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2517 }
2518 ret = iwl_mvm_update_sta(mvm, vif, sta);
2519 if (ret == 0)
2520 iwl_mvm_rs_rate_init(mvm, sta,
2521 mvmvif->phy_ctxt->channel->band,
2522 true);
2523 } else if (old_state == IEEE80211_STA_ASSOC &&
2524 new_state == IEEE80211_STA_AUTHORIZED) {
2525
2526 /* we don't support TDLS during DCM */
2527 if (iwl_mvm_phy_ctx_count(mvm) > 1)
2528 iwl_mvm_teardown_tdls_peers(mvm);
2529
2530 if (sta->tdls)
2531 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
2532 NL80211_TDLS_ENABLE_LINK);
2533
2534 /* enable beacon filtering */
2535 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2536 ret = 0;
2537 } else if (old_state == IEEE80211_STA_AUTHORIZED &&
2538 new_state == IEEE80211_STA_ASSOC) {
2539 /* disable beacon filtering */
2540 WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0));
2541 ret = 0;
2542 } else if (old_state == IEEE80211_STA_ASSOC &&
2543 new_state == IEEE80211_STA_AUTH) {
2544 if (vif->type == NL80211_IFTYPE_AP) {
2545 mvmvif->ap_assoc_sta_count--;
2546 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2547 }
2548 ret = 0;
2549 } else if (old_state == IEEE80211_STA_AUTH &&
2550 new_state == IEEE80211_STA_NONE) {
2551 ret = 0;
2552 } else if (old_state == IEEE80211_STA_NONE &&
2553 new_state == IEEE80211_STA_NOTEXIST) {
2554 ret = iwl_mvm_rm_sta(mvm, vif, sta);
2555 if (sta->tdls) {
2556 iwl_mvm_recalc_tdls_state(mvm, vif, false);
2557 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
2558 NL80211_TDLS_DISABLE_LINK);
2559 }
2560 } else {
2561 ret = -EIO;
2562 }
2563 out_unlock:
2564 mutex_unlock(&mvm->mutex);
2565
2566 if (sta->tdls && ret == 0) {
2567 if (old_state == IEEE80211_STA_NOTEXIST &&
2568 new_state == IEEE80211_STA_NONE)
2569 ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2570 else if (old_state == IEEE80211_STA_NONE &&
2571 new_state == IEEE80211_STA_NOTEXIST)
2572 ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2573 }
2574
2575 return ret;
2576 }
2577
2578 static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2579 {
2580 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2581
2582 mvm->rts_threshold = value;
2583
2584 return 0;
2585 }
2586
2587 static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
2588 struct ieee80211_vif *vif,
2589 struct ieee80211_sta *sta, u32 changed)
2590 {
2591 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2592
2593 if (vif->type == NL80211_IFTYPE_STATION &&
2594 changed & IEEE80211_RC_NSS_CHANGED)
2595 iwl_mvm_sf_update(mvm, vif, false);
2596 }
2597
2598 static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
2599 struct ieee80211_vif *vif, u16 ac,
2600 const struct ieee80211_tx_queue_params *params)
2601 {
2602 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2603 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2604
2605 mvmvif->queue_params[ac] = *params;
2606
2607 /*
2608 * No need to update right away; we'll get BSS_CHANGED_QOS.
2609 * The exception is the P2P_DEVICE interface, which needs an immediate update.
2610 */
2611 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2612 int ret;
2613
2614 mutex_lock(&mvm->mutex);
2615 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2616 mutex_unlock(&mvm->mutex);
2617 return ret;
2618 }
2619 return 0;
2620 }
2621
2622 static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
2623 struct ieee80211_vif *vif)
2624 {
2625 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2626 u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
2627 u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS;
2628
2629 if (WARN_ON_ONCE(vif->bss_conf.assoc))
2630 return;
2631
2632 /*
2633 * iwl_mvm_protect_session() reads directly from the device
2634 * (the system time), so make sure it is available.
2635 */
2636 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX))
2637 return;
2638
2639 mutex_lock(&mvm->mutex);
2640 /* Try really hard to protect the session and hear a beacon */
2641 iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false);
2642 mutex_unlock(&mvm->mutex);
2643
2644 iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX);
2645 }
2646
2647 static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
2648 struct ieee80211_vif *vif,
2649 struct cfg80211_sched_scan_request *req,
2650 struct ieee80211_scan_ies *ies)
2651 {
2652 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2653
2654 int ret;
2655
2656 mutex_lock(&mvm->mutex);
2657
2658 if (!vif->bss_conf.idle) {
2659 ret = -EBUSY;
2660 goto out;
2661 }
2662
2663 ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
2664
2665 out:
2666 mutex_unlock(&mvm->mutex);
2667 return ret;
2668 }
2669
2670 static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
2671 struct ieee80211_vif *vif)
2672 {
2673 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2674 int ret;
2675
2676 mutex_lock(&mvm->mutex);
2677
2678 /* Due to a race condition, it's possible that mac80211 asks
2679 * us to stop a sched_scan when it's already stopped. This
2680 * can happen, for instance, if we stopped the scan ourselves,
2681 * called ieee80211_sched_scan_stopped() and the userspace called
2682 * stop sched scan before ieee80211_sched_scan_stopped_work()
2683 * could run. To handle this, simply return if the scan is
2684 * not running.
2685 */
2686 if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
2687 mutex_unlock(&mvm->mutex);
2688 return 0;
2689 }
2690
2691 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
2692 mutex_unlock(&mvm->mutex);
2693 iwl_mvm_wait_for_async_handlers(mvm);
2694
2695 return ret;
2696 }
2697
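/*
 * Program (SET_KEY) or remove (DISABLE_KEY) a hardware crypto key,
 * including the per-queue PN tracking needed for CCMP/GCMP RX when the
 * new RX API is used.
 */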
2698 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
2699 enum set_key_cmd cmd,
2700 struct ieee80211_vif *vif,
2701 struct ieee80211_sta *sta,
2702 struct ieee80211_key_conf *key)
2703 {
2704 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2705 struct iwl_mvm_sta *mvmsta;
2706 struct iwl_mvm_key_pn *ptk_pn;
2707 int keyidx = key->keyidx;
2708 int ret;
2709 u8 key_offset;
2710
2711 if (iwlwifi_mod_params.sw_crypto) {
2712 IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
2713 return -EOPNOTSUPP;
2714 }
2715
2716 switch (key->cipher) {
2717 case WLAN_CIPHER_SUITE_TKIP:
2718 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
2719 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2720 break;
2721 case WLAN_CIPHER_SUITE_CCMP:
2722 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2723 break;
2724 case WLAN_CIPHER_SUITE_AES_CMAC:
2725 WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
2726 break;
2727 case WLAN_CIPHER_SUITE_WEP40:
2728 case WLAN_CIPHER_SUITE_WEP104:
2729 /* For non-client mode, only use WEP keys for TX as we probably
2730 * don't have a station yet anyway and would then have to keep
2731 * track of the keys, linking them to each of the clients/peers
2732 * as they appear. For now, don't do that; WEP offload doesn't
2733 * really matter much for performance, but we need it for some
2734 * other offload features in client mode.
2735 */
2736 if (vif->type != NL80211_IFTYPE_STATION)
2737 return 0;
2738 break;
2739 default:
2740 /* currently FW supports only one optional cipher scheme */
2741 if (hw->n_cipher_schemes &&
2742 hw->cipher_schemes->cipher == key->cipher)
2743 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2744 else
2745 return -EOPNOTSUPP;
2746 }
2747
2748 mutex_lock(&mvm->mutex);
2749
2750 switch (cmd) {
2751 case SET_KEY:
2752 if ((vif->type == NL80211_IFTYPE_ADHOC ||
2753 vif->type == NL80211_IFTYPE_AP) && !sta) {
2754 /*
2755 * GTK on AP interface is a TX-only key, return 0;
2756 * on IBSS they're per-station and because we're lazy
2757 * we don't support them for RX, so do the same.
2758 * CMAC in AP/IBSS modes must be done in software.
2759 */
2760 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
2761 ret = -EOPNOTSUPP;
2762 else
2763 ret = 0;
2764 key->hw_key_idx = STA_KEY_IDX_INVALID;
2765 break;
2766 }
2767
2768 /* During FW restart, in order to restore the state as it was,
2769 * don't try to reprogram keys we previously failed for.
2770 */
2771 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
2772 key->hw_key_idx == STA_KEY_IDX_INVALID) {
2773 IWL_DEBUG_MAC80211(mvm,
2774 "skip invalid idx key programming during restart\n");
2775 ret = 0;
2776 break;
2777 }
2778
2779 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
2780 sta && iwl_mvm_has_new_rx_api(mvm) &&
2781 key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
2782 (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
2783 key->cipher == WLAN_CIPHER_SUITE_GCMP)) {
2784 struct ieee80211_key_seq seq;
2785 int tid, q;
2786
2787 mvmsta = iwl_mvm_sta_from_mac80211(sta);
2788 WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx]));
2789 ptk_pn = kzalloc(sizeof(*ptk_pn) +
2790 mvm->trans->num_rx_queues *
2791 sizeof(ptk_pn->q[0]),
2792 GFP_KERNEL);
2793 if (!ptk_pn) {
2794 ret = -ENOMEM;
2795 break;
2796 }
2797
2798 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
2799 ieee80211_get_key_rx_seq(key, tid, &seq);
2800 for (q = 0; q < mvm->trans->num_rx_queues; q++)
2801 memcpy(ptk_pn->q[q].pn[tid],
2802 seq.ccmp.pn,
2803 IEEE80211_CCMP_PN_LEN);
2804 }
2805
2806 rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn);
2807 }
2808
2809 /* in HW restart reuse the index, otherwise request a new one */
2810 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
2811 key_offset = key->hw_key_idx;
2812 else
2813 key_offset = STA_KEY_IDX_INVALID;
2814
2815 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
2816 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
2817 if (ret) {
2818 IWL_WARN(mvm, "set key failed\n");
2819 /*
2820 * can't add key for RX, but we don't need it
2821 * in the device for TX so still return 0
2822 */
2823 key->hw_key_idx = STA_KEY_IDX_INVALID;
2824 ret = 0;
2825 }
2826
2827 break;
2828 case DISABLE_KEY:
2829 if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
2830 ret = 0;
2831 break;
2832 }
2833
2834 if (sta && iwl_mvm_has_new_rx_api(mvm) &&
2835 key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
2836 (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
2837 key->cipher == WLAN_CIPHER_SUITE_GCMP)) {
2838 mvmsta = iwl_mvm_sta_from_mac80211(sta);
2839 ptk_pn = rcu_dereference_protected(
2840 mvmsta->ptk_pn[keyidx],
2841 lockdep_is_held(&mvm->mutex));
2842 RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL);
2843 if (ptk_pn)
2844 kfree_rcu(ptk_pn, rcu_head);
2845 }
2846
2847 IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
2848 ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
2849 break;
2850 default:
2851 ret = -EINVAL;
2852 }
2853
2854 mutex_unlock(&mvm->mutex);
2855 return ret;
2856 }
2857
2858 static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
2859 struct ieee80211_vif *vif,
2860 struct ieee80211_key_conf *keyconf,
2861 struct ieee80211_sta *sta,
2862 u32 iv32, u16 *phase1key)
2863 {
2864 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2865
2866 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
2867 return;
2868
2869 iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
2870 }
2871
2872
2873 static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
2874 struct iwl_rx_packet *pkt, void *data)
2875 {
2876 struct iwl_mvm *mvm =
2877 container_of(notif_wait, struct iwl_mvm, notif_wait);
2878 struct iwl_hs20_roc_res *resp;
2879 int resp_len = iwl_rx_packet_payload_len(pkt);
2880 struct iwl_mvm_time_event_data *te_data = data;
2881
2882 if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD))
2883 return true;
2884
2885 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
2886 IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n");
2887 return true;
2888 }
2889
2890 resp = (void *)pkt->data;
2891
2892 IWL_DEBUG_TE(mvm,
2893 "Aux ROC: Recieved response from ucode: status=%d uid=%d\n",
2894 resp->status, resp->event_unique_id);
2895
2896 te_data->uid = le32_to_cpu(resp->event_unique_id);
2897 IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
2898 te_data->uid);
2899
2900 spin_lock_bh(&mvm->time_event_lock);
2901 list_add_tail(&te_data->list, &mvm->aux_roc_te_list);
2902 spin_unlock_bh(&mvm->time_event_lock);
2903
2904 return true;
2905 }
2906
2907 #define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
2908 #define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
2909 #define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
2910 #define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
2911 #define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)
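/*
 * Request an AUX (hotspot) remain-on-channel time event from the
 * firmware and wait for the HOT_SPOT_CMD response in order to learn the
 * time event UID. When the vif is associated, the requested duration is
 * clamped to fit inside one DTIM interval; for example, a 300 ms request
 * with a 200 TU DTIM interval is trimmed to just under one DTIM interval.
 */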
2912 static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
2913 struct ieee80211_channel *channel,
2914 struct ieee80211_vif *vif,
2915 int duration)
2916 {
2917 int res, time_reg = DEVICE_SYSTEM_TIME_REG;
2918 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2919 struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
2920 static const u16 time_event_response[] = { HOT_SPOT_CMD };
2921 struct iwl_notification_wait wait_time_event;
2922 u32 dtim_interval = vif->bss_conf.dtim_period *
2923 vif->bss_conf.beacon_int;
2924 u32 req_dur, delay;
2925 struct iwl_hs20_roc_req aux_roc_req = {
2926 .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
2927 .id_and_color =
2928 cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
2929 .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
2930 /* Set the channel info data */
2931 .channel_info.band = (channel->band == NL80211_BAND_2GHZ) ?
2932 PHY_BAND_24 : PHY_BAND_5,
2933 .channel_info.channel = channel->hw_value,
2934 .channel_info.width = PHY_VHT_CHANNEL_MODE20,
2935 /* Set the time and duration */
2936 .apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)),
2937 };
2938
2939 delay = AUX_ROC_MIN_DELAY;
2940 req_dur = MSEC_TO_TU(duration);
2941
2942 /*
2943 * If we are associated we want the delay time to be at least one
2944 * dtim interval so that the FW can wait until after the DTIM and
2945 * then start the time event; this will potentially allow us to
2946 * remain off-channel for the max duration.
2947 * Since we want to use almost a whole dtim interval we would also
2948 * like the delay to be for 2-3 dtim intervals, in case there are
2949 * other time events with higher priority.
2950 */
2951 if (vif->bss_conf.assoc) {
2952 delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
2953 /* We cannot remain off-channel longer than the DTIM interval */
2954 if (dtim_interval <= req_dur) {
2955 req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER;
2956 if (req_dur <= AUX_ROC_MIN_DURATION)
2957 req_dur = dtim_interval -
2958 AUX_ROC_MIN_SAFETY_BUFFER;
2959 }
2960 }
2961
2962 aux_roc_req.duration = cpu_to_le32(req_dur);
2963 aux_roc_req.apply_time_max_delay = cpu_to_le32(delay);
2964
2965 IWL_DEBUG_TE(mvm,
2966 "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
2967 channel->hw_value, req_dur, duration, delay,
2968 dtim_interval);
2969 /* Set the node address */
2970 memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
2971
2972 lockdep_assert_held(&mvm->mutex);
2973
2974 spin_lock_bh(&mvm->time_event_lock);
2975
2976 if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
2977 spin_unlock_bh(&mvm->time_event_lock);
2978 return -EIO;
2979 }
2980
2981 te_data->vif = vif;
2982 te_data->duration = duration;
2983 te_data->id = HOT_SPOT_CMD;
2984
2985 spin_unlock_bh(&mvm->time_event_lock);
2986
2987 /*
2988 * Use a notification wait, which really just processes the
2989 * command response and doesn't wait for anything, in order
2990 * to be able to process the response and get the UID inside
2991 * the RX path. Using CMD_WANT_SKB doesn't work because it
2992 * stores the buffer and then wakes up this thread, by which
2993 * time another notification (that the time event started)
2994 * might already have been processed unsuccessfully.
2995 */
2996 iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
2997 time_event_response,
2998 ARRAY_SIZE(time_event_response),
2999 iwl_mvm_rx_aux_roc, te_data);
3000
3001 res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req),
3002 &aux_roc_req);
3003
3004 if (res) {
3005 IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res);
3006 iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
3007 goto out_clear_te;
3008 }
3009
3010 /* No need to wait for anything, so just pass 1 (0 isn't valid) */
3011 res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
3012 /* should never fail */
3013 WARN_ON_ONCE(res);
3014
3015 if (res) {
3016 out_clear_te:
3017 spin_lock_bh(&mvm->time_event_lock);
3018 iwl_mvm_te_clear_data(mvm, te_data);
3019 spin_unlock_bh(&mvm->time_event_lock);
3020 }
3021
3022 return res;
3023 }
3024
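/*
 * mac80211 remain-on-channel entry point. Station interfaces use the
 * AUX ROC (hotspot) firmware flow; P2P_DEVICE interfaces are moved to a
 * PHY context on the requested channel and a time event is scheduled.
 */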
3025 static int iwl_mvm_roc(struct ieee80211_hw *hw,
3026 struct ieee80211_vif *vif,
3027 struct ieee80211_channel *channel,
3028 int duration,
3029 enum ieee80211_roc_type type)
3030 {
3031 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3032 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3033 struct cfg80211_chan_def chandef;
3034 struct iwl_mvm_phy_ctxt *phy_ctxt;
3035 int ret, i;
3036
3037 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
3038 duration, type);
3039
3040 flush_work(&mvm->roc_done_wk);
3041
3042 mutex_lock(&mvm->mutex);
3043
3044 switch (vif->type) {
3045 case NL80211_IFTYPE_STATION:
3046 if (fw_has_capa(&mvm->fw->ucode_capa,
3047 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
3048 /* Use aux roc framework (HS20) */
3049 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
3050 vif, duration);
3051 goto out_unlock;
3052 }
3053 IWL_ERR(mvm, "hotspot not supported\n");
3054 ret = -EINVAL;
3055 goto out_unlock;
3056 case NL80211_IFTYPE_P2P_DEVICE:
3057 /* handle below */
3058 break;
3059 default:
3060 IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
3061 ret = -EINVAL;
3062 goto out_unlock;
3063 }
3064
3065 for (i = 0; i < NUM_PHY_CTX; i++) {
3066 phy_ctxt = &mvm->phy_ctxts[i];
3067 if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
3068 continue;
3069
3070 if (phy_ctxt->ref && channel == phy_ctxt->channel) {
3071 /*
3072 * Unbind the P2P_DEVICE from the current PHY context,
3073 * and if the PHY context is not used remove it.
3074 */
3075 ret = iwl_mvm_binding_remove_vif(mvm, vif);
3076 if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3077 goto out_unlock;
3078
3079 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3080
3081 /* Bind the P2P_DEVICE to the current PHY Context */
3082 mvmvif->phy_ctxt = phy_ctxt;
3083
3084 ret = iwl_mvm_binding_add_vif(mvm, vif);
3085 if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3086 goto out_unlock;
3087
3088 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3089 goto schedule_time_event;
3090 }
3091 }
3092
3093 /* Need to update the PHY context only if the ROC channel changed */
3094 if (channel == mvmvif->phy_ctxt->channel)
3095 goto schedule_time_event;
3096
3097 cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
3098
3099 /*
3100 * Change the PHY context configuration as it is currently referenced
3101 * only by the P2P Device MAC
3102 */
3103 if (mvmvif->phy_ctxt->ref == 1) {
3104 ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
3105 &chandef, 1, 1);
3106 if (ret)
3107 goto out_unlock;
3108 } else {
3109 /*
3110 * The PHY context is shared with other MACs. Need to remove the
3111 * P2P Device from the binding, allocate a new PHY context and
3112 * create a new binding
3113 */
3114 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3115 if (!phy_ctxt) {
3116 ret = -ENOSPC;
3117 goto out_unlock;
3118 }
3119
3120 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
3121 1, 1);
3122 if (ret) {
3123 IWL_ERR(mvm, "Failed to change PHY context\n");
3124 goto out_unlock;
3125 }
3126
3127 /* Unbind the P2P_DEVICE from the current PHY context */
3128 ret = iwl_mvm_binding_remove_vif(mvm, vif);
3129 if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3130 goto out_unlock;
3131
3132 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3133
3134 /* Bind the P2P_DEVICE to the new allocated PHY context */
3135 mvmvif->phy_ctxt = phy_ctxt;
3136
3137 ret = iwl_mvm_binding_add_vif(mvm, vif);
3138 if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3139 goto out_unlock;
3140
3141 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3142 }
3143
3144 schedule_time_event:
3145 /* Schedule the time events */
3146 ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
3147
3148 out_unlock:
3149 mutex_unlock(&mvm->mutex);
3150 IWL_DEBUG_MAC80211(mvm, "leave\n");
3151 return ret;
3152 }
3153
3154 static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
3155 {
3156 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3157
3158 IWL_DEBUG_MAC80211(mvm, "enter\n");
3159
3160 mutex_lock(&mvm->mutex);
3161 iwl_mvm_stop_roc(mvm);
3162 mutex_unlock(&mvm->mutex);
3163
3164 IWL_DEBUG_MAC80211(mvm, "leave\n");
3165 return 0;
3166 }
3167
3168 static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
3169 struct ieee80211_chanctx_conf *ctx)
3170 {
3171 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3172 struct iwl_mvm_phy_ctxt *phy_ctxt;
3173 int ret;
3174
3175 lockdep_assert_held(&mvm->mutex);
3176
3177 IWL_DEBUG_MAC80211(mvm, "Add channel context\n");
3178
3179 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3180 if (!phy_ctxt) {
3181 ret = -ENOSPC;
3182 goto out;
3183 }
3184
3185 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3186 ctx->rx_chains_static,
3187 ctx->rx_chains_dynamic);
3188 if (ret) {
3189 IWL_ERR(mvm, "Failed to add PHY context\n");
3190 goto out;
3191 }
3192
3193 iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
3194 *phy_ctxt_id = phy_ctxt->id;
3195 out:
3196 return ret;
3197 }
3198
3199 static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
3200 struct ieee80211_chanctx_conf *ctx)
3201 {
3202 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3203 int ret;
3204
3205 mutex_lock(&mvm->mutex);
3206 ret = __iwl_mvm_add_chanctx(mvm, ctx);
3207 mutex_unlock(&mvm->mutex);
3208
3209 return ret;
3210 }
3211
3212 static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
3213 struct ieee80211_chanctx_conf *ctx)
3214 {
3215 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3216 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3217
3218 lockdep_assert_held(&mvm->mutex);
3219
3220 iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
3221 }
3222
3223 static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
3224 struct ieee80211_chanctx_conf *ctx)
3225 {
3226 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3227
3228 mutex_lock(&mvm->mutex);
3229 __iwl_mvm_remove_chanctx(mvm, ctx);
3230 mutex_unlock(&mvm->mutex);
3231 }
3232
3233 static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
3234 struct ieee80211_chanctx_conf *ctx,
3235 u32 changed)
3236 {
3237 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3238 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3239 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3240
3241 if (WARN_ONCE((phy_ctxt->ref > 1) &&
3242 (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
3243 IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
3244 IEEE80211_CHANCTX_CHANGE_RADAR |
3245 IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)),
3246 "Cannot change PHY. Ref=%d, changed=0x%X\n",
3247 phy_ctxt->ref, changed))
3248 return;
3249
3250 mutex_lock(&mvm->mutex);
3251 iwl_mvm_bt_coex_vif_change(mvm);
3252 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3253 ctx->rx_chains_static,
3254 ctx->rx_chains_dynamic);
3255 mutex_unlock(&mvm->mutex);
3256 }
3257
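/*
 * Bind a vif to the PHY context backing the given channel context.
 * AP/IBSS binding is normally deferred to the start AP/IBSS flow
 * (except when switching contexts during CSA); monitor interfaces also
 * get a quota and a sniffer station; a station vif switching contexts
 * gets session protection to hear the first beacon on the new channel.
 */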
3258 static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
3259 struct ieee80211_vif *vif,
3260 struct ieee80211_chanctx_conf *ctx,
3261 bool switching_chanctx)
3262 {
3263 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3264 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3265 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3266 int ret;
3267
3268 lockdep_assert_held(&mvm->mutex);
3269
3270 mvmvif->phy_ctxt = phy_ctxt;
3271
3272 switch (vif->type) {
3273 case NL80211_IFTYPE_AP:
3274 /* only needed if we're switching chanctx (i.e. during CSA) */
3275 if (switching_chanctx) {
3276 mvmvif->ap_ibss_active = true;
3277 break;
3278 }
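/* fall through */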
3279 case NL80211_IFTYPE_ADHOC:
3280 /*
3281 * The AP binding flow is handled as part of the start_ap flow
3282 * (in bss_info_changed), similarly for IBSS.
3283 */
3284 ret = 0;
3285 goto out;
3286 case NL80211_IFTYPE_STATION:
3287 break;
3288 case NL80211_IFTYPE_MONITOR:
3289 /* always disable PS when a monitor interface is active */
3290 mvmvif->ps_disabled = true;
3291 break;
3292 default:
3293 ret = -EINVAL;
3294 goto out;
3295 }
3296
3297 ret = iwl_mvm_binding_add_vif(mvm, vif);
3298 if (ret)
3299 goto out;
3300
3301 /*
3302 * Power state must be updated before quotas,
3303 * otherwise fw will complain.
3304 */
3305 iwl_mvm_power_update_mac(mvm);
3306
3307 /* Setting the quota at this stage is only required for monitor
3308 * interfaces. For the other types, the bss_info changed flow
3309 * will handle quota settings.
3310 */
3311 if (vif->type == NL80211_IFTYPE_MONITOR) {
3312 mvmvif->monitor_active = true;
3313 ret = iwl_mvm_update_quotas(mvm, false, NULL);
3314 if (ret)
3315 goto out_remove_binding;
3316
3317 ret = iwl_mvm_add_snif_sta(mvm, vif);
3318 if (ret)
3319 goto out_remove_binding;
3320
3321 }
3322
3323 /* Handle binding during CSA */
3324 if (vif->type == NL80211_IFTYPE_AP) {
3325 iwl_mvm_update_quotas(mvm, false, NULL);
3326 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3327 }
3328
3329 if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
3330 u32 duration = 2 * vif->bss_conf.beacon_int;
3331
3332 /* iwl_mvm_protect_session() reads directly from the
3333 * device (the system time), so make sure it is
3334 * available.
3335 */
3336 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
3337 if (ret)
3338 goto out_remove_binding;
3339
3340 /* Protect the session to make sure we hear the first
3341 * beacon on the new channel.
3342 */
3343 iwl_mvm_protect_session(mvm, vif, duration, duration,
3344 vif->bss_conf.beacon_int / 2,
3345 true);
3346
3347 iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
3348
3349 iwl_mvm_update_quotas(mvm, false, NULL);
3350 }
3351
3352 goto out;
3353
3354 out_remove_binding:
3355 iwl_mvm_binding_remove_vif(mvm, vif);
3356 iwl_mvm_power_update_mac(mvm);
3357 out:
3358 if (ret)
3359 mvmvif->phy_ctxt = NULL;
3360 return ret;
3361 }
3362 static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
3363 struct ieee80211_vif *vif,
3364 struct ieee80211_chanctx_conf *ctx)
3365 {
3366 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3367 int ret;
3368
3369 mutex_lock(&mvm->mutex);
3370 ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false);
3371 mutex_unlock(&mvm->mutex);
3372
3373 return ret;
3374 }
3375
3376 static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
3377 struct ieee80211_vif *vif,
3378 struct ieee80211_chanctx_conf *ctx,
3379 bool switching_chanctx)
3380 {
3381 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3382 struct ieee80211_vif *disabled_vif = NULL;
3383
3384 lockdep_assert_held(&mvm->mutex);
3385
3386 iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
3387
3388 switch (vif->type) {
3389 case NL80211_IFTYPE_ADHOC:
3390 goto out;
3391 case NL80211_IFTYPE_MONITOR:
3392 mvmvif->monitor_active = false;
3393 mvmvif->ps_disabled = false;
3394 iwl_mvm_rm_snif_sta(mvm, vif);
3395 break;
3396 case NL80211_IFTYPE_AP:
3397 /* This part is triggered only during CSA */
3398 if (!switching_chanctx || !mvmvif->ap_ibss_active)
3399 goto out;
3400
3401 mvmvif->csa_countdown = false;
3402
3403 /* Set CS bit on all the stations */
3404 iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
3405
3406 /* Save the blocked iface; the timeout is set on the next beacon */
3407 rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif);
3408
3409 mvmvif->ap_ibss_active = false;
3410 break;
3411 case NL80211_IFTYPE_STATION:
3412 if (!switching_chanctx)
3413 break;
3414
3415 disabled_vif = vif;
3416
3417 iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
3418 break;
3419 default:
3420 break;
3421 }
3422
3423 iwl_mvm_update_quotas(mvm, false, disabled_vif);
3424 iwl_mvm_binding_remove_vif(mvm, vif);
3425
3426 out:
3427 mvmvif->phy_ctxt = NULL;
3428 iwl_mvm_power_update_mac(mvm);
3429 }
3430
3431 static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
3432 struct ieee80211_vif *vif,
3433 struct ieee80211_chanctx_conf *ctx)
3434 {
3435 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3436
3437 mutex_lock(&mvm->mutex);
3438 __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false);
3439 mutex_unlock(&mvm->mutex);
3440 }
3441
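/*
 * Channel context switch, swap flavour: tear down the old context, add
 * the new one and re-assign the vif. On failure, try to restore the old
 * context; if that also fails, restart the hardware.
 */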
3442 static int
3443 iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
3444 struct ieee80211_vif_chanctx_switch *vifs)
3445 {
3446 int ret;
3447
3448 mutex_lock(&mvm->mutex);
3449 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3450 __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);
3451
3452 ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx);
3453 if (ret) {
3454 IWL_ERR(mvm, "failed to add new_ctx during channel switch\n");
3455 goto out_reassign;
3456 }
3457
3458 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3459 true);
3460 if (ret) {
3461 IWL_ERR(mvm,
3462 "failed to assign new_ctx during channel switch\n");
3463 goto out_remove;
3464 }
3465
3466 /* we don't support TDLS during DCM - can be caused by channel switch */
3467 if (iwl_mvm_phy_ctx_count(mvm) > 1)
3468 iwl_mvm_teardown_tdls_peers(mvm);
3469
3470 goto out;
3471
3472 out_remove:
3473 __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);
3474
3475 out_reassign:
3476 if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) {
3477 IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
3478 goto out_restart;
3479 }
3480
3481 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3482 true)) {
3483 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3484 goto out_restart;
3485 }
3486
3487 goto out;
3488
3489 out_restart:
3490 /* things keep failing, better restart the hw */
3491 iwl_mvm_nic_restart(mvm, false);
3492
3493 out:
3494 mutex_unlock(&mvm->mutex);
3495
3496 return ret;
3497 }
3498
3499 static int
3500 iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
3501 struct ieee80211_vif_chanctx_switch *vifs)
3502 {
3503 int ret;
3504
3505 mutex_lock(&mvm->mutex);
3506 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3507
3508 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3509 true);
3510 if (ret) {
3511 IWL_ERR(mvm,
3512 "failed to assign new_ctx during channel switch\n");
3513 goto out_reassign;
3514 }
3515
3516 goto out;
3517
3518 out_reassign:
3519 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3520 true)) {
3521 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3522 goto out_restart;
3523 }
3524
3525 goto out;
3526
3527 out_restart:
3528 /* things keep failing, better restart the hw */
3529 iwl_mvm_nic_restart(mvm, false);
3530
3531 out:
3532 mutex_unlock(&mvm->mutex);
3533
3534 return ret;
3535 }
3536
3537 static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
3538 struct ieee80211_vif_chanctx_switch *vifs,
3539 int n_vifs,
3540 enum ieee80211_chanctx_switch_mode mode)
3541 {
3542 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3543 int ret;
3544
3545 /* we only support a single vif right now */
3546 if (n_vifs > 1)
3547 return -EOPNOTSUPP;
3548
3549 switch (mode) {
3550 case CHANCTX_SWMODE_SWAP_CONTEXTS:
3551 ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs);
3552 break;
3553 case CHANCTX_SWMODE_REASSIGN_VIF:
3554 ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs);
3555 break;
3556 default:
3557 ret = -EOPNOTSUPP;
3558 break;
3559 }
3560
3561 return ret;
3562 }
3563
3564 static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
3565 struct ieee80211_sta *sta,
3566 bool set)
3567 {
3568 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3569 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3570
3571 if (!mvm_sta || !mvm_sta->vif) {
3572 IWL_ERR(mvm, "Station is not associated to a vif\n");
3573 return -EINVAL;
3574 }
3575
3576 return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
3577 }
3578
3579 #ifdef CONFIG_NL80211_TESTMODE
3580 static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
3581 [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
3582 [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
3583 [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
3584 };
3585
3586 static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
3587 struct ieee80211_vif *vif,
3588 void *data, int len)
3589 {
3590 struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
3591 int err;
3592 u32 noa_duration;
3593
3594 err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy);
3595 if (err)
3596 return err;
3597
3598 if (!tb[IWL_MVM_TM_ATTR_CMD])
3599 return -EINVAL;
3600
3601 switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
3602 case IWL_MVM_TM_CMD_SET_NOA:
3603 if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
3604 !vif->bss_conf.enable_beacon ||
3605 !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
3606 return -EINVAL;
3607
3608 noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
3609 if (noa_duration >= vif->bss_conf.beacon_int)
3610 return -EINVAL;
3611
3612 mvm->noa_duration = noa_duration;
3613 mvm->noa_vif = vif;
3614
3615 return iwl_mvm_update_quotas(mvm, false, NULL);
3616 case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
3617 /* must be associated client vif - ignore authorized */
3618 if (!vif || vif->type != NL80211_IFTYPE_STATION ||
3619 !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
3620 !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
3621 return -EINVAL;
3622
3623 if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
3624 return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
3625 return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3626 }
3627
3628 return -EOPNOTSUPP;
3629 }
3630
3631 static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
3632 struct ieee80211_vif *vif,
3633 void *data, int len)
3634 {
3635 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3636 int err;
3637
3638 mutex_lock(&mvm->mutex);
3639 err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
3640 mutex_unlock(&mvm->mutex);
3641
3642 return err;
3643 }
3644 #endif
3645
3646 static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
3647 struct ieee80211_vif *vif,
3648 struct ieee80211_channel_switch *chsw)
3649 {
3650 /* By implementing this operation, we prevent mac80211 from
3651 * starting its own channel switch timer, so that we can call
3652 * ieee80211_chswitch_done() ourselves at the right time
3653 * (which is when the absence time event starts).
3654 */
3655
3656 IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
3657 "dummy channel switch op\n");
3658 }
3659
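/*
 * Prepare for a channel switch announced by mac80211: track CSA state
 * for AP interfaces; for station interfaces, schedule a CSA time event
 * shortly before the switch and disable beacon filtering. Power save is
 * disabled until the switch completes.
 */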
3660 static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
3661 struct ieee80211_vif *vif,
3662 struct ieee80211_channel_switch *chsw)
3663 {
3664 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3665 struct ieee80211_vif *csa_vif;
3666 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3667 u32 apply_time;
3668 int ret;
3669
3670 mutex_lock(&mvm->mutex);
3671
3672 mvmvif->csa_failed = false;
3673
3674 IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
3675 chsw->chandef.center_freq1);
3676
3677 iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH);
3678
3679 switch (vif->type) {
3680 case NL80211_IFTYPE_AP:
3681 csa_vif =
3682 rcu_dereference_protected(mvm->csa_vif,
3683 lockdep_is_held(&mvm->mutex));
3684 if (WARN_ONCE(csa_vif && csa_vif->csa_active,
3685 "Another CSA is already in progress")) {
3686 ret = -EBUSY;
3687 goto out_unlock;
3688 }
3689
3690 rcu_assign_pointer(mvm->csa_vif, vif);
3691
3692 if (WARN_ONCE(mvmvif->csa_countdown,
3693 "Previous CSA countdown didn't complete")) {
3694 ret = -EBUSY;
3695 goto out_unlock;
3696 }
3697
3698 break;
3699 case NL80211_IFTYPE_STATION:
3700 if (mvmvif->lqm_active)
3701 iwl_mvm_send_lqm_cmd(vif,
3702 LQM_CMD_OPERATION_STOP_MEASUREMENT,
3703 0, 0);
3704
3705 /* Schedule the time event to a bit before beacon 1,
3706 * to make sure we're in the new channel when the
3707 * GO/AP arrives.
3708 */
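/* beacon_int is in TU (1024 usec), so convert the offset to usec */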
3709 apply_time = chsw->device_timestamp +
3710 ((vif->bss_conf.beacon_int * (chsw->count - 1) -
3711 IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
3712
3713 if (chsw->block_tx)
3714 iwl_mvm_csa_client_absent(mvm, vif);
3715
3716 iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
3717 apply_time);
3718 if (mvmvif->bf_data.bf_enabled) {
3719 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3720 if (ret)
3721 goto out_unlock;
3722 }
3723
3724 break;
3725 default:
3726 break;
3727 }
3728
3729 mvmvif->ps_disabled = true;
3730
3731 ret = iwl_mvm_power_update_ps(mvm);
3732 if (ret)
3733 goto out_unlock;
3734
3735 /* we won't be on this channel any longer */
3736 iwl_mvm_teardown_tdls_peers(mvm);
3737
3738 out_unlock:
3739 mutex_unlock(&mvm->mutex);
3740
3741 return ret;
3742 }
3743
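/*
 * Complete the channel switch: for a client vif, re-enable TX to the AP
 * station, update the MAC context, re-enable beacon filtering and stop
 * the session protection time event, then restore power save.
 */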
3744 static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
3745 struct ieee80211_vif *vif)
3746 {
3747 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3748 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3749 int ret;
3750
3751 mutex_lock(&mvm->mutex);
3752
3753 if (mvmvif->csa_failed) {
3754 mvmvif->csa_failed = false;
3755 ret = -EIO;
3756 goto out_unlock;
3757 }
3758
3759 if (vif->type == NL80211_IFTYPE_STATION) {
3760 struct iwl_mvm_sta *mvmsta;
3761
3762 mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
3763 mvmvif->ap_sta_id);
3764
3765 if (WARN_ON(!mvmsta)) {
3766 ret = -EIO;
3767 goto out_unlock;
3768 }
3769
3770 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
3771
3772 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3773
3774 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
3775 if (ret)
3776 goto out_unlock;
3777
3778 iwl_mvm_stop_session_protection(mvm, vif);
3779 }
3780
3781 mvmvif->ps_disabled = false;
3782
3783 ret = iwl_mvm_power_update_ps(mvm);
3784
3785 out_unlock:
3786 mutex_unlock(&mvm->mutex);
3787
3788 return ret;
3789 }
3790
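/*
 * Flush pending TX frames for a client vif: build the TFD queue mask of
 * the AP station and any TDLS peers, then either flush those queues in
 * the firmware (drop) or wait for them to drain - the wait is done
 * without holding mvm->mutex since it can take a while.
 */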
3791 static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
3792 struct ieee80211_vif *vif, u32 queues, bool drop)
3793 {
3794 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3795 struct iwl_mvm_vif *mvmvif;
3796 struct iwl_mvm_sta *mvmsta;
3797 struct ieee80211_sta *sta;
3798 int i;
3799 u32 msk = 0;
3800
3801 if (!vif || vif->type != NL80211_IFTYPE_STATION)
3802 return;
3803
3804 /* Make sure we're done with the deferred traffic before flushing */
3805 if (iwl_mvm_is_dqa_supported(mvm))
3806 flush_work(&mvm->add_stream_wk);
3807
3808 mutex_lock(&mvm->mutex);
3809 mvmvif = iwl_mvm_vif_from_mac80211(vif);
3810
3811 /* flush the AP-station and all TDLS peers */
3812 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
3813 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
3814 lockdep_is_held(&mvm->mutex));
3815 if (IS_ERR_OR_NULL(sta))
3816 continue;
3817
3818 mvmsta = iwl_mvm_sta_from_mac80211(sta);
3819 if (mvmsta->vif != vif)
3820 continue;
3821
3822 /* make sure only TDLS peers or the AP are flushed */
3823 WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);
3824
3825 msk |= mvmsta->tfd_queue_msk;
3826 }
3827
3828 if (drop) {
3829 if (iwl_mvm_flush_tx_path(mvm, msk, 0))
3830 IWL_ERR(mvm, "flush request failed\n");
3831 mutex_unlock(&mvm->mutex);
3832 } else {
3833 mutex_unlock(&mvm->mutex);
3834
3835 /* this can take a while, and we may need/want other operations
3836 * to succeed while doing this, so do it without the mutex held
3837 */
3838 iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
3839 }
3840 }
3841
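/*
 * Survey handler: only the global entry (idx 0) is supported and it
 * requires the firmware radio statistics capability. The reported
 * on/rx/tx/scan times are the accumulated counters plus the current
 * firmware counters, converted from usec to msec.
 */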
3842 static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
3843 struct survey_info *survey)
3844 {
3845 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3846 int ret;
3847
3848 memset(survey, 0, sizeof(*survey));
3849
3850 /* only support global statistics right now */
3851 if (idx != 0)
3852 return -ENOENT;
3853
3854 if (!fw_has_capa(&mvm->fw->ucode_capa,
3855 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
3856 return -ENOENT;
3857
3858 mutex_lock(&mvm->mutex);
3859
3860 if (mvm->ucode_loaded) {
3861 ret = iwl_mvm_request_statistics(mvm, false);
3862 if (ret)
3863 goto out;
3864 }
3865
3866 survey->filled = SURVEY_INFO_TIME |
3867 SURVEY_INFO_TIME_RX |
3868 SURVEY_INFO_TIME_TX |
3869 SURVEY_INFO_TIME_SCAN;
3870 survey->time = mvm->accu_radio_stats.on_time_rf +
3871 mvm->radio_stats.on_time_rf;
3872 do_div(survey->time, USEC_PER_MSEC);
3873
3874 survey->time_rx = mvm->accu_radio_stats.rx_time +
3875 mvm->radio_stats.rx_time;
3876 do_div(survey->time_rx, USEC_PER_MSEC);
3877
3878 survey->time_tx = mvm->accu_radio_stats.tx_time +
3879 mvm->radio_stats.tx_time;
3880 do_div(survey->time_tx, USEC_PER_MSEC);
3881
3882 survey->time_scan = mvm->accu_radio_stats.on_time_scan +
3883 mvm->radio_stats.on_time_scan;
3884 do_div(survey->time_scan, USEC_PER_MSEC);
3885
3886 ret = 0;
3887 out:
3888 mutex_unlock(&mvm->mutex);
3889 return ret;
3890 }
3891
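/*
 * Fill beacon RX statistics for the AP station of an associated client
 * vif. This is only done when the driver performs beacon filtering,
 * since otherwise mac80211 counts the beacons itself.
 */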
3892 static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
3893 struct ieee80211_vif *vif,
3894 struct ieee80211_sta *sta,
3895 struct station_info *sinfo)
3896 {
3897 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3898 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3899 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3900
3901 if (!fw_has_capa(&mvm->fw->ucode_capa,
3902 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
3903 return;
3904
3905 /* if beacon filtering isn't on, mac80211 does it anyway */
3906 if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
3907 return;
3908
3909 if (!vif->bss_conf.assoc)
3910 return;
3911
3912 mutex_lock(&mvm->mutex);
3913
3914 if (mvmvif->ap_sta_id != mvmsta->sta_id)
3915 goto unlock;
3916
3917 if (iwl_mvm_request_statistics(mvm, false))
3918 goto unlock;
3919
3920 sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
3921 mvmvif->beacon_stats.accu_num_beacons;
3922 sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
3923 if (mvmvif->beacon_stats.avg_signal) {
3924 /* firmware only reports a value after RXing a few beacons */
3925 sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
3926 sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
3927 }
3928 unlock:
3929 mutex_unlock(&mvm->mutex);
3930 }
3931
3932 static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
3933 struct ieee80211_vif *vif,
3934 const struct ieee80211_event *event)
3935 {
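/*
 * CHECK_MLME_TRIGGER decrements the per-event stop counter and only
 * collects firmware debug data once it reaches zero; a counter that is
 * already zero means "collect on every occurrence".
 */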
3936 #define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...) \
3937 do { \
3938 if ((_cnt) && --(_cnt)) \
3939 break; \
3940 iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\
3941 } while (0)
3942
3943 struct iwl_fw_dbg_trigger_tlv *trig;
3944 struct iwl_fw_dbg_trigger_mlme *trig_mlme;
3945
3946 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
3947 return;
3948
3949 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
3950 trig_mlme = (void *)trig->data;
3951 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
3952 return;
3953
3954 if (event->u.mlme.data == ASSOC_EVENT) {
3955 if (event->u.mlme.status == MLME_DENIED)
3956 CHECK_MLME_TRIGGER(mvm, trig, buf,
3957 trig_mlme->stop_assoc_denied,
3958 "DENIED ASSOC: reason %d",
3959 event->u.mlme.reason);
3960 else if (event->u.mlme.status == MLME_TIMEOUT)
3961 CHECK_MLME_TRIGGER(mvm, trig, buf,
3962 trig_mlme->stop_assoc_timeout,
3963 "ASSOC TIMEOUT");
3964 } else if (event->u.mlme.data == AUTH_EVENT) {
3965 if (event->u.mlme.status == MLME_DENIED)
3966 CHECK_MLME_TRIGGER(mvm, trig, buf,
3967 trig_mlme->stop_auth_denied,
3968 "DENIED AUTH: reason %d",
3969 event->u.mlme.reason);
3970 else if (event->u.mlme.status == MLME_TIMEOUT)
3971 CHECK_MLME_TRIGGER(mvm, trig, buf,
3972 trig_mlme->stop_auth_timeout,
3973 "AUTH TIMEOUT");
3974 } else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
3975 CHECK_MLME_TRIGGER(mvm, trig, buf,
3976 trig_mlme->stop_rx_deauth,
3977 "DEAUTH RX %d", event->u.mlme.reason);
3978 } else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
3979 CHECK_MLME_TRIGGER(mvm, trig, buf,
3980 trig_mlme->stop_tx_deauth,
3981 "DEAUTH TX %d", event->u.mlme.reason);
3982 }
3983 #undef CHECK_MLME_TRIGGER
3984 }
3985
3986 static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
3987 struct ieee80211_vif *vif,
3988 const struct ieee80211_event *event)
3989 {
3990 struct iwl_fw_dbg_trigger_tlv *trig;
3991 struct iwl_fw_dbg_trigger_ba *ba_trig;
3992
3993 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
3994 return;
3995
3996 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
3997 ba_trig = (void *)trig->data;
3998 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
3999 return;
4000
4001 if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
4002 return;
4003
4004 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
4005 "BAR received from %pM, tid %d, ssn %d",
4006 event->u.ba.sta->addr, event->u.ba.tid,
4007 event->u.ba.ssn);
4008 }
4009
4010 static void
4011 iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
4012 struct ieee80211_vif *vif,
4013 const struct ieee80211_event *event)
4014 {
4015 struct iwl_fw_dbg_trigger_tlv *trig;
4016 struct iwl_fw_dbg_trigger_ba *ba_trig;
4017
4018 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
4019 return;
4020
4021 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
4022 ba_trig = (void *)trig->data;
4023 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4024 return;
4025
4026 if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
4027 return;
4028
4029 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
4030 "Frame from %pM timed out, tid %d",
4031 event->u.ba.sta->addr, event->u.ba.tid);
4032 }
4033
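/*
 * Dispatch mac80211 events to the matching firmware debug trigger
 * handler: MLME events, received BARs and BA frame timeouts.
 */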
4034 static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
4035 struct ieee80211_vif *vif,
4036 const struct ieee80211_event *event)
4037 {
4038 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4039
4040 switch (event->type) {
4041 case MLME_EVENT:
4042 iwl_mvm_event_mlme_callback(mvm, vif, event);
4043 break;
4044 case BAR_RX_EVENT:
4045 iwl_mvm_event_bar_rx_callback(mvm, vif, event);
4046 break;
4047 case BA_FRAME_TIMEOUT:
4048 iwl_mvm_event_frame_timeout_callback(mvm, vif, event);
4049 break;
4050 default:
4051 break;
4052 }
4053 }
4054
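/*
 * Synchronize the RX queues: send an internal notification to every RX
 * queue and, if notif->sync is set, wait up to one second for
 * queue_sync_counter to drop back to zero.
 */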
4055 void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
4056 struct iwl_mvm_internal_rxq_notif *notif,
4057 u32 size)
4058 {
4059 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq);
4060 u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
4061 int ret;
4062
4063 lockdep_assert_held(&mvm->mutex);
4064
4065 if (!iwl_mvm_has_new_rx_api(mvm))
4066 return;
4067
4068 notif->cookie = mvm->queue_sync_cookie;
4069
4070 if (notif->sync)
4071 atomic_set(&mvm->queue_sync_counter,
4072 mvm->trans->num_rx_queues);
4073
4074 ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
4075 if (ret) {
4076 IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
4077 goto out;
4078 }
4079
4080 if (notif->sync)
4081 ret = wait_event_timeout(notif_waitq,
4082 atomic_read(&mvm->queue_sync_counter) == 0,
4083 HZ);
4084 WARN_ON_ONCE(!ret);
4085
4086 out:
4087 atomic_set(&mvm->queue_sync_counter, 0);
4088 mvm->queue_sync_cookie++;
4089 }
4090
4091 static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
4092 {
4093 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4094 struct iwl_mvm_internal_rxq_notif data = {
4095 .type = IWL_MVM_RXQ_EMPTY,
4096 .sync = 1,
4097 };
4098
4099 mutex_lock(&mvm->mutex);
4100 iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data));
4101 mutex_unlock(&mvm->mutex);
4102 }
4103
4104 const struct ieee80211_ops iwl_mvm_hw_ops = {
4105 .tx = iwl_mvm_mac_tx,
4106 .ampdu_action = iwl_mvm_mac_ampdu_action,
4107 .start = iwl_mvm_mac_start,
4108 .reconfig_complete = iwl_mvm_mac_reconfig_complete,
4109 .stop = iwl_mvm_mac_stop,
4110 .add_interface = iwl_mvm_mac_add_interface,
4111 .remove_interface = iwl_mvm_mac_remove_interface,
4112 .config = iwl_mvm_mac_config,
4113 .prepare_multicast = iwl_mvm_prepare_multicast,
4114 .configure_filter = iwl_mvm_configure_filter,
4115 .config_iface_filter = iwl_mvm_config_iface_filter,
4116 .bss_info_changed = iwl_mvm_bss_info_changed,
4117 .hw_scan = iwl_mvm_mac_hw_scan,
4118 .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
4119 .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
4120 .sta_state = iwl_mvm_mac_sta_state,
4121 .sta_notify = iwl_mvm_mac_sta_notify,
4122 .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
4123 .release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
4124 .set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
4125 .sta_rc_update = iwl_mvm_sta_rc_update,
4126 .conf_tx = iwl_mvm_mac_conf_tx,
4127 .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
4128 .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
4129 .flush = iwl_mvm_mac_flush,
4130 .sched_scan_start = iwl_mvm_mac_sched_scan_start,
4131 .sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
4132 .set_key = iwl_mvm_mac_set_key,
4133 .update_tkip_key = iwl_mvm_mac_update_tkip_key,
4134 .remain_on_channel = iwl_mvm_roc,
4135 .cancel_remain_on_channel = iwl_mvm_cancel_roc,
4136 .add_chanctx = iwl_mvm_add_chanctx,
4137 .remove_chanctx = iwl_mvm_remove_chanctx,
4138 .change_chanctx = iwl_mvm_change_chanctx,
4139 .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
4140 .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
4141 .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,
4142
4143 .start_ap = iwl_mvm_start_ap_ibss,
4144 .stop_ap = iwl_mvm_stop_ap_ibss,
4145 .join_ibss = iwl_mvm_start_ap_ibss,
4146 .leave_ibss = iwl_mvm_stop_ap_ibss,
4147
4148 .set_tim = iwl_mvm_set_tim,
4149
4150 .channel_switch = iwl_mvm_channel_switch,
4151 .pre_channel_switch = iwl_mvm_pre_channel_switch,
4152 .post_channel_switch = iwl_mvm_post_channel_switch,
4153
4154 .tdls_channel_switch = iwl_mvm_tdls_channel_switch,
4155 .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
4156 .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,
4157
4158 .event_callback = iwl_mvm_mac_event_callback,
4159
4160 .sync_rx_queues = iwl_mvm_sync_rx_queues,
4161
4162 CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
4163
4164 #ifdef CONFIG_PM_SLEEP
4165 /* look at d3.c */
4166 .suspend = iwl_mvm_suspend,
4167 .resume = iwl_mvm_resume,
4168 .set_wakeup = iwl_mvm_set_wakeup,
4169 .set_rekey_data = iwl_mvm_set_rekey_data,
4170 #if IS_ENABLED(CONFIG_IPV6)
4171 .ipv6_addr_change = iwl_mvm_ipv6_addr_change,
4172 #endif
4173 .set_default_unicast_key = iwl_mvm_set_default_unicast_key,
4174 #endif
4175 .get_survey = iwl_mvm_mac_get_survey,
4176 .sta_statistics = iwl_mvm_mac_sta_statistics,
4177 };