drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
1 /******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 Intel Deutschland GmbH
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
25 *
26 * The full GNU General Public License is included in this distribution
27 * in the file called COPYING.
28 *
29 * Contact Information:
30 * Intel Linux Wireless <linuxwifi@intel.com>
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32 *
33 * BSD LICENSE
34 *
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
37 * All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 *
43 * * Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * * Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in
47 * the documentation and/or other materials provided with the
48 * distribution.
49 * * Neither the name Intel Corporation nor the names of its
50 * contributors may be used to endorse or promote products derived
51 * from this software without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
54 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
56 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
57 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
58 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
59 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
63 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 *
65 *****************************************************************************/
66 #include <linux/kernel.h>
67 #include <linux/slab.h>
68 #include <linux/skbuff.h>
69 #include <linux/netdevice.h>
70 #include <linux/etherdevice.h>
71 #include <linux/ip.h>
72 #include <linux/if_arp.h>
73 #include <linux/time.h>
74 #include <net/mac80211.h>
75 #include <net/ieee80211_radiotap.h>
76 #include <net/tcp.h>
77
78 #include "iwl-op-mode.h"
79 #include "iwl-io.h"
80 #include "mvm.h"
81 #include "sta.h"
82 #include "time-event.h"
83 #include "iwl-eeprom-parse.h"
84 #include "iwl-phy-db.h"
85 #include "testmode.h"
86 #include "iwl-fw-error-dump.h"
87 #include "iwl-prph.h"
88 #include "iwl-nvm-parse.h"
89 #include "fw-dbg.h"
90
91 static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
92 {
93 .max = 1,
94 .types = BIT(NL80211_IFTYPE_STATION),
95 },
96 {
97 .max = 1,
98 .types = BIT(NL80211_IFTYPE_AP) |
99 BIT(NL80211_IFTYPE_P2P_CLIENT) |
100 BIT(NL80211_IFTYPE_P2P_GO),
101 },
102 {
103 .max = 1,
104 .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
105 },
106 };
107
108 static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
109 {
110 .num_different_channels = 2,
111 .max_interfaces = 3,
112 .limits = iwl_mvm_limits,
113 .n_limits = ARRAY_SIZE(iwl_mvm_limits),
114 },
115 };
116
117 #ifdef CONFIG_PM_SLEEP
118 static const struct nl80211_wowlan_tcp_data_token_feature
119 iwl_mvm_wowlan_tcp_token_feature = {
120 .min_len = 0,
121 .max_len = 255,
122 .bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS,
123 };
124
125 static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
126 .tok = &iwl_mvm_wowlan_tcp_token_feature,
127 .data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN -
128 sizeof(struct ethhdr) -
129 sizeof(struct iphdr) -
130 sizeof(struct tcphdr),
131 .data_interval_max = 65535, /* __le16 in API */
132 .wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN -
133 sizeof(struct ethhdr) -
134 sizeof(struct iphdr) -
135 sizeof(struct tcphdr),
136 .seq = true,
137 };
138 #endif
139
140 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
141 /*
142 * Use the reserved field to indicate magic values.
143 * These values are only used internally by the driver,
144 * and won't make it to the fw (reserved will be 0).
145 * BC_FILTER_MAGIC_IP - configure the val of this attribute to
146 * be the vif's IP address. If the vif doesn't have exactly one
147 * IP address (none, or more than one), this attribute will
148 * be skipped.
149 * BC_FILTER_MAGIC_MAC - set the val of this attribute to
150 * the LSB bytes of the vif's mac address
151 */
152 enum {
153 BC_FILTER_MAGIC_NONE = 0,
154 BC_FILTER_MAGIC_IP,
155 BC_FILTER_MAGIC_MAC,
156 };
157
158 static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
159 {
160 /* arp */
161 .discard = 0,
162 .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
163 .attrs = {
164 {
165 /* frame type - arp, hw type - ethernet */
166 .offset_type =
167 BCAST_FILTER_OFFSET_PAYLOAD_START,
168 .offset = sizeof(rfc1042_header),
169 .val = cpu_to_be32(0x08060001),
170 .mask = cpu_to_be32(0xffffffff),
171 },
172 {
173 /* arp dest ip */
174 .offset_type =
175 BCAST_FILTER_OFFSET_PAYLOAD_START,
176 .offset = sizeof(rfc1042_header) + 2 +
177 sizeof(struct arphdr) +
178 ETH_ALEN + sizeof(__be32) +
179 ETH_ALEN,
180 .mask = cpu_to_be32(0xffffffff),
181 /* mark it as special field */
182 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
183 },
184 },
185 },
186 {
187 /* dhcp offer bcast */
188 .discard = 0,
189 .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
190 .attrs = {
191 {
192 /* udp dest port - 68 (bootp client) */
193 .offset_type = BCAST_FILTER_OFFSET_IP_END,
194 .offset = offsetof(struct udphdr, dest),
195 .val = cpu_to_be32(0x00440000),
196 .mask = cpu_to_be32(0xffff0000),
197 },
198 {
199 /* dhcp - lsb bytes of client hw address */
200 .offset_type = BCAST_FILTER_OFFSET_IP_END,
201 .offset = 38,
202 .mask = cpu_to_be32(0xffffffff),
203 /* mark it as special field */
204 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
205 },
206 },
207 },
208 /* last filter must be empty */
209 {},
210 };
211 #endif
212
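/*
 * Runtime-PM bookkeeping: iwl_mvm_ref()/iwl_mvm_unref() take and release a
 * per-type reference (plus a transport reference) so the device isn't
 * allowed to enter D0i3 while a reference is held. Both are no-ops when
 * D0i3 isn't supported.
 */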
213 void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
214 {
215 if (!iwl_mvm_is_d0i3_supported(mvm))
216 return;
217
218 IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
219 spin_lock_bh(&mvm->refs_lock);
220 mvm->refs[ref_type]++;
221 spin_unlock_bh(&mvm->refs_lock);
222 iwl_trans_ref(mvm->trans);
223 }
224
225 void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
226 {
227 if (!iwl_mvm_is_d0i3_supported(mvm))
228 return;
229
230 IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
231 spin_lock_bh(&mvm->refs_lock);
232 if (WARN_ON(!mvm->refs[ref_type])) {
233 spin_unlock_bh(&mvm->refs_lock);
234 return;
235 }
236 mvm->refs[ref_type]--;
237 spin_unlock_bh(&mvm->refs_lock);
238 iwl_trans_unref(mvm->trans);
239 }
240
241 static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
242 enum iwl_mvm_ref_type except_ref)
243 {
244 int i, j;
245
246 if (!iwl_mvm_is_d0i3_supported(mvm))
247 return;
248
249 spin_lock_bh(&mvm->refs_lock);
250 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
251 if (except_ref == i || !mvm->refs[i])
252 continue;
253
254 IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n",
255 i, mvm->refs[i]);
256 for (j = 0; j < mvm->refs[i]; j++)
257 iwl_trans_unref(mvm->trans);
258 mvm->refs[i] = 0;
259 }
260 spin_unlock_bh(&mvm->refs_lock);
261 }
262
263 bool iwl_mvm_ref_taken(struct iwl_mvm *mvm)
264 {
265 int i;
266 bool taken = false;
267
268 if (!iwl_mvm_is_d0i3_supported(mvm))
269 return true;
270
271 spin_lock_bh(&mvm->refs_lock);
272 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
273 if (mvm->refs[i]) {
274 taken = true;
275 break;
276 }
277 }
278 spin_unlock_bh(&mvm->refs_lock);
279
280 return taken;
281 }
282
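/*
 * Like iwl_mvm_ref(), but also waits (up to one second) for any pending
 * D0i3 exit to complete; drops the reference and returns -EIO on timeout.
 */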
283 int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
284 {
285 iwl_mvm_ref(mvm, ref_type);
286
287 if (!wait_event_timeout(mvm->d0i3_exit_waitq,
288 !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status),
289 HZ)) {
290 WARN_ON_ONCE(1);
291 iwl_mvm_unref(mvm, ref_type);
292 return -EIO;
293 }
294
295 return 0;
296 }
297
298 static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
299 {
300 int i;
301
302 memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
303 for (i = 0; i < NUM_PHY_CTX; i++) {
304 mvm->phy_ctxts[i].id = i;
305 mvm->phy_ctxts[i].ref = 0;
306 }
307 }
308
309 struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
310 const char *alpha2,
311 enum iwl_mcc_source src_id,
312 bool *changed)
313 {
314 struct ieee80211_regdomain *regd = NULL;
315 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
316 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
317 struct iwl_mcc_update_resp *resp;
318
319 IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
320
321 lockdep_assert_held(&mvm->mutex);
322
323 resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
324 if (IS_ERR_OR_NULL(resp)) {
325 IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
326 PTR_ERR_OR_ZERO(resp));
327 goto out;
328 }
329
330 if (changed)
331 *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);
332
333 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
334 __le32_to_cpu(resp->n_channels),
335 resp->channels,
336 __le16_to_cpu(resp->mcc));
337 /* Store the return source id */
338 src_id = resp->source_id;
339 kfree(resp);
340 if (IS_ERR_OR_NULL(regd)) {
341 IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
342 PTR_ERR_OR_ZERO(regd));
343 goto out;
344 }
345
346 IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
347 regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
348 mvm->lar_regdom_set = true;
349 mvm->mcc_src = src_id;
350
351 out:
352 return regd;
353 }
354
355 void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
356 {
357 bool changed;
358 struct ieee80211_regdomain *regd;
359
360 if (!iwl_mvm_is_lar_supported(mvm))
361 return;
362
363 regd = iwl_mvm_get_current_regdomain(mvm, &changed);
364 if (!IS_ERR_OR_NULL(regd)) {
365 /* only update the regulatory core if changed */
366 if (changed)
367 regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
368
369 kfree(regd);
370 }
371 }
372
373 struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
374 bool *changed)
375 {
376 return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
377 iwl_mvm_is_wifi_mcc_supported(mvm) ?
378 MCC_SOURCE_GET_CURRENT :
379 MCC_SOURCE_OLD_FW, changed);
380 }
381
382 int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
383 {
384 enum iwl_mcc_source used_src;
385 struct ieee80211_regdomain *regd;
386 int ret;
387 bool changed;
388 const struct ieee80211_regdomain *r =
389 rtnl_dereference(mvm->hw->wiphy->regd);
390
391 if (!r)
392 return -ENOENT;
393
394 /* save the last source in case we overwrite it below */
395 used_src = mvm->mcc_src;
396 if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
397 /* Notify the firmware we support wifi location updates */
398 regd = iwl_mvm_get_current_regdomain(mvm, NULL);
399 if (!IS_ERR_OR_NULL(regd))
400 kfree(regd);
401 }
402
403 /* Now set our last stored MCC and source */
404 regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
405 &changed);
406 if (IS_ERR_OR_NULL(regd))
407 return -EIO;
408
409 /* update cfg80211 if the regdomain was changed */
410 if (changed)
411 ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
412 else
413 ret = 0;
414
415 kfree(regd);
416 return ret;
417 }
418
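/*
 * Advertise the driver/firmware capabilities (ciphers, interface
 * combinations, scan limits, WoWLAN support, etc.) to mac80211 and
 * register the hw.
 */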
419 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
420 {
421 struct ieee80211_hw *hw = mvm->hw;
422 int num_mac, ret, i;
423 static const u32 mvm_ciphers[] = {
424 WLAN_CIPHER_SUITE_WEP40,
425 WLAN_CIPHER_SUITE_WEP104,
426 WLAN_CIPHER_SUITE_TKIP,
427 WLAN_CIPHER_SUITE_CCMP,
428 };
429
430 /* Tell mac80211 our characteristics */
431 ieee80211_hw_set(hw, SIGNAL_DBM);
432 ieee80211_hw_set(hw, SPECTRUM_MGMT);
433 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
434 ieee80211_hw_set(hw, QUEUE_CONTROL);
435 ieee80211_hw_set(hw, WANT_MONITOR_VIF);
436 ieee80211_hw_set(hw, SUPPORTS_PS);
437 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
438 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
439 ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
440 ieee80211_hw_set(hw, CONNECTION_MONITOR);
441 ieee80211_hw_set(hw, CHANCTX_STA_CSA);
442 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
443 ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
444 ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
445 ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
446 if (iwl_mvm_has_new_rx_api(mvm))
447 ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
448
449 if (mvm->trans->num_rx_queues > 1)
450 ieee80211_hw_set(hw, USES_RSS);
451
452 if (mvm->trans->max_skb_frags)
453 hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
454
455 if (!iwl_mvm_is_dqa_supported(mvm))
456 hw->queues = mvm->first_agg_queue;
457 else
458 hw->queues = IEEE80211_MAX_QUEUES;
459 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
460 hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
461 IEEE80211_RADIOTAP_MCS_HAVE_STBC;
462 hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
463 IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
464 hw->rate_control_algorithm = "iwl-mvm-rs";
465 hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
466 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
467
468 BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 4);
469 memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
470 hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
471 hw->wiphy->cipher_suites = mvm->ciphers;
472
473 if (iwl_mvm_has_new_rx_api(mvm)) {
474 mvm->ciphers[hw->wiphy->n_cipher_suites] =
475 WLAN_CIPHER_SUITE_GCMP;
476 hw->wiphy->n_cipher_suites++;
477 mvm->ciphers[hw->wiphy->n_cipher_suites] =
478 WLAN_CIPHER_SUITE_GCMP_256;
479 hw->wiphy->n_cipher_suites++;
480 }
481
482 /*
483 * Enable 11w if advertised by firmware and software crypto
484 * is not enabled (as the firmware will interpret some mgmt
485 * packets, so enabling it with software crypto isn't safe)
486 */
487 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
488 !iwlwifi_mod_params.sw_crypto) {
489 ieee80211_hw_set(hw, MFP_CAPABLE);
490 mvm->ciphers[hw->wiphy->n_cipher_suites] =
491 WLAN_CIPHER_SUITE_AES_CMAC;
492 hw->wiphy->n_cipher_suites++;
493 }
494
495 /* currently FW API supports only one optional cipher scheme */
496 if (mvm->fw->cs[0].cipher) {
497 const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0];
498 struct ieee80211_cipher_scheme *cs = &mvm->cs[0];
499
500 mvm->hw->n_cipher_schemes = 1;
501
502 cs->cipher = le32_to_cpu(fwcs->cipher);
503 cs->iftype = BIT(NL80211_IFTYPE_STATION);
504 cs->hdr_len = fwcs->hdr_len;
505 cs->pn_len = fwcs->pn_len;
506 cs->pn_off = fwcs->pn_off;
507 cs->key_idx_off = fwcs->key_idx_off;
508 cs->key_idx_mask = fwcs->key_idx_mask;
509 cs->key_idx_shift = fwcs->key_idx_shift;
510 cs->mic_len = fwcs->mic_len;
511
512 mvm->hw->cipher_schemes = mvm->cs;
513 mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher;
514 hw->wiphy->n_cipher_suites++;
515 }
516
517 ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
518 hw->wiphy->features |=
519 NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
520 NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
521 NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
522
523 hw->sta_data_size = sizeof(struct iwl_mvm_sta);
524 hw->vif_data_size = sizeof(struct iwl_mvm_vif);
525 hw->chanctx_data_size = sizeof(u16);
526
527 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
528 BIT(NL80211_IFTYPE_P2P_CLIENT) |
529 BIT(NL80211_IFTYPE_AP) |
530 BIT(NL80211_IFTYPE_P2P_GO) |
531 BIT(NL80211_IFTYPE_P2P_DEVICE) |
532 BIT(NL80211_IFTYPE_ADHOC);
533
534 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
535 hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
536 if (iwl_mvm_is_lar_supported(mvm))
537 hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
538 else
539 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
540 REGULATORY_DISABLE_BEACON_HINTS;
541
542 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
543 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
544
545 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
546
547 hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
548 hw->wiphy->n_iface_combinations =
549 ARRAY_SIZE(iwl_mvm_iface_combinations);
550
551 hw->wiphy->max_remain_on_channel_duration = 10000;
552 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
553 /* we can compensate an offset of up to 3 channels = 15 MHz */
554 hw->wiphy->max_adj_channel_rssi_comp = 3 * 5;
555
556 /* Extract MAC address */
557 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
558 hw->wiphy->addresses = mvm->addresses;
559 hw->wiphy->n_addresses = 1;
560
561 /* Extract additional MAC addresses if available */
562 num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
563 min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;
564
565 for (i = 1; i < num_mac; i++) {
566 memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
567 ETH_ALEN);
568 mvm->addresses[i].addr[5]++;
569 hw->wiphy->n_addresses++;
570 }
571
572 iwl_mvm_reset_phy_ctxts(mvm);
573
574 hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
575
576 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
577
578 BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
579 BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
580 IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
581
582 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
583 mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
584 else
585 mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
586
587 if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
588 hw->wiphy->bands[NL80211_BAND_2GHZ] =
589 &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
590 if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) {
591 hw->wiphy->bands[NL80211_BAND_5GHZ] =
592 &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
593
594 if (fw_has_capa(&mvm->fw->ucode_capa,
595 IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
596 fw_has_api(&mvm->fw->ucode_capa,
597 IWL_UCODE_TLV_API_LQ_SS_PARAMS))
598 hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |=
599 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
600 }
601
602 hw->wiphy->hw_version = mvm->trans->hw_id;
603
604 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
605 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
606 else
607 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
608
609 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
610 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
611 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
612 /* we create the 802.11 header and zero length SSID IE. */
613 hw->wiphy->max_sched_scan_ie_len =
614 SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
615 hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
616 hw->wiphy->max_sched_scan_plan_interval = U16_MAX;
617
618 /*
619 * the firmware uses a u8 for the number of iterations, but 0xff is reserved
620 * to mean an infinite loop, so the maximum number of iterations is actually 254.
621 */
622 hw->wiphy->max_sched_scan_plan_iterations = 254;
623
624 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
625 NL80211_FEATURE_LOW_PRIORITY_SCAN |
626 NL80211_FEATURE_P2P_GO_OPPPS |
627 NL80211_FEATURE_DYNAMIC_SMPS |
628 NL80211_FEATURE_STATIC_SMPS |
629 NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
630
631 if (fw_has_capa(&mvm->fw->ucode_capa,
632 IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
633 hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
634 if (fw_has_capa(&mvm->fw->ucode_capa,
635 IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
636 hw->wiphy->features |= NL80211_FEATURE_QUIET;
637
638 if (fw_has_capa(&mvm->fw->ucode_capa,
639 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
640 hw->wiphy->features |=
641 NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
642
643 if (fw_has_capa(&mvm->fw->ucode_capa,
644 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
645 hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
646
647 mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
648
649 #ifdef CONFIG_PM_SLEEP
650 if (iwl_mvm_is_d0i3_supported(mvm) &&
651 device_can_wakeup(mvm->trans->dev)) {
652 mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
653 hw->wiphy->wowlan = &mvm->wowlan;
654 }
655
656 if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
657 mvm->trans->ops->d3_suspend &&
658 mvm->trans->ops->d3_resume &&
659 device_can_wakeup(mvm->trans->dev)) {
660 mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
661 WIPHY_WOWLAN_DISCONNECT |
662 WIPHY_WOWLAN_EAP_IDENTITY_REQ |
663 WIPHY_WOWLAN_RFKILL_RELEASE |
664 WIPHY_WOWLAN_NET_DETECT;
665 if (!iwlwifi_mod_params.sw_crypto)
666 mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
667 WIPHY_WOWLAN_GTK_REKEY_FAILURE |
668 WIPHY_WOWLAN_4WAY_HANDSHAKE;
669
670 mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
671 mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
672 mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
673 mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
674 mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
675 hw->wiphy->wowlan = &mvm->wowlan;
676 }
677 #endif
678
679 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
680 /* assign default bcast filtering configuration */
681 mvm->bcast_filters = iwl_mvm_default_bcast_filters;
682 #endif
683
684 ret = iwl_mvm_leds_init(mvm);
685 if (ret)
686 return ret;
687
688 if (fw_has_capa(&mvm->fw->ucode_capa,
689 IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
690 IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
691 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
692 ieee80211_hw_set(hw, TDLS_WIDER_BW);
693 }
694
695 if (fw_has_capa(&mvm->fw->ucode_capa,
696 IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
697 IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
698 hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
699 }
700
701 hw->netdev_features |= mvm->cfg->features;
702 if (!iwl_mvm_is_csum_supported(mvm)) {
703 hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS |
704 NETIF_F_RXCSUM);
705 /* We may support SW TX CSUM */
706 if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
707 hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS;
708 }
709
710 ret = ieee80211_register_hw(mvm->hw);
711 if (ret)
712 iwl_mvm_leds_exit(mvm);
713
714 return ret;
715 }
716
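/*
 * While the firmware is still in D0i3, queue the frame on mvm->d0i3_tx and
 * stop the mac80211 queues; the ref/unref pair below triggers a wakeup, and
 * the deferred frames are flushed out by the skb dequeue loop on D0i3 exit.
 * Returns true if the frame was deferred.
 */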
717 static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
718 struct ieee80211_sta *sta,
719 struct sk_buff *skb)
720 {
721 struct iwl_mvm_sta *mvmsta;
722 bool defer = false;
723
724 /*
725 * double check the IN_D0I3 flag both before and after
726 * taking the spinlock, in order to prevent taking
727 * the spinlock when not needed.
728 */
729 if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
730 return false;
731
732 spin_lock(&mvm->d0i3_tx_lock);
733 /*
734 * testing the flag again ensures the skb dequeue
735 * loop (on d0i3 exit) hasn't run yet.
736 */
737 if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
738 goto out;
739
740 mvmsta = iwl_mvm_sta_from_mac80211(sta);
741 if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
742 mvmsta->sta_id != mvm->d0i3_ap_sta_id)
743 goto out;
744
745 __skb_queue_tail(&mvm->d0i3_tx, skb);
746 ieee80211_stop_queues(mvm->hw);
747
748 /* trigger wakeup */
749 iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
750 iwl_mvm_unref(mvm, IWL_MVM_REF_TX);
751
752 defer = true;
753 out:
754 spin_unlock(&mvm->d0i3_tx_lock);
755 return defer;
756 }
757
758 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
759 struct ieee80211_tx_control *control,
760 struct sk_buff *skb)
761 {
762 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
763 struct ieee80211_sta *sta = control->sta;
764 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
765 struct ieee80211_hdr *hdr = (void *)skb->data;
766
767 if (iwl_mvm_is_radio_killed(mvm)) {
768 IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
769 goto drop;
770 }
771
772 if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
773 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
774 !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
775 goto drop;
776
777 /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
778 if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
779 ieee80211_is_mgmt(hdr->frame_control) &&
780 !ieee80211_is_deauth(hdr->frame_control) &&
781 !ieee80211_is_disassoc(hdr->frame_control) &&
782 !ieee80211_is_action(hdr->frame_control)))
783 sta = NULL;
784
785 if (sta) {
786 if (iwl_mvm_defer_tx(mvm, sta, skb))
787 return;
788 if (iwl_mvm_tx_skb(mvm, skb, sta))
789 goto drop;
790 return;
791 }
792
793 if (iwl_mvm_tx_skb_non_sta(mvm, skb))
794 goto drop;
795 return;
796 drop:
797 ieee80211_free_txskb(hw, skb);
798 }
799
800 static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
801 {
802 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
803 return false;
804 return true;
805 }
806
807 static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
808 {
809 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
810 return false;
811 if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
812 return true;
813
814 /* enabled by default */
815 return true;
816 }
817
818 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
819 do { \
820 if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \
821 break; \
822 iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \
823 } while (0)
824
825 static void
826 iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
827 struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
828 enum ieee80211_ampdu_mlme_action action)
829 {
830 struct iwl_fw_dbg_trigger_tlv *trig;
831 struct iwl_fw_dbg_trigger_ba *ba_trig;
832
833 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
834 return;
835
836 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
837 ba_trig = (void *)trig->data;
838
839 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
840 return;
841
842 switch (action) {
843 case IEEE80211_AMPDU_TX_OPERATIONAL: {
844 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
845 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
846
847 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
848 "TX AGG START: MAC %pM tid %d ssn %d\n",
849 sta->addr, tid, tid_data->ssn);
850 break;
851 }
852 case IEEE80211_AMPDU_TX_STOP_CONT:
853 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
854 "TX AGG STOP: MAC %pM tid %d\n",
855 sta->addr, tid);
856 break;
857 case IEEE80211_AMPDU_RX_START:
858 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
859 "RX AGG START: MAC %pM tid %d ssn %d\n",
860 sta->addr, tid, rx_ba_ssn);
861 break;
862 case IEEE80211_AMPDU_RX_STOP:
863 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
864 "RX AGG STOP: MAC %pM tid %d\n",
865 sta->addr, tid);
866 break;
867 default:
868 break;
869 }
870 }
871
872 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
873 struct ieee80211_vif *vif,
874 struct ieee80211_ampdu_params *params)
875 {
876 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
877 int ret;
878 bool tx_agg_ref = false;
879 struct ieee80211_sta *sta = params->sta;
880 enum ieee80211_ampdu_mlme_action action = params->action;
881 u16 tid = params->tid;
882 u16 *ssn = &params->ssn;
883 u8 buf_size = params->buf_size;
884 bool amsdu = params->amsdu;
885 u16 timeout = params->timeout;
886
887 IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
888 sta->addr, tid, action);
889
890 if (!(mvm->nvm_data->sku_cap_11n_enable))
891 return -EACCES;
892
893 /* return from D0i3 before starting a new Tx aggregation */
894 switch (action) {
895 case IEEE80211_AMPDU_TX_START:
896 case IEEE80211_AMPDU_TX_STOP_CONT:
897 case IEEE80211_AMPDU_TX_STOP_FLUSH:
898 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
899 case IEEE80211_AMPDU_TX_OPERATIONAL:
900 /*
901 * for tx start, wait synchronously until D0i3 exit to
902 * get the correct sequence number for the tid.
903 * additionally, some other ampdu actions use direct
904 * target access, which is not handled automatically
905 * by the trans layer (unlike commands), so wait for
906 * d0i3 exit in these cases as well.
907 */
908 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG);
909 if (ret)
910 return ret;
911
912 tx_agg_ref = true;
913 break;
914 default:
915 break;
916 }
917
918 mutex_lock(&mvm->mutex);
919
920 switch (action) {
921 case IEEE80211_AMPDU_RX_START:
922 if (!iwl_enable_rx_ampdu(mvm->cfg)) {
923 ret = -EINVAL;
924 break;
925 }
926 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size,
927 timeout);
928 break;
929 case IEEE80211_AMPDU_RX_STOP:
930 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size,
931 timeout);
932 break;
933 case IEEE80211_AMPDU_TX_START:
934 if (!iwl_enable_tx_ampdu(mvm->cfg)) {
935 ret = -EINVAL;
936 break;
937 }
938 ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
939 break;
940 case IEEE80211_AMPDU_TX_STOP_CONT:
941 ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
942 break;
943 case IEEE80211_AMPDU_TX_STOP_FLUSH:
944 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
945 ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
946 break;
947 case IEEE80211_AMPDU_TX_OPERATIONAL:
948 ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid,
949 buf_size, amsdu);
950 break;
951 default:
952 WARN_ON_ONCE(1);
953 ret = -EINVAL;
954 break;
955 }
956
957 if (!ret) {
958 u16 rx_ba_ssn = 0;
959
960 if (action == IEEE80211_AMPDU_RX_START)
961 rx_ba_ssn = *ssn;
962
963 iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
964 rx_ba_ssn, action);
965 }
966 mutex_unlock(&mvm->mutex);
967
968 /*
969 * If the tid is marked as started, we won't use it for offloaded
970 * traffic on the next D0i3 entry. It's safe to unref.
971 */
972 if (tx_agg_ref)
973 iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
974
975 return ret;
976 }
977
978 static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
979 struct ieee80211_vif *vif)
980 {
981 struct iwl_mvm *mvm = data;
982 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
983
984 mvmvif->uploaded = false;
985 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
986
987 spin_lock_bh(&mvm->time_event_lock);
988 iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
989 spin_unlock_bh(&mvm->time_event_lock);
990
991 mvmvif->phy_ctxt = NULL;
992 memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
993 }
994
995 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
996 {
997 /* clear the D3 reconfig flag; we only need it to avoid dumping a
998 * firmware coredump on reconfiguration, and we shouldn't do that
999 * on the D3->D0 transition
1000 */
1001 if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
1002 mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert;
1003 iwl_mvm_fw_error_dump(mvm);
1004 }
1005
1006 /* cleanup all stale references (scan, roc), but keep the
1007 * ucode_down ref until reconfig is complete
1008 */
1009 iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
1010
1011 iwl_mvm_stop_device(mvm);
1012
1013 mvm->scan_status = 0;
1014 mvm->ps_disabled = false;
1015 mvm->calibrating = false;
1016
1017 /* just in case one was running */
1018 iwl_mvm_cleanup_roc_te(mvm);
1019 ieee80211_remain_on_channel_expired(mvm->hw);
1020
1021 /*
1022 * cleanup all interfaces, even inactive ones, as some might have
1023 * gone down during the HW restart
1024 */
1025 ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
1026
1027 mvm->p2p_device_vif = NULL;
1028 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1029
1030 iwl_mvm_reset_phy_ctxts(mvm);
1031 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
1032 memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
1033 memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
1034 memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
1035 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
1036 memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
1037
1038 ieee80211_wake_queues(mvm->hw);
1039
1040 /* clear any stale d0i3 state */
1041 clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1042
1043 mvm->vif_count = 0;
1044 mvm->rx_ba_sessions = 0;
1045 mvm->fw_dbg_conf = FW_DBG_INVALID;
1046
1047 /* keep statistics ticking */
1048 iwl_mvm_accu_radio_stats(mvm);
1049 }
1050
1051 int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
1052 {
1053 int ret;
1054
1055 lockdep_assert_held(&mvm->mutex);
1056
1057 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1058 /* Clean up some internal and mac80211 state on restart */
1059 iwl_mvm_restart_cleanup(mvm);
1060 } else {
1061 /* Hold the reference to prevent runtime suspend while
1062 * the start procedure runs. It's a bit confusing
1063 * that the UCODE_DOWN reference is taken, but it just
1064 * means "UCODE is not UP yet". ( TODO: rename this
1065 * reference).
1066 */
1067 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1068 }
1069 ret = iwl_mvm_up(mvm);
1070
1071 if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1072 /* Something went wrong - we need to finish some cleanup
1073 * that normally iwl_mvm_mac_restart_complete() below
1074 * would do.
1075 */
1076 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1077 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1078 }
1079
1080 return ret;
1081 }
1082
1083 static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
1084 {
1085 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1086 int ret;
1087
1088 /* Some hw restart cleanups must not hold the mutex */
1089 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1090 /*
1091 * Make sure we are out of d0i3. This is needed
1092 * to make sure the reference accounting is correct
1093 * (and there is no stale d0i3_exit_work).
1094 */
1095 wait_event_timeout(mvm->d0i3_exit_waitq,
1096 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1097 &mvm->status),
1098 HZ);
1099 }
1100
1101 mutex_lock(&mvm->mutex);
1102 ret = __iwl_mvm_mac_start(mvm);
1103 mutex_unlock(&mvm->mutex);
1104
1105 return ret;
1106 }
1107
1108 static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
1109 {
1110 int ret;
1111
1112 mutex_lock(&mvm->mutex);
1113
1114 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1115 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1116 ret = iwl_mvm_update_quotas(mvm, true, NULL);
1117 if (ret)
1118 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
1119 ret);
1120
1121 /* allow transport/FW low power modes */
1122 iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
1123
1124 /*
1125 * If we have TDLS peers, remove them. We don't know the last seqno/PN
1126 * of packets the FW sent out, so we must reconnect.
1127 */
1128 iwl_mvm_teardown_tdls_peers(mvm);
1129
1130 mutex_unlock(&mvm->mutex);
1131 }
1132
1133 static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
1134 {
1135 if (iwl_mvm_is_d0i3_supported(mvm) &&
1136 iwl_mvm_enter_d0i3_on_suspend(mvm))
1137 WARN_ONCE(!wait_event_timeout(mvm->d0i3_exit_waitq,
1138 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1139 &mvm->status),
1140 HZ),
1141 "D0i3 exit on resume timed out\n");
1142 }
1143
1144 static void
1145 iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
1146 enum ieee80211_reconfig_type reconfig_type)
1147 {
1148 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1149
1150 switch (reconfig_type) {
1151 case IEEE80211_RECONFIG_TYPE_RESTART:
1152 iwl_mvm_restart_complete(mvm);
1153 break;
1154 case IEEE80211_RECONFIG_TYPE_SUSPEND:
1155 iwl_mvm_resume_complete(mvm);
1156 break;
1157 }
1158 }
1159
1160 void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
1161 {
1162 lockdep_assert_held(&mvm->mutex);
1163
1164 /* firmware counters are obviously reset now, but we shouldn't
1165 * track them only partially, so also clear the fw_reset_accu counters.
1166 */
1167 memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));
1168
1169 /* async_handlers_wk is now blocked */
1170
1171 /*
1172 * The work item could be running or queued if the
1173 * ROC time event stops just as we get here.
1174 */
1175 flush_work(&mvm->roc_done_wk);
1176
1177 iwl_mvm_stop_device(mvm);
1178
1179 iwl_mvm_async_handlers_purge(mvm);
1180 /* async_handlers_list is empty and will stay empty: HW is stopped */
1181
1182 /* the fw is stopped, the aux sta is dead: clean up driver state */
1183 iwl_mvm_del_aux_sta(mvm);
1184
1185 iwl_free_fw_paging(mvm);
1186
1187 /*
1188 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
1189 * won't be called in this case).
1190 * But make sure to clean up interfaces that went down before or while
1191 * the HW restart was requested.
1192 */
1193 if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1194 ieee80211_iterate_interfaces(mvm->hw, 0,
1195 iwl_mvm_cleanup_iterator, mvm);
1196
1197 /* We shouldn't have any UIDs still set. Loop over all the UIDs to
1198 * make sure there's nothing left there and warn if any is found.
1199 */
1200 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1201 int i;
1202
1203 for (i = 0; i < mvm->max_scans; i++) {
1204 if (WARN_ONCE(mvm->scan_uid_status[i],
1205 "UMAC scan UID %d status was not cleaned\n",
1206 i))
1207 mvm->scan_uid_status[i] = 0;
1208 }
1209 }
1210 }
1211
1212 static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
1213 {
1214 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1215
1216 flush_work(&mvm->d0i3_exit_work);
1217 flush_work(&mvm->async_handlers_wk);
1218 flush_work(&mvm->add_stream_wk);
1219 cancel_delayed_work_sync(&mvm->fw_dump_wk);
1220 cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
1221 cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
1222 iwl_mvm_free_fw_dump_desc(mvm);
1223
1224 mutex_lock(&mvm->mutex);
1225 __iwl_mvm_mac_stop(mvm);
1226 mutex_unlock(&mvm->mutex);
1227
1228 /*
1229 * The worker might have been waiting for the mutex, let it run and
1230 * discover that its list is now empty.
1231 */
1232 cancel_work_sync(&mvm->async_handlers_wk);
1233 }
1234
1235 static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
1236 {
1237 u16 i;
1238
1239 lockdep_assert_held(&mvm->mutex);
1240
1241 for (i = 0; i < NUM_PHY_CTX; i++)
1242 if (!mvm->phy_ctxts[i].ref)
1243 return &mvm->phy_ctxts[i];
1244
1245 IWL_ERR(mvm, "No available PHY context\n");
1246 return NULL;
1247 }
1248
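/*
 * Send REDUCE_TX_POWER_CMD for this MAC. tx_power is given in dBm; the
 * factor of 8 suggests the firmware's pwr_restriction field is in 1/8 dBm
 * units. The command length is trimmed to match what the firmware API
 * version actually supports.
 */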
1249 static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1250 s16 tx_power)
1251 {
1252 struct iwl_dev_tx_power_cmd cmd = {
1253 .v3.v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
1254 .v3.v2.mac_context_id =
1255 cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
1256 .v3.v2.pwr_restriction = cpu_to_le16(8 * tx_power),
1257 };
1258 int len = sizeof(cmd);
1259
1260 if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
1261 cmd.v3.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
1262
1263 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
1264 len = sizeof(cmd.v3);
1265 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_CHAIN))
1266 len = sizeof(cmd.v3.v2);
1267
1268 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
1269 }
1270
1271 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
1272 struct ieee80211_vif *vif)
1273 {
1274 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1275 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1276 int ret;
1277
1278 mvmvif->mvm = mvm;
1279
1280 /*
1281 * make sure D0i3 exit is completed; otherwise a target access
1282 * during tx queue configuration could happen while we are still
1283 * in D0i3 state.
1284 */
1285 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF);
1286 if (ret)
1287 return ret;
1288
1289 /*
1290 * Not much to do here. The stack will not allow interface
1291 * types or combinations that we didn't advertise, so we
1292 * don't really have to check the types.
1293 */
1294
1295 mutex_lock(&mvm->mutex);
1296
1297 /* make sure that beacon statistics don't go backwards with FW reset */
1298 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1299 mvmvif->beacon_stats.accu_num_beacons +=
1300 mvmvif->beacon_stats.num_beacons;
1301
1302 /* Allocate resources for the MAC context, and add it to the fw */
1303 ret = iwl_mvm_mac_ctxt_init(mvm, vif);
1304 if (ret)
1305 goto out_unlock;
1306
1307 /* Counting number of interfaces is needed for legacy PM */
1308 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1309 mvm->vif_count++;
1310
1311 /*
1312 * The AP binding flow can be done only after the beacon
1313 * template is configured (which happens only in the mac80211
1314 * start_ap() flow), and adding the broadcast station can happen
1315 * only after the binding.
1316 * In addition, since modifying the MAC before adding a bcast
1317 * station is not allowed by the FW, delay the adding of MAC context to
1318 * the point where we can also add the bcast station.
1319 * In short: there's not much we can do at this point, other than
1320 * allocating resources :)
1321 */
1322 if (vif->type == NL80211_IFTYPE_AP ||
1323 vif->type == NL80211_IFTYPE_ADHOC) {
1324 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
1325 if (ret) {
1326 IWL_ERR(mvm, "Failed to allocate bcast sta\n");
1327 goto out_release;
1328 }
1329
1330 iwl_mvm_vif_dbgfs_register(mvm, vif);
1331 goto out_unlock;
1332 }
1333
1334 mvmvif->features |= hw->netdev_features;
1335
1336 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
1337 if (ret)
1338 goto out_release;
1339
1340 ret = iwl_mvm_power_update_mac(mvm);
1341 if (ret)
1342 goto out_remove_mac;
1343
1344 /* beacon filtering */
1345 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
1346 if (ret)
1347 goto out_remove_mac;
1348
1349 if (!mvm->bf_allowed_vif &&
1350 vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
1351 mvm->bf_allowed_vif = mvmvif;
1352 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
1353 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
1354 }
1355
1356 /*
1357 * P2P_DEVICE interface does not have a channel context assigned to it,
1358 * so a dedicated PHY context is allocated to it and the corresponding
1359 * MAC context is bound to it at this stage.
1360 */
1361 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1362
1363 mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
1364 if (!mvmvif->phy_ctxt) {
1365 ret = -ENOSPC;
1366 goto out_free_bf;
1367 }
1368
1369 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
1370 ret = iwl_mvm_binding_add_vif(mvm, vif);
1371 if (ret)
1372 goto out_unref_phy;
1373
1374 ret = iwl_mvm_add_bcast_sta(mvm, vif);
1375 if (ret)
1376 goto out_unbind;
1377
1378 /* Save a pointer to p2p device vif, so it can later be used to
1379 * update the p2p device MAC when a GO is started/stopped */
1380 mvm->p2p_device_vif = vif;
1381 }
1382
1383 iwl_mvm_vif_dbgfs_register(mvm, vif);
1384 goto out_unlock;
1385
1386 out_unbind:
1387 iwl_mvm_binding_remove_vif(mvm, vif);
1388 out_unref_phy:
1389 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1390 out_free_bf:
1391 if (mvm->bf_allowed_vif == mvmvif) {
1392 mvm->bf_allowed_vif = NULL;
1393 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1394 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1395 }
1396 out_remove_mac:
1397 mvmvif->phy_ctxt = NULL;
1398 iwl_mvm_mac_ctxt_remove(mvm, vif);
1399 out_release:
1400 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1401 mvm->vif_count--;
1402
1403 iwl_mvm_mac_ctxt_release(mvm, vif);
1404 out_unlock:
1405 mutex_unlock(&mvm->mutex);
1406
1407 iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF);
1408
1409 return ret;
1410 }
1411
1412 static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
1413 struct ieee80211_vif *vif)
1414 {
1415 u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);
1416
1417 if (tfd_msk) {
1418 /*
1419 * mac80211 first removes all the stations of the vif and
1420 * then removes the vif. When it removes a station it also
1421 * flushes the AMPDU session. So by now, all the AMPDU sessions
1422 * of all the stations of this vif are closed, and the queues
1423 * of these AMPDU sessions are properly closed.
1424 * We still need to take care of the shared queues of the vif.
1425 * Flush them here.
1426 */
1427 mutex_lock(&mvm->mutex);
1428 iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
1429 mutex_unlock(&mvm->mutex);
1430
1431 /*
1432 * There are transports that buffer a few frames in the host.
1433 * For these, the flush above isn't enough since while we were
1434 * flushing, the transport might have sent more frames to the
1435 * device. To solve this, wait here until the transport is
1436 * empty. Technically, this could have replaced the flush
1437 * above, but flush is much faster than draining. So flush
1438 * first, and drain to make sure we have no frames in the
1439 * transport anymore.
1440 * If a station still had frames on the shared queues, it is
1441 * already marked as draining, so to complete the draining, we
1442 * just need to wait until the transport is empty.
1443 */
1444 iwl_trans_wait_tx_queue_empty(mvm->trans, tfd_msk);
1445 }
1446
1447 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1448 /*
1449 * Flush the ROC worker which will flush the OFFCHANNEL queue.
1450 * We assume here that all the packets sent to the OFFCHANNEL
1451 * queue are sent in ROC session.
1452 */
1453 flush_work(&mvm->roc_done_wk);
1454 } else {
1455 /*
1456 * By now, all the AC queues are empty. The AGG queues are
1457 * empty too. We already got all the Tx responses for all the
1458 * packets in the queues. The drain work can have been
1459 * triggered. Flush it.
1460 */
1461 flush_work(&mvm->sta_drained_wk);
1462 }
1463 }
1464
1465 static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
1466 struct ieee80211_vif *vif)
1467 {
1468 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1469 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1470
1471 iwl_mvm_prepare_mac_removal(mvm, vif);
1472
1473 mutex_lock(&mvm->mutex);
1474
1475 if (mvm->bf_allowed_vif == mvmvif) {
1476 mvm->bf_allowed_vif = NULL;
1477 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1478 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1479 }
1480
1481 iwl_mvm_vif_dbgfs_clean(mvm, vif);
1482
1483 /*
1484 * For AP/GO interfaces, the tear down of the resources allocated to the
1485 * interface is handled as part of the stop_ap flow.
1486 */
1487 if (vif->type == NL80211_IFTYPE_AP ||
1488 vif->type == NL80211_IFTYPE_ADHOC) {
1489 #ifdef CONFIG_NL80211_TESTMODE
1490 if (vif == mvm->noa_vif) {
1491 mvm->noa_vif = NULL;
1492 mvm->noa_duration = 0;
1493 }
1494 #endif
1495 iwl_mvm_dealloc_bcast_sta(mvm, vif);
1496 goto out_release;
1497 }
1498
1499 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1500 mvm->p2p_device_vif = NULL;
1501 iwl_mvm_rm_bcast_sta(mvm, vif);
1502 iwl_mvm_binding_remove_vif(mvm, vif);
1503 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1504 mvmvif->phy_ctxt = NULL;
1505 }
1506
1507 if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
1508 mvm->vif_count--;
1509
1510 iwl_mvm_power_update_mac(mvm);
1511 iwl_mvm_mac_ctxt_remove(mvm, vif);
1512
1513 out_release:
1514 iwl_mvm_mac_ctxt_release(mvm, vif);
1515 mutex_unlock(&mvm->mutex);
1516 }
1517
1518 static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
1519 {
1520 return 0;
1521 }
1522
1523 struct iwl_mvm_mc_iter_data {
1524 struct iwl_mvm *mvm;
1525 int port_id;
1526 };
1527
1528 static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
1529 struct ieee80211_vif *vif)
1530 {
1531 struct iwl_mvm_mc_iter_data *data = _data;
1532 struct iwl_mvm *mvm = data->mvm;
1533 struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
1534 int ret, len;
1535
1536 /* if we don't have free ports, mcast frames will be dropped */
1537 if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
1538 return;
1539
1540 if (vif->type != NL80211_IFTYPE_STATION ||
1541 !vif->bss_conf.assoc)
1542 return;
1543
1544 cmd->port_id = data->port_id++;
1545 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
1546 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
1547
1548 ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
1549 if (ret)
1550 IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
1551 }
1552
1553 static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
1554 {
1555 struct iwl_mvm_mc_iter_data iter_data = {
1556 .mvm = mvm,
1557 };
1558
1559 lockdep_assert_held(&mvm->mutex);
1560
1561 if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
1562 return;
1563
1564 ieee80211_iterate_active_interfaces_atomic(
1565 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1566 iwl_mvm_mc_iface_iterator, &iter_data);
1567 }
1568
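/*
 * Build a multicast filter command from mc_list and return it as the u64
 * cookie that mac80211 later hands back to iwl_mvm_configure_filter().
 * Falls back to pass-all when the list is too long (or when pass-all is
 * forced by the driver configuration).
 */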
1569 static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
1570 struct netdev_hw_addr_list *mc_list)
1571 {
1572 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1573 struct iwl_mcast_filter_cmd *cmd;
1574 struct netdev_hw_addr *addr;
1575 int addr_count;
1576 bool pass_all;
1577 int len;
1578
1579 addr_count = netdev_hw_addr_list_count(mc_list);
1580 pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES ||
1581 IWL_MVM_FW_MCAST_FILTER_PASS_ALL;
1582 if (pass_all)
1583 addr_count = 0;
1584
1585 len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
1586 cmd = kzalloc(len, GFP_ATOMIC);
1587 if (!cmd)
1588 return 0;
1589
1590 if (pass_all) {
1591 cmd->pass_all = 1;
1592 return (u64)(unsigned long)cmd;
1593 }
1594
1595 netdev_hw_addr_list_for_each(addr, mc_list) {
1596 IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
1597 cmd->count, addr->addr);
1598 memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
1599 addr->addr, ETH_ALEN);
1600 cmd->count++;
1601 }
1602
1603 return (u64)(unsigned long)cmd;
1604 }
1605
1606 static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
1607 unsigned int changed_flags,
1608 unsigned int *total_flags,
1609 u64 multicast)
1610 {
1611 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1612 struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
1613
1614 mutex_lock(&mvm->mutex);
1615
1616 /* replace previous configuration */
1617 kfree(mvm->mcast_filter_cmd);
1618 mvm->mcast_filter_cmd = cmd;
1619
1620 if (!cmd)
1621 goto out;
1622
1623 iwl_mvm_recalc_multicast(mvm);
1624 out:
1625 mutex_unlock(&mvm->mutex);
1626 *total_flags = 0;
1627 }
1628
1629 static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
1630 struct ieee80211_vif *vif,
1631 unsigned int filter_flags,
1632 unsigned int changed_flags)
1633 {
1634 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1635
1636 /* We only support filtering probe requests */
1637 if (!(changed_flags & FIF_PROBE_REQ))
1638 return;
1639
1640 /* Supported only for p2p client interfaces */
1641 if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
1642 !vif->p2p)
1643 return;
1644
1645 mutex_lock(&mvm->mutex);
1646 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
1647 mutex_unlock(&mvm->mutex);
1648 }
1649
1650 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
1651 struct iwl_bcast_iter_data {
1652 struct iwl_mvm *mvm;
1653 struct iwl_bcast_filter_cmd *cmd;
1654 u8 current_filter;
1655 };
1656
1657 static void
1658 iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
1659 const struct iwl_fw_bcast_filter *in_filter,
1660 struct iwl_fw_bcast_filter *out_filter)
1661 {
1662 struct iwl_fw_bcast_filter_attr *attr;
1663 int i;
1664
1665 memcpy(out_filter, in_filter, sizeof(*out_filter));
1666
1667 for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
1668 attr = &out_filter->attrs[i];
1669
1670 if (!attr->mask)
1671 break;
1672
1673 switch (attr->reserved1) {
1674 case cpu_to_le16(BC_FILTER_MAGIC_IP):
1675 if (vif->bss_conf.arp_addr_cnt != 1) {
1676 attr->mask = 0;
1677 continue;
1678 }
1679
1680 attr->val = vif->bss_conf.arp_addr_list[0];
1681 break;
1682 case cpu_to_le16(BC_FILTER_MAGIC_MAC):
1683 attr->val = *(__be32 *)&vif->addr[2];
1684 break;
1685 default:
1686 break;
1687 }
1688 attr->reserved1 = 0;
1689 out_filter->num_attrs++;
1690 }
1691 }
1692
1693 static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
1694 struct ieee80211_vif *vif)
1695 {
1696 struct iwl_bcast_iter_data *data = _data;
1697 struct iwl_mvm *mvm = data->mvm;
1698 struct iwl_bcast_filter_cmd *cmd = data->cmd;
1699 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1700 struct iwl_fw_bcast_mac *bcast_mac;
1701 int i;
1702
1703 if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
1704 return;
1705
1706 bcast_mac = &cmd->macs[mvmvif->id];
1707
1708 /*
1709 * enable filtering only for associated stations, but not for P2P
1710 * Clients
1711 */
1712 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
1713 !vif->bss_conf.assoc)
1714 return;
1715
1716 bcast_mac->default_discard = 1;
1717
1718 /* copy all configured filters */
1719 for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
1720 /*
1721 * Make sure we don't exceed our filters limit.
1722 * If there is still a valid filter to be configured,
1723 * be on the safe side and just allow bcast for this mac.
1724 */
1725 if (WARN_ON_ONCE(data->current_filter >=
1726 ARRAY_SIZE(cmd->filters))) {
1727 bcast_mac->default_discard = 0;
1728 bcast_mac->attached_filters = 0;
1729 break;
1730 }
1731
1732 iwl_mvm_set_bcast_filter(vif,
1733 &mvm->bcast_filters[i],
1734 &cmd->filters[data->current_filter]);
1735
1736 /* skip current filter if it contains no attributes */
1737 if (!cmd->filters[data->current_filter].num_attrs)
1738 continue;
1739
1740 /* attach the filter to current mac */
1741 bcast_mac->attached_filters |=
1742 cpu_to_le16(BIT(data->current_filter));
1743
1744 data->current_filter++;
1745 }
1746 }
1747
1748 bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
1749 struct iwl_bcast_filter_cmd *cmd)
1750 {
1751 struct iwl_bcast_iter_data iter_data = {
1752 .mvm = mvm,
1753 .cmd = cmd,
1754 };
1755
1756 if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
1757 return false;
1758
1759 memset(cmd, 0, sizeof(*cmd));
1760 cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
1761 cmd->max_macs = ARRAY_SIZE(cmd->macs);
1762
1763 #ifdef CONFIG_IWLWIFI_DEBUGFS
1764 /* use debugfs filters/macs if override is configured */
1765 if (mvm->dbgfs_bcast_filtering.override) {
1766 memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
1767 sizeof(cmd->filters));
1768 memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
1769 sizeof(cmd->macs));
1770 return true;
1771 }
1772 #endif
1773
1774 /* if no filters are configured, do nothing */
1775 if (!mvm->bcast_filters)
1776 return false;
1777
1778 /* configure and attach these filters for each associated sta vif */
1779 ieee80211_iterate_active_interfaces(
1780 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1781 iwl_mvm_bcast_filter_iterator, &iter_data);
1782
1783 return true;
1784 }
1785
1786 static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
1787 {
1788 struct iwl_bcast_filter_cmd cmd;
1789
1790 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
1791 return 0;
1792
1793 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
1794 return 0;
1795
1796 return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
1797 sizeof(cmd), &cmd);
1798 }
1799 #else
1800 static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
1801 {
1802 return 0;
1803 }
1804 #endif
1805
1806 static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
1807 struct ieee80211_vif *vif)
1808 {
1809 struct iwl_mu_group_mgmt_cmd cmd = {};
1810
1811 memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership,
1812 WLAN_MEMBERSHIP_LEN);
1813 memcpy(cmd.user_position, vif->bss_conf.mu_group.position,
1814 WLAN_USER_POSITION_LEN);
1815
1816 return iwl_mvm_send_cmd_pdu(mvm,
1817 WIDE_ID(DATA_PATH_GROUP,
1818 UPDATE_MU_GROUPS_CMD),
1819 0, sizeof(cmd), &cmd);
1820 }
1821
1822 static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac,
1823 struct ieee80211_vif *vif)
1824 {
1825 if (vif->mu_mimo_owner) {
1826 struct iwl_mu_group_mgmt_notif *notif = _data;
1827
1828 /*
1829 * MU-MIMO Group Id action frame is little endian. We treat
1830 * the data received from firmware as if it came from the
1831 * action frame, so no conversion is needed.
1832 */
1833 ieee80211_update_mu_groups(vif,
1834 (u8 *)&notif->membership_status,
1835 (u8 *)&notif->user_position);
1836 }
1837 }
1838
1839 void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
1840 struct iwl_rx_cmd_buffer *rxb)
1841 {
1842 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1843 struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;
1844
1845 ieee80211_iterate_active_interfaces_atomic(
1846 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1847 iwl_mvm_mu_mimo_iface_iterator, notif);
1848 }
1849
1850 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
1851 struct ieee80211_vif *vif,
1852 struct ieee80211_bss_conf *bss_conf,
1853 u32 changes)
1854 {
1855 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1856 int ret;
1857
1858 /*
1859 * Re-calculate the tsf id, as the master-slave relations depend on the
1860 * beacon interval, which was not known when the station interface was
1861 * added.
1862 */
1863 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
1864 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
1865
1866 if (changes & BSS_CHANGED_ASSOC && !bss_conf->assoc &&
1867 mvmvif->lqm_active)
1868 iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_STOP_MEASUREMENT,
1869 0, 0);
1870
1871 /*
1872 * If we're not associated yet, take the (new) BSSID before associating
1873 * so the firmware knows. If we're already associated, then use the old
1874 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC
1875 * branch for disassociation below.
1876 */
1877 if (changes & BSS_CHANGED_BSSID && !mvmvif->associated)
1878 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
1879
1880 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid);
1881 if (ret)
1882 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
1883
1884 /* after sending it once, adopt mac80211 data */
1885 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
1886 mvmvif->associated = bss_conf->assoc;
1887
1888 if (changes & BSS_CHANGED_ASSOC) {
1889 if (bss_conf->assoc) {
1890 /* clear statistics to get clean beacon counter */
1891 iwl_mvm_request_statistics(mvm, true);
1892 memset(&mvmvif->beacon_stats, 0,
1893 sizeof(mvmvif->beacon_stats));
1894
1895 /* add quota for this interface */
1896 ret = iwl_mvm_update_quotas(mvm, true, NULL);
1897 if (ret) {
1898 IWL_ERR(mvm, "failed to update quotas\n");
1899 return;
1900 }
1901
1902 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
1903 &mvm->status)) {
1904 /*
1905 * If we're restarting then the firmware will
1906 * obviously have lost synchronisation with
1907 * the AP. It will attempt to synchronise by
1908 * itself, but we can make it more reliable by
1909 * scheduling a session protection time event.
1910 *
1911 * The firmware needs to receive a beacon to
1912 * catch up with synchronisation, so use 110% of
1913 * the beacon interval.
1914 *
1915 * Set a large maximum delay to allow for more
1916 * than a single interface.
1917 */
1918 u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
1919 iwl_mvm_protect_session(mvm, vif, dur, dur,
1920 5 * dur, false);
1921 }
1922
1923 iwl_mvm_sf_update(mvm, vif, false);
1924 iwl_mvm_power_vif_assoc(mvm, vif);
1925 if (vif->p2p) {
1926 iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
1927 iwl_mvm_update_smps(mvm, vif,
1928 IWL_MVM_SMPS_REQ_PROT,
1929 IEEE80211_SMPS_DYNAMIC);
1930 }
1931 } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
1932 /*
1933 * If update fails - SF might be running in associated
1934 * mode while disassociated - which is forbidden.
1935 */
1936 WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
1937 "Failed to update SF upon disassociation\n");
1938
1939 /* remove AP station now that the MAC is unassoc */
1940 ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
1941 if (ret)
1942 IWL_ERR(mvm, "failed to remove AP station\n");
1943
1944 if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
1945 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1946 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
1947 /* remove quota for this interface */
1948 ret = iwl_mvm_update_quotas(mvm, false, NULL);
1949 if (ret)
1950 IWL_ERR(mvm, "failed to update quotas\n");
1951
1952 if (vif->p2p)
1953 iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
1954
1955 /* this will take the cleared BSSID from bss_conf */
1956 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
1957 if (ret)
1958 IWL_ERR(mvm,
1959 "failed to update MAC %pM (clear after unassoc)\n",
1960 vif->addr);
1961 }
1962
1963 /*
1964 * The firmware tracks the MU-MIMO group on its own.
1965 * However, on HW restart we should restore this data.
1966 */
1967 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
1968 (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) {
1969 ret = iwl_mvm_update_mu_groups(mvm, vif);
1970 if (ret)
1971 IWL_ERR(mvm,
1972 "failed to update VHT MU_MIMO groups\n");
1973 }
1974
1975 iwl_mvm_recalc_multicast(mvm);
1976 iwl_mvm_configure_bcast_filter(mvm);
1977
1978 /* reset rssi values */
1979 mvmvif->bf_data.ave_beacon_signal = 0;
1980
1981 iwl_mvm_bt_coex_vif_change(mvm);
1982 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
1983 IEEE80211_SMPS_AUTOMATIC);
1984 if (fw_has_capa(&mvm->fw->ucode_capa,
1985 IWL_UCODE_TLV_CAPA_UMAC_SCAN))
1986 iwl_mvm_config_scan(mvm);
1987 } else if (changes & BSS_CHANGED_BEACON_INFO) {
1988 /*
1989 * We received a beacon _after_ association so
1990 * remove the session protection.
1991 */
1992 iwl_mvm_remove_time_event(mvm, mvmvif,
1993 &mvmvif->time_event_data);
1994 }
1995
1996 if (changes & BSS_CHANGED_BEACON_INFO) {
1997 iwl_mvm_sf_update(mvm, vif, false);
1998 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
1999 }
2000
2001 if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS |
2002 /*
2003 * Send power command on every beacon change,
2004 * because we may not have enabled beacon abort yet.
2005 */
2006 BSS_CHANGED_BEACON_INFO)) {
2007 ret = iwl_mvm_power_update_mac(mvm);
2008 if (ret)
2009 IWL_ERR(mvm, "failed to update power mode\n");
2010 }
2011
2012 if (changes & BSS_CHANGED_TXPOWER) {
2013 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2014 bss_conf->txpower);
2015 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2016 }
2017
2018 if (changes & BSS_CHANGED_CQM) {
2019 IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
2020 /* reset cqm events tracking */
2021 mvmvif->bf_data.last_cqm_event = 0;
2022 if (mvmvif->bf_data.bf_enabled) {
2023 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
2024 if (ret)
2025 IWL_ERR(mvm,
2026 "failed to update CQM thresholds\n");
2027 }
2028 }
2029
2030 if (changes & BSS_CHANGED_ARP_FILTER) {
2031 IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
2032 iwl_mvm_configure_bcast_filter(mvm);
2033 }
2034 }
2035
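/*
 * Start AP/IBSS operation: send the beacon template, add the MAC
 * context, bind it to a PHY context, add the broadcast station and
 * update power and quota settings.
 */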
2036 static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
2037 struct ieee80211_vif *vif)
2038 {
2039 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2040 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2041 int ret;
2042
2043 /*
2044 * iwl_mvm_mac_ctxt_add() might read directly from the device
2045 * (the system time), so make sure it is available.
2046 */
2047 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP);
2048 if (ret)
2049 return ret;
2050
2051 mutex_lock(&mvm->mutex);
2052
2053 /* Send the beacon template */
2054 ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif);
2055 if (ret)
2056 goto out_unlock;
2057
2058 /*
2059 * Re-calculate the tsf id, as the master-slave relations depend on the
2060 * beacon interval, which was not known when the AP interface was added.
2061 */
2062 if (vif->type == NL80211_IFTYPE_AP)
2063 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2064
2065 mvmvif->ap_assoc_sta_count = 0;
2066
2067 /* Add the mac context */
2068 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
2069 if (ret)
2070 goto out_unlock;
2071
2072 /* Perform the binding */
2073 ret = iwl_mvm_binding_add_vif(mvm, vif);
2074 if (ret)
2075 goto out_remove;
2076
2077 /* Send the bcast station. At this stage the TBTT and DTIM time events
2078 * are added and applied to the scheduler */
2079 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2080 if (ret)
2081 goto out_unbind;
2082
2083 /* must be set before quota calculations */
2084 mvmvif->ap_ibss_active = true;
2085
2086 /* power update needs to be done before quotas */
2087 iwl_mvm_power_update_mac(mvm);
2088
2089 ret = iwl_mvm_update_quotas(mvm, false, NULL);
2090 if (ret)
2091 goto out_quota_failed;
2092
2093 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2094 if (vif->p2p && mvm->p2p_device_vif)
2095 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2096
2097 iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
2098
2099 iwl_mvm_bt_coex_vif_change(mvm);
2100
2101 /* we don't support TDLS during DCM */
2102 if (iwl_mvm_phy_ctx_count(mvm) > 1)
2103 iwl_mvm_teardown_tdls_peers(mvm);
2104
2105 goto out_unlock;
2106
2107 out_quota_failed:
2108 iwl_mvm_power_update_mac(mvm);
2109 mvmvif->ap_ibss_active = false;
2110 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2111 out_unbind:
2112 iwl_mvm_binding_remove_vif(mvm, vif);
2113 out_remove:
2114 iwl_mvm_mac_ctxt_remove(mvm, vif);
2115 out_unlock:
2116 mutex_unlock(&mvm->mutex);
2117 iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP);
2118 return ret;
2119 }
2120
2121 static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
2122 struct ieee80211_vif *vif)
2123 {
2124 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2125 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2126
2127 iwl_mvm_prepare_mac_removal(mvm, vif);
2128
2129 mutex_lock(&mvm->mutex);
2130
2131 /* Handle AP stop while in CSA */
2132 if (rcu_access_pointer(mvm->csa_vif) == vif) {
2133 iwl_mvm_remove_time_event(mvm, mvmvif,
2134 &mvmvif->time_event_data);
2135 RCU_INIT_POINTER(mvm->csa_vif, NULL);
2136 mvmvif->csa_countdown = false;
2137 }
2138
2139 if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
2140 RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
2141 mvm->csa_tx_block_bcn_timeout = 0;
2142 }
2143
2144 mvmvif->ap_ibss_active = false;
2145 mvm->ap_last_beacon_gp2 = 0;
2146
2147 iwl_mvm_bt_coex_vif_change(mvm);
2148
2149 iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
2150
2151 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2152 if (vif->p2p && mvm->p2p_device_vif)
2153 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2154
2155 iwl_mvm_update_quotas(mvm, false, NULL);
2156 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2157 iwl_mvm_binding_remove_vif(mvm, vif);
2158
2159 iwl_mvm_power_update_mac(mvm);
2160
2161 iwl_mvm_mac_ctxt_remove(mvm, vif);
2162
2163 mutex_unlock(&mvm->mutex);
2164 }
2165
2166 static void
2167 iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
2168 struct ieee80211_vif *vif,
2169 struct ieee80211_bss_conf *bss_conf,
2170 u32 changes)
2171 {
2172 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2173
2174 /* Changes will be applied when the AP/IBSS is started */
2175 if (!mvmvif->ap_ibss_active)
2176 return;
2177
2178 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
2179 BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) &&
2180 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL))
2181 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2182
2183 /* Need to send a new beacon template to the FW */
2184 if (changes & BSS_CHANGED_BEACON &&
2185 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
2186 IWL_WARN(mvm, "Failed updating beacon data\n");
2187
2188 if (changes & BSS_CHANGED_TXPOWER) {
2189 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2190 bss_conf->txpower);
2191 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2192 }
2193 }
2194
2195 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
2196 struct ieee80211_vif *vif,
2197 struct ieee80211_bss_conf *bss_conf,
2198 u32 changes)
2199 {
2200 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2201
2202 /*
2203 * iwl_mvm_bss_info_changed_station() might call
2204 * iwl_mvm_protect_session(), which reads directly from
2205 * the device (the system time), so make sure it is available.
2206 */
2207 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED))
2208 return;
2209
2210 mutex_lock(&mvm->mutex);
2211
2212 if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
2213 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
2214
2215 switch (vif->type) {
2216 case NL80211_IFTYPE_STATION:
2217 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
2218 break;
2219 case NL80211_IFTYPE_AP:
2220 case NL80211_IFTYPE_ADHOC:
2221 iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
2222 break;
2223 default:
2224 /* shouldn't happen */
2225 WARN_ON_ONCE(1);
2226 }
2227
2228 mutex_unlock(&mvm->mutex);
2229 iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
2230 }
2231
2232 static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
2233 struct ieee80211_vif *vif,
2234 struct ieee80211_scan_request *hw_req)
2235 {
2236 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2237 int ret;
2238
2239 if (hw_req->req.n_channels == 0 ||
2240 hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
2241 return -EINVAL;
2242
2243 mutex_lock(&mvm->mutex);
2244 ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
2245 mutex_unlock(&mvm->mutex);
2246
2247 return ret;
2248 }
2249
2250 static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
2251 struct ieee80211_vif *vif)
2252 {
2253 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2254
2255 mutex_lock(&mvm->mutex);
2256
2257 /* Due to a race condition, it's possible that mac80211 asks
2258 * us to stop a hw_scan when it's already stopped. This can
2259 * happen, for instance, if we stopped the scan ourselves,
2260 * called ieee80211_scan_completed() and userspace cancelled the
2261 * scan before ieee80211_scan_work() could run.
2262 * To handle that, simply return if the scan is not running.
2263 */
2264 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
2265 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
2266
2267 mutex_unlock(&mvm->mutex);
2268 }
2269
2270 static void
2271 iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
2272 struct ieee80211_sta *sta, u16 tids,
2273 int num_frames,
2274 enum ieee80211_frame_release_type reason,
2275 bool more_data)
2276 {
2277 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2278
2279 /* Called when we need to transmit (a) frame(s) from mac80211 */
2280
2281 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2282 tids, more_data, false);
2283 }
2284
2285 static void
2286 iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
2287 struct ieee80211_sta *sta, u16 tids,
2288 int num_frames,
2289 enum ieee80211_frame_release_type reason,
2290 bool more_data)
2291 {
2292 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2293
2294 /* Called when we need to transmit (a) frame(s) from agg queue */
2295
2296 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2297 tids, more_data, true);
2298 }
2299
2300 static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
2301 struct ieee80211_vif *vif,
2302 enum sta_notify_cmd cmd,
2303 struct ieee80211_sta *sta)
2304 {
2305 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2306 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2307 unsigned long txqs = 0, tids = 0;
2308 int tid;
2309
2310 spin_lock_bh(&mvmsta->lock);
2311 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
2312 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2313
2314 if (tid_data->state != IWL_AGG_ON &&
2315 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
2316 continue;
2317
2318 __set_bit(tid_data->txq_id, &txqs);
2319
2320 if (iwl_mvm_tid_queued(tid_data) == 0)
2321 continue;
2322
2323 __set_bit(tid, &tids);
2324 }
2325
2326 switch (cmd) {
2327 case STA_NOTIFY_SLEEP:
2328 if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
2329 ieee80211_sta_block_awake(hw, sta, true);
2330
2331 for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
2332 ieee80211_sta_set_buffered(sta, tid, true);
2333
2334 if (txqs)
2335 iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
2336 /*
2337 * The fw updates the STA to be asleep. Tx packets on the Tx
2338 * queues to this station will not be transmitted. The fw will
2339 * send a Tx response with TX_STATUS_FAIL_DEST_PS.
2340 */
2341 break;
2342 case STA_NOTIFY_AWAKE:
2343 if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
2344 break;
2345
2346 if (txqs)
2347 iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
2348 iwl_mvm_sta_modify_ps_wake(mvm, sta);
2349 break;
2350 default:
2351 break;
2352 }
2353 spin_unlock_bh(&mvmsta->lock);
2354 }
2355
2356 static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
2357 struct ieee80211_vif *vif,
2358 struct ieee80211_sta *sta)
2359 {
2360 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2361 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2362
2363 /*
2364 * This is called before mac80211 does RCU synchronisation,
2365 * so here we already invalidate our internal RCU-protected
2366 * station pointer. The rest of the code will thus no longer
2367 * be able to find the station this way, and we don't rely
2368 * on further RCU synchronisation after the sta_state()
2369 * callback deleted the station.
2370 */
2371 mutex_lock(&mvm->mutex);
2372 if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
2373 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
2374 ERR_PTR(-ENOENT));
2375
2376 mutex_unlock(&mvm->mutex);
2377 }
2378
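/*
 * Decide whether U-APSD may be used on this vif, based on firmware
 * support and the uapsd_disable module parameter, and update the vif's
 * driver flags accordingly.
 */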
2379 static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2380 const u8 *bssid)
2381 {
2382 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
2383 return;
2384
2385 if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) {
2386 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2387 return;
2388 }
2389
2390 if (!vif->p2p &&
2391 (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) {
2392 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2393 return;
2394 }
2395
2396 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
2397 }
2398
2399 static void
2400 iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
2401 struct ieee80211_vif *vif, u8 *peer_addr,
2402 enum nl80211_tdls_operation action)
2403 {
2404 struct iwl_fw_dbg_trigger_tlv *trig;
2405 struct iwl_fw_dbg_trigger_tdls *tdls_trig;
2406
2407 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TDLS))
2408 return;
2409
2410 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS);
2411 tdls_trig = (void *)trig->data;
2412 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
2413 return;
2414
2415 if (!(tdls_trig->action_bitmap & BIT(action)))
2416 return;
2417
2418 if (tdls_trig->peer_mode &&
2419 memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0)
2420 return;
2421
2422 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
2423 "TDLS event occurred, peer %pM, action %d",
2424 peer_addr, action);
2425 }
2426
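/* Free any TX frames still deferred on the station's per-TID queues */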
2427 static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
2428 struct iwl_mvm_sta *mvm_sta)
2429 {
2430 struct iwl_mvm_tid_data *tid_data;
2431 struct sk_buff *skb;
2432 int i;
2433
2434 spin_lock_bh(&mvm_sta->lock);
2435 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
2436 tid_data = &mvm_sta->tid_data[i];
2437 while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames)))
2438 ieee80211_free_txskb(mvm->hw, skb);
2439 }
2440 spin_unlock_bh(&mvm_sta->lock);
2441 }
2442
2443 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2444 struct ieee80211_vif *vif,
2445 struct ieee80211_sta *sta,
2446 enum ieee80211_sta_state old_state,
2447 enum ieee80211_sta_state new_state)
2448 {
2449 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2450 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2451 int ret;
2452
2453 IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
2454 sta->addr, old_state, new_state);
2455
2456 /* this would be a mac80211 bug ... but don't crash */
2457 if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
2458 return -EINVAL;
2459
2460 /* if a STA is being removed, reuse its ID */
2461 flush_work(&mvm->sta_drained_wk);
2462
2463 /*
2464 * If we are in a STA removal flow and in DQA mode:
2465 *
2466 * This is after the sync_rcu part, so the queues have already been
2467 * flushed. No more TXs on their way in mac80211's path, and no more in
2468 * the queues.
2469 * Also, we won't be getting any new TX frames for this station.
2470 * What we might have are deferred TX frames that need to be taken care
2471 * of.
2472 *
2473 * Drop any still-queued deferred-frame before removing the STA, and
2474 * make sure the worker is no longer handling frames for this STA.
2475 */
2476 if (old_state == IEEE80211_STA_NONE &&
2477 new_state == IEEE80211_STA_NOTEXIST &&
2478 iwl_mvm_is_dqa_supported(mvm)) {
2479 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2480
2481 iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
2482 flush_work(&mvm->add_stream_wk);
2483
2484 /*
2485 * No need to make sure deferred TX indication is off since the
2486 * worker will already remove it if it was on
2487 */
2488 }
2489
2490 mutex_lock(&mvm->mutex);
2491 if (old_state == IEEE80211_STA_NOTEXIST &&
2492 new_state == IEEE80211_STA_NONE) {
2493 /*
2494 * Firmware bug - it'll crash if the beacon interval is less
2495 * than 16. We can't avoid connecting at all, so refuse the
2496 * station state change; this will cause mac80211 to abandon
2497 * attempts to connect to this AP, and eventually wpa_s will
2498 * blacklist the AP...
2499 */
2500 if (vif->type == NL80211_IFTYPE_STATION &&
2501 vif->bss_conf.beacon_int < 16) {
2502 IWL_ERR(mvm,
2503 "AP %pM beacon interval is %d, refusing due to firmware bug!\n",
2504 sta->addr, vif->bss_conf.beacon_int);
2505 ret = -EINVAL;
2506 goto out_unlock;
2507 }
2508
2509 if (sta->tdls &&
2510 (vif->p2p ||
2511 iwl_mvm_tdls_sta_count(mvm, NULL) ==
2512 IWL_MVM_TDLS_STA_COUNT ||
2513 iwl_mvm_phy_ctx_count(mvm) > 1)) {
2514 IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n");
2515 ret = -EBUSY;
2516 goto out_unlock;
2517 }
2518
2519 ret = iwl_mvm_add_sta(mvm, vif, sta);
2520 if (sta->tdls && ret == 0) {
2521 iwl_mvm_recalc_tdls_state(mvm, vif, true);
2522 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
2523 NL80211_TDLS_SETUP);
2524 }
2525 } else if (old_state == IEEE80211_STA_NONE &&
2526 new_state == IEEE80211_STA_AUTH) {
2527 /*
2528 * EBS may be disabled due to previous failures reported by FW.
2529 * Reset EBS status here assuming environment has been changed.
2530 */
2531 mvm->last_ebs_successful = true;
2532 iwl_mvm_check_uapsd(mvm, vif, sta->addr);
2533 ret = 0;
2534 } else if (old_state == IEEE80211_STA_AUTH &&
2535 new_state == IEEE80211_STA_ASSOC) {
2536 if (vif->type == NL80211_IFTYPE_AP) {
2537 mvmvif->ap_assoc_sta_count++;
2538 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2539 }
2540 ret = iwl_mvm_update_sta(mvm, vif, sta);
2541 if (ret == 0)
2542 iwl_mvm_rs_rate_init(mvm, sta,
2543 mvmvif->phy_ctxt->channel->band,
2544 true);
2545 } else if (old_state == IEEE80211_STA_ASSOC &&
2546 new_state == IEEE80211_STA_AUTHORIZED) {
2547
2548 /* we don't support TDLS during DCM */
2549 if (iwl_mvm_phy_ctx_count(mvm) > 1)
2550 iwl_mvm_teardown_tdls_peers(mvm);
2551
2552 if (sta->tdls)
2553 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
2554 NL80211_TDLS_ENABLE_LINK);
2555
2556 /* enable beacon filtering */
2557 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2558 ret = 0;
2559 } else if (old_state == IEEE80211_STA_AUTHORIZED &&
2560 new_state == IEEE80211_STA_ASSOC) {
2561 /* disable beacon filtering */
2562 WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0));
2563 ret = 0;
2564 } else if (old_state == IEEE80211_STA_ASSOC &&
2565 new_state == IEEE80211_STA_AUTH) {
2566 if (vif->type == NL80211_IFTYPE_AP) {
2567 mvmvif->ap_assoc_sta_count--;
2568 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2569 }
2570 ret = 0;
2571 } else if (old_state == IEEE80211_STA_AUTH &&
2572 new_state == IEEE80211_STA_NONE) {
2573 ret = 0;
2574 } else if (old_state == IEEE80211_STA_NONE &&
2575 new_state == IEEE80211_STA_NOTEXIST) {
2576 ret = iwl_mvm_rm_sta(mvm, vif, sta);
2577 if (sta->tdls) {
2578 iwl_mvm_recalc_tdls_state(mvm, vif, false);
2579 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
2580 NL80211_TDLS_DISABLE_LINK);
2581 }
2582 } else {
2583 ret = -EIO;
2584 }
2585 out_unlock:
2586 mutex_unlock(&mvm->mutex);
2587
2588 if (sta->tdls && ret == 0) {
2589 if (old_state == IEEE80211_STA_NOTEXIST &&
2590 new_state == IEEE80211_STA_NONE)
2591 ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2592 else if (old_state == IEEE80211_STA_NONE &&
2593 new_state == IEEE80211_STA_NOTEXIST)
2594 ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2595 }
2596
2597 return ret;
2598 }
2599
2600 static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2601 {
2602 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2603
2604 mvm->rts_threshold = value;
2605
2606 return 0;
2607 }
2608
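/* Re-evaluate smart FIFO when a station vif's NSS changes */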
2609 static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
2610 struct ieee80211_vif *vif,
2611 struct ieee80211_sta *sta, u32 changed)
2612 {
2613 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2614
2615 if (vif->type == NL80211_IFTYPE_STATION &&
2616 changed & IEEE80211_RC_NSS_CHANGED)
2617 iwl_mvm_sf_update(mvm, vif, false);
2618 }
2619
2620 static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
2621 struct ieee80211_vif *vif, u16 ac,
2622 const struct ieee80211_tx_queue_params *params)
2623 {
2624 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2625 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2626
2627 mvmvif->queue_params[ac] = *params;
2628
2629 /*
2630 * No need to update right away, we'll get BSS_CHANGED_QOS.
2631 * The exception is the P2P_DEVICE interface, which needs an immediate update.
2632 */
2633 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2634 int ret;
2635
2636 mutex_lock(&mvm->mutex);
2637 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2638 mutex_unlock(&mvm->mutex);
2639 return ret;
2640 }
2641 return 0;
2642 }
2643
2644 static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
2645 struct ieee80211_vif *vif)
2646 {
2647 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2648 u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
2649 u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS;
2650
2651 if (WARN_ON_ONCE(vif->bss_conf.assoc))
2652 return;
2653
2654 /*
2655 * iwl_mvm_protect_session() reads directly from the device
2656 * (the system time), so make sure it is available.
2657 */
2658 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX))
2659 return;
2660
2661 mutex_lock(&mvm->mutex);
2662 /* Try really hard to protect the session and hear a beacon */
2663 iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false);
2664 mutex_unlock(&mvm->mutex);
2665
2666 iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX);
2667 }
2668
2669 static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
2670 struct ieee80211_vif *vif,
2671 struct cfg80211_sched_scan_request *req,
2672 struct ieee80211_scan_ies *ies)
2673 {
2674 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2675
2676 int ret;
2677
2678 mutex_lock(&mvm->mutex);
2679
2680 if (!vif->bss_conf.idle) {
2681 ret = -EBUSY;
2682 goto out;
2683 }
2684
2685 ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
2686
2687 out:
2688 mutex_unlock(&mvm->mutex);
2689 return ret;
2690 }
2691
2692 static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
2693 struct ieee80211_vif *vif)
2694 {
2695 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2696 int ret;
2697
2698 mutex_lock(&mvm->mutex);
2699
2700 /* Due to a race condition, it's possible that mac80211 asks
2701 * us to stop a sched_scan when it's already stopped. This
2702 * can happen, for instance, if we stopped the scan ourselves,
2703 * called ieee80211_sched_scan_stopped() and userspace stopped the
2704 * sched scan before ieee80211_sched_scan_stopped_work()
2705 * could run. To handle this, simply return if the scan is
2706 * not running.
2707 */
2708 if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
2709 mutex_unlock(&mvm->mutex);
2710 return 0;
2711 }
2712
2713 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
2714 mutex_unlock(&mvm->mutex);
2715 iwl_mvm_wait_for_async_handlers(mvm);
2716
2717 return ret;
2718 }
2719
2720 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
2721 enum set_key_cmd cmd,
2722 struct ieee80211_vif *vif,
2723 struct ieee80211_sta *sta,
2724 struct ieee80211_key_conf *key)
2725 {
2726 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2727 struct iwl_mvm_sta *mvmsta;
2728 struct iwl_mvm_key_pn *ptk_pn;
2729 int keyidx = key->keyidx;
2730 int ret;
2731 u8 key_offset;
2732
2733 if (iwlwifi_mod_params.sw_crypto) {
2734 IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
2735 return -EOPNOTSUPP;
2736 }
2737
2738 switch (key->cipher) {
2739 case WLAN_CIPHER_SUITE_TKIP:
2740 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
2741 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2742 break;
2743 case WLAN_CIPHER_SUITE_CCMP:
2744 case WLAN_CIPHER_SUITE_GCMP:
2745 case WLAN_CIPHER_SUITE_GCMP_256:
2746 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2747 break;
2748 case WLAN_CIPHER_SUITE_AES_CMAC:
2749 WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
2750 break;
2751 case WLAN_CIPHER_SUITE_WEP40:
2752 case WLAN_CIPHER_SUITE_WEP104:
2753 /* For non-client mode, only use WEP keys for TX as we probably
2754 * don't have a station yet anyway and would then have to keep
2755 * track of the keys, linking them to each of the clients/peers
2756 * as they appear. For now, don't do that; for performance, WEP
2757 * offload doesn't really matter much, but we need it for some
2758 * other offload features in client mode.
2759 */
2760 if (vif->type != NL80211_IFTYPE_STATION)
2761 return 0;
2762 break;
2763 default:
2764 /* currently FW supports only one optional cipher scheme */
2765 if (hw->n_cipher_schemes &&
2766 hw->cipher_schemes->cipher == key->cipher)
2767 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2768 else
2769 return -EOPNOTSUPP;
2770 }
2771
2772 mutex_lock(&mvm->mutex);
2773
2774 switch (cmd) {
2775 case SET_KEY:
2776 if ((vif->type == NL80211_IFTYPE_ADHOC ||
2777 vif->type == NL80211_IFTYPE_AP) && !sta) {
2778 /*
2779 * GTK on AP interface is a TX-only key, return 0;
2780 * on IBSS they're per-station and because we're lazy
2781 * we don't support them for RX, so do the same.
2782 * CMAC in AP/IBSS modes must be done in software.
2783 */
2784 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
2785 ret = -EOPNOTSUPP;
2786 else
2787 ret = 0;
2788 key->hw_key_idx = STA_KEY_IDX_INVALID;
2789 break;
2790 }
2791
2792 /* During FW restart, in order to restore the state as it was,
2793 * don't try to reprogram keys we previously failed for.
2794 */
2795 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
2796 key->hw_key_idx == STA_KEY_IDX_INVALID) {
2797 IWL_DEBUG_MAC80211(mvm,
2798 "skip invalid idx key programming during restart\n");
2799 ret = 0;
2800 break;
2801 }
2802
2803 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
2804 sta && iwl_mvm_has_new_rx_api(mvm) &&
2805 key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
2806 (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
2807 key->cipher == WLAN_CIPHER_SUITE_GCMP ||
2808 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
2809 struct ieee80211_key_seq seq;
2810 int tid, q;
2811
2812 mvmsta = iwl_mvm_sta_from_mac80211(sta);
2813 WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx]));
2814 ptk_pn = kzalloc(sizeof(*ptk_pn) +
2815 mvm->trans->num_rx_queues *
2816 sizeof(ptk_pn->q[0]),
2817 GFP_KERNEL);
2818 if (!ptk_pn) {
2819 ret = -ENOMEM;
2820 break;
2821 }
2822
2823 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
2824 ieee80211_get_key_rx_seq(key, tid, &seq);
2825 for (q = 0; q < mvm->trans->num_rx_queues; q++)
2826 memcpy(ptk_pn->q[q].pn[tid],
2827 seq.ccmp.pn,
2828 IEEE80211_CCMP_PN_LEN);
2829 }
2830
2831 rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn);
2832 }
2833
2834 /* in HW restart reuse the index, otherwise request a new one */
2835 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
2836 key_offset = key->hw_key_idx;
2837 else
2838 key_offset = STA_KEY_IDX_INVALID;
2839
2840 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
2841 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
2842 if (ret) {
2843 IWL_WARN(mvm, "set key failed\n");
2844 /*
2845 * can't add key for RX, but we don't need it
2846 * in the device for TX so still return 0
2847 */
2848 key->hw_key_idx = STA_KEY_IDX_INVALID;
2849 ret = 0;
2850 }
2851
2852 break;
2853 case DISABLE_KEY:
2854 if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
2855 ret = 0;
2856 break;
2857 }
2858
2859 if (sta && iwl_mvm_has_new_rx_api(mvm) &&
2860 key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
2861 (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
2862 key->cipher == WLAN_CIPHER_SUITE_GCMP ||
2863 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
2864 mvmsta = iwl_mvm_sta_from_mac80211(sta);
2865 ptk_pn = rcu_dereference_protected(
2866 mvmsta->ptk_pn[keyidx],
2867 lockdep_is_held(&mvm->mutex));
2868 RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL);
2869 if (ptk_pn)
2870 kfree_rcu(ptk_pn, rcu_head);
2871 }
2872
2873 IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
2874 ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
2875 break;
2876 default:
2877 ret = -EINVAL;
2878 }
2879
2880 mutex_unlock(&mvm->mutex);
2881 return ret;
2882 }
2883
2884 static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
2885 struct ieee80211_vif *vif,
2886 struct ieee80211_key_conf *keyconf,
2887 struct ieee80211_sta *sta,
2888 u32 iv32, u16 *phase1key)
2889 {
2890 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2891
2892 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
2893 return;
2894
2895 iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
2896 }
2897
2898
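/*
 * Notification-wait callback for the HOT_SPOT_CMD response: store the
 * time event UID reported by the firmware and queue the time event data
 * on the aux ROC list.
 */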
2899 static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
2900 struct iwl_rx_packet *pkt, void *data)
2901 {
2902 struct iwl_mvm *mvm =
2903 container_of(notif_wait, struct iwl_mvm, notif_wait);
2904 struct iwl_hs20_roc_res *resp;
2905 int resp_len = iwl_rx_packet_payload_len(pkt);
2906 struct iwl_mvm_time_event_data *te_data = data;
2907
2908 if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD))
2909 return true;
2910
2911 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
2912 IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n");
2913 return true;
2914 }
2915
2916 resp = (void *)pkt->data;
2917
2918 IWL_DEBUG_TE(mvm,
2919 "Aux ROC: Recieved response from ucode: status=%d uid=%d\n",
2920 resp->status, resp->event_unique_id);
2921
2922 te_data->uid = le32_to_cpu(resp->event_unique_id);
2923 IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
2924 te_data->uid);
2925
2926 spin_lock_bh(&mvm->time_event_lock);
2927 list_add_tail(&te_data->list, &mvm->aux_roc_te_list);
2928 spin_unlock_bh(&mvm->time_event_lock);
2929
2930 return true;
2931 }
2932
2933 #define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
2934 #define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
2935 #define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
2936 #define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
2937 #define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)
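/*
 * Remain on channel using the AUX framework (HOT_SPOT_CMD): request a
 * time event on the given channel and wait for the firmware response to
 * learn the time event UID.
 */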
2938 static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
2939 struct ieee80211_channel *channel,
2940 struct ieee80211_vif *vif,
2941 int duration)
2942 {
2943 int res, time_reg = DEVICE_SYSTEM_TIME_REG;
2944 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2945 struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
2946 static const u16 time_event_response[] = { HOT_SPOT_CMD };
2947 struct iwl_notification_wait wait_time_event;
2948 u32 dtim_interval = vif->bss_conf.dtim_period *
2949 vif->bss_conf.beacon_int;
2950 u32 req_dur, delay;
2951 struct iwl_hs20_roc_req aux_roc_req = {
2952 .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
2953 .id_and_color =
2954 cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
2955 .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
2956 /* Set the channel info data */
2957 .channel_info.band = (channel->band == NL80211_BAND_2GHZ) ?
2958 PHY_BAND_24 : PHY_BAND_5,
2959 .channel_info.channel = channel->hw_value,
2960 .channel_info.width = PHY_VHT_CHANNEL_MODE20,
2961 /* Set the time and duration */
2962 .apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)),
2963 };
2964
2965 delay = AUX_ROC_MIN_DELAY;
2966 req_dur = MSEC_TO_TU(duration);
2967
2968 /*
2969 * If we are associated we want the delay time to be at least one
2970 * dtim interval so that the FW can wait until after the DTIM and
2971 * then start the time event; this will potentially allow us to
2972 * remain off-channel for the max duration.
2973 * Since we want to use almost a whole dtim interval we would also
2974 * like the delay to be for 2-3 dtim intervals, in case there are
2975 * other time events with higher priority.
2976 */
2977 if (vif->bss_conf.assoc) {
2978 delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
2979 /* We cannot remain off-channel longer than the DTIM interval */
2980 if (dtim_interval <= req_dur) {
2981 req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER;
2982 if (req_dur <= AUX_ROC_MIN_DURATION)
2983 req_dur = dtim_interval -
2984 AUX_ROC_MIN_SAFETY_BUFFER;
2985 }
2986 }
2987
2988 aux_roc_req.duration = cpu_to_le32(req_dur);
2989 aux_roc_req.apply_time_max_delay = cpu_to_le32(delay);
2990
2991 IWL_DEBUG_TE(mvm,
2992 "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
2993 channel->hw_value, req_dur, duration, delay,
2994 dtim_interval);
2995 /* Set the node address */
2996 memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
2997
2998 lockdep_assert_held(&mvm->mutex);
2999
3000 spin_lock_bh(&mvm->time_event_lock);
3001
3002 if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
3003 spin_unlock_bh(&mvm->time_event_lock);
3004 return -EIO;
3005 }
3006
3007 te_data->vif = vif;
3008 te_data->duration = duration;
3009 te_data->id = HOT_SPOT_CMD;
3010
3011 spin_unlock_bh(&mvm->time_event_lock);
3012
3013 /*
3014 * Use a notification wait, which really just processes the
3015 * command response and doesn't wait for anything, in order
3016 * to be able to process the response and get the UID inside
3017 * the RX path. Using CMD_WANT_SKB doesn't work because it
3018 * stores the buffer and then wakes up this thread, by which
3019 * time another notification (that the time event started)
3020 * might already be processed unsuccessfully.
3021 */
3022 iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
3023 time_event_response,
3024 ARRAY_SIZE(time_event_response),
3025 iwl_mvm_rx_aux_roc, te_data);
3026
3027 res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req),
3028 &aux_roc_req);
3029
3030 if (res) {
3031 IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res);
3032 iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
3033 goto out_clear_te;
3034 }
3035
3036 /* No need to wait for anything, so just pass 1 (0 isn't valid) */
3037 res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
3038 /* should never fail */
3039 WARN_ON_ONCE(res);
3040
3041 if (res) {
3042 out_clear_te:
3043 spin_lock_bh(&mvm->time_event_lock);
3044 iwl_mvm_te_clear_data(mvm, te_data);
3045 spin_unlock_bh(&mvm->time_event_lock);
3046 }
3047
3048 return res;
3049 }
3050
3051 static int iwl_mvm_roc(struct ieee80211_hw *hw,
3052 struct ieee80211_vif *vif,
3053 struct ieee80211_channel *channel,
3054 int duration,
3055 enum ieee80211_roc_type type)
3056 {
3057 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3058 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3059 struct cfg80211_chan_def chandef;
3060 struct iwl_mvm_phy_ctxt *phy_ctxt;
3061 int ret, i;
3062
3063 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
3064 duration, type);
3065
3066 flush_work(&mvm->roc_done_wk);
3067
3068 mutex_lock(&mvm->mutex);
3069
3070 switch (vif->type) {
3071 case NL80211_IFTYPE_STATION:
3072 if (fw_has_capa(&mvm->fw->ucode_capa,
3073 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
3074 /* Use aux roc framework (HS20) */
3075 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
3076 vif, duration);
3077 goto out_unlock;
3078 }
3079 IWL_ERR(mvm, "hotspot not supported\n");
3080 ret = -EINVAL;
3081 goto out_unlock;
3082 case NL80211_IFTYPE_P2P_DEVICE:
3083 /* handle below */
3084 break;
3085 default:
3086 IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
3087 ret = -EINVAL;
3088 goto out_unlock;
3089 }
3090
3091 for (i = 0; i < NUM_PHY_CTX; i++) {
3092 phy_ctxt = &mvm->phy_ctxts[i];
3093 if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
3094 continue;
3095
3096 if (phy_ctxt->ref && channel == phy_ctxt->channel) {
3097 /*
3098 * Unbind the P2P_DEVICE from the current PHY context,
3099 * and if the PHY context is not used remove it.
3100 */
3101 ret = iwl_mvm_binding_remove_vif(mvm, vif);
3102 if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3103 goto out_unlock;
3104
3105 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3106
3107 /* Bind the P2P_DEVICE to the current PHY Context */
3108 mvmvif->phy_ctxt = phy_ctxt;
3109
3110 ret = iwl_mvm_binding_add_vif(mvm, vif);
3111 if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3112 goto out_unlock;
3113
3114 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3115 goto schedule_time_event;
3116 }
3117 }
3118
3119 /* Need to update the PHY context only if the ROC channel changed */
3120 if (channel == mvmvif->phy_ctxt->channel)
3121 goto schedule_time_event;
3122
3123 cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
3124
3125 /*
3126 * Change the PHY context configuration as it is currently referenced
3127 * only by the P2P Device MAC
3128 */
3129 if (mvmvif->phy_ctxt->ref == 1) {
3130 ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
3131 &chandef, 1, 1);
3132 if (ret)
3133 goto out_unlock;
3134 } else {
3135 /*
3136 * The PHY context is shared with other MACs. Need to remove the
3137 * P2P Device from the binding, allocate a new PHY context and
3138 * create a new binding.
3139 */
3140 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3141 if (!phy_ctxt) {
3142 ret = -ENOSPC;
3143 goto out_unlock;
3144 }
3145
3146 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
3147 1, 1);
3148 if (ret) {
3149 IWL_ERR(mvm, "Failed to change PHY context\n");
3150 goto out_unlock;
3151 }
3152
3153 /* Unbind the P2P_DEVICE from the current PHY context */
3154 ret = iwl_mvm_binding_remove_vif(mvm, vif);
3155 if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3156 goto out_unlock;
3157
3158 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3159
3160 /* Bind the P2P_DEVICE to the new allocated PHY context */
3161 mvmvif->phy_ctxt = phy_ctxt;
3162
3163 ret = iwl_mvm_binding_add_vif(mvm, vif);
3164 if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3165 goto out_unlock;
3166
3167 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3168 }
3169
3170 schedule_time_event:
3171 /* Schedule the time events */
3172 ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
3173
3174 out_unlock:
3175 mutex_unlock(&mvm->mutex);
3176 IWL_DEBUG_MAC80211(mvm, "leave\n");
3177 return ret;
3178 }
3179
3180 static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
3181 {
3182 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3183
3184 IWL_DEBUG_MAC80211(mvm, "enter\n");
3185
3186 mutex_lock(&mvm->mutex);
3187 iwl_mvm_stop_roc(mvm);
3188 mutex_unlock(&mvm->mutex);
3189
3190 IWL_DEBUG_MAC80211(mvm, "leave\n");
3191 return 0;
3192 }
3193
3194 static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
3195 struct ieee80211_chanctx_conf *ctx)
3196 {
3197 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3198 struct iwl_mvm_phy_ctxt *phy_ctxt;
3199 int ret;
3200
3201 lockdep_assert_held(&mvm->mutex);
3202
3203 IWL_DEBUG_MAC80211(mvm, "Add channel context\n");
3204
3205 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3206 if (!phy_ctxt) {
3207 ret = -ENOSPC;
3208 goto out;
3209 }
3210
3211 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3212 ctx->rx_chains_static,
3213 ctx->rx_chains_dynamic);
3214 if (ret) {
3215 IWL_ERR(mvm, "Failed to add PHY context\n");
3216 goto out;
3217 }
3218
3219 iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
3220 *phy_ctxt_id = phy_ctxt->id;
3221 out:
3222 return ret;
3223 }
3224
3225 static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
3226 struct ieee80211_chanctx_conf *ctx)
3227 {
3228 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3229 int ret;
3230
3231 mutex_lock(&mvm->mutex);
3232 ret = __iwl_mvm_add_chanctx(mvm, ctx);
3233 mutex_unlock(&mvm->mutex);
3234
3235 return ret;
3236 }
3237
3238 static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
3239 struct ieee80211_chanctx_conf *ctx)
3240 {
3241 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3242 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3243
3244 lockdep_assert_held(&mvm->mutex);
3245
3246 iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
3247 }
3248
3249 static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
3250 struct ieee80211_chanctx_conf *ctx)
3251 {
3252 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3253
3254 mutex_lock(&mvm->mutex);
3255 __iwl_mvm_remove_chanctx(mvm, ctx);
3256 mutex_unlock(&mvm->mutex);
3257 }
3258
3259 static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
3260 struct ieee80211_chanctx_conf *ctx,
3261 u32 changed)
3262 {
3263 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3264 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3265 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3266
3267 if (WARN_ONCE((phy_ctxt->ref > 1) &&
3268 (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
3269 IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
3270 IEEE80211_CHANCTX_CHANGE_RADAR |
3271 IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)),
3272 "Cannot change PHY. Ref=%d, changed=0x%X\n",
3273 phy_ctxt->ref, changed))
3274 return;
3275
3276 mutex_lock(&mvm->mutex);
3277 iwl_mvm_bt_coex_vif_change(mvm);
3278 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3279 ctx->rx_chains_static,
3280 ctx->rx_chains_dynamic);
3281 mutex_unlock(&mvm->mutex);
3282 }
3283
3284 static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
3285 struct ieee80211_vif *vif,
3286 struct ieee80211_chanctx_conf *ctx,
3287 bool switching_chanctx)
3288 {
3289 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3290 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3291 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3292 int ret;
3293
3294 lockdep_assert_held(&mvm->mutex);
3295
3296 mvmvif->phy_ctxt = phy_ctxt;
3297
3298 switch (vif->type) {
3299 case NL80211_IFTYPE_AP:
3300 /* only needed if we're switching chanctx (i.e. during CSA) */
3301 if (switching_chanctx) {
3302 mvmvif->ap_ibss_active = true;
3303 break;
3304 }
3305 case NL80211_IFTYPE_ADHOC:
3306 /*
3307 * The AP binding flow is handled as part of the start_ap flow
3308 * (in bss_info_changed), similarly for IBSS.
3309 */
3310 ret = 0;
3311 goto out;
3312 case NL80211_IFTYPE_STATION:
3313 break;
3314 case NL80211_IFTYPE_MONITOR:
3315 /* always disable PS when a monitor interface is active */
3316 mvmvif->ps_disabled = true;
3317 break;
3318 default:
3319 ret = -EINVAL;
3320 goto out;
3321 }
3322
3323 ret = iwl_mvm_binding_add_vif(mvm, vif);
3324 if (ret)
3325 goto out;
3326
3327 /*
3328 * Power state must be updated before quotas,
3329 * otherwise fw will complain.
3330 */
3331 iwl_mvm_power_update_mac(mvm);
3332
3333 /* Setting the quota at this stage is only required for monitor
3334 * interfaces. For the other types, the bss_info changed flow
3335 * will handle quota settings.
3336 */
3337 if (vif->type == NL80211_IFTYPE_MONITOR) {
3338 mvmvif->monitor_active = true;
3339 ret = iwl_mvm_update_quotas(mvm, false, NULL);
3340 if (ret)
3341 goto out_remove_binding;
3342
3343 ret = iwl_mvm_add_snif_sta(mvm, vif);
3344 if (ret)
3345 goto out_remove_binding;
3346
3347 }
3348
3349 /* Handle binding during CSA */
3350 if (vif->type == NL80211_IFTYPE_AP) {
3351 iwl_mvm_update_quotas(mvm, false, NULL);
3352 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3353 }
3354
3355 if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
3356 u32 duration = 2 * vif->bss_conf.beacon_int;
3357
3358 /* iwl_mvm_protect_session() reads directly from the
3359 * device (the system time), so make sure it is
3360 * available.
3361 */
3362 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
3363 if (ret)
3364 goto out_remove_binding;
3365
3366 /* Protect the session to make sure we hear the first
3367 * beacon on the new channel.
3368 */
3369 iwl_mvm_protect_session(mvm, vif, duration, duration,
3370 vif->bss_conf.beacon_int / 2,
3371 true);
3372
3373 iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
3374
3375 iwl_mvm_update_quotas(mvm, false, NULL);
3376 }
3377
3378 goto out;
3379
3380 out_remove_binding:
3381 iwl_mvm_binding_remove_vif(mvm, vif);
3382 iwl_mvm_power_update_mac(mvm);
3383 out:
3384 if (ret)
3385 mvmvif->phy_ctxt = NULL;
3386 return ret;
3387 }
3388 static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
3389 struct ieee80211_vif *vif,
3390 struct ieee80211_chanctx_conf *ctx)
3391 {
3392 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3393 int ret;
3394
3395 mutex_lock(&mvm->mutex);
3396 ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false);
3397 mutex_unlock(&mvm->mutex);
3398
3399 return ret;
3400 }
3401
3402 static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
3403 struct ieee80211_vif *vif,
3404 struct ieee80211_chanctx_conf *ctx,
3405 bool switching_chanctx)
3406 {
3407 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3408 struct ieee80211_vif *disabled_vif = NULL;
3409
3410 lockdep_assert_held(&mvm->mutex);
3411
3412 iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
3413
3414 switch (vif->type) {
3415 case NL80211_IFTYPE_ADHOC:
3416 goto out;
3417 case NL80211_IFTYPE_MONITOR:
3418 mvmvif->monitor_active = false;
3419 mvmvif->ps_disabled = false;
3420 iwl_mvm_rm_snif_sta(mvm, vif);
3421 break;
3422 case NL80211_IFTYPE_AP:
3423 /* This part is triggered only during CSA */
3424 if (!switching_chanctx || !mvmvif->ap_ibss_active)
3425 goto out;
3426
3427 mvmvif->csa_countdown = false;
3428
3429 /* Set CS bit on all the stations */
3430 iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
3431
3432 /* Save blocked iface, the timeout is set on the next beacon */
3433 rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif);
3434
3435 mvmvif->ap_ibss_active = false;
3436 break;
3437 case NL80211_IFTYPE_STATION:
3438 if (!switching_chanctx)
3439 break;
3440
3441 disabled_vif = vif;
3442
3443 iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
3444 break;
3445 default:
3446 break;
3447 }
3448
3449 iwl_mvm_update_quotas(mvm, false, disabled_vif);
3450 iwl_mvm_binding_remove_vif(mvm, vif);
3451
3452 out:
3453 mvmvif->phy_ctxt = NULL;
3454 iwl_mvm_power_update_mac(mvm);
3455 }
3456
3457 static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
3458 struct ieee80211_vif *vif,
3459 struct ieee80211_chanctx_conf *ctx)
3460 {
3461 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3462
3463 mutex_lock(&mvm->mutex);
3464 __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false);
3465 mutex_unlock(&mvm->mutex);
3466 }
3467
3468 static int
3469 iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
3470 struct ieee80211_vif_chanctx_switch *vifs)
3471 {
3472 int ret;
3473
3474 mutex_lock(&mvm->mutex);
3475 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3476 __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);
3477
3478 ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx);
3479 if (ret) {
3480 IWL_ERR(mvm, "failed to add new_ctx during channel switch\n");
3481 goto out_reassign;
3482 }
3483
3484 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3485 true);
3486 if (ret) {
3487 IWL_ERR(mvm,
3488 "failed to assign new_ctx during channel switch\n");
3489 goto out_remove;
3490 }
3491
3492 /* we don't support TDLS during DCM - can be caused by channel switch */
3493 if (iwl_mvm_phy_ctx_count(mvm) > 1)
3494 iwl_mvm_teardown_tdls_peers(mvm);
3495
3496 goto out;
3497
3498 out_remove:
3499 __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);
3500
3501 out_reassign:
3502 if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) {
3503 IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
3504 goto out_restart;
3505 }
3506
3507 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3508 true)) {
3509 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3510 goto out_restart;
3511 }
3512
3513 goto out;
3514
3515 out_restart:
3516 /* things keep failing, better restart the hw */
3517 iwl_mvm_nic_restart(mvm, false);
3518
3519 out:
3520 mutex_unlock(&mvm->mutex);
3521
3522 return ret;
3523 }
3524
3525 static int
3526 iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
3527 struct ieee80211_vif_chanctx_switch *vifs)
3528 {
3529 int ret;
3530
3531 mutex_lock(&mvm->mutex);
3532 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3533
3534 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3535 true);
3536 if (ret) {
3537 IWL_ERR(mvm,
3538 "failed to assign new_ctx during channel switch\n");
3539 goto out_reassign;
3540 }
3541
3542 goto out;
3543
3544 out_reassign:
3545 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3546 true)) {
3547 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3548 goto out_restart;
3549 }
3550
3551 goto out;
3552
3553 out_restart:
3554 /* things keep failing, better restart the hw */
3555 iwl_mvm_nic_restart(mvm, false);
3556
3557 out:
3558 mutex_unlock(&mvm->mutex);
3559
3560 return ret;
3561 }
3562
3563 static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
3564 struct ieee80211_vif_chanctx_switch *vifs,
3565 int n_vifs,
3566 enum ieee80211_chanctx_switch_mode mode)
3567 {
3568 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3569 int ret;
3570
3571 /* we only support a single-vif right now */
3572 if (n_vifs > 1)
3573 return -EOPNOTSUPP;
3574
3575 switch (mode) {
3576 case CHANCTX_SWMODE_SWAP_CONTEXTS:
3577 ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs);
3578 break;
3579 case CHANCTX_SWMODE_REASSIGN_VIF:
3580 ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs);
3581 break;
3582 default:
3583 ret = -EOPNOTSUPP;
3584 break;
3585 }
3586
3587 return ret;
3588 }
3589
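/*
 * mac80211 set_tim callback: the TIM element lives in the beacon
 * template, so just resend the beacon for the station's vif.
 */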
3590 static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
3591 struct ieee80211_sta *sta,
3592 bool set)
3593 {
3594 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3595 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3596
3597 if (!mvm_sta || !mvm_sta->vif) {
3598 IWL_ERR(mvm, "Station is not associated to a vif\n");
3599 return -EINVAL;
3600 }
3601
3602 return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
3603 }
3604
3605 #ifdef CONFIG_NL80211_TESTMODE
3606 static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
3607 [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
3608 [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
3609 [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
3610 };
3611
3612 static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
3613 struct ieee80211_vif *vif,
3614 void *data, int len)
3615 {
3616 struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
3617 int err;
3618 u32 noa_duration;
3619
3620 err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy);
3621 if (err)
3622 return err;
3623
3624 if (!tb[IWL_MVM_TM_ATTR_CMD])
3625 return -EINVAL;
3626
3627 switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
3628 case IWL_MVM_TM_CMD_SET_NOA:
3629 if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
3630 !vif->bss_conf.enable_beacon ||
3631 !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
3632 return -EINVAL;
3633
3634 noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
3635 if (noa_duration >= vif->bss_conf.beacon_int)
3636 return -EINVAL;
3637
3638 mvm->noa_duration = noa_duration;
3639 mvm->noa_vif = vif;
3640
3641 return iwl_mvm_update_quotas(mvm, false, NULL);
3642 case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
3643 /* must be associated client vif - ignore authorized */
3644 if (!vif || vif->type != NL80211_IFTYPE_STATION ||
3645 !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
3646 !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
3647 return -EINVAL;
3648
3649 if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
3650 return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
3651 return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3652 }
3653
3654 return -EOPNOTSUPP;
3655 }
3656
3657 static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
3658 struct ieee80211_vif *vif,
3659 void *data, int len)
3660 {
3661 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3662 int err;
3663
3664 mutex_lock(&mvm->mutex);
3665 err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
3666 mutex_unlock(&mvm->mutex);
3667
3668 return err;
3669 }
3670 #endif
3671
3672 static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
3673 struct ieee80211_vif *vif,
3674 struct ieee80211_channel_switch *chsw)
3675 {
3676 /* By implementing this operation, we prevent mac80211 from
3677 * starting its own channel switch timer, so that we can call
3678 * ieee80211_chswitch_done() ourselves at the right time
3679 * (which is when the absence time event starts).
3680 */
3681
3682 IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
3683 "dummy channel switch op\n");
3684 }
3685
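/*
 * Prepare for a channel switch:
 * - AP/GO: make sure no other CSA is in progress and remember the vif
 *   and target frequency for the countdown.
 * - Station: stop any LQM measurement, schedule an "absence" time event
 *   a bit before the switch and disable beacon filtering (it is
 *   re-enabled in the post_channel_switch handler).
 * Power save is disabled and TDLS peers are torn down for the duration
 * of the switch.
 */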
3686 static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
3687 struct ieee80211_vif *vif,
3688 struct ieee80211_channel_switch *chsw)
3689 {
3690 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3691 struct ieee80211_vif *csa_vif;
3692 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3693 u32 apply_time;
3694 int ret;
3695
3696 mutex_lock(&mvm->mutex);
3697
3698 mvmvif->csa_failed = false;
3699
3700 IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
3701 chsw->chandef.center_freq1);
3702
3703 iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH);
3704
3705 switch (vif->type) {
3706 case NL80211_IFTYPE_AP:
3707 csa_vif =
3708 rcu_dereference_protected(mvm->csa_vif,
3709 lockdep_is_held(&mvm->mutex));
3710 if (WARN_ONCE(csa_vif && csa_vif->csa_active,
3711 "Another CSA is already in progress")) {
3712 ret = -EBUSY;
3713 goto out_unlock;
3714 }
3715
3716 /* we haven't unblocked tx yet - prevent a new CS meanwhile */
3717 if (rcu_dereference_protected(mvm->csa_tx_blocked_vif,
3718 lockdep_is_held(&mvm->mutex))) {
3719 ret = -EBUSY;
3720 goto out_unlock;
3721 }
3722
3723 rcu_assign_pointer(mvm->csa_vif, vif);
3724
3725 if (WARN_ONCE(mvmvif->csa_countdown,
3726 "Previous CSA countdown didn't complete")) {
3727 ret = -EBUSY;
3728 goto out_unlock;
3729 }
3730
3731 mvmvif->csa_target_freq = chsw->chandef.chan->center_freq;
3732
3733 break;
3734 case NL80211_IFTYPE_STATION:
3735 if (mvmvif->lqm_active)
3736 iwl_mvm_send_lqm_cmd(vif,
3737 LQM_CMD_OPERATION_STOP_MEASUREMENT,
3738 0, 0);
3739
3740 /* Schedule the time event to a bit before beacon 1,
3741 * to make sure we're in the new channel when the
3742 * GO/AP arrives.
3743 */
3744 apply_time = chsw->device_timestamp +
3745 ((vif->bss_conf.beacon_int * (chsw->count - 1) -
3746 IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
3747
3748 if (chsw->block_tx)
3749 iwl_mvm_csa_client_absent(mvm, vif);
3750
3751 iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
3752 apply_time);
3753 if (mvmvif->bf_data.bf_enabled) {
3754 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3755 if (ret)
3756 goto out_unlock;
3757 }
3758
3759 break;
3760 default:
3761 break;
3762 }
3763
3764 mvmvif->ps_disabled = true;
3765
3766 ret = iwl_mvm_power_update_ps(mvm);
3767 if (ret)
3768 goto out_unlock;
3769
3770 /* we won't be on this channel any longer */
3771 iwl_mvm_teardown_tdls_peers(mvm);
3772
3773 out_unlock:
3774 mutex_unlock(&mvm->mutex);
3775
3776 return ret;
3777 }
3778
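/*
 * Complete a channel switch: report -EIO if the firmware aborted the
 * CSA; otherwise, on a client vif, re-enable TX to the AP station,
 * refresh the MAC context, re-enable beacon filtering and stop the
 * session protection, then restore power save.
 */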
3779 static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
3780 struct ieee80211_vif *vif)
3781 {
3782 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3783 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3784 int ret;
3785
3786 mutex_lock(&mvm->mutex);
3787
3788 if (mvmvif->csa_failed) {
3789 mvmvif->csa_failed = false;
3790 ret = -EIO;
3791 goto out_unlock;
3792 }
3793
3794 if (vif->type == NL80211_IFTYPE_STATION) {
3795 struct iwl_mvm_sta *mvmsta;
3796
3797 mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
3798 mvmvif->ap_sta_id);
3799
3800 if (WARN_ON(!mvmsta)) {
3801 ret = -EIO;
3802 goto out_unlock;
3803 }
3804
3805 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
3806
3807 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3808
3809 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
3810 if (ret)
3811 goto out_unlock;
3812
3813 iwl_mvm_stop_session_protection(mvm, vif);
3814 }
3815
3816 mvmvif->ps_disabled = false;
3817
3818 ret = iwl_mvm_power_update_ps(mvm);
3819
3820 out_unlock:
3821 mutex_unlock(&mvm->mutex);
3822
3823 return ret;
3824 }
3825
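/*
 * Flush TX for a client vif: collect the TFD queue mask of the AP
 * station and of any TDLS peers belonging to this vif, then either ask
 * the firmware to flush those queues (drop == true) or wait for the
 * transport queues to drain - the latter without holding mvm->mutex,
 * since it can take a while.
 */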
3826 static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
3827 struct ieee80211_vif *vif, u32 queues, bool drop)
3828 {
3829 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3830 struct iwl_mvm_vif *mvmvif;
3831 struct iwl_mvm_sta *mvmsta;
3832 struct ieee80211_sta *sta;
3833 int i;
3834 u32 msk = 0;
3835
3836 if (!vif || vif->type != NL80211_IFTYPE_STATION)
3837 return;
3838
3839 /* Make sure we're done with the deferred traffic before flushing */
3840 if (iwl_mvm_is_dqa_supported(mvm))
3841 flush_work(&mvm->add_stream_wk);
3842
3843 mutex_lock(&mvm->mutex);
3844 mvmvif = iwl_mvm_vif_from_mac80211(vif);
3845
3846 /* flush the AP-station and all TDLS peers */
3847 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
3848 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
3849 lockdep_is_held(&mvm->mutex));
3850 if (IS_ERR_OR_NULL(sta))
3851 continue;
3852
3853 mvmsta = iwl_mvm_sta_from_mac80211(sta);
3854 if (mvmsta->vif != vif)
3855 continue;
3856
3857 /* make sure only TDLS peers or the AP are flushed */
3858 WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);
3859
3860 msk |= mvmsta->tfd_queue_msk;
3861 }
3862
3863 if (drop) {
3864 if (iwl_mvm_flush_tx_path(mvm, msk, 0))
3865 IWL_ERR(mvm, "flush request failed\n");
3866 mutex_unlock(&mvm->mutex);
3867 } else {
3868 mutex_unlock(&mvm->mutex);
3869
3870 /* this can take a while, and other operations may need to
3871  * succeed while it runs, so do it without the mutex held
3872  */
3873 iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
3874 }
3875 }
3876
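/*
 * Report global radio statistics (survey index 0 only): total radio
 * on-time and its RX/TX/scan portions, summing the accumulated counters
 * and the current firmware statistics, converted from microseconds to
 * milliseconds.
 */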
3877 static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
3878 struct survey_info *survey)
3879 {
3880 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3881 int ret;
3882
3883 memset(survey, 0, sizeof(*survey));
3884
3885 /* only support global statistics right now */
3886 if (idx != 0)
3887 return -ENOENT;
3888
3889 if (!fw_has_capa(&mvm->fw->ucode_capa,
3890 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
3891 return -ENOENT;
3892
3893 mutex_lock(&mvm->mutex);
3894
3895 if (mvm->ucode_loaded) {
3896 ret = iwl_mvm_request_statistics(mvm, false);
3897 if (ret)
3898 goto out;
3899 }
3900
3901 survey->filled = SURVEY_INFO_TIME |
3902 SURVEY_INFO_TIME_RX |
3903 SURVEY_INFO_TIME_TX |
3904 SURVEY_INFO_TIME_SCAN;
3905 survey->time = mvm->accu_radio_stats.on_time_rf +
3906 mvm->radio_stats.on_time_rf;
3907 do_div(survey->time, USEC_PER_MSEC);
3908
3909 survey->time_rx = mvm->accu_radio_stats.rx_time +
3910 mvm->radio_stats.rx_time;
3911 do_div(survey->time_rx, USEC_PER_MSEC);
3912
3913 survey->time_tx = mvm->accu_radio_stats.tx_time +
3914 mvm->radio_stats.tx_time;
3915 do_div(survey->time_tx, USEC_PER_MSEC);
3916
3917 survey->time_scan = mvm->accu_radio_stats.on_time_scan +
3918 mvm->radio_stats.on_time_scan;
3919 do_div(survey->time_scan, USEC_PER_MSEC);
3920
3921 ret = 0;
3922 out:
3923 mutex_unlock(&mvm->mutex);
3924 return ret;
3925 }
3926
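/*
 * Fill per-station statistics: the average signal reported by the
 * firmware and, when this is the AP station and the driver performs
 * beacon filtering, the number of received beacons and their average
 * signal.
 */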
3927 static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
3928 struct ieee80211_vif *vif,
3929 struct ieee80211_sta *sta,
3930 struct station_info *sinfo)
3931 {
3932 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3933 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3934 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3935
3936 if (mvmsta->avg_energy) {
3937 sinfo->signal_avg = mvmsta->avg_energy;
3938 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
3939 }
3940
3941 if (!fw_has_capa(&mvm->fw->ucode_capa,
3942 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
3943 return;
3944
3945 /* if beacon filtering isn't on, mac80211 does it anyway */
3946 if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
3947 return;
3948
3949 if (!vif->bss_conf.assoc)
3950 return;
3951
3952 mutex_lock(&mvm->mutex);
3953
3954 if (mvmvif->ap_sta_id != mvmsta->sta_id)
3955 goto unlock;
3956
3957 if (iwl_mvm_request_statistics(mvm, false))
3958 goto unlock;
3959
3960 sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
3961 mvmvif->beacon_stats.accu_num_beacons;
3962 sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
3963 if (mvmvif->beacon_stats.avg_signal) {
3964 /* firmware only reports a value after RXing a few beacons */
3965 sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
3966 sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
3967 }
3968 unlock:
3969 mutex_unlock(&mvm->mutex);
3970 }
3971
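/*
 * Firmware debug collection on MLME events.  CHECK_MLME_TRIGGER
 * decrements the per-event occurrence counter from the trigger TLV and
 * starts a collection once it reaches zero; a counter of zero means
 * "collect on every occurrence".
 */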
3972 static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
3973 struct ieee80211_vif *vif,
3974 const struct ieee80211_event *event)
3975 {
3976 #define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...) \
3977 do { \
3978 if ((_cnt) && --(_cnt)) \
3979 break; \
3980 iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\
3981 } while (0)
3982
3983 struct iwl_fw_dbg_trigger_tlv *trig;
3984 struct iwl_fw_dbg_trigger_mlme *trig_mlme;
3985
3986 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
3987 return;
3988
3989 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
3990 trig_mlme = (void *)trig->data;
3991 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
3992 return;
3993
3994 if (event->u.mlme.data == ASSOC_EVENT) {
3995 if (event->u.mlme.status == MLME_DENIED)
3996 CHECK_MLME_TRIGGER(mvm, trig, buf,
3997 trig_mlme->stop_assoc_denied,
3998 "DENIED ASSOC: reason %d",
3999 event->u.mlme.reason);
4000 else if (event->u.mlme.status == MLME_TIMEOUT)
4001 CHECK_MLME_TRIGGER(mvm, trig, buf,
4002 trig_mlme->stop_assoc_timeout,
4003 "ASSOC TIMEOUT");
4004 } else if (event->u.mlme.data == AUTH_EVENT) {
4005 if (event->u.mlme.status == MLME_DENIED)
4006 CHECK_MLME_TRIGGER(mvm, trig, buf,
4007 trig_mlme->stop_auth_denied,
4008 "DENIED AUTH: reason %d",
4009 event->u.mlme.reason);
4010 else if (event->u.mlme.status == MLME_TIMEOUT)
4011 CHECK_MLME_TRIGGER(mvm, trig, buf,
4012 trig_mlme->stop_auth_timeout,
4013 "AUTH TIMEOUT");
4014 } else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
4015 CHECK_MLME_TRIGGER(mvm, trig, buf,
4016 trig_mlme->stop_rx_deauth,
4017 "DEAUTH RX %d", event->u.mlme.reason);
4018 } else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
4019 CHECK_MLME_TRIGGER(mvm, trig, buf,
4020 trig_mlme->stop_tx_deauth,
4021 "DEAUTH TX %d", event->u.mlme.reason);
4022 }
4023 #undef CHECK_MLME_TRIGGER
4024 }
4025
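/*
 * Firmware debug collection for block-ack events: a received BAR or a
 * frame timeout triggers a collection when the corresponding TID bit is
 * set in the BA trigger TLV.
 */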
4026 static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
4027 struct ieee80211_vif *vif,
4028 const struct ieee80211_event *event)
4029 {
4030 struct iwl_fw_dbg_trigger_tlv *trig;
4031 struct iwl_fw_dbg_trigger_ba *ba_trig;
4032
4033 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
4034 return;
4035
4036 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
4037 ba_trig = (void *)trig->data;
4038 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4039 return;
4040
4041 if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
4042 return;
4043
4044 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
4045 "BAR received from %pM, tid %d, ssn %d",
4046 event->u.ba.sta->addr, event->u.ba.tid,
4047 event->u.ba.ssn);
4048 }
4049
4050 static void
4051 iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
4052 struct ieee80211_vif *vif,
4053 const struct ieee80211_event *event)
4054 {
4055 struct iwl_fw_dbg_trigger_tlv *trig;
4056 struct iwl_fw_dbg_trigger_ba *ba_trig;
4057
4058 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
4059 return;
4060
4061 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
4062 ba_trig = (void *)trig->data;
4063 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4064 return;
4065
4066 if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
4067 return;
4068
4069 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
4070 "Frame from %pM timed out, tid %d",
4071 event->u.ba.sta->addr, event->u.ba.tid);
4072 }
4073
4074 static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
4075 struct ieee80211_vif *vif,
4076 const struct ieee80211_event *event)
4077 {
4078 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4079
4080 switch (event->type) {
4081 case MLME_EVENT:
4082 iwl_mvm_event_mlme_callback(mvm, vif, event);
4083 break;
4084 case BAR_RX_EVENT:
4085 iwl_mvm_event_bar_rx_callback(mvm, vif, event);
4086 break;
4087 case BA_FRAME_TIMEOUT:
4088 iwl_mvm_event_frame_timeout_callback(mvm, vif, event);
4089 break;
4090 default:
4091 break;
4092 }
4093 }
4094
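/*
 * Send an internal notification to all RX queues and, for a synchronous
 * request, wait up to one second until every queue has processed it, as
 * tracked by mvm->queue_sync_counter.
 */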
4095 void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
4096 struct iwl_mvm_internal_rxq_notif *notif,
4097 u32 size)
4098 {
4099 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq);
4100 u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
4101 int ret;
4102
4103 lockdep_assert_held(&mvm->mutex);
4104
4105 if (!iwl_mvm_has_new_rx_api(mvm))
4106 return;
4107
4108 notif->cookie = mvm->queue_sync_cookie;
4109
4110 if (notif->sync)
4111 atomic_set(&mvm->queue_sync_counter,
4112 mvm->trans->num_rx_queues);
4113
4114 ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
4115 if (ret) {
4116 IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
4117 goto out;
4118 }
4119
4120 if (notif->sync)
4121 ret = wait_event_timeout(notif_waitq,
4122 atomic_read(&mvm->queue_sync_counter) == 0,
4123 HZ);
4124 WARN_ON_ONCE(!ret);
4125
4126 out:
4127 atomic_set(&mvm->queue_sync_counter, 0);
4128 mvm->queue_sync_cookie++;
4129 }
4130
4131 static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
4132 {
4133 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4134 struct iwl_mvm_internal_rxq_notif data = {
4135 .type = IWL_MVM_RXQ_EMPTY,
4136 .sync = 1,
4137 };
4138
4139 mutex_lock(&mvm->mutex);
4140 iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data));
4141 mutex_unlock(&mvm->mutex);
4142 }
4143
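/* mac80211 callbacks implemented by the MVM op mode */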
4144 const struct ieee80211_ops iwl_mvm_hw_ops = {
4145 .tx = iwl_mvm_mac_tx,
4146 .ampdu_action = iwl_mvm_mac_ampdu_action,
4147 .start = iwl_mvm_mac_start,
4148 .reconfig_complete = iwl_mvm_mac_reconfig_complete,
4149 .stop = iwl_mvm_mac_stop,
4150 .add_interface = iwl_mvm_mac_add_interface,
4151 .remove_interface = iwl_mvm_mac_remove_interface,
4152 .config = iwl_mvm_mac_config,
4153 .prepare_multicast = iwl_mvm_prepare_multicast,
4154 .configure_filter = iwl_mvm_configure_filter,
4155 .config_iface_filter = iwl_mvm_config_iface_filter,
4156 .bss_info_changed = iwl_mvm_bss_info_changed,
4157 .hw_scan = iwl_mvm_mac_hw_scan,
4158 .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
4159 .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
4160 .sta_state = iwl_mvm_mac_sta_state,
4161 .sta_notify = iwl_mvm_mac_sta_notify,
4162 .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
4163 .release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
4164 .set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
4165 .sta_rc_update = iwl_mvm_sta_rc_update,
4166 .conf_tx = iwl_mvm_mac_conf_tx,
4167 .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
4168 .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
4169 .flush = iwl_mvm_mac_flush,
4170 .sched_scan_start = iwl_mvm_mac_sched_scan_start,
4171 .sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
4172 .set_key = iwl_mvm_mac_set_key,
4173 .update_tkip_key = iwl_mvm_mac_update_tkip_key,
4174 .remain_on_channel = iwl_mvm_roc,
4175 .cancel_remain_on_channel = iwl_mvm_cancel_roc,
4176 .add_chanctx = iwl_mvm_add_chanctx,
4177 .remove_chanctx = iwl_mvm_remove_chanctx,
4178 .change_chanctx = iwl_mvm_change_chanctx,
4179 .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
4180 .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
4181 .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,
4182
4183 .start_ap = iwl_mvm_start_ap_ibss,
4184 .stop_ap = iwl_mvm_stop_ap_ibss,
4185 .join_ibss = iwl_mvm_start_ap_ibss,
4186 .leave_ibss = iwl_mvm_stop_ap_ibss,
4187
4188 .set_tim = iwl_mvm_set_tim,
4189
4190 .channel_switch = iwl_mvm_channel_switch,
4191 .pre_channel_switch = iwl_mvm_pre_channel_switch,
4192 .post_channel_switch = iwl_mvm_post_channel_switch,
4193
4194 .tdls_channel_switch = iwl_mvm_tdls_channel_switch,
4195 .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
4196 .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,
4197
4198 .event_callback = iwl_mvm_mac_event_callback,
4199
4200 .sync_rx_queues = iwl_mvm_sync_rx_queues,
4201
4202 CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
4203
4204 #ifdef CONFIG_PM_SLEEP
4205 /* look at d3.c */
4206 .suspend = iwl_mvm_suspend,
4207 .resume = iwl_mvm_resume,
4208 .set_wakeup = iwl_mvm_set_wakeup,
4209 .set_rekey_data = iwl_mvm_set_rekey_data,
4210 #if IS_ENABLED(CONFIG_IPV6)
4211 .ipv6_addr_change = iwl_mvm_ipv6_addr_change,
4212 #endif
4213 .set_default_unicast_key = iwl_mvm_set_default_unicast_key,
4214 #endif
4215 .get_survey = iwl_mvm_mac_get_survey,
4216 .sta_statistics = iwl_mvm_mac_sta_statistics,
4217 };