1 /*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18 #include "mac.h"
19
20 #include <net/mac80211.h>
21 #include <linux/etherdevice.h>
22
23 #include "hif.h"
24 #include "core.h"
25 #include "debug.h"
26 #include "wmi.h"
27 #include "htt.h"
28 #include "txrx.h"
29 #include "testmode.h"
30 #include "wmi.h"
31 #include "wmi-tlv.h"
32 #include "wmi-ops.h"
33 #include "wow.h"
34
35 /*********/
36 /* Rates */
37 /*********/
38
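/* Legacy rate table exported to mac80211. Bitrates are in 100 kbps units;
 * hw_value/hw_value_short hold the hardware rate codes for the long and
 * short preamble variants respectively.
 */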
39 static struct ieee80211_rate ath10k_rates[] = {
40 { .bitrate = 10,
41 .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
42 { .bitrate = 20,
43 .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
44 .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
45 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
46 { .bitrate = 55,
47 .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
48 .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
49 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
50 { .bitrate = 110,
51 .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
52 .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
53 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
54
55 { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
56 { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
57 { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
58 { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
59 { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
60 { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
61 { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
62 { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
63 };
64
65 #define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
66
67 #define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
68 #define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
69 ATH10K_MAC_FIRST_OFDM_RATE_IDX)
70 #define ath10k_g_rates (ath10k_rates + 0)
71 #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
72
73 static bool ath10k_mac_bitrate_is_cck(int bitrate)
74 {
75 switch (bitrate) {
76 case 10:
77 case 20:
78 case 55:
79 case 110:
80 return true;
81 }
82
83 return false;
84 }
85
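/* Convert a bitrate in 100 kbps units to the hardware rate code: units of
 * 500 kbps, with BIT(7) set to mark CCK rates.
 */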
86 static u8 ath10k_mac_bitrate_to_rate(int bitrate)
87 {
88 return DIV_ROUND_UP(bitrate, 5) |
89 (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
90 }
91
92 u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
93 u8 hw_rate, bool cck)
94 {
95 const struct ieee80211_rate *rate;
96 int i;
97
98 for (i = 0; i < sband->n_bitrates; i++) {
99 rate = &sband->bitrates[i];
100
101 if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck)
102 continue;
103
104 if (rate->hw_value == hw_rate)
105 return i;
106 else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
107 rate->hw_value_short == hw_rate)
108 return i;
109 }
110
111 return 0;
112 }
113
114 u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
115 u32 bitrate)
116 {
117 int i;
118
119 for (i = 0; i < sband->n_bitrates; i++)
120 if (sband->bitrates[i].bitrate == bitrate)
121 return i;
122
123 return 0;
124 }
125
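/* Return a bitmask of the usable MCS indices (0-7, 0-8 or 0-9) for the given
 * spatial stream index as encoded in the VHT MCS map.
 */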
126 static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
127 {
128 switch ((mcs_map >> (2 * nss)) & 0x3) {
129 case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
130 case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
131 case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
132 }
133 return 0;
134 }
135
136 static u32
137 ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
138 {
139 int nss;
140
141 for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
142 if (ht_mcs_mask[nss])
143 return nss + 1;
144
145 return 1;
146 }
147
148 static u32
149 ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
150 {
151 int nss;
152
153 for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
154 if (vht_mcs_mask[nss])
155 return nss + 1;
156
157 return 1;
158 }
159
160 int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val)
161 {
162 enum wmi_host_platform_type platform_type;
163 int ret;
164
165 if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map))
166 platform_type = WMI_HOST_PLATFORM_LOW_PERF;
167 else
168 platform_type = WMI_HOST_PLATFORM_HIGH_PERF;
169
170 ret = ath10k_wmi_ext_resource_config(ar, platform_type, val);
171
172 if (ret && ret != -EOPNOTSUPP) {
173 ath10k_warn(ar, "failed to configure ext resource: %d\n", ret);
174 return ret;
175 }
176
177 return 0;
178 }
179
180 /**********/
181 /* Crypto */
182 /**********/
183
184 static int ath10k_send_key(struct ath10k_vif *arvif,
185 struct ieee80211_key_conf *key,
186 enum set_key_cmd cmd,
187 const u8 *macaddr, u32 flags)
188 {
189 struct ath10k *ar = arvif->ar;
190 struct wmi_vdev_install_key_arg arg = {
191 .vdev_id = arvif->vdev_id,
192 .key_idx = key->keyidx,
193 .key_len = key->keylen,
194 .key_data = key->key,
195 .key_flags = flags,
196 .macaddr = macaddr,
197 };
198
199 lockdep_assert_held(&arvif->ar->conf_mutex);
200
201 switch (key->cipher) {
202 case WLAN_CIPHER_SUITE_CCMP:
203 arg.key_cipher = WMI_CIPHER_AES_CCM;
204 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
205 break;
206 case WLAN_CIPHER_SUITE_TKIP:
207 arg.key_cipher = WMI_CIPHER_TKIP;
208 arg.key_txmic_len = 8;
209 arg.key_rxmic_len = 8;
210 break;
211 case WLAN_CIPHER_SUITE_WEP40:
212 case WLAN_CIPHER_SUITE_WEP104:
213 arg.key_cipher = WMI_CIPHER_WEP;
214 break;
215 case WLAN_CIPHER_SUITE_AES_CMAC:
216 WARN_ON(1);
217 return -EINVAL;
218 default:
219 ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
220 return -EOPNOTSUPP;
221 }
222
223 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
224 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
225
226 if (cmd == DISABLE_KEY) {
227 arg.key_cipher = WMI_CIPHER_NONE;
228 arg.key_data = NULL;
229 }
230
231 return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
232 }
233
234 static int ath10k_install_key(struct ath10k_vif *arvif,
235 struct ieee80211_key_conf *key,
236 enum set_key_cmd cmd,
237 const u8 *macaddr, u32 flags)
238 {
239 struct ath10k *ar = arvif->ar;
240 int ret;
241 unsigned long time_left;
242
243 lockdep_assert_held(&ar->conf_mutex);
244
245 reinit_completion(&ar->install_key_done);
246
247 if (arvif->nohwcrypt)
248 return 1;
249
250 ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
251 if (ret)
252 return ret;
253
254 time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ);
255 if (time_left == 0)
256 return -ETIMEDOUT;
257
258 return 0;
259 }
260
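/* Install all configured WEP keys as pairwise (and, for IBSS, also group)
 * keys for the given peer.
 */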
261 static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
262 const u8 *addr)
263 {
264 struct ath10k *ar = arvif->ar;
265 struct ath10k_peer *peer;
266 int ret;
267 int i;
268 u32 flags;
269
270 lockdep_assert_held(&ar->conf_mutex);
271
272 if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP &&
273 arvif->vif->type != NL80211_IFTYPE_ADHOC &&
274 arvif->vif->type != NL80211_IFTYPE_MESH_POINT))
275 return -EINVAL;
276
277 spin_lock_bh(&ar->data_lock);
278 peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
279 spin_unlock_bh(&ar->data_lock);
280
281 if (!peer)
282 return -ENOENT;
283
284 for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
285 if (arvif->wep_keys[i] == NULL)
286 continue;
287
288 switch (arvif->vif->type) {
289 case NL80211_IFTYPE_AP:
290 flags = WMI_KEY_PAIRWISE;
291
292 if (arvif->def_wep_key_idx == i)
293 flags |= WMI_KEY_TX_USAGE;
294
295 ret = ath10k_install_key(arvif, arvif->wep_keys[i],
296 SET_KEY, addr, flags);
297 if (ret < 0)
298 return ret;
299 break;
300 case NL80211_IFTYPE_ADHOC:
301 ret = ath10k_install_key(arvif, arvif->wep_keys[i],
302 SET_KEY, addr,
303 WMI_KEY_PAIRWISE);
304 if (ret < 0)
305 return ret;
306
307 ret = ath10k_install_key(arvif, arvif->wep_keys[i],
308 SET_KEY, addr, WMI_KEY_GROUP);
309 if (ret < 0)
310 return ret;
311 break;
312 default:
313 WARN_ON(1);
314 return -EINVAL;
315 }
316
317 spin_lock_bh(&ar->data_lock);
318 peer->keys[i] = arvif->wep_keys[i];
319 spin_unlock_bh(&ar->data_lock);
320 }
321
322 /* In some cases (notably with static WEP IBSS with multiple keys)
323 * multicast Tx becomes broken. Both pairwise and groupwise keys are
324 * installed already. Using WMI_KEY_TX_USAGE in different combinations
325  * didn't seem to help. Using def_keyid vdev parameter seems to be
326 * effective so use that.
327 *
328 * FIXME: Revisit. Perhaps this can be done in a less hacky way.
329 */
330 if (arvif->vif->type != NL80211_IFTYPE_ADHOC)
331 return 0;
332
333 if (arvif->def_wep_key_idx == -1)
334 return 0;
335
336 ret = ath10k_wmi_vdev_set_param(arvif->ar,
337 arvif->vdev_id,
338 arvif->ar->wmi.vdev_param->def_keyid,
339 arvif->def_wep_key_idx);
340 if (ret) {
341                 ath10k_warn(ar, "failed to re-set def wpa key idx on vdev %i: %d\n",
342 arvif->vdev_id, ret);
343 return ret;
344 }
345
346 return 0;
347 }
348
349 static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
350 const u8 *addr)
351 {
352 struct ath10k *ar = arvif->ar;
353 struct ath10k_peer *peer;
354 int first_errno = 0;
355 int ret;
356 int i;
357 u32 flags = 0;
358
359 lockdep_assert_held(&ar->conf_mutex);
360
361 spin_lock_bh(&ar->data_lock);
362 peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
363 spin_unlock_bh(&ar->data_lock);
364
365 if (!peer)
366 return -ENOENT;
367
368 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
369 if (peer->keys[i] == NULL)
370 continue;
371
372 /* key flags are not required to delete the key */
373 ret = ath10k_install_key(arvif, peer->keys[i],
374 DISABLE_KEY, addr, flags);
375 if (ret < 0 && first_errno == 0)
376 first_errno = ret;
377
378 if (ret < 0)
379 ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
380 i, ret);
381
382 spin_lock_bh(&ar->data_lock);
383 peer->keys[i] = NULL;
384 spin_unlock_bh(&ar->data_lock);
385 }
386
387 return first_errno;
388 }
389
390 bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
391 u8 keyidx)
392 {
393 struct ath10k_peer *peer;
394 int i;
395
396 lockdep_assert_held(&ar->data_lock);
397
398 /* We don't know which vdev this peer belongs to,
399 * since WMI doesn't give us that information.
400 *
401 * FIXME: multi-bss needs to be handled.
402 */
403 peer = ath10k_peer_find(ar, 0, addr);
404 if (!peer)
405 return false;
406
407 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
408 if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
409 return true;
410 }
411
412 return false;
413 }
414
415 static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
416 struct ieee80211_key_conf *key)
417 {
418 struct ath10k *ar = arvif->ar;
419 struct ath10k_peer *peer;
420 u8 addr[ETH_ALEN];
421 int first_errno = 0;
422 int ret;
423 int i;
424 u32 flags = 0;
425
426 lockdep_assert_held(&ar->conf_mutex);
427
428 for (;;) {
429                 /* since ath10k_install_key() sleeps we can't hold data_lock all the
430 * time, so we try to remove the keys incrementally */
431 spin_lock_bh(&ar->data_lock);
432 i = 0;
433 list_for_each_entry(peer, &ar->peers, list) {
434 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
435 if (peer->keys[i] == key) {
436 ether_addr_copy(addr, peer->addr);
437 peer->keys[i] = NULL;
438 break;
439 }
440 }
441
442 if (i < ARRAY_SIZE(peer->keys))
443 break;
444 }
445 spin_unlock_bh(&ar->data_lock);
446
447 if (i == ARRAY_SIZE(peer->keys))
448 break;
449 /* key flags are not required to delete the key */
450 ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
451 if (ret < 0 && first_errno == 0)
452 first_errno = ret;
453
454 if (ret)
455 ath10k_warn(ar, "failed to remove key for %pM: %d\n",
456 addr, ret);
457 }
458
459 return first_errno;
460 }
461
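/* Re-install WEP keys on all peers that do not yet have the given key. */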
462 static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
463 struct ieee80211_key_conf *key)
464 {
465 struct ath10k *ar = arvif->ar;
466 struct ath10k_peer *peer;
467 int ret;
468
469 lockdep_assert_held(&ar->conf_mutex);
470
471 list_for_each_entry(peer, &ar->peers, list) {
472 if (ether_addr_equal(peer->addr, arvif->vif->addr))
473 continue;
474
475 if (ether_addr_equal(peer->addr, arvif->bssid))
476 continue;
477
478 if (peer->keys[key->keyidx] == key)
479 continue;
480
481                 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i key %i needs update\n",
482 arvif->vdev_id, key->keyidx);
483
484 ret = ath10k_install_peer_wep_keys(arvif, peer->addr);
485 if (ret) {
486 ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n",
487 arvif->vdev_id, peer->addr, ret);
488 return ret;
489 }
490 }
491
492 return 0;
493 }
494
495 /*********************/
496 /* General utilities */
497 /*********************/
498
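/* Map a cfg80211 channel definition onto the corresponding WMI phy mode. */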
499 static inline enum wmi_phy_mode
500 chan_to_phymode(const struct cfg80211_chan_def *chandef)
501 {
502 enum wmi_phy_mode phymode = MODE_UNKNOWN;
503
504 switch (chandef->chan->band) {
505 case NL80211_BAND_2GHZ:
506 switch (chandef->width) {
507 case NL80211_CHAN_WIDTH_20_NOHT:
508 if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
509 phymode = MODE_11B;
510 else
511 phymode = MODE_11G;
512 break;
513 case NL80211_CHAN_WIDTH_20:
514 phymode = MODE_11NG_HT20;
515 break;
516 case NL80211_CHAN_WIDTH_40:
517 phymode = MODE_11NG_HT40;
518 break;
519 case NL80211_CHAN_WIDTH_5:
520 case NL80211_CHAN_WIDTH_10:
521 case NL80211_CHAN_WIDTH_80:
522 case NL80211_CHAN_WIDTH_80P80:
523 case NL80211_CHAN_WIDTH_160:
524 phymode = MODE_UNKNOWN;
525 break;
526 }
527 break;
528 case NL80211_BAND_5GHZ:
529 switch (chandef->width) {
530 case NL80211_CHAN_WIDTH_20_NOHT:
531 phymode = MODE_11A;
532 break;
533 case NL80211_CHAN_WIDTH_20:
534 phymode = MODE_11NA_HT20;
535 break;
536 case NL80211_CHAN_WIDTH_40:
537 phymode = MODE_11NA_HT40;
538 break;
539 case NL80211_CHAN_WIDTH_80:
540 phymode = MODE_11AC_VHT80;
541 break;
542 case NL80211_CHAN_WIDTH_5:
543 case NL80211_CHAN_WIDTH_10:
544 case NL80211_CHAN_WIDTH_80P80:
545 case NL80211_CHAN_WIDTH_160:
546 phymode = MODE_UNKNOWN;
547 break;
548 }
549 break;
550 default:
551 break;
552 }
553
554 WARN_ON(phymode == MODE_UNKNOWN);
555 return phymode;
556 }
557
558 static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
559 {
560 /*
561 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
562 * 0 for no restriction
563 * 1 for 1/4 us
564 * 2 for 1/2 us
565 * 3 for 1 us
566 * 4 for 2 us
567 * 5 for 4 us
568 * 6 for 8 us
569 * 7 for 16 us
570 */
571 switch (mpdudensity) {
572 case 0:
573 return 0;
574 case 1:
575 case 2:
576 case 3:
577 /* Our lower layer calculations limit our precision to
578 1 microsecond */
579 return 1;
580 case 4:
581 return 2;
582 case 5:
583 return 4;
584 case 6:
585 return 8;
586 case 7:
587 return 16;
588 default:
589 return 0;
590 }
591 }
592
593 int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
594 struct cfg80211_chan_def *def)
595 {
596 struct ieee80211_chanctx_conf *conf;
597
598 rcu_read_lock();
599 conf = rcu_dereference(vif->chanctx_conf);
600 if (!conf) {
601 rcu_read_unlock();
602 return -ENOENT;
603 }
604
605 *def = conf->def;
606 rcu_read_unlock();
607
608 return 0;
609 }
610
611 static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw,
612 struct ieee80211_chanctx_conf *conf,
613 void *data)
614 {
615 int *num = data;
616
617 (*num)++;
618 }
619
620 static int ath10k_mac_num_chanctxs(struct ath10k *ar)
621 {
622 int num = 0;
623
624 ieee80211_iter_chan_contexts_atomic(ar->hw,
625 ath10k_mac_num_chanctxs_iter,
626 &num);
627
628 return num;
629 }
630
631 static void
632 ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
633 struct ieee80211_chanctx_conf *conf,
634 void *data)
635 {
636 struct cfg80211_chan_def **def = data;
637
638 *def = &conf->def;
639 }
640
641 static int ath10k_peer_create(struct ath10k *ar,
642 struct ieee80211_vif *vif,
643 struct ieee80211_sta *sta,
644 u32 vdev_id,
645 const u8 *addr,
646 enum wmi_peer_type peer_type)
647 {
648 struct ath10k_vif *arvif;
649 struct ath10k_peer *peer;
650 int num_peers = 0;
651 int ret;
652
653 lockdep_assert_held(&ar->conf_mutex);
654
655 num_peers = ar->num_peers;
656
657 /* Each vdev consumes a peer entry as well */
658 list_for_each_entry(arvif, &ar->arvifs, list)
659 num_peers++;
660
661 if (num_peers >= ar->max_num_peers)
662 return -ENOBUFS;
663
664 ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
665 if (ret) {
666 ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
667 addr, vdev_id, ret);
668 return ret;
669 }
670
671 ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
672 if (ret) {
673 ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n",
674 addr, vdev_id, ret);
675 return ret;
676 }
677
678 spin_lock_bh(&ar->data_lock);
679
680 peer = ath10k_peer_find(ar, vdev_id, addr);
681 if (!peer) {
682 ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
683 addr, vdev_id);
684 ath10k_wmi_peer_delete(ar, vdev_id, addr);
685 spin_unlock_bh(&ar->data_lock);
686 return -ENOENT;
687 }
688
689 peer->vif = vif;
690 peer->sta = sta;
691
692 spin_unlock_bh(&ar->data_lock);
693
694 ar->num_peers++;
695
696 return 0;
697 }
698
699 static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
700 {
701 struct ath10k *ar = arvif->ar;
702 u32 param;
703 int ret;
704
705 param = ar->wmi.pdev_param->sta_kickout_th;
706 ret = ath10k_wmi_pdev_set_param(ar, param,
707 ATH10K_KICKOUT_THRESHOLD);
708 if (ret) {
709 ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n",
710 arvif->vdev_id, ret);
711 return ret;
712 }
713
714 param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs;
715 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
716 ATH10K_KEEPALIVE_MIN_IDLE);
717 if (ret) {
718 ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n",
719 arvif->vdev_id, ret);
720 return ret;
721 }
722
723 param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs;
724 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
725 ATH10K_KEEPALIVE_MAX_IDLE);
726 if (ret) {
727 ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n",
728 arvif->vdev_id, ret);
729 return ret;
730 }
731
732 param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs;
733 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
734 ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
735 if (ret) {
736 ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
737 arvif->vdev_id, ret);
738 return ret;
739 }
740
741 return 0;
742 }
743
744 static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
745 {
746 struct ath10k *ar = arvif->ar;
747 u32 vdev_param;
748
749 vdev_param = ar->wmi.vdev_param->rts_threshold;
750 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
751 }
752
753 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
754 {
755 int ret;
756
757 lockdep_assert_held(&ar->conf_mutex);
758
759 ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
760 if (ret)
761 return ret;
762
763 ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
764 if (ret)
765 return ret;
766
767 ar->num_peers--;
768
769 return 0;
770 }
771
772 static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
773 {
774 struct ath10k_peer *peer, *tmp;
775 int peer_id;
776
777 lockdep_assert_held(&ar->conf_mutex);
778
779 spin_lock_bh(&ar->data_lock);
780 list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
781 if (peer->vdev_id != vdev_id)
782 continue;
783
784 ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
785 peer->addr, vdev_id);
786
787 for_each_set_bit(peer_id, peer->peer_ids,
788 ATH10K_MAX_NUM_PEER_IDS) {
789 ar->peer_map[peer_id] = NULL;
790 }
791
792 list_del(&peer->list);
793 kfree(peer);
794 ar->num_peers--;
795 }
796 spin_unlock_bh(&ar->data_lock);
797 }
798
799 static void ath10k_peer_cleanup_all(struct ath10k *ar)
800 {
801 struct ath10k_peer *peer, *tmp;
802
803 lockdep_assert_held(&ar->conf_mutex);
804
805 spin_lock_bh(&ar->data_lock);
806 list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
807 list_del(&peer->list);
808 kfree(peer);
809 }
810 spin_unlock_bh(&ar->data_lock);
811
812 ar->num_peers = 0;
813 ar->num_stations = 0;
814 }
815
816 static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id,
817 struct ieee80211_sta *sta,
818 enum wmi_tdls_peer_state state)
819 {
820 int ret;
821 struct wmi_tdls_peer_update_cmd_arg arg = {};
822 struct wmi_tdls_peer_capab_arg cap = {};
823 struct wmi_channel_arg chan_arg = {};
824
825 lockdep_assert_held(&ar->conf_mutex);
826
827 arg.vdev_id = vdev_id;
828 arg.peer_state = state;
829 ether_addr_copy(arg.addr, sta->addr);
830
831 cap.peer_max_sp = sta->max_sp;
832 cap.peer_uapsd_queues = sta->uapsd_queues;
833
834 if (state == WMI_TDLS_PEER_STATE_CONNECTED &&
835 !sta->tdls_initiator)
836 cap.is_peer_responder = 1;
837
838 ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg);
839 if (ret) {
840 ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n",
841 arg.addr, vdev_id, ret);
842 return ret;
843 }
844
845 return 0;
846 }
847
848 /************************/
849 /* Interface management */
850 /************************/
851
852 void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
853 {
854 struct ath10k *ar = arvif->ar;
855
856 lockdep_assert_held(&ar->data_lock);
857
858 if (!arvif->beacon)
859 return;
860
861 if (!arvif->beacon_buf)
862 dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
863 arvif->beacon->len, DMA_TO_DEVICE);
864
865 if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
866 arvif->beacon_state != ATH10K_BEACON_SENT))
867 return;
868
869 dev_kfree_skb_any(arvif->beacon);
870
871 arvif->beacon = NULL;
872 arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
873 }
874
875 static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
876 {
877 struct ath10k *ar = arvif->ar;
878
879 lockdep_assert_held(&ar->data_lock);
880
881 ath10k_mac_vif_beacon_free(arvif);
882
883 if (arvif->beacon_buf) {
884 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
885 arvif->beacon_buf, arvif->beacon_paddr);
886 arvif->beacon_buf = NULL;
887 }
888 }
889
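/* Wait for firmware to signal completion of a previously issued vdev
 * start/restart/stop request.
 */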
890 static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
891 {
892 unsigned long time_left;
893
894 lockdep_assert_held(&ar->conf_mutex);
895
896 if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
897 return -ESHUTDOWN;
898
899 time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
900 ATH10K_VDEV_SETUP_TIMEOUT_HZ);
901 if (time_left == 0)
902 return -ETIMEDOUT;
903
904 return 0;
905 }
906
907 static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
908 {
909 struct cfg80211_chan_def *chandef = NULL;
910 struct ieee80211_channel *channel = NULL;
911 struct wmi_vdev_start_request_arg arg = {};
912 int ret = 0;
913
914 lockdep_assert_held(&ar->conf_mutex);
915
916 ieee80211_iter_chan_contexts_atomic(ar->hw,
917 ath10k_mac_get_any_chandef_iter,
918 &chandef);
919 if (WARN_ON_ONCE(!chandef))
920 return -ENOENT;
921
922 channel = chandef->chan;
923
924 arg.vdev_id = vdev_id;
925 arg.channel.freq = channel->center_freq;
926 arg.channel.band_center_freq1 = chandef->center_freq1;
927
928         /* TODO: set this up dynamically; what if we
929 don't have any vifs? */
930 arg.channel.mode = chan_to_phymode(chandef);
931 arg.channel.chan_radar =
932 !!(channel->flags & IEEE80211_CHAN_RADAR);
933
934 arg.channel.min_power = 0;
935 arg.channel.max_power = channel->max_power * 2;
936 arg.channel.max_reg_power = channel->max_reg_power * 2;
937 arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
938
939 reinit_completion(&ar->vdev_setup_done);
940
941 ret = ath10k_wmi_vdev_start(ar, &arg);
942 if (ret) {
943 ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
944 vdev_id, ret);
945 return ret;
946 }
947
948 ret = ath10k_vdev_setup_sync(ar);
949 if (ret) {
950 ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
951 vdev_id, ret);
952 return ret;
953 }
954
955 ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
956 if (ret) {
957 ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
958 vdev_id, ret);
959 goto vdev_stop;
960 }
961
962 ar->monitor_vdev_id = vdev_id;
963
964 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
965 ar->monitor_vdev_id);
966 return 0;
967
968 vdev_stop:
969 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
970 if (ret)
971 ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
972 ar->monitor_vdev_id, ret);
973
974 return ret;
975 }
976
977 static int ath10k_monitor_vdev_stop(struct ath10k *ar)
978 {
979 int ret = 0;
980
981 lockdep_assert_held(&ar->conf_mutex);
982
983 ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
984 if (ret)
985 ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
986 ar->monitor_vdev_id, ret);
987
988 reinit_completion(&ar->vdev_setup_done);
989
990 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
991 if (ret)
992                 ath10k_warn(ar, "failed to request monitor vdev %i stop: %d\n",
993 ar->monitor_vdev_id, ret);
994
995 ret = ath10k_vdev_setup_sync(ar);
996 if (ret)
997 ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
998 ar->monitor_vdev_id, ret);
999
1000 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
1001 ar->monitor_vdev_id);
1002 return ret;
1003 }
1004
1005 static int ath10k_monitor_vdev_create(struct ath10k *ar)
1006 {
1007 int bit, ret = 0;
1008
1009 lockdep_assert_held(&ar->conf_mutex);
1010
1011 if (ar->free_vdev_map == 0) {
1012 ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
1013 return -ENOMEM;
1014 }
1015
1016 bit = __ffs64(ar->free_vdev_map);
1017
1018 ar->monitor_vdev_id = bit;
1019
1020 ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
1021 WMI_VDEV_TYPE_MONITOR,
1022 0, ar->mac_addr);
1023 if (ret) {
1024 ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n",
1025 ar->monitor_vdev_id, ret);
1026 return ret;
1027 }
1028
1029 ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
1030 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
1031 ar->monitor_vdev_id);
1032
1033 return 0;
1034 }
1035
1036 static int ath10k_monitor_vdev_delete(struct ath10k *ar)
1037 {
1038 int ret = 0;
1039
1040 lockdep_assert_held(&ar->conf_mutex);
1041
1042 ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
1043 if (ret) {
1044 ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n",
1045 ar->monitor_vdev_id, ret);
1046 return ret;
1047 }
1048
1049 ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;
1050
1051 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
1052 ar->monitor_vdev_id);
1053 return ret;
1054 }
1055
1056 static int ath10k_monitor_start(struct ath10k *ar)
1057 {
1058 int ret;
1059
1060 lockdep_assert_held(&ar->conf_mutex);
1061
1062 ret = ath10k_monitor_vdev_create(ar);
1063 if (ret) {
1064 ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret);
1065 return ret;
1066 }
1067
1068 ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
1069 if (ret) {
1070 ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret);
1071 ath10k_monitor_vdev_delete(ar);
1072 return ret;
1073 }
1074
1075 ar->monitor_started = true;
1076 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n");
1077
1078 return 0;
1079 }
1080
1081 static int ath10k_monitor_stop(struct ath10k *ar)
1082 {
1083 int ret;
1084
1085 lockdep_assert_held(&ar->conf_mutex);
1086
1087 ret = ath10k_monitor_vdev_stop(ar);
1088 if (ret) {
1089 ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret);
1090 return ret;
1091 }
1092
1093 ret = ath10k_monitor_vdev_delete(ar);
1094 if (ret) {
1095 ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret);
1096 return ret;
1097 }
1098
1099 ar->monitor_started = false;
1100 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n");
1101
1102 return 0;
1103 }
1104
1105 static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
1106 {
1107 int num_ctx;
1108
1109 /* At least one chanctx is required to derive a channel to start
1110 * monitor vdev on.
1111 */
1112 num_ctx = ath10k_mac_num_chanctxs(ar);
1113 if (num_ctx == 0)
1114 return false;
1115
1116 /* If there's already an existing special monitor interface then don't
1117 * bother creating another monitor vdev.
1118 */
1119 if (ar->monitor_arvif)
1120 return false;
1121
1122 return ar->monitor ||
1123 ar->filter_flags & FIF_OTHER_BSS ||
1124 test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1125 }
1126
1127 static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar)
1128 {
1129 int num_ctx;
1130
1131 num_ctx = ath10k_mac_num_chanctxs(ar);
1132
1133 /* FIXME: Current interface combinations and cfg80211/mac80211 code
1134 * shouldn't allow this but make sure to prevent handling the following
1135 * case anyway since multi-channel DFS hasn't been tested at all.
1136 */
1137 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1)
1138 return false;
1139
1140 return true;
1141 }
1142
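/* Start or stop the monitor vdev so that its state matches what is currently
 * needed (monitor mode, FIF_OTHER_BSS or CAC) and allowed.
 */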
1143 static int ath10k_monitor_recalc(struct ath10k *ar)
1144 {
1145 bool needed;
1146 bool allowed;
1147 int ret;
1148
1149 lockdep_assert_held(&ar->conf_mutex);
1150
1151 needed = ath10k_mac_monitor_vdev_is_needed(ar);
1152 allowed = ath10k_mac_monitor_vdev_is_allowed(ar);
1153
1154 ath10k_dbg(ar, ATH10K_DBG_MAC,
1155 "mac monitor recalc started? %d needed? %d allowed? %d\n",
1156 ar->monitor_started, needed, allowed);
1157
1158 if (WARN_ON(needed && !allowed)) {
1159 if (ar->monitor_started) {
1160 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n");
1161
1162 ret = ath10k_monitor_stop(ar);
1163 if (ret)
1164 ath10k_warn(ar, "failed to stop disallowed monitor: %d\n",
1165 ret);
1166 /* not serious */
1167 }
1168
1169 return -EPERM;
1170 }
1171
1172 if (needed == ar->monitor_started)
1173 return 0;
1174
1175 if (needed)
1176 return ath10k_monitor_start(ar);
1177 else
1178 return ath10k_monitor_stop(ar);
1179 }
1180
1181 static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
1182 {
1183 struct ath10k *ar = arvif->ar;
1184 u32 vdev_param, rts_cts = 0;
1185
1186 lockdep_assert_held(&ar->conf_mutex);
1187
1188 vdev_param = ar->wmi.vdev_param->enable_rtscts;
1189
1190 rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
1191
1192 if (arvif->num_legacy_stations > 0)
1193 rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
1194 WMI_RTSCTS_PROFILE);
1195 else
1196 rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
1197 WMI_RTSCTS_PROFILE);
1198
1199 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
1200 rts_cts);
1201 }
1202
1203 static int ath10k_start_cac(struct ath10k *ar)
1204 {
1205 int ret;
1206
1207 lockdep_assert_held(&ar->conf_mutex);
1208
1209 set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1210
1211 ret = ath10k_monitor_recalc(ar);
1212 if (ret) {
1213 ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret);
1214 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1215 return ret;
1216 }
1217
1218 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
1219 ar->monitor_vdev_id);
1220
1221 return 0;
1222 }
1223
1224 static int ath10k_stop_cac(struct ath10k *ar)
1225 {
1226 lockdep_assert_held(&ar->conf_mutex);
1227
1228 /* CAC is not running - do nothing */
1229 if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
1230 return 0;
1231
1232 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1233 ath10k_monitor_stop(ar);
1234
1235 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n");
1236
1237 return 0;
1238 }
1239
1240 static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw,
1241 struct ieee80211_chanctx_conf *conf,
1242 void *data)
1243 {
1244 bool *ret = data;
1245
1246 if (!*ret && conf->radar_enabled)
1247 *ret = true;
1248 }
1249
1250 static bool ath10k_mac_has_radar_enabled(struct ath10k *ar)
1251 {
1252 bool has_radar = false;
1253
1254 ieee80211_iter_chan_contexts_atomic(ar->hw,
1255 ath10k_mac_has_radar_iter,
1256 &has_radar);
1257
1258 return has_radar;
1259 }
1260
1261 static void ath10k_recalc_radar_detection(struct ath10k *ar)
1262 {
1263 int ret;
1264
1265 lockdep_assert_held(&ar->conf_mutex);
1266
1267 ath10k_stop_cac(ar);
1268
1269 if (!ath10k_mac_has_radar_enabled(ar))
1270 return;
1271
1272 if (ar->num_started_vdevs > 0)
1273 return;
1274
1275 ret = ath10k_start_cac(ar);
1276 if (ret) {
1277 /*
1278 * Not possible to start CAC on current channel so starting
1279 * radiation is not allowed, make this channel DFS_UNAVAILABLE
1280 * by indicating that radar was detected.
1281 */
1282 ath10k_warn(ar, "failed to start CAC: %d\n", ret);
1283 ieee80211_radar_detected(ar->hw);
1284 }
1285 }
1286
1287 static int ath10k_vdev_stop(struct ath10k_vif *arvif)
1288 {
1289 struct ath10k *ar = arvif->ar;
1290 int ret;
1291
1292 lockdep_assert_held(&ar->conf_mutex);
1293
1294 reinit_completion(&ar->vdev_setup_done);
1295
1296 ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
1297 if (ret) {
1298 ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
1299 arvif->vdev_id, ret);
1300 return ret;
1301 }
1302
1303 ret = ath10k_vdev_setup_sync(ar);
1304 if (ret) {
1305                 ath10k_warn(ar, "failed to synchronize setup for vdev %i: %d\n",
1306 arvif->vdev_id, ret);
1307 return ret;
1308 }
1309
1310 WARN_ON(ar->num_started_vdevs == 0);
1311
1312 if (ar->num_started_vdevs != 0) {
1313 ar->num_started_vdevs--;
1314 ath10k_recalc_radar_detection(ar);
1315 }
1316
1317 return ret;
1318 }
1319
1320 static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
1321 const struct cfg80211_chan_def *chandef,
1322 bool restart)
1323 {
1324 struct ath10k *ar = arvif->ar;
1325 struct wmi_vdev_start_request_arg arg = {};
1326 int ret = 0;
1327
1328 lockdep_assert_held(&ar->conf_mutex);
1329
1330 reinit_completion(&ar->vdev_setup_done);
1331
1332 arg.vdev_id = arvif->vdev_id;
1333 arg.dtim_period = arvif->dtim_period;
1334 arg.bcn_intval = arvif->beacon_interval;
1335
1336 arg.channel.freq = chandef->chan->center_freq;
1337 arg.channel.band_center_freq1 = chandef->center_freq1;
1338 arg.channel.mode = chan_to_phymode(chandef);
1339
1340 arg.channel.min_power = 0;
1341 arg.channel.max_power = chandef->chan->max_power * 2;
1342 arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
1343 arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
1344
1345 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
1346 arg.ssid = arvif->u.ap.ssid;
1347 arg.ssid_len = arvif->u.ap.ssid_len;
1348 arg.hidden_ssid = arvif->u.ap.hidden_ssid;
1349
1350 /* For now allow DFS for AP mode */
1351 arg.channel.chan_radar =
1352 !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
1353 } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
1354 arg.ssid = arvif->vif->bss_conf.ssid;
1355 arg.ssid_len = arvif->vif->bss_conf.ssid_len;
1356 }
1357
1358 ath10k_dbg(ar, ATH10K_DBG_MAC,
1359 "mac vdev %d start center_freq %d phymode %s\n",
1360 arg.vdev_id, arg.channel.freq,
1361 ath10k_wmi_phymode_str(arg.channel.mode));
1362
1363 if (restart)
1364 ret = ath10k_wmi_vdev_restart(ar, &arg);
1365 else
1366 ret = ath10k_wmi_vdev_start(ar, &arg);
1367
1368 if (ret) {
1369 ath10k_warn(ar, "failed to start WMI vdev %i: %d\n",
1370 arg.vdev_id, ret);
1371 return ret;
1372 }
1373
1374 ret = ath10k_vdev_setup_sync(ar);
1375 if (ret) {
1376 ath10k_warn(ar,
1377 "failed to synchronize setup for vdev %i restart %d: %d\n",
1378 arg.vdev_id, restart, ret);
1379 return ret;
1380 }
1381
1382 ar->num_started_vdevs++;
1383 ath10k_recalc_radar_detection(ar);
1384
1385 return ret;
1386 }
1387
1388 static int ath10k_vdev_start(struct ath10k_vif *arvif,
1389 const struct cfg80211_chan_def *def)
1390 {
1391 return ath10k_vdev_start_restart(arvif, def, false);
1392 }
1393
1394 static int ath10k_vdev_restart(struct ath10k_vif *arvif,
1395 const struct cfg80211_chan_def *def)
1396 {
1397 return ath10k_vdev_start_restart(arvif, def, true);
1398 }
1399
1400 static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
1401 struct sk_buff *bcn)
1402 {
1403 struct ath10k *ar = arvif->ar;
1404 struct ieee80211_mgmt *mgmt;
1405 const u8 *p2p_ie;
1406 int ret;
1407
1408 if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p)
1409 return 0;
1410
1411 mgmt = (void *)bcn->data;
1412 p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1413 mgmt->u.beacon.variable,
1414 bcn->len - (mgmt->u.beacon.variable -
1415 bcn->data));
1416 if (!p2p_ie)
1417 return -ENOENT;
1418
1419 ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
1420 if (ret) {
1421 ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n",
1422 arvif->vdev_id, ret);
1423 return ret;
1424 }
1425
1426 return 0;
1427 }
1428
1429 static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
1430 u8 oui_type, size_t ie_offset)
1431 {
1432 size_t len;
1433 const u8 *next;
1434 const u8 *end;
1435 u8 *ie;
1436
1437 if (WARN_ON(skb->len < ie_offset))
1438 return -EINVAL;
1439
1440 ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
1441 skb->data + ie_offset,
1442 skb->len - ie_offset);
1443 if (!ie)
1444 return -ENOENT;
1445
1446 len = ie[1] + 2;
1447 end = skb->data + skb->len;
1448 next = ie + len;
1449
1450 if (WARN_ON(next > end))
1451 return -EINVAL;
1452
1453 memmove(ie, next, end - next);
1454 skb_trim(skb, skb->len - len);
1455
1456 return 0;
1457 }
1458
1459 static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
1460 {
1461 struct ath10k *ar = arvif->ar;
1462 struct ieee80211_hw *hw = ar->hw;
1463 struct ieee80211_vif *vif = arvif->vif;
1464 struct ieee80211_mutable_offsets offs = {};
1465 struct sk_buff *bcn;
1466 int ret;
1467
1468 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1469 return 0;
1470
1471 if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
1472 arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
1473 return 0;
1474
1475 bcn = ieee80211_beacon_get_template(hw, vif, &offs);
1476 if (!bcn) {
1477 ath10k_warn(ar, "failed to get beacon template from mac80211\n");
1478 return -EPERM;
1479 }
1480
1481 ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn);
1482 if (ret) {
1483 ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret);
1484 kfree_skb(bcn);
1485 return ret;
1486 }
1487
1488 /* P2P IE is inserted by firmware automatically (as configured above)
1489 * so remove it from the base beacon template to avoid duplicate P2P
1490 * IEs in beacon frames.
1491 */
1492 ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1493 offsetof(struct ieee80211_mgmt,
1494 u.beacon.variable));
1495
1496 ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0,
1497 0, NULL, 0);
1498 kfree_skb(bcn);
1499
1500 if (ret) {
1501 ath10k_warn(ar, "failed to submit beacon template command: %d\n",
1502 ret);
1503 return ret;
1504 }
1505
1506 return 0;
1507 }
1508
1509 static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
1510 {
1511 struct ath10k *ar = arvif->ar;
1512 struct ieee80211_hw *hw = ar->hw;
1513 struct ieee80211_vif *vif = arvif->vif;
1514 struct sk_buff *prb;
1515 int ret;
1516
1517 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1518 return 0;
1519
1520 if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
1521 return 0;
1522
1523 prb = ieee80211_proberesp_get(hw, vif);
1524 if (!prb) {
1525 ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
1526 return -EPERM;
1527 }
1528
1529 ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb);
1530 kfree_skb(prb);
1531
1532 if (ret) {
1533 ath10k_warn(ar, "failed to submit probe resp template command: %d\n",
1534 ret);
1535 return ret;
1536 }
1537
1538 return 0;
1539 }
1540
1541 static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif)
1542 {
1543 struct ath10k *ar = arvif->ar;
1544 struct cfg80211_chan_def def;
1545 int ret;
1546
1547         /* When the vdev is originally started during assign_vif_chanctx() some
1548 * information is missing, notably SSID. Firmware revisions with beacon
1549 * offloading require the SSID to be provided during vdev (re)start to
1550 * handle hidden SSID properly.
1551 *
1552 * Vdev restart must be done after vdev has been both started and
1553 * upped. Otherwise some firmware revisions (at least 10.2) fail to
1554 * deliver vdev restart response event causing timeouts during vdev
1555 * syncing in ath10k.
1556 *
1557 * Note: The vdev down/up and template reinstallation could be skipped
1558          * since only wmi-tlv firmware is known to have beacon offload and
1559 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart
1560 * response delivery. It's probably more robust to keep it as is.
1561 */
1562 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1563 return 0;
1564
1565 if (WARN_ON(!arvif->is_started))
1566 return -EINVAL;
1567
1568 if (WARN_ON(!arvif->is_up))
1569 return -EINVAL;
1570
1571 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
1572 return -EINVAL;
1573
1574 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1575 if (ret) {
1576 ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n",
1577 arvif->vdev_id, ret);
1578 return ret;
1579 }
1580
1581         /* Vdev down resets beacon & presp templates. Reinstall them. Otherwise
1582 * firmware will crash upon vdev up.
1583 */
1584
1585 ret = ath10k_mac_setup_bcn_tmpl(arvif);
1586 if (ret) {
1587 ath10k_warn(ar, "failed to update beacon template: %d\n", ret);
1588 return ret;
1589 }
1590
1591 ret = ath10k_mac_setup_prb_tmpl(arvif);
1592 if (ret) {
1593 ath10k_warn(ar, "failed to update presp template: %d\n", ret);
1594 return ret;
1595 }
1596
1597 ret = ath10k_vdev_restart(arvif, &def);
1598 if (ret) {
1599 ath10k_warn(ar, "failed to restart ap vdev %i: %d\n",
1600 arvif->vdev_id, ret);
1601 return ret;
1602 }
1603
1604 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1605 arvif->bssid);
1606 if (ret) {
1607 ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n",
1608 arvif->vdev_id, ret);
1609 return ret;
1610 }
1611
1612 return 0;
1613 }
1614
1615 static void ath10k_control_beaconing(struct ath10k_vif *arvif,
1616 struct ieee80211_bss_conf *info)
1617 {
1618 struct ath10k *ar = arvif->ar;
1619 int ret = 0;
1620
1621 lockdep_assert_held(&arvif->ar->conf_mutex);
1622
1623 if (!info->enable_beacon) {
1624 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1625 if (ret)
1626 ath10k_warn(ar, "failed to down vdev_id %i: %d\n",
1627 arvif->vdev_id, ret);
1628
1629 arvif->is_up = false;
1630
1631 spin_lock_bh(&arvif->ar->data_lock);
1632 ath10k_mac_vif_beacon_free(arvif);
1633 spin_unlock_bh(&arvif->ar->data_lock);
1634
1635 return;
1636 }
1637
1638 arvif->tx_seq_no = 0x1000;
1639
1640 arvif->aid = 0;
1641 ether_addr_copy(arvif->bssid, info->bssid);
1642
1643 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1644 arvif->bssid);
1645 if (ret) {
1646 ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
1647 arvif->vdev_id, ret);
1648 return;
1649 }
1650
1651 arvif->is_up = true;
1652
1653 ret = ath10k_mac_vif_fix_hidden_ssid(arvif);
1654 if (ret) {
1655 ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n",
1656 arvif->vdev_id, ret);
1657 return;
1658 }
1659
1660 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
1661 }
1662
1663 static void ath10k_control_ibss(struct ath10k_vif *arvif,
1664 struct ieee80211_bss_conf *info,
1665 const u8 self_peer[ETH_ALEN])
1666 {
1667 struct ath10k *ar = arvif->ar;
1668 u32 vdev_param;
1669 int ret = 0;
1670
1671 lockdep_assert_held(&arvif->ar->conf_mutex);
1672
1673 if (!info->ibss_joined) {
1674 if (is_zero_ether_addr(arvif->bssid))
1675 return;
1676
1677 eth_zero_addr(arvif->bssid);
1678
1679 return;
1680 }
1681
1682 vdev_param = arvif->ar->wmi.vdev_param->atim_window;
1683 ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
1684 ATH10K_DEFAULT_ATIM);
1685 if (ret)
1686 ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n",
1687 arvif->vdev_id, ret);
1688 }
1689
1690 static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif)
1691 {
1692 struct ath10k *ar = arvif->ar;
1693 u32 param;
1694 u32 value;
1695 int ret;
1696
1697 lockdep_assert_held(&arvif->ar->conf_mutex);
1698
1699 if (arvif->u.sta.uapsd)
1700 value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER;
1701 else
1702 value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
1703
1704 param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
1705 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value);
1706 if (ret) {
1707 ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n",
1708 value, arvif->vdev_id, ret);
1709 return ret;
1710 }
1711
1712 return 0;
1713 }
1714
1715 static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
1716 {
1717 struct ath10k *ar = arvif->ar;
1718 u32 param;
1719 u32 value;
1720 int ret;
1721
1722 lockdep_assert_held(&arvif->ar->conf_mutex);
1723
1724 if (arvif->u.sta.uapsd)
1725 value = WMI_STA_PS_PSPOLL_COUNT_UAPSD;
1726 else
1727 value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
1728
1729 param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
1730 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
1731 param, value);
1732 if (ret) {
1733 ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n",
1734 value, arvif->vdev_id, ret);
1735 return ret;
1736 }
1737
1738 return 0;
1739 }
1740
1741 static int ath10k_mac_num_vifs_started(struct ath10k *ar)
1742 {
1743 struct ath10k_vif *arvif;
1744 int num = 0;
1745
1746 lockdep_assert_held(&ar->conf_mutex);
1747
1748 list_for_each_entry(arvif, &ar->arvifs, list)
1749 if (arvif->is_started)
1750 num++;
1751
1752 return num;
1753 }
1754
1755 static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
1756 {
1757 struct ath10k *ar = arvif->ar;
1758 struct ieee80211_vif *vif = arvif->vif;
1759 struct ieee80211_conf *conf = &ar->hw->conf;
1760 enum wmi_sta_powersave_param param;
1761 enum wmi_sta_ps_mode psmode;
1762 int ret;
1763 int ps_timeout;
1764 bool enable_ps;
1765
1766 lockdep_assert_held(&arvif->ar->conf_mutex);
1767
1768 if (arvif->vif->type != NL80211_IFTYPE_STATION)
1769 return 0;
1770
1771 enable_ps = arvif->ps;
1772
1773 if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
1774 !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
1775 ar->running_fw->fw_file.fw_features)) {
1776 ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
1777 arvif->vdev_id);
1778 enable_ps = false;
1779 }
1780
1781 if (!arvif->is_started) {
1782 /* mac80211 can update vif powersave state while disconnected.
1783 * Firmware doesn't behave nicely and consumes more power than
1784 * necessary if PS is disabled on a non-started vdev. Hence
1785 * force-enable PS for non-running vdevs.
1786 */
1787 psmode = WMI_STA_PS_MODE_ENABLED;
1788 } else if (enable_ps) {
1789 psmode = WMI_STA_PS_MODE_ENABLED;
1790 param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
1791
1792 ps_timeout = conf->dynamic_ps_timeout;
1793 if (ps_timeout == 0) {
1794 /* Firmware doesn't like 0 */
1795 ps_timeout = ieee80211_tu_to_usec(
1796 vif->bss_conf.beacon_int) / 1000;
1797 }
1798
1799 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
1800 ps_timeout);
1801 if (ret) {
1802 ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n",
1803 arvif->vdev_id, ret);
1804 return ret;
1805 }
1806 } else {
1807 psmode = WMI_STA_PS_MODE_DISABLED;
1808 }
1809
1810 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
1811 arvif->vdev_id, psmode ? "enable" : "disable");
1812
1813 ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
1814 if (ret) {
1815 ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n",
1816 psmode, arvif->vdev_id, ret);
1817 return ret;
1818 }
1819
1820 return 0;
1821 }
1822
1823 static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
1824 {
1825 struct ath10k *ar = arvif->ar;
1826 struct wmi_sta_keepalive_arg arg = {};
1827 int ret;
1828
1829 lockdep_assert_held(&arvif->ar->conf_mutex);
1830
1831 if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
1832 return 0;
1833
1834 if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
1835 return 0;
1836
1837 /* Some firmware revisions have a bug and ignore the `enabled` field.
1838 * Instead use the interval to disable the keepalive.
1839 */
1840 arg.vdev_id = arvif->vdev_id;
1841 arg.enabled = 1;
1842 arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
1843 arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;
1844
1845 ret = ath10k_wmi_sta_keepalive(ar, &arg);
1846 if (ret) {
1847 ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n",
1848 arvif->vdev_id, ret);
1849 return ret;
1850 }
1851
1852 return 0;
1853 }
1854
1855 static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
1856 {
1857 struct ath10k *ar = arvif->ar;
1858 struct ieee80211_vif *vif = arvif->vif;
1859 int ret;
1860
1861 lockdep_assert_held(&arvif->ar->conf_mutex);
1862
1863 if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)))
1864 return;
1865
1866 if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
1867 return;
1868
1869 if (!vif->csa_active)
1870 return;
1871
1872 if (!arvif->is_up)
1873 return;
1874
1875 if (!ieee80211_csa_is_complete(vif)) {
1876 ieee80211_csa_update_counter(vif);
1877
1878 ret = ath10k_mac_setup_bcn_tmpl(arvif);
1879 if (ret)
1880 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
1881 ret);
1882
1883 ret = ath10k_mac_setup_prb_tmpl(arvif);
1884 if (ret)
1885 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
1886 ret);
1887 } else {
1888 ieee80211_csa_finish(vif);
1889 }
1890 }
1891
1892 static void ath10k_mac_vif_ap_csa_work(struct work_struct *work)
1893 {
1894 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
1895 ap_csa_work);
1896 struct ath10k *ar = arvif->ar;
1897
1898 mutex_lock(&ar->conf_mutex);
1899 ath10k_mac_vif_ap_csa_count_down(arvif);
1900 mutex_unlock(&ar->conf_mutex);
1901 }
1902
1903 static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
1904 struct ieee80211_vif *vif)
1905 {
1906 struct sk_buff *skb = data;
1907 struct ieee80211_mgmt *mgmt = (void *)skb->data;
1908 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1909
1910 if (vif->type != NL80211_IFTYPE_STATION)
1911 return;
1912
1913 if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
1914 return;
1915
1916 cancel_delayed_work(&arvif->connection_loss_work);
1917 }
1918
1919 void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
1920 {
1921 ieee80211_iterate_active_interfaces_atomic(ar->hw,
1922 IEEE80211_IFACE_ITER_NORMAL,
1923 ath10k_mac_handle_beacon_iter,
1924 skb);
1925 }
1926
1927 static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
1928 struct ieee80211_vif *vif)
1929 {
1930 u32 *vdev_id = data;
1931 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1932 struct ath10k *ar = arvif->ar;
1933 struct ieee80211_hw *hw = ar->hw;
1934
1935 if (arvif->vdev_id != *vdev_id)
1936 return;
1937
1938 if (!arvif->is_up)
1939 return;
1940
1941 ieee80211_beacon_loss(vif);
1942
1943 /* Firmware doesn't report beacon loss events repeatedly. If AP probe
1944 * (done by mac80211) succeeds but beacons do not resume then it
1945 * doesn't make sense to continue operation. Queue connection loss work
1946 * which can be cancelled when beacon is received.
1947 */
1948 ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
1949 ATH10K_CONNECTION_LOSS_HZ);
1950 }
1951
1952 void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id)
1953 {
1954 ieee80211_iterate_active_interfaces_atomic(ar->hw,
1955 IEEE80211_IFACE_ITER_NORMAL,
1956 ath10k_mac_handle_beacon_miss_iter,
1957 &vdev_id);
1958 }
1959
1960 static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work)
1961 {
1962 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
1963 connection_loss_work.work);
1964 struct ieee80211_vif *vif = arvif->vif;
1965
1966 if (!arvif->is_up)
1967 return;
1968
1969 ieee80211_connection_loss(vif);
1970 }
1971
1972 /**********************/
1973 /* Station management */
1974 /**********************/
1975
1976 static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar,
1977 struct ieee80211_vif *vif)
1978 {
1979 /* Some firmware revisions have unstable STA powersave when listen
1980          * interval is set too high (e.g. 5). The symptom is that the firmware doesn't
1981 * generate NullFunc frames properly even if buffered frames have been
1982 * indicated in Beacon TIM. Firmware would seldom wake up to pull
1983 * buffered frames. Often pinging the device from AP would simply fail.
1984 *
1985 * As a workaround set it to 1.
1986 */
1987 if (vif->type == NL80211_IFTYPE_STATION)
1988 return 1;
1989
1990 return ar->hw->conf.listen_interval;
1991 }
1992
1993 static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
1994 struct ieee80211_vif *vif,
1995 struct ieee80211_sta *sta,
1996 struct wmi_peer_assoc_complete_arg *arg)
1997 {
1998 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1999 u32 aid;
2000
2001 lockdep_assert_held(&ar->conf_mutex);
2002
2003 if (vif->type == NL80211_IFTYPE_STATION)
2004 aid = vif->bss_conf.aid;
2005 else
2006 aid = sta->aid;
2007
2008 ether_addr_copy(arg->addr, sta->addr);
2009 arg->vdev_id = arvif->vdev_id;
2010 arg->peer_aid = aid;
2011 arg->peer_flags |= arvif->ar->wmi.peer_flags->auth;
2012 arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
2013 arg->peer_num_spatial_streams = 1;
2014 arg->peer_caps = vif->bss_conf.assoc_capability;
2015 }
2016
2017 static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
2018 struct ieee80211_vif *vif,
2019 struct ieee80211_sta *sta,
2020 struct wmi_peer_assoc_complete_arg *arg)
2021 {
2022 struct ieee80211_bss_conf *info = &vif->bss_conf;
2023 struct cfg80211_chan_def def;
2024 struct cfg80211_bss *bss;
2025 const u8 *rsnie = NULL;
2026 const u8 *wpaie = NULL;
2027
2028 lockdep_assert_held(&ar->conf_mutex);
2029
2030 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2031 return;
2032
2033 bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
2034 IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
2035 if (bss) {
2036 const struct cfg80211_bss_ies *ies;
2037
2038 rcu_read_lock();
2039 rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
2040
2041 ies = rcu_dereference(bss->ies);
2042
2043 wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
2044 WLAN_OUI_TYPE_MICROSOFT_WPA,
2045 ies->data,
2046 ies->len);
2047 rcu_read_unlock();
2048 cfg80211_put_bss(ar->hw->wiphy, bss);
2049 }
2050
2051         /* FIXME: is basing this on the RSN IE/WPA IE a correct idea? */
2052 if (rsnie || wpaie) {
2053 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
2054 arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way;
2055 }
2056
2057 if (wpaie) {
2058 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
2059 arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way;
2060 }
2061
2062 if (sta->mfp &&
2063 test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT,
2064 ar->running_fw->fw_file.fw_features)) {
2065 arg->peer_flags |= ar->wmi.peer_flags->pmf;
2066 }
2067 }
2068
2069 static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
2070 struct ieee80211_vif *vif,
2071 struct ieee80211_sta *sta,
2072 struct wmi_peer_assoc_complete_arg *arg)
2073 {
2074 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2075 struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
2076 struct cfg80211_chan_def def;
2077 const struct ieee80211_supported_band *sband;
2078 const struct ieee80211_rate *rates;
2079 enum nl80211_band band;
2080 u32 ratemask;
2081 u8 rate;
2082 int i;
2083
2084 lockdep_assert_held(&ar->conf_mutex);
2085
2086 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2087 return;
2088
2089 band = def.chan->band;
2090 sband = ar->hw->wiphy->bands[band];
2091 ratemask = sta->supp_rates[band];
2092 ratemask &= arvif->bitrate_mask.control[band].legacy;
2093 rates = sband->bitrates;
2094
2095 rateset->num_rates = 0;
2096
2097 for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
2098 if (!(ratemask & 1))
2099 continue;
2100
2101 rate = ath10k_mac_bitrate_to_rate(rates->bitrate);
2102 rateset->rates[rateset->num_rates] = rate;
2103 rateset->num_rates++;
2104 }
2105 }
2106
2107 static bool
2108 ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
2109 {
2110 int nss;
2111
2112 for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
2113 if (ht_mcs_mask[nss])
2114 return false;
2115
2116 return true;
2117 }
2118
2119 static bool
2120 ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
2121 {
2122 int nss;
2123
2124 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
2125 if (vht_mcs_mask[nss])
2126 return false;
2127
2128 return true;
2129 }
2130
2131 static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
2132 struct ieee80211_vif *vif,
2133 struct ieee80211_sta *sta,
2134 struct wmi_peer_assoc_complete_arg *arg)
2135 {
2136 const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2137 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2138 struct cfg80211_chan_def def;
2139 enum nl80211_band band;
2140 const u8 *ht_mcs_mask;
2141 const u16 *vht_mcs_mask;
2142 int i, n;
2143 u8 max_nss;
2144 u32 stbc;
2145
2146 lockdep_assert_held(&ar->conf_mutex);
2147
2148 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2149 return;
2150
2151 if (!ht_cap->ht_supported)
2152 return;
2153
2154 band = def.chan->band;
2155 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2156 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2157
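/* If the user has masked out every HT and VHT MCS rate, skip the HT
 * setup entirely so the peer is treated as legacy-only.
 */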
2158 if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) &&
2159 ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2160 return;
2161
2162 arg->peer_flags |= ar->wmi.peer_flags->ht;
2163 arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2164 ht_cap->ampdu_factor)) - 1;
2165
2166 arg->peer_mpdu_density =
2167 ath10k_parse_mpdudensity(ht_cap->ampdu_density);
2168
2169 arg->peer_ht_caps = ht_cap->cap;
2170 arg->peer_rate_caps |= WMI_RC_HT_FLAG;
2171
2172 if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
2173 arg->peer_flags |= ar->wmi.peer_flags->ldbc;
2174
2175 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
2176 arg->peer_flags |= ar->wmi.peer_flags->bw40;
2177 arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
2178 }
2179
2180 if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
2181 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
2182 arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2183
2184 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
2185 arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2186 }
2187
2188 if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
2189 arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
2190 arg->peer_flags |= ar->wmi.peer_flags->stbc;
2191 }
2192
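/* Pass the number of RX STBC streams advertised by the peer to the
 * firmware by shifting it into the WMI rate-caps field.
 */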
2193 if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
2194 stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
2195 stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
2196 stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
2197 arg->peer_rate_caps |= stbc;
2198 arg->peer_flags |= ar->wmi.peer_flags->stbc;
2199 }
2200
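/* Non-zero rx_mask[1]/rx_mask[2] indicate support for two- and
 * three-stream HT MCS rates respectively.
 */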
2201 if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
2202 arg->peer_rate_caps |= WMI_RC_TS_FLAG;
2203 else if (ht_cap->mcs.rx_mask[1])
2204 arg->peer_rate_caps |= WMI_RC_DS_FLAG;
2205
2206 for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
2207 if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
2208 (ht_mcs_mask[i / 8] & BIT(i % 8))) {
2209 max_nss = (i / 8) + 1;
2210 arg->peer_ht_rates.rates[n++] = i;
2211 }
2212
2213 /*
2214 * This is a workaround for HT-enabled STAs which break the spec
2215 * and have no HT capabilities RX mask (no HT RX MCS map).
2216 *
2217 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
2218 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
2219 *
2220 * Firmware asserts if such a situation occurs.
2221 */
2222 if (n == 0) {
2223 arg->peer_ht_rates.num_rates = 8;
2224 for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
2225 arg->peer_ht_rates.rates[i] = i;
2226 } else {
2227 arg->peer_ht_rates.num_rates = n;
2228 arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
2229 }
2230
2231 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
2232 arg->addr,
2233 arg->peer_ht_rates.num_rates,
2234 arg->peer_num_spatial_streams);
2235 }
2236
2237 static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
2238 struct ath10k_vif *arvif,
2239 struct ieee80211_sta *sta)
2240 {
2241 u32 uapsd = 0;
2242 u32 max_sp = 0;
2243 int ret = 0;
2244
2245 lockdep_assert_held(&ar->conf_mutex);
2246
2247 if (sta->wme && sta->uapsd_queues) {
2248 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
2249 sta->uapsd_queues, sta->max_sp);
2250
2251 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
2252 uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
2253 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
2254 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
2255 uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
2256 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
2257 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
2258 uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
2259 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
2260 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
2261 uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
2262 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
2263
2264 if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
2265 max_sp = sta->max_sp;
2266
2267 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2268 sta->addr,
2269 WMI_AP_PS_PEER_PARAM_UAPSD,
2270 uapsd);
2271 if (ret) {
2272 ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n",
2273 arvif->vdev_id, ret);
2274 return ret;
2275 }
2276
2277 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2278 sta->addr,
2279 WMI_AP_PS_PEER_PARAM_MAX_SP,
2280 max_sp);
2281 if (ret) {
2282 ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n",
2283 arvif->vdev_id, ret);
2284 return ret;
2285 }
2286
2287 /* TODO: set this up based on the STA listen interval and the
2288 * beacon interval. sta->listen_interval is not available yet
2289 * (a mac80211 patch is required), so use 10 seconds for now.
2290 */
2291 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
2292 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
2293 10);
2294 if (ret) {
2295 ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n",
2296 arvif->vdev_id, ret);
2297 return ret;
2298 }
2299 }
2300
2301 return 0;
2302 }
2303
2304 static u16
2305 ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
2306 const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
2307 {
2308 int idx_limit;
2309 int nss;
2310 u16 mcs_map;
2311 u16 mcs;
2312
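/* For each spatial stream clamp the 2-bit TX MCS map entry to the
 * highest MCS allowed by the configured VHT MCS mask; an empty mask
 * for a stream disables it entirely.
 */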
2313 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
2314 mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
2315 vht_mcs_limit[nss];
2316
2317 if (mcs_map)
2318 idx_limit = fls(mcs_map) - 1;
2319 else
2320 idx_limit = -1;
2321
2322 switch (idx_limit) {
2323 case 0: /* fall through */
2324 case 1: /* fall through */
2325 case 2: /* fall through */
2326 case 3: /* fall through */
2327 case 4: /* fall through */
2328 case 5: /* fall through */
2329 case 6: /* fall through */
2330 default:
2331 /* see ath10k_mac_can_set_bitrate_mask() */
2332 WARN_ON(1);
2333 /* fall through */
2334 case -1:
2335 mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
2336 break;
2337 case 7:
2338 mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
2339 break;
2340 case 8:
2341 mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
2342 break;
2343 case 9:
2344 mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
2345 break;
2346 }
2347
2348 tx_mcs_set &= ~(0x3 << (nss * 2));
2349 tx_mcs_set |= mcs << (nss * 2);
2350 }
2351
2352 return tx_mcs_set;
2353 }
2354
2355 static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
2356 struct ieee80211_vif *vif,
2357 struct ieee80211_sta *sta,
2358 struct wmi_peer_assoc_complete_arg *arg)
2359 {
2360 const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
2361 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2362 struct cfg80211_chan_def def;
2363 enum nl80211_band band;
2364 const u16 *vht_mcs_mask;
2365 u8 ampdu_factor;
2366
2367 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2368 return;
2369
2370 if (!vht_cap->vht_supported)
2371 return;
2372
2373 band = def.chan->band;
2374 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2375
2376 if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2377 return;
2378
2379 arg->peer_flags |= ar->wmi.peer_flags->vht;
2380
2381 if (def.chan->band == NL80211_BAND_2GHZ)
2382 arg->peer_flags |= ar->wmi.peer_flags->vht_2g;
2383
2384 arg->peer_vht_caps = vht_cap->cap;
2385
2386 ampdu_factor = (vht_cap->cap &
2387 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
2388 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
2389
2390 /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
2391 * zero in VHT IE. Using it would result in degraded throughput.
2392 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
2393 * it if VHT max_mpdu is smaller. */
2394 arg->peer_max_mpdu = max(arg->peer_max_mpdu,
2395 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2396 ampdu_factor)) - 1);
2397
2398 if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
2399 arg->peer_flags |= ar->wmi.peer_flags->bw80;
2400
2401 arg->peer_vht_rates.rx_max_rate =
2402 __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
2403 arg->peer_vht_rates.rx_mcs_set =
2404 __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
2405 arg->peer_vht_rates.tx_max_rate =
2406 __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
2407 arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
2408 __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
2409
2410 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
2411 sta->addr, arg->peer_max_mpdu, arg->peer_flags);
2412 }
2413
2414 static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
2415 struct ieee80211_vif *vif,
2416 struct ieee80211_sta *sta,
2417 struct wmi_peer_assoc_complete_arg *arg)
2418 {
2419 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2420
2421 switch (arvif->vdev_type) {
2422 case WMI_VDEV_TYPE_AP:
2423 if (sta->wme)
2424 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2425
2426 if (sta->wme && sta->uapsd_queues) {
2427 arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd;
2428 arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
2429 }
2430 break;
2431 case WMI_VDEV_TYPE_STA:
2432 if (vif->bss_conf.qos)
2433 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2434 break;
2435 case WMI_VDEV_TYPE_IBSS:
2436 if (sta->wme)
2437 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2438 break;
2439 default:
2440 break;
2441 }
2442
2443 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
2444 sta->addr, !!(arg->peer_flags &
2445 arvif->ar->wmi.peer_flags->qos));
2446 }
2447
2448 static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
2449 {
2450 return sta->supp_rates[NL80211_BAND_2GHZ] >>
2451 ATH10K_MAC_FIRST_OFDM_RATE_IDX;
2452 }
2453
2454 static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
2455 struct ieee80211_vif *vif,
2456 struct ieee80211_sta *sta,
2457 struct wmi_peer_assoc_complete_arg *arg)
2458 {
2459 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2460 struct cfg80211_chan_def def;
2461 enum nl80211_band band;
2462 const u8 *ht_mcs_mask;
2463 const u16 *vht_mcs_mask;
2464 enum wmi_phy_mode phymode = MODE_UNKNOWN;
2465
2466 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2467 return;
2468
2469 band = def.chan->band;
2470 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2471 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2472
2473 switch (band) {
2474 case NL80211_BAND_2GHZ:
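/* Check VHT first, then HT, then fall back to legacy: peers with any
 * OFDM rates get 11g, CCK-only peers get 11b.
 */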
2475 if (sta->vht_cap.vht_supported &&
2476 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2477 if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2478 phymode = MODE_11AC_VHT40;
2479 else
2480 phymode = MODE_11AC_VHT20;
2481 } else if (sta->ht_cap.ht_supported &&
2482 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2483 if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2484 phymode = MODE_11NG_HT40;
2485 else
2486 phymode = MODE_11NG_HT20;
2487 } else if (ath10k_mac_sta_has_ofdm_only(sta)) {
2488 phymode = MODE_11G;
2489 } else {
2490 phymode = MODE_11B;
2491 }
2492
2493 break;
2494 case NL80211_BAND_5GHZ:
2495 /*
2496 * Check VHT first.
2497 */
2498 if (sta->vht_cap.vht_supported &&
2499 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2500 if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
2501 phymode = MODE_11AC_VHT80;
2502 else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2503 phymode = MODE_11AC_VHT40;
2504 else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
2505 phymode = MODE_11AC_VHT20;
2506 } else if (sta->ht_cap.ht_supported &&
2507 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2508 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
2509 phymode = MODE_11NA_HT40;
2510 else
2511 phymode = MODE_11NA_HT20;
2512 } else {
2513 phymode = MODE_11A;
2514 }
2515
2516 break;
2517 default:
2518 break;
2519 }
2520
2521 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
2522 sta->addr, ath10k_wmi_phymode_str(phymode));
2523
2524 arg->peer_phymode = phymode;
2525 WARN_ON(phymode == MODE_UNKNOWN);
2526 }
2527
2528 static int ath10k_peer_assoc_prepare(struct ath10k *ar,
2529 struct ieee80211_vif *vif,
2530 struct ieee80211_sta *sta,
2531 struct wmi_peer_assoc_complete_arg *arg)
2532 {
2533 lockdep_assert_held(&ar->conf_mutex);
2534
2535 memset(arg, 0, sizeof(*arg));
2536
2537 ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
2538 ath10k_peer_assoc_h_crypto(ar, vif, sta, arg);
2539 ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
2540 ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
2541 ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
2542 ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
2543 ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
2544
2545 return 0;
2546 }
2547
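/* Translate the mac80211 HT SM power save field into WMI peer SMPS
 * states; both invalid and disabled map to WMI_PEER_SMPS_PS_NONE.
 */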
2548 static const u32 ath10k_smps_map[] = {
2549 [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
2550 [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
2551 [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
2552 [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
2553 };
2554
2555 static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
2556 const u8 *addr,
2557 const struct ieee80211_sta_ht_cap *ht_cap)
2558 {
2559 int smps;
2560
2561 if (!ht_cap->ht_supported)
2562 return 0;
2563
2564 smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
2565 smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
2566
2567 if (smps >= ARRAY_SIZE(ath10k_smps_map))
2568 return -EINVAL;
2569
2570 return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
2571 WMI_PEER_SMPS_STATE,
2572 ath10k_smps_map[smps]);
2573 }
2574
2575 static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
2576 struct ieee80211_vif *vif,
2577 struct ieee80211_sta_vht_cap vht_cap)
2578 {
2579 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2580 int ret;
2581 u32 param;
2582 u32 value;
2583
2584 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC)
2585 return 0;
2586
2587 if (!(ar->vht_cap_info &
2588 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2589 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
2590 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2591 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
2592 return 0;
2593
2594 param = ar->wmi.vdev_param->txbf;
2595 value = 0;
2596
2597 if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED))
2598 return 0;
2599
2600 /* The crossed logic below is intentional: if the remote STA advertises
2601 * beamformer support then we enable beamformee operation locally, and
2602 * vice versa. */
2603
2604 if (ar->vht_cap_info &
2605 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2606 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
2607 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
2608 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2609
2610 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
2611 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
2612 }
2613
2614 if (ar->vht_cap_info &
2615 (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2616 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
2617 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
2618 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2619
2620 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
2621 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
2622 }
2623
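/* MU beamforming builds on the corresponding SU capability, so make
 * sure SU is enabled whenever MU is.
 */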
2624 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE)
2625 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2626
2627 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER)
2628 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2629
2630 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value);
2631 if (ret) {
2632 ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n",
2633 value, ret);
2634 return ret;
2635 }
2636
2637 return 0;
2638 }
2639
2640 /* can be called only in mac80211 callbacks due to `key_count` usage */
2641 static void ath10k_bss_assoc(struct ieee80211_hw *hw,
2642 struct ieee80211_vif *vif,
2643 struct ieee80211_bss_conf *bss_conf)
2644 {
2645 struct ath10k *ar = hw->priv;
2646 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2647 struct ieee80211_sta_ht_cap ht_cap;
2648 struct ieee80211_sta_vht_cap vht_cap;
2649 struct wmi_peer_assoc_complete_arg peer_arg;
2650 struct ieee80211_sta *ap_sta;
2651 int ret;
2652
2653 lockdep_assert_held(&ar->conf_mutex);
2654
2655 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
2656 arvif->vdev_id, arvif->bssid, arvif->aid);
2657
2658 rcu_read_lock();
2659
2660 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
2661 if (!ap_sta) {
2662 ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n",
2663 bss_conf->bssid, arvif->vdev_id);
2664 rcu_read_unlock();
2665 return;
2666 }
2667
2668 /* ap_sta must only be accessed within the RCU read-side section, which
2669 * must be left before calling ath10k_setup_peer_smps() as it might sleep. */
2670 ht_cap = ap_sta->ht_cap;
2671 vht_cap = ap_sta->vht_cap;
2672
2673 ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
2674 if (ret) {
2675 ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n",
2676 bss_conf->bssid, arvif->vdev_id, ret);
2677 rcu_read_unlock();
2678 return;
2679 }
2680
2681 rcu_read_unlock();
2682
2683 ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
2684 if (ret) {
2685 ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n",
2686 bss_conf->bssid, arvif->vdev_id, ret);
2687 return;
2688 }
2689
2690 ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
2691 if (ret) {
2692 ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n",
2693 arvif->vdev_id, ret);
2694 return;
2695 }
2696
2697 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
2698 if (ret) {
2699 ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n",
2700 arvif->vdev_id, bss_conf->bssid, ret);
2701 return;
2702 }
2703
2704 ath10k_dbg(ar, ATH10K_DBG_MAC,
2705 "mac vdev %d up (associated) bssid %pM aid %d\n",
2706 arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
2707
2708 WARN_ON(arvif->is_up);
2709
2710 arvif->aid = bss_conf->aid;
2711 ether_addr_copy(arvif->bssid, bss_conf->bssid);
2712
2713 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
2714 if (ret) {
2715 ath10k_warn(ar, "failed to set vdev %d up: %d\n",
2716 arvif->vdev_id, ret);
2717 return;
2718 }
2719
2720 arvif->is_up = true;
2721
2722 /* Workaround: Some firmware revisions (tested with qca6174
2723 * WLAN.RM.2.0-00073) have a buggy powersave state machine and must be
2724 * poked with a peer param command.
2725 */
2726 ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
2727 WMI_PEER_DUMMY_VAR, 1);
2728 if (ret) {
2729 ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
2730 arvif->bssid, arvif->vdev_id, ret);
2731 return;
2732 }
2733 }
2734
2735 static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
2736 struct ieee80211_vif *vif)
2737 {
2738 struct ath10k *ar = hw->priv;
2739 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2740 struct ieee80211_sta_vht_cap vht_cap = {};
2741 int ret;
2742
2743 lockdep_assert_held(&ar->conf_mutex);
2744
2745 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
2746 arvif->vdev_id, arvif->bssid);
2747
2748 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
2749 if (ret)
2750 ath10k_warn(ar, "failed to down vdev %i: %d\n",
2751 arvif->vdev_id, ret);
2752
2753 arvif->def_wep_key_idx = -1;
2754
2755 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
2756 if (ret) {
2757 ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
2758 arvif->vdev_id, ret);
2759 return;
2760 }
2761
2762 arvif->is_up = false;
2763
2764 cancel_delayed_work_sync(&arvif->connection_loss_work);
2765 }
2766
2767 static int ath10k_station_assoc(struct ath10k *ar,
2768 struct ieee80211_vif *vif,
2769 struct ieee80211_sta *sta,
2770 bool reassoc)
2771 {
2772 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2773 struct wmi_peer_assoc_complete_arg peer_arg;
2774 int ret = 0;
2775
2776 lockdep_assert_held(&ar->conf_mutex);
2777
2778 ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg);
2779 if (ret) {
2780 ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
2781 sta->addr, arvif->vdev_id, ret);
2782 return ret;
2783 }
2784
2785 ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
2786 if (ret) {
2787 ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
2788 sta->addr, arvif->vdev_id, ret);
2789 return ret;
2790 }
2791
2792 /* Re-assoc is run only to update the supported rates for a given station. It
2793 * doesn't make much sense to reconfigure the peer completely.
2794 */
2795 if (!reassoc) {
2796 ret = ath10k_setup_peer_smps(ar, arvif, sta->addr,
2797 &sta->ht_cap);
2798 if (ret) {
2799 ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
2800 arvif->vdev_id, ret);
2801 return ret;
2802 }
2803
2804 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
2805 if (ret) {
2806 ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
2807 sta->addr, arvif->vdev_id, ret);
2808 return ret;
2809 }
2810
2811 if (!sta->wme) {
2812 arvif->num_legacy_stations++;
2813 ret = ath10k_recalc_rtscts_prot(arvif);
2814 if (ret) {
2815 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
2816 arvif->vdev_id, ret);
2817 return ret;
2818 }
2819 }
2820
2821 /* Plumb cached keys only for static WEP */
2822 if (arvif->def_wep_key_idx != -1) {
2823 ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
2824 if (ret) {
2825 ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
2826 arvif->vdev_id, ret);
2827 return ret;
2828 }
2829 }
2830 }
2831
2832 return ret;
2833 }
2834
2835 static int ath10k_station_disassoc(struct ath10k *ar,
2836 struct ieee80211_vif *vif,
2837 struct ieee80211_sta *sta)
2838 {
2839 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2840 int ret = 0;
2841
2842 lockdep_assert_held(&ar->conf_mutex);
2843
2844 if (!sta->wme) {
2845 arvif->num_legacy_stations--;
2846 ret = ath10k_recalc_rtscts_prot(arvif);
2847 if (ret) {
2848 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
2849 arvif->vdev_id, ret);
2850 return ret;
2851 }
2852 }
2853
2854 ret = ath10k_clear_peer_keys(arvif, sta->addr);
2855 if (ret) {
2856 ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n",
2857 arvif->vdev_id, ret);
2858 return ret;
2859 }
2860
2861 return ret;
2862 }
2863
2864 /**************/
2865 /* Regulatory */
2866 /**************/
2867
2868 static int ath10k_update_channel_list(struct ath10k *ar)
2869 {
2870 struct ieee80211_hw *hw = ar->hw;
2871 struct ieee80211_supported_band **bands;
2872 enum nl80211_band band;
2873 struct ieee80211_channel *channel;
2874 struct wmi_scan_chan_list_arg arg = {0};
2875 struct wmi_channel_arg *ch;
2876 bool passive;
2877 int len;
2878 int ret;
2879 int i;
2880
2881 lockdep_assert_held(&ar->conf_mutex);
2882
2883 bands = hw->wiphy->bands;
2884 for (band = 0; band < NUM_NL80211_BANDS; band++) {
2885 if (!bands[band])
2886 continue;
2887
2888 for (i = 0; i < bands[band]->n_channels; i++) {
2889 if (bands[band]->channels[i].flags &
2890 IEEE80211_CHAN_DISABLED)
2891 continue;
2892
2893 arg.n_channels++;
2894 }
2895 }
2896
2897 len = sizeof(struct wmi_channel_arg) * arg.n_channels;
2898 arg.channels = kzalloc(len, GFP_KERNEL);
2899 if (!arg.channels)
2900 return -ENOMEM;
2901
2902 ch = arg.channels;
2903 for (band = 0; band < NUM_NL80211_BANDS; band++) {
2904 if (!bands[band])
2905 continue;
2906
2907 for (i = 0; i < bands[band]->n_channels; i++) {
2908 channel = &bands[band]->channels[i];
2909
2910 if (channel->flags & IEEE80211_CHAN_DISABLED)
2911 continue;
2912
2913 ch->allow_ht = true;
2914
2915 /* FIXME: when should we really allow VHT? */
2916 ch->allow_vht = true;
2917
2918 ch->allow_ibss =
2919 !(channel->flags & IEEE80211_CHAN_NO_IR);
2920
2921 ch->ht40plus =
2922 !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);
2923
2924 ch->chan_radar =
2925 !!(channel->flags & IEEE80211_CHAN_RADAR);
2926
2927 passive = channel->flags & IEEE80211_CHAN_NO_IR;
2928 ch->passive = passive;
2929
2930 ch->freq = channel->center_freq;
2931 ch->band_center_freq1 = channel->center_freq;
2932 ch->min_power = 0;
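/* max_power, max_reg_power and max_antenna_gain are passed to the
 * firmware in 0.5 dB units, hence the doubling of mac80211's values.
 */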
2933 ch->max_power = channel->max_power * 2;
2934 ch->max_reg_power = channel->max_reg_power * 2;
2935 ch->max_antenna_gain = channel->max_antenna_gain * 2;
2936 ch->reg_class_id = 0; /* FIXME */
2937
2938 /* FIXME: why use only legacy modes, why not any
2939 * HT/VHT modes? Would that even make any
2940 * difference? */
2941 if (channel->band == NL80211_BAND_2GHZ)
2942 ch->mode = MODE_11G;
2943 else
2944 ch->mode = MODE_11A;
2945
2946 if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
2947 continue;
2948
2949 ath10k_dbg(ar, ATH10K_DBG_WMI,
2950 "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
2951 ch - arg.channels, arg.n_channels,
2952 ch->freq, ch->max_power, ch->max_reg_power,
2953 ch->max_antenna_gain, ch->mode);
2954
2955 ch++;
2956 }
2957 }
2958
2959 ret = ath10k_wmi_scan_chan_list(ar, &arg);
2960 kfree(arg.channels);
2961
2962 return ret;
2963 }
2964
2965 static enum wmi_dfs_region
2966 ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
2967 {
2968 switch (dfs_region) {
2969 case NL80211_DFS_UNSET:
2970 return WMI_UNINIT_DFS_DOMAIN;
2971 case NL80211_DFS_FCC:
2972 return WMI_FCC_DFS_DOMAIN;
2973 case NL80211_DFS_ETSI:
2974 return WMI_ETSI_DFS_DOMAIN;
2975 case NL80211_DFS_JP:
2976 return WMI_MKK4_DFS_DOMAIN;
2977 }
2978 return WMI_UNINIT_DFS_DOMAIN;
2979 }
2980
2981 static void ath10k_regd_update(struct ath10k *ar)
2982 {
2983 struct reg_dmn_pair_mapping *regpair;
2984 int ret;
2985 enum wmi_dfs_region wmi_dfs_reg;
2986 enum nl80211_dfs_regions nl_dfs_reg;
2987
2988 lockdep_assert_held(&ar->conf_mutex);
2989
2990 ret = ath10k_update_channel_list(ar);
2991 if (ret)
2992 ath10k_warn(ar, "failed to update channel list: %d\n", ret);
2993
2994 regpair = ar->ath_common.regulatory.regpair;
2995
2996 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
2997 nl_dfs_reg = ar->dfs_detector->region;
2998 wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
2999 } else {
3000 wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
3001 }
3002
3003 /* Target allows setting up per-band regdomain but ath_common provides
3004 * only a combined one */
3005 ret = ath10k_wmi_pdev_set_regdomain(ar,
3006 regpair->reg_domain,
3007 regpair->reg_domain, /* 2ghz */
3008 regpair->reg_domain, /* 5ghz */
3009 regpair->reg_2ghz_ctl,
3010 regpair->reg_5ghz_ctl,
3011 wmi_dfs_reg);
3012 if (ret)
3013 ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret);
3014 }
3015
3016 static void ath10k_reg_notifier(struct wiphy *wiphy,
3017 struct regulatory_request *request)
3018 {
3019 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
3020 struct ath10k *ar = hw->priv;
3021 bool result;
3022
3023 ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
3024
3025 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
3026 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
3027 request->dfs_region);
3028 result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
3029 request->dfs_region);
3030 if (!result)
3031 ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n",
3032 request->dfs_region);
3033 }
3034
3035 mutex_lock(&ar->conf_mutex);
3036 if (ar->state == ATH10K_STATE_ON)
3037 ath10k_regd_update(ar);
3038 mutex_unlock(&ar->conf_mutex);
3039 }
3040
3041 /***************/
3042 /* TX handlers */
3043 /***************/
3044
3045 enum ath10k_mac_tx_path {
3046 ATH10K_MAC_TX_HTT,
3047 ATH10K_MAC_TX_HTT_MGMT,
3048 ATH10K_MAC_TX_WMI_MGMT,
3049 ATH10K_MAC_TX_UNKNOWN,
3050 };
3051
3052 void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
3053 {
3054 lockdep_assert_held(&ar->htt.tx_lock);
3055
3056 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3057 ar->tx_paused |= BIT(reason);
3058 ieee80211_stop_queues(ar->hw);
3059 }
3060
3061 static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac,
3062 struct ieee80211_vif *vif)
3063 {
3064 struct ath10k *ar = data;
3065 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3066
3067 if (arvif->tx_paused)
3068 return;
3069
3070 ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3071 }
3072
3073 void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
3074 {
3075 lockdep_assert_held(&ar->htt.tx_lock);
3076
3077 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3078 ar->tx_paused &= ~BIT(reason);
3079
3080 if (ar->tx_paused)
3081 return;
3082
3083 ieee80211_iterate_active_interfaces_atomic(ar->hw,
3084 IEEE80211_IFACE_ITER_RESUME_ALL,
3085 ath10k_mac_tx_unlock_iter,
3086 ar);
3087
3088 ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue);
3089 }
3090
3091 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
3092 {
3093 struct ath10k *ar = arvif->ar;
3094
3095 lockdep_assert_held(&ar->htt.tx_lock);
3096
3097 WARN_ON(reason >= BITS_PER_LONG);
3098 arvif->tx_paused |= BIT(reason);
3099 ieee80211_stop_queue(ar->hw, arvif->vdev_id);
3100 }
3101
3102 void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason)
3103 {
3104 struct ath10k *ar = arvif->ar;
3105
3106 lockdep_assert_held(&ar->htt.tx_lock);
3107
3108 WARN_ON(reason >= BITS_PER_LONG);
3109 arvif->tx_paused &= ~BIT(reason);
3110
3111 if (ar->tx_paused)
3112 return;
3113
3114 if (arvif->tx_paused)
3115 return;
3116
3117 ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3118 }
3119
3120 static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
3121 enum wmi_tlv_tx_pause_id pause_id,
3122 enum wmi_tlv_tx_pause_action action)
3123 {
3124 struct ath10k *ar = arvif->ar;
3125
3126 lockdep_assert_held(&ar->htt.tx_lock);
3127
3128 switch (action) {
3129 case WMI_TLV_TX_PAUSE_ACTION_STOP:
3130 ath10k_mac_vif_tx_lock(arvif, pause_id);
3131 break;
3132 case WMI_TLV_TX_PAUSE_ACTION_WAKE:
3133 ath10k_mac_vif_tx_unlock(arvif, pause_id);
3134 break;
3135 default:
3136 ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
3137 action, arvif->vdev_id);
3138 break;
3139 }
3140 }
3141
3142 struct ath10k_mac_tx_pause {
3143 u32 vdev_id;
3144 enum wmi_tlv_tx_pause_id pause_id;
3145 enum wmi_tlv_tx_pause_action action;
3146 };
3147
3148 static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
3149 struct ieee80211_vif *vif)
3150 {
3151 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3152 struct ath10k_mac_tx_pause *arg = data;
3153
3154 if (arvif->vdev_id != arg->vdev_id)
3155 return;
3156
3157 ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
3158 }
3159
3160 void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
3161 enum wmi_tlv_tx_pause_id pause_id,
3162 enum wmi_tlv_tx_pause_action action)
3163 {
3164 struct ath10k_mac_tx_pause arg = {
3165 .vdev_id = vdev_id,
3166 .pause_id = pause_id,
3167 .action = action,
3168 };
3169
3170 spin_lock_bh(&ar->htt.tx_lock);
3171 ieee80211_iterate_active_interfaces_atomic(ar->hw,
3172 IEEE80211_IFACE_ITER_RESUME_ALL,
3173 ath10k_mac_handle_tx_pause_iter,
3174 &arg);
3175 spin_unlock_bh(&ar->htt.tx_lock);
3176 }
3177
3178 static enum ath10k_hw_txrx_mode
3179 ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
3180 struct ieee80211_vif *vif,
3181 struct ieee80211_sta *sta,
3182 struct sk_buff *skb)
3183 {
3184 const struct ieee80211_hdr *hdr = (void *)skb->data;
3185 __le16 fc = hdr->frame_control;
3186
3187 if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
3188 return ATH10K_HW_TXRX_RAW;
3189
3190 if (ieee80211_is_mgmt(fc))
3191 return ATH10K_HW_TXRX_MGMT;
3192
3193 /* Workaround:
3194 *
3195 * NullFunc frames are mostly used to ping if a client or AP is still
3196 * reachable and responsive. This implies tx status reports must be
3197 * accurate - otherwise either mac80211 or userspace (e.g. hostapd) can
3198 * come to a conclusion that the other end disappeared and tear down
3199 * BSS connection or it can never disconnect from BSS/client (which is
3200 * the case).
3201 *
3202 * Firmware with HTT older than 3.0 delivers incorrect tx status for
3203 * NullFunc frames to driver. However there's a HTT Mgmt Tx command
3204 * which seems to deliver correct tx reports for NullFunc frames. The
3205 * downside of using it is it ignores client powersave state so it can
3206 * end up disconnecting sleeping clients in AP mode. It should fix STA
3207 * mode though because AP don't sleep.
3208 */
3209 if (ar->htt.target_version_major < 3 &&
3210 (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
3211 !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3212 ar->running_fw->fw_file.fw_features))
3213 return ATH10K_HW_TXRX_MGMT;
3214
3215 /* Workaround:
3216 *
3217 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
3218 * NativeWifi txmode - it selects AP key instead of peer key. It seems
3219 * to work with Ethernet txmode so use it.
3220 *
3221 * FIXME: Check if raw mode works with TDLS.
3222 */
3223 if (ieee80211_is_data_present(fc) && sta && sta->tdls)
3224 return ATH10K_HW_TXRX_ETHERNET;
3225
3226 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
3227 return ATH10K_HW_TXRX_RAW;
3228
3229 return ATH10K_HW_TXRX_NATIVE_WIFI;
3230 }
3231
3232 static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
3233 struct sk_buff *skb)
3234 {
3235 const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3236 const struct ieee80211_hdr *hdr = (void *)skb->data;
3237 const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
3238 IEEE80211_TX_CTL_INJECTED;
3239
3240 if (!ieee80211_has_protected(hdr->frame_control))
3241 return false;
3242
3243 if ((info->flags & mask) == mask)
3244 return false;
3245
3246 if (vif)
3247 return !ath10k_vif_to_arvif(vif)->nohwcrypt;
3248
3249 return true;
3250 }
3251
3252 /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
3253 * Control in the header.
3254 */
3255 static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
3256 {
3257 struct ieee80211_hdr *hdr = (void *)skb->data;
3258 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3259 u8 *qos_ctl;
3260
3261 if (!ieee80211_is_data_qos(hdr->frame_control))
3262 return;
3263
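/* Strip the 2-byte QoS Control field by shifting the preceding
 * 802.11 header forward over it and trimming the front of the skb.
 */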
3264 qos_ctl = ieee80211_get_qos_ctl(hdr);
3265 memmove(skb->data + IEEE80211_QOS_CTL_LEN,
3266 skb->data, (void *)qos_ctl - (void *)skb->data);
3267 skb_pull(skb, IEEE80211_QOS_CTL_LEN);
3268
3269 /* Some firmware revisions don't handle sending QoS NullFunc well.
3270 * These frames are mainly used for CQM purposes so it doesn't really
3271 * matter whether a QoS NullFunc or a plain NullFunc is sent.
3272 */
3273 hdr = (void *)skb->data;
3274 if (ieee80211_is_qos_nullfunc(hdr->frame_control))
3275 cb->flags &= ~ATH10K_SKB_F_QOS;
3276
3277 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
3278 }
3279
3280 static void ath10k_tx_h_8023(struct sk_buff *skb)
3281 {
3282 struct ieee80211_hdr *hdr;
3283 struct rfc1042_hdr *rfc1042;
3284 struct ethhdr *eth;
3285 size_t hdrlen;
3286 u8 da[ETH_ALEN];
3287 u8 sa[ETH_ALEN];
3288 __be16 type;
3289
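/* Convert 802.11 + RFC1042/SNAP framing into a plain Ethernet header:
 * remember DA/SA and the SNAP protocol type, strip the old headers
 * and prepend an ethhdr.
 */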
3290 hdr = (void *)skb->data;
3291 hdrlen = ieee80211_hdrlen(hdr->frame_control);
3292 rfc1042 = (void *)skb->data + hdrlen;
3293
3294 ether_addr_copy(da, ieee80211_get_DA(hdr));
3295 ether_addr_copy(sa, ieee80211_get_SA(hdr));
3296 type = rfc1042->snap_type;
3297
3298 skb_pull(skb, hdrlen + sizeof(*rfc1042));
3299 skb_push(skb, sizeof(*eth));
3300
3301 eth = (void *)skb->data;
3302 ether_addr_copy(eth->h_dest, da);
3303 ether_addr_copy(eth->h_source, sa);
3304 eth->h_proto = type;
3305 }
3306
3307 static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
3308 struct ieee80211_vif *vif,
3309 struct sk_buff *skb)
3310 {
3311 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
3312 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3313
3314 /* This only applies to P2P GO interfaces */
3315 if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
3316 return;
3317
3318 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
3319 spin_lock_bh(&ar->data_lock);
3320 if (arvif->u.ap.noa_data)
3321 if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
3322 GFP_ATOMIC))
3323 memcpy(skb_put(skb, arvif->u.ap.noa_len),
3324 arvif->u.ap.noa_data,
3325 arvif->u.ap.noa_len);
3326 spin_unlock_bh(&ar->data_lock);
3327 }
3328 }
3329
3330 static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
3331 struct ieee80211_vif *vif,
3332 struct ieee80211_txq *txq,
3333 struct sk_buff *skb)
3334 {
3335 struct ieee80211_hdr *hdr = (void *)skb->data;
3336 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3337
3338 cb->flags = 0;
3339 if (!ath10k_tx_h_use_hwcrypto(vif, skb))
3340 cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
3341
3342 if (ieee80211_is_mgmt(hdr->frame_control))
3343 cb->flags |= ATH10K_SKB_F_MGMT;
3344
3345 if (ieee80211_is_data_qos(hdr->frame_control))
3346 cb->flags |= ATH10K_SKB_F_QOS;
3347
3348 cb->vif = vif;
3349 cb->txq = txq;
3350 }
3351
3352 bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
3353 {
3354 /* FIXME: Not really sure since when the behaviour changed. At some
3355 * point new firmware stopped requiring creation of peer entries for
3356 * offchannel tx (and actually creating them causes issues with wmi-htc
3357 * tx credit replenishment and reliability). Assuming it's at least 3.4
3358 * because that's when the `freq` was introduced to TX_FRM HTT command.
3359 */
3360 return (ar->htt.target_version_major >= 3 &&
3361 ar->htt.target_version_minor >= 4 &&
3362 ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
3363 }
3364
3365 static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
3366 {
3367 struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
3368 int ret = 0;
3369
3370 spin_lock_bh(&ar->data_lock);
3371
3372 if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) {
3373 ath10k_warn(ar, "wmi mgmt tx queue is full\n");
3374 ret = -ENOSPC;
3375 goto unlock;
3376 }
3377
3378 __skb_queue_tail(q, skb);
3379 ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
3380
3381 unlock:
3382 spin_unlock_bh(&ar->data_lock);
3383
3384 return ret;
3385 }
3386
3387 static enum ath10k_mac_tx_path
3388 ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
3389 struct sk_buff *skb,
3390 enum ath10k_hw_txrx_mode txmode)
3391 {
3392 switch (txmode) {
3393 case ATH10K_HW_TXRX_RAW:
3394 case ATH10K_HW_TXRX_NATIVE_WIFI:
3395 case ATH10K_HW_TXRX_ETHERNET:
3396 return ATH10K_MAC_TX_HTT;
3397 case ATH10K_HW_TXRX_MGMT:
3398 if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3399 ar->running_fw->fw_file.fw_features))
3400 return ATH10K_MAC_TX_WMI_MGMT;
3401 else if (ar->htt.target_version_major >= 3)
3402 return ATH10K_MAC_TX_HTT;
3403 else
3404 return ATH10K_MAC_TX_HTT_MGMT;
3405 }
3406
3407 return ATH10K_MAC_TX_UNKNOWN;
3408 }
3409
3410 static int ath10k_mac_tx_submit(struct ath10k *ar,
3411 enum ath10k_hw_txrx_mode txmode,
3412 enum ath10k_mac_tx_path txpath,
3413 struct sk_buff *skb)
3414 {
3415 struct ath10k_htt *htt = &ar->htt;
3416 int ret = -EINVAL;
3417
3418 switch (txpath) {
3419 case ATH10K_MAC_TX_HTT:
3420 ret = ath10k_htt_tx(htt, txmode, skb);
3421 break;
3422 case ATH10K_MAC_TX_HTT_MGMT:
3423 ret = ath10k_htt_mgmt_tx(htt, skb);
3424 break;
3425 case ATH10K_MAC_TX_WMI_MGMT:
3426 ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
3427 break;
3428 case ATH10K_MAC_TX_UNKNOWN:
3429 WARN_ON_ONCE(1);
3430 ret = -EINVAL;
3431 break;
3432 }
3433
3434 if (ret) {
3435 ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
3436 ret);
3437 ieee80211_free_txskb(ar->hw, skb);
3438 }
3439
3440 return ret;
3441 }
3442
3443 /* This function consumes the sk_buff regardless of return value as far as
3444 * the caller is concerned, so no freeing is necessary afterwards.
3445 */
3446 static int ath10k_mac_tx(struct ath10k *ar,
3447 struct ieee80211_vif *vif,
3448 struct ieee80211_sta *sta,
3449 enum ath10k_hw_txrx_mode txmode,
3450 enum ath10k_mac_tx_path txpath,
3451 struct sk_buff *skb)
3452 {
3453 struct ieee80211_hw *hw = ar->hw;
3454 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3455 int ret;
3456
3457 /* We should disable CCK RATE due to P2P */
3458 if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
3459 ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
3460
3461 switch (txmode) {
3462 case ATH10K_HW_TXRX_MGMT:
3463 case ATH10K_HW_TXRX_NATIVE_WIFI:
3464 ath10k_tx_h_nwifi(hw, skb);
3465 ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
3466 ath10k_tx_h_seq_no(vif, skb);
3467 break;
3468 case ATH10K_HW_TXRX_ETHERNET:
3469 ath10k_tx_h_8023(skb);
3470 break;
3471 case ATH10K_HW_TXRX_RAW:
3472 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
3473 WARN_ON_ONCE(1);
3474 ieee80211_free_txskb(hw, skb);
3475 return -ENOTSUPP;
3476 }
3477 }
3478
3479 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
3480 if (!ath10k_mac_tx_frm_has_freq(ar)) {
3481 ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
3482 skb);
3483
3484 skb_queue_tail(&ar->offchan_tx_queue, skb);
3485 ieee80211_queue_work(hw, &ar->offchan_tx_work);
3486 return 0;
3487 }
3488 }
3489
3490 ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
3491 if (ret) {
3492 ath10k_warn(ar, "failed to submit frame: %d\n", ret);
3493 return ret;
3494 }
3495
3496 return 0;
3497 }
3498
3499 void ath10k_offchan_tx_purge(struct ath10k *ar)
3500 {
3501 struct sk_buff *skb;
3502
3503 for (;;) {
3504 skb = skb_dequeue(&ar->offchan_tx_queue);
3505 if (!skb)
3506 break;
3507
3508 ieee80211_free_txskb(ar->hw, skb);
3509 }
3510 }
3511
3512 void ath10k_offchan_tx_work(struct work_struct *work)
3513 {
3514 struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
3515 struct ath10k_peer *peer;
3516 struct ath10k_vif *arvif;
3517 enum ath10k_hw_txrx_mode txmode;
3518 enum ath10k_mac_tx_path txpath;
3519 struct ieee80211_hdr *hdr;
3520 struct ieee80211_vif *vif;
3521 struct ieee80211_sta *sta;
3522 struct sk_buff *skb;
3523 const u8 *peer_addr;
3524 int vdev_id;
3525 int ret;
3526 unsigned long time_left;
3527 bool tmp_peer_created = false;
3528
3529 /* FW requirement: We must create a peer before FW will send out
3530 * an offchannel frame. Otherwise the frame will be stuck and
3531 * never transmitted. We delete the peer upon tx completion.
3532 * It is unlikely that a peer for offchannel tx will already be
3533 * present. However it may happen in some rare cases so account for that.
3534 * Otherwise we might remove a legitimate peer and break its connectivity. */
3535
3536 for (;;) {
3537 skb = skb_dequeue(&ar->offchan_tx_queue);
3538 if (!skb)
3539 break;
3540
3541 mutex_lock(&ar->conf_mutex);
3542
3543 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p\n",
3544 skb);
3545
3546 hdr = (struct ieee80211_hdr *)skb->data;
3547 peer_addr = ieee80211_get_DA(hdr);
3548
3549 spin_lock_bh(&ar->data_lock);
3550 vdev_id = ar->scan.vdev_id;
3551 peer = ath10k_peer_find(ar, vdev_id, peer_addr);
3552 spin_unlock_bh(&ar->data_lock);
3553
3554 if (peer)
3555 /* FIXME: should this use ath10k_warn()? */
3556 ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
3557 peer_addr, vdev_id);
3558
3559 if (!peer) {
3560 ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
3561 peer_addr,
3562 WMI_PEER_TYPE_DEFAULT);
3563 if (ret)
3564 ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
3565 peer_addr, vdev_id, ret);
3566 tmp_peer_created = (ret == 0);
3567 }
3568
3569 spin_lock_bh(&ar->data_lock);
3570 reinit_completion(&ar->offchan_tx_completed);
3571 ar->offchan_tx_skb = skb;
3572 spin_unlock_bh(&ar->data_lock);
3573
3574 /* It's safe to access vif and sta - conf_mutex guarantees that
3575 * sta_state() and remove_interface() are locked exclusively
3576 * out with respect to this offchannel worker.
3577 */
3578 arvif = ath10k_get_arvif(ar, vdev_id);
3579 if (arvif) {
3580 vif = arvif->vif;
3581 sta = ieee80211_find_sta(vif, peer_addr);
3582 } else {
3583 vif = NULL;
3584 sta = NULL;
3585 }
3586
3587 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3588 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3589
3590 ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
3591 if (ret) {
3592 ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
3593 ret);
3594 /* not serious */
3595 }
3596
3597 time_left =
3598 wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
3599 if (time_left == 0)
3600 ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
3601 skb);
3602
3603 if (!peer && tmp_peer_created) {
3604 ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
3605 if (ret)
3606 ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
3607 peer_addr, vdev_id, ret);
3608 }
3609
3610 mutex_unlock(&ar->conf_mutex);
3611 }
3612 }
3613
3614 void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
3615 {
3616 struct sk_buff *skb;
3617
3618 for (;;) {
3619 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
3620 if (!skb)
3621 break;
3622
3623 ieee80211_free_txskb(ar->hw, skb);
3624 }
3625 }
3626
3627 void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
3628 {
3629 struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
3630 struct sk_buff *skb;
3631 int ret;
3632
3633 for (;;) {
3634 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
3635 if (!skb)
3636 break;
3637
3638 ret = ath10k_wmi_mgmt_tx(ar, skb);
3639 if (ret) {
3640 ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
3641 ret);
3642 ieee80211_free_txskb(ar->hw, skb);
3643 }
3644 }
3645 }
3646
3647 static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
3648 {
3649 struct ath10k_txq *artxq;
3650 
3651 if (!txq)
3652 return;
3653 
artxq = (void *)txq->drv_priv;
3654 INIT_LIST_HEAD(&artxq->list);
3655 }
3656
3657 static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
3658 {
3659 struct ath10k_txq *artxq;
3660 struct ath10k_skb_cb *cb;
3661 struct sk_buff *msdu;
3662 int msdu_id;
3663 
3664 if (!txq)
3665 return;

artxq = (void *)txq->drv_priv;
3666
3667 spin_lock_bh(&ar->txqs_lock);
3668 if (!list_empty(&artxq->list))
3669 list_del_init(&artxq->list);
3670 spin_unlock_bh(&ar->txqs_lock);
3671
3672 spin_lock_bh(&ar->htt.tx_lock);
3673 idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
3674 cb = ATH10K_SKB_CB(msdu);
3675 if (cb->txq == txq)
3676 cb->txq = NULL;
3677 }
3678 spin_unlock_bh(&ar->htt.tx_lock);
3679 }
3680
3681 struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
3682 u16 peer_id,
3683 u8 tid)
3684 {
3685 struct ath10k_peer *peer;
3686
3687 lockdep_assert_held(&ar->data_lock);
3688
3689 peer = ar->peer_map[peer_id];
3690 if (!peer)
3691 return NULL;
3692
3693 if (peer->sta)
3694 return peer->sta->txq[tid];
3695 else if (peer->vif)
3696 return peer->vif->txq;
3697 else
3698 return NULL;
3699 }
3700
3701 static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
3702 struct ieee80211_txq *txq)
3703 {
3704 struct ath10k *ar = hw->priv;
3705 struct ath10k_txq *artxq = (void *)txq->drv_priv;
3706
3707 /* No need to get locks */
3708
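/* Pushing is allowed while the firmware is in push mode, or if either
 * the overall or the per-queue in-flight count is below its limit.
 */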
3709 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
3710 return true;
3711
3712 if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed)
3713 return true;
3714
3715 if (artxq->num_fw_queued < artxq->num_push_allowed)
3716 return true;
3717
3718 return false;
3719 }
3720
3721 int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
3722 struct ieee80211_txq *txq)
3723 {
3724 struct ath10k *ar = hw->priv;
3725 struct ath10k_htt *htt = &ar->htt;
3726 struct ath10k_txq *artxq = (void *)txq->drv_priv;
3727 struct ieee80211_vif *vif = txq->vif;
3728 struct ieee80211_sta *sta = txq->sta;
3729 enum ath10k_hw_txrx_mode txmode;
3730 enum ath10k_mac_tx_path txpath;
3731 struct sk_buff *skb;
3732 size_t skb_len;
3733 int ret;
3734
3735 spin_lock_bh(&ar->htt.tx_lock);
3736 ret = ath10k_htt_tx_inc_pending(htt);
3737 spin_unlock_bh(&ar->htt.tx_lock);
3738
3739 if (ret)
3740 return ret;
3741
3742 skb = ieee80211_tx_dequeue(hw, txq);
3743 if (!skb) {
3744 spin_lock_bh(&ar->htt.tx_lock);
3745 ath10k_htt_tx_dec_pending(htt);
3746 spin_unlock_bh(&ar->htt.tx_lock);
3747
3748 return -ENOENT;
3749 }
3750
3751 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
3752
3753 skb_len = skb->len;
3754 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3755 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3756
3757 ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
3758 if (unlikely(ret)) {
3759 ath10k_warn(ar, "failed to push frame: %d\n", ret);
3760
3761 spin_lock_bh(&ar->htt.tx_lock);
3762 ath10k_htt_tx_dec_pending(htt);
3763 spin_unlock_bh(&ar->htt.tx_lock);
3764
3765 return ret;
3766 }
3767
3768 spin_lock_bh(&ar->htt.tx_lock);
3769 artxq->num_fw_queued++;
3770 spin_unlock_bh(&ar->htt.tx_lock);
3771
3772 return skb_len;
3773 }
3774
3775 void ath10k_mac_tx_push_pending(struct ath10k *ar)
3776 {
3777 struct ieee80211_hw *hw = ar->hw;
3778 struct ieee80211_txq *txq;
3779 struct ath10k_txq *artxq;
3780 struct ath10k_txq *last;
3781 int ret;
3782 int max;
3783
3784 spin_lock_bh(&ar->txqs_lock);
3785 rcu_read_lock();
3786
3787 last = list_last_entry(&ar->txqs, struct ath10k_txq, list);
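/* Walk the pending txqs round-robin: push up to 'max' frames from
 * each and re-queue it at the tail unless it ran dry (-ENOENT).
 */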
3788 while (!list_empty(&ar->txqs)) {
3789 artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
3790 txq = container_of((void *)artxq, struct ieee80211_txq,
3791 drv_priv);
3792
3793 /* Prevent an aggressive sta/tid from monopolizing the tx queue */
3794 max = 16;
3795 ret = 0;
3796 while (ath10k_mac_tx_can_push(hw, txq) && max--) {
3797 ret = ath10k_mac_tx_push_txq(hw, txq);
3798 if (ret < 0)
3799 break;
3800 }
3801
3802 list_del_init(&artxq->list);
3803 if (ret != -ENOENT)
3804 list_add_tail(&artxq->list, &ar->txqs);
3805
3806 ath10k_htt_tx_txq_update(hw, txq);
3807
3808 if (artxq == last || (ret < 0 && ret != -ENOENT))
3809 break;
3810 }
3811
3812 rcu_read_unlock();
3813 spin_unlock_bh(&ar->txqs_lock);
3814 }
3815
3816 /************/
3817 /* Scanning */
3818 /************/
3819
3820 void __ath10k_scan_finish(struct ath10k *ar)
3821 {
3822 lockdep_assert_held(&ar->data_lock);
3823
3824 switch (ar->scan.state) {
3825 case ATH10K_SCAN_IDLE:
3826 break;
3827 case ATH10K_SCAN_RUNNING:
3828 case ATH10K_SCAN_ABORTING:
3829 if (!ar->scan.is_roc)
3830 ieee80211_scan_completed(ar->hw,
3831 (ar->scan.state ==
3832 ATH10K_SCAN_ABORTING));
3833 else if (ar->scan.roc_notify)
3834 ieee80211_remain_on_channel_expired(ar->hw);
3835 /* fall through */
3836 case ATH10K_SCAN_STARTING:
3837 ar->scan.state = ATH10K_SCAN_IDLE;
3838 ar->scan_channel = NULL;
3839 ar->scan.roc_freq = 0;
3840 ath10k_offchan_tx_purge(ar);
3841 cancel_delayed_work(&ar->scan.timeout);
3842 complete_all(&ar->scan.completed);
3843 break;
3844 }
3845 }
3846
3847 void ath10k_scan_finish(struct ath10k *ar)
3848 {
3849 spin_lock_bh(&ar->data_lock);
3850 __ath10k_scan_finish(ar);
3851 spin_unlock_bh(&ar->data_lock);
3852 }
3853
3854 static int ath10k_scan_stop(struct ath10k *ar)
3855 {
3856 struct wmi_stop_scan_arg arg = {
3857 .req_id = 1, /* FIXME */
3858 .req_type = WMI_SCAN_STOP_ONE,
3859 .u.scan_id = ATH10K_SCAN_ID,
3860 };
3861 int ret;
3862
3863 lockdep_assert_held(&ar->conf_mutex);
3864
3865 ret = ath10k_wmi_stop_scan(ar, &arg);
3866 if (ret) {
3867 ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret);
3868 goto out;
3869 }
3870
3871 ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
3872 if (ret == 0) {
3873 ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
3874 ret = -ETIMEDOUT;
3875 } else if (ret > 0) {
3876 ret = 0;
3877 }
3878
3879 out:
3880 /* Scan state should be updated upon scan completion but in case
3881 * firmware fails to deliver the event (for whatever reason) it is
3882 * desired to clean up scan state anyway. Firmware may have just
3883 * dropped the scan completion event delivery because the transport pipe
3884 * was overflowing with data, and/or it can recover on its own before the
3885 * next scan request is submitted.
3886 */
3887 spin_lock_bh(&ar->data_lock);
3888 if (ar->scan.state != ATH10K_SCAN_IDLE)
3889 __ath10k_scan_finish(ar);
3890 spin_unlock_bh(&ar->data_lock);
3891
3892 return ret;
3893 }
3894
3895 static void ath10k_scan_abort(struct ath10k *ar)
3896 {
3897 int ret;
3898
3899 lockdep_assert_held(&ar->conf_mutex);
3900
3901 spin_lock_bh(&ar->data_lock);
3902
3903 switch (ar->scan.state) {
3904 case ATH10K_SCAN_IDLE:
3905 /* This can happen if the timeout worker kicked in and requested an
3906 * abort while the scan completion was being processed.
3907 */
3908 break;
3909 case ATH10K_SCAN_STARTING:
3910 case ATH10K_SCAN_ABORTING:
3911 ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n",
3912 ath10k_scan_state_str(ar->scan.state),
3913 ar->scan.state);
3914 break;
3915 case ATH10K_SCAN_RUNNING:
3916 ar->scan.state = ATH10K_SCAN_ABORTING;
3917 spin_unlock_bh(&ar->data_lock);
3918
3919 ret = ath10k_scan_stop(ar);
3920 if (ret)
3921 ath10k_warn(ar, "failed to abort scan: %d\n", ret);
3922
3923 spin_lock_bh(&ar->data_lock);
3924 break;
3925 }
3926
3927 spin_unlock_bh(&ar->data_lock);
3928 }
3929
3930 void ath10k_scan_timeout_work(struct work_struct *work)
3931 {
3932 struct ath10k *ar = container_of(work, struct ath10k,
3933 scan.timeout.work);
3934
3935 mutex_lock(&ar->conf_mutex);
3936 ath10k_scan_abort(ar);
3937 mutex_unlock(&ar->conf_mutex);
3938 }
3939
3940 static int ath10k_start_scan(struct ath10k *ar,
3941 const struct wmi_start_scan_arg *arg)
3942 {
3943 int ret;
3944
3945 lockdep_assert_held(&ar->conf_mutex);
3946
3947 ret = ath10k_wmi_start_scan(ar, arg);
3948 if (ret)
3949 return ret;
3950
3951 ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
3952 if (ret == 0) {
3953 ret = ath10k_scan_stop(ar);
3954 if (ret)
3955 ath10k_warn(ar, "failed to stop scan: %d\n", ret);
3956
3957 return -ETIMEDOUT;
3958 }
3959
3960 /* If we failed to start the scan, return an error code at
3961 * this point. This is probably due to some issue in the
3962 * firmware, but no need to wedge the driver due to that...
3963 */
3964 spin_lock_bh(&ar->data_lock);
3965 if (ar->scan.state == ATH10K_SCAN_IDLE) {
3966 spin_unlock_bh(&ar->data_lock);
3967 return -EINVAL;
3968 }
3969 spin_unlock_bh(&ar->data_lock);
3970
3971 return 0;
3972 }
3973
3974 /**********************/
3975 /* mac80211 callbacks */
3976 /**********************/
3977
3978 static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
3979 struct ieee80211_tx_control *control,
3980 struct sk_buff *skb)
3981 {
3982 struct ath10k *ar = hw->priv;
3983 struct ath10k_htt *htt = &ar->htt;
3984 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3985 struct ieee80211_vif *vif = info->control.vif;
3986 struct ieee80211_sta *sta = control->sta;
3987 struct ieee80211_txq *txq = NULL;
3988 struct ieee80211_hdr *hdr = (void *)skb->data;
3989 enum ath10k_hw_txrx_mode txmode;
3990 enum ath10k_mac_tx_path txpath;
3991 bool is_htt;
3992 bool is_mgmt;
3993 bool is_presp;
3994 int ret;
3995
3996 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
3997
3998 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3999 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
4000 is_htt = (txpath == ATH10K_MAC_TX_HTT ||
4001 txpath == ATH10K_MAC_TX_HTT_MGMT);
4002 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
4003
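/* HTT transmissions are accounted against the pending-tx limits up
 * front; the counters are rolled back if the transmit fails below.
 */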
4004 if (is_htt) {
4005 spin_lock_bh(&ar->htt.tx_lock);
4006 is_presp = ieee80211_is_probe_resp(hdr->frame_control);
4007
4008 ret = ath10k_htt_tx_inc_pending(htt);
4009 if (ret) {
4010 ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
4011 ret);
4012 spin_unlock_bh(&ar->htt.tx_lock);
4013 ieee80211_free_txskb(ar->hw, skb);
4014 return;
4015 }
4016
4017 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
4018 if (ret) {
4019 ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n",
4020 ret);
4021 ath10k_htt_tx_dec_pending(htt);
4022 spin_unlock_bh(&ar->htt.tx_lock);
4023 ieee80211_free_txskb(ar->hw, skb);
4024 return;
4025 }
4026 spin_unlock_bh(&ar->htt.tx_lock);
4027 }
4028
4029 ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
4030 if (ret) {
4031 ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
4032 if (is_htt) {
4033 spin_lock_bh(&ar->htt.tx_lock);
4034 ath10k_htt_tx_dec_pending(htt);
4035 if (is_mgmt)
4036 ath10k_htt_tx_mgmt_dec_pending(htt);
4037 spin_unlock_bh(&ar->htt.tx_lock);
4038 }
4039 return;
4040 }
4041 }
4042
4043 static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
4044 struct ieee80211_txq *txq)
4045 {
4046 struct ath10k *ar = hw->priv;
4047 struct ath10k_txq *artxq = (void *)txq->drv_priv;
4048
4049 spin_lock_bh(&ar->txqs_lock);
4050 if (list_empty(&artxq->list))
4051 list_add_tail(&artxq->list, &ar->txqs);
4052 spin_unlock_bh(&ar->txqs_lock);
4053
4054 if (ath10k_mac_tx_can_push(hw, txq))
4055 tasklet_schedule(&ar->htt.txrx_compl_task);
4056
4057 ath10k_htt_tx_txq_update(hw, txq);
4058 }
4059
4060 /* Must not be called with conf_mutex held as the workers can also take it. */
4061 void ath10k_drain_tx(struct ath10k *ar)
4062 {
4063 /* make sure rcu-protected mac80211 tx path itself is drained */
4064 synchronize_net();
4065
4066 ath10k_offchan_tx_purge(ar);
4067 ath10k_mgmt_over_wmi_tx_purge(ar);
4068
4069 cancel_work_sync(&ar->offchan_tx_work);
4070 cancel_work_sync(&ar->wmi_mgmt_tx_work);
4071 }
4072
4073 void ath10k_halt(struct ath10k *ar)
4074 {
4075 struct ath10k_vif *arvif;
4076
4077 lockdep_assert_held(&ar->conf_mutex);
4078
4079 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
4080 ar->filter_flags = 0;
4081 ar->monitor = false;
4082 ar->monitor_arvif = NULL;
4083
4084 if (ar->monitor_started)
4085 ath10k_monitor_stop(ar);
4086
4087 ar->monitor_started = false;
4088 ar->tx_paused = 0;
4089
4090 ath10k_scan_finish(ar);
4091 ath10k_peer_cleanup_all(ar);
4092 ath10k_core_stop(ar);
4093 ath10k_hif_power_down(ar);
4094
4095 spin_lock_bh(&ar->data_lock);
4096 list_for_each_entry(arvif, &ar->arvifs, list)
4097 ath10k_mac_vif_beacon_cleanup(arvif);
4098 spin_unlock_bh(&ar->data_lock);
4099 }
4100
4101 static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
4102 {
4103 struct ath10k *ar = hw->priv;
4104
4105 mutex_lock(&ar->conf_mutex);
4106
4107 *tx_ant = ar->cfg_tx_chainmask;
4108 *rx_ant = ar->cfg_rx_chainmask;
4109
4110 mutex_unlock(&ar->conf_mutex);
4111
4112 return 0;
4113 }
4114
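/* Warn about chainmasks with gaps, e.g. 0x5 (chains 0 and 2 but not 1);
 * only 0xf, 0x7, 0x3, 0x1 and 0x0 are accepted silently.
 */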
4115 static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
4116 {
4117 /* It is not clear that allowing gaps in the chainmask
4118  * is helpful. It probably will not do what the user
4119  * is hoping for, so warn in that case.
4120  */
4121 if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
4122 return;
4123
4124 ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n",
4125 dbg, cm);
4126 }
4127
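/* The BEAMFORMEE_STS field in vht_cap_info encodes (number of STS - 1),
 * so e.g. a return value of 3 corresponds to 4 supported BF STS.
 */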
4128 static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
4129 {
4130 int nsts = ar->vht_cap_info;
4131
4132 nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4133 nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4134
4135 /* If the firmware does not deliver the number of supported space-time
4136  * streams to the host, assume it supports up to 4 BF STS and return
4137  * the value encoded in VHT CAP, i.e. (nsts - 1).
4138  */
4139 if (nsts == 0)
4140 return 3;
4141
4142 return nsts;
4143 }
4144
4145 static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
4146 {
4147 int sound_dim = ar->vht_cap_info;
4148
4149 sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4150 sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4151
4152 /* If the sounding dimension is not advertised by the firmware,
4153 * let's use a default value of 1
4154 */
4155 if (sound_dim == 0)
4156 return 1;
4157
4158 return sound_dim;
4159 }
4160
4161 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
4162 {
4163 struct ieee80211_sta_vht_cap vht_cap = {0};
4164 u16 mcs_map;
4165 u32 val;
4166 int i;
4167
4168 vht_cap.vht_supported = 1;
4169 vht_cap.cap = ar->vht_cap_info;
4170
4171 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
4172 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
4173 val = ath10k_mac_get_vht_cap_bf_sts(ar);
4174 val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4175 val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4176
4177 vht_cap.cap |= val;
4178 }
4179
4180 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
4181 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
4182 val = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
4183 val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4184 val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4185
4186 vht_cap.cap |= val;
4187 }
4188
4189 mcs_map = 0;
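	/* Two bits per spatial stream; e.g. with two usable chains this
	 * produces 0xfffa: MCS 0-9 for NSS 1-2, "not supported" for the rest.
	 */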
4190 for (i = 0; i < 8; i++) {
4191 if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i)))
4192 mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
4193 else
4194 mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
4195 }
4196
4197 vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
4198 vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
4199
4200 return vht_cap;
4201 }
4202
4203 static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
4204 {
4205 int i;
4206 struct ieee80211_sta_ht_cap ht_cap = {0};
4207
4208 if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
4209 return ht_cap;
4210
4211 ht_cap.ht_supported = 1;
4212 ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
4213 ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
4214 ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
4215 ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
4216 ht_cap.cap |=
4217 WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT;
4218
4219 if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
4220 ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
4221
4222 if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
4223 ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
4224
4225 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
4226 u32 smps;
4227
4228 smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
4229 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
4230
4231 ht_cap.cap |= smps;
4232 }
4233
4234 if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC)
4235 ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
4236
4237 if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
4238 u32 stbc;
4239
4240 stbc = ar->ht_cap_info;
4241 stbc &= WMI_HT_CAP_RX_STBC;
4242 stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
4243 stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
4244 stbc &= IEEE80211_HT_CAP_RX_STBC;
4245
4246 ht_cap.cap |= stbc;
4247 }
4248
4249 if (ar->ht_cap_info & WMI_HT_CAP_LDPC)
4250 ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
4251
4252 if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
4253 ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
4254
4255 /* max AMSDU is implicitly taken from vht_cap_info */
4256 if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
4257 ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
4258
4259 for (i = 0; i < ar->num_rf_chains; i++) {
4260 if (ar->cfg_rx_chainmask & BIT(i))
4261 ht_cap.mcs.rx_mask[i] = 0xFF;
4262 }
4263
4264 ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
4265
4266 return ht_cap;
4267 }
4268
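/* Push the freshly computed HT/VHT caps into the registered bands: the
 * 2 GHz band gets HT caps only, the 5 GHz band gets both HT and VHT caps.
 */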
4269 static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
4270 {
4271 struct ieee80211_supported_band *band;
4272 struct ieee80211_sta_vht_cap vht_cap;
4273 struct ieee80211_sta_ht_cap ht_cap;
4274
4275 ht_cap = ath10k_get_ht_cap(ar);
4276 vht_cap = ath10k_create_vht_cap(ar);
4277
4278 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
4279 band = &ar->mac.sbands[NL80211_BAND_2GHZ];
4280 band->ht_cap = ht_cap;
4281 }
4282 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
4283 band = &ar->mac.sbands[NL80211_BAND_5GHZ];
4284 band->ht_cap = ht_cap;
4285 band->vht_cap = vht_cap;
4286 }
4287 }
4288
4289 static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
4290 {
4291 int ret;
4292
4293 lockdep_assert_held(&ar->conf_mutex);
4294
4295 ath10k_check_chain_mask(ar, tx_ant, "tx");
4296 ath10k_check_chain_mask(ar, rx_ant, "rx");
4297
4298 ar->cfg_tx_chainmask = tx_ant;
4299 ar->cfg_rx_chainmask = rx_ant;
4300
4301 if ((ar->state != ATH10K_STATE_ON) &&
4302 (ar->state != ATH10K_STATE_RESTARTED))
4303 return 0;
4304
4305 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
4306 tx_ant);
4307 if (ret) {
4308 ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n",
4309 ret, tx_ant);
4310 return ret;
4311 }
4312
4313 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
4314 rx_ant);
4315 if (ret) {
4316 ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n",
4317 ret, rx_ant);
4318 return ret;
4319 }
4320
4321 /* Reload HT/VHT capability */
4322 ath10k_mac_setup_ht_vht_cap(ar);
4323
4324 return 0;
4325 }
4326
4327 static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
4328 {
4329 struct ath10k *ar = hw->priv;
4330 int ret;
4331
4332 mutex_lock(&ar->conf_mutex);
4333 ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
4334 mutex_unlock(&ar->conf_mutex);
4335 return ret;
4336 }
4337
4338 static int ath10k_start(struct ieee80211_hw *hw)
4339 {
4340 struct ath10k *ar = hw->priv;
4341 u32 param;
4342 int ret = 0;
4343
4344 /*
4345 * This makes sense only when restarting hw. It is harmless to call
4346 * unconditionally. This is necessary to make sure no HTT/WMI tx
4347 * commands will be submitted while restarting.
4348 */
4349 ath10k_drain_tx(ar);
4350
4351 mutex_lock(&ar->conf_mutex);
4352
4353 switch (ar->state) {
4354 case ATH10K_STATE_OFF:
4355 ar->state = ATH10K_STATE_ON;
4356 break;
4357 case ATH10K_STATE_RESTARTING:
4358 ath10k_halt(ar);
4359 ar->state = ATH10K_STATE_RESTARTED;
4360 break;
4361 case ATH10K_STATE_ON:
4362 case ATH10K_STATE_RESTARTED:
4363 case ATH10K_STATE_WEDGED:
4364 WARN_ON(1);
4365 ret = -EINVAL;
4366 goto err;
4367 case ATH10K_STATE_UTF:
4368 ret = -EBUSY;
4369 goto err;
4370 }
4371
4372 ret = ath10k_hif_power_up(ar);
4373 if (ret) {
4374 ath10k_err(ar, "Could not init hif: %d\n", ret);
4375 goto err_off;
4376 }
4377
4378 ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
4379 &ar->normal_mode_fw);
4380 if (ret) {
4381 ath10k_err(ar, "Could not init core: %d\n", ret);
4382 goto err_power_down;
4383 }
4384
4385 param = ar->wmi.pdev_param->pmf_qos;
4386 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4387 if (ret) {
4388 ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
4389 goto err_core_stop;
4390 }
4391
4392 param = ar->wmi.pdev_param->dynamic_bw;
4393 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4394 if (ret) {
4395 ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
4396 goto err_core_stop;
4397 }
4398
4399 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
4400 ret = ath10k_wmi_adaptive_qcs(ar, true);
4401 if (ret) {
4402 ath10k_warn(ar, "failed to enable adaptive qcs: %d\n",
4403 ret);
4404 goto err_core_stop;
4405 }
4406 }
4407
4408 if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
4409 param = ar->wmi.pdev_param->burst_enable;
4410 ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4411 if (ret) {
4412 ath10k_warn(ar, "failed to disable burst: %d\n", ret);
4413 goto err_core_stop;
4414 }
4415 }
4416
4417 __ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
4418
4419 /*
4420  * By default the FW sets the ARP frames' AC to voice (6). In that case
4421  * the ARP exchange does not work properly with a UAPSD-enabled AP. ARP
4422  * requests which arrive with access category 0 are processed by the
4423  * network stack and sent back with access category 0, but the FW changes
4424  * the access category to 6. Setting the ARP frames' access category to
4425  * best effort (0) solves this problem.
4426  */
4427
4428 param = ar->wmi.pdev_param->arp_ac_override;
4429 ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4430 if (ret) {
4431 ath10k_warn(ar, "failed to set arp ac override parameter: %d\n",
4432 ret);
4433 goto err_core_stop;
4434 }
4435
4436 if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA,
4437 ar->running_fw->fw_file.fw_features)) {
4438 ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1,
4439 WMI_CCA_DETECT_LEVEL_AUTO,
4440 WMI_CCA_DETECT_MARGIN_AUTO);
4441 if (ret) {
4442 ath10k_warn(ar, "failed to enable adaptive cca: %d\n",
4443 ret);
4444 goto err_core_stop;
4445 }
4446 }
4447
4448 param = ar->wmi.pdev_param->ani_enable;
4449 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4450 if (ret) {
4451 ath10k_warn(ar, "failed to enable ani by default: %d\n",
4452 ret);
4453 goto err_core_stop;
4454 }
4455
4456 ar->ani_enabled = true;
4457
4458 if (ath10k_peer_stats_enabled(ar)) {
4459 param = ar->wmi.pdev_param->peer_stats_update_period;
4460 ret = ath10k_wmi_pdev_set_param(ar, param,
4461 PEER_DEFAULT_STATS_UPDATE_PERIOD);
4462 if (ret) {
4463 ath10k_warn(ar,
4464 "failed to set peer stats period : %d\n",
4465 ret);
4466 goto err_core_stop;
4467 }
4468 }
4469
4470 ar->num_started_vdevs = 0;
4471 ath10k_regd_update(ar);
4472
4473 ath10k_spectral_start(ar);
4474 ath10k_thermal_set_throttling(ar);
4475
4476 mutex_unlock(&ar->conf_mutex);
4477 return 0;
4478
4479 err_core_stop:
4480 ath10k_core_stop(ar);
4481
4482 err_power_down:
4483 ath10k_hif_power_down(ar);
4484
4485 err_off:
4486 ar->state = ATH10K_STATE_OFF;
4487
4488 err:
4489 mutex_unlock(&ar->conf_mutex);
4490 return ret;
4491 }
4492
4493 static void ath10k_stop(struct ieee80211_hw *hw)
4494 {
4495 struct ath10k *ar = hw->priv;
4496
4497 ath10k_drain_tx(ar);
4498
4499 mutex_lock(&ar->conf_mutex);
4500 if (ar->state != ATH10K_STATE_OFF) {
4501 ath10k_halt(ar);
4502 ar->state = ATH10K_STATE_OFF;
4503 }
4504 mutex_unlock(&ar->conf_mutex);
4505
4506 cancel_delayed_work_sync(&ar->scan.timeout);
4507 cancel_work_sync(&ar->restart_work);
4508 }
4509
4510 static int ath10k_config_ps(struct ath10k *ar)
4511 {
4512 struct ath10k_vif *arvif;
4513 int ret = 0;
4514
4515 lockdep_assert_held(&ar->conf_mutex);
4516
4517 list_for_each_entry(arvif, &ar->arvifs, list) {
4518 ret = ath10k_mac_vif_setup_ps(arvif);
4519 if (ret) {
4520 ath10k_warn(ar, "failed to setup powersave: %d\n", ret);
4521 break;
4522 }
4523 }
4524
4525 return ret;
4526 }
4527
4528 static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
4529 {
4530 int ret;
4531 u32 param;
4532
4533 lockdep_assert_held(&ar->conf_mutex);
4534
4535 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower);
4536
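	/* txpower arrives in dBm from mac80211; the doubling below suggests
	 * the WMI limit params are taken in 0.5 dBm steps (an assumption,
	 * not spelled out here).
	 */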
4537 param = ar->wmi.pdev_param->txpower_limit2g;
4538 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
4539 if (ret) {
4540 ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
4541 txpower, ret);
4542 return ret;
4543 }
4544
4545 param = ar->wmi.pdev_param->txpower_limit5g;
4546 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
4547 if (ret) {
4548 ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
4549 txpower, ret);
4550 return ret;
4551 }
4552
4553 return 0;
4554 }
4555
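/* The pdev-wide limit is the minimum txpower across all vifs, e.g. vifs
 * configured at 20 dBm and 17 dBm result in a 17 dBm limit.
 */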
4556 static int ath10k_mac_txpower_recalc(struct ath10k *ar)
4557 {
4558 struct ath10k_vif *arvif;
4559 int ret, txpower = -1;
4560
4561 lockdep_assert_held(&ar->conf_mutex);
4562
4563 list_for_each_entry(arvif, &ar->arvifs, list) {
4564 WARN_ON(arvif->txpower < 0);
4565
4566 if (txpower == -1)
4567 txpower = arvif->txpower;
4568 else
4569 txpower = min(txpower, arvif->txpower);
4570 }
4571
4572 if (WARN_ON(txpower == -1))
4573 return -EINVAL;
4574
4575 ret = ath10k_mac_txpower_setup(ar, txpower);
4576 if (ret) {
4577 ath10k_warn(ar, "failed to setup tx power %d: %d\n",
4578 txpower, ret);
4579 return ret;
4580 }
4581
4582 return 0;
4583 }
4584
4585 static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
4586 {
4587 struct ath10k *ar = hw->priv;
4588 struct ieee80211_conf *conf = &hw->conf;
4589 int ret = 0;
4590
4591 mutex_lock(&ar->conf_mutex);
4592
4593 if (changed & IEEE80211_CONF_CHANGE_PS)
4594 ath10k_config_ps(ar);
4595
4596 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
4597 ar->monitor = conf->flags & IEEE80211_CONF_MONITOR;
4598 ret = ath10k_monitor_recalc(ar);
4599 if (ret)
4600 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
4601 }
4602
4603 mutex_unlock(&ar->conf_mutex);
4604 return ret;
4605 }
4606
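/* Only contiguous low bits of the chainmask count towards NSS: 0xf -> 4,
 * 0x7 -> 3, 0x3 -> 2, anything else (e.g. a gapped mask like 0x5) -> 1.
 */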
4607 static u32 get_nss_from_chainmask(u16 chain_mask)
4608 {
4609 if ((chain_mask & 0xf) == 0xf)
4610 return 4;
4611 else if ((chain_mask & 0x7) == 0x7)
4612 return 3;
4613 else if ((chain_mask & 0x3) == 0x3)
4614 return 2;
4615 return 1;
4616 }
4617
4618 static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
4619 {
4620 u32 value = 0;
4621 struct ath10k *ar = arvif->ar;
4622 int nsts;
4623 int sound_dim;
4624
4625 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
4626 return 0;
4627
4628 nsts = ath10k_mac_get_vht_cap_bf_sts(ar);
4629 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
4630 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
4631 value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
4632
4633 sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
4634 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
4635 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
4636 value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
4637
4638 if (!value)
4639 return 0;
4640
4641 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
4642 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
4643
4644 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
4645 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER |
4646 WMI_VDEV_PARAM_TXBF_SU_TX_BFER);
4647
4648 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
4649 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
4650
4651 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
4652 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE |
4653 WMI_VDEV_PARAM_TXBF_SU_TX_BFEE);
4654
4655 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
4656 ar->wmi.vdev_param->txbf, value);
4657 }
4658
4659 /*
4660 * TODO:
4661 * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
4662 * because we will send mgmt frames without CCK. This requirement
4663 * for P2P_FIND/GO_NEG should be handled by checking CCK flag
4664 * in the TX packet.
4665 */
4666 static int ath10k_add_interface(struct ieee80211_hw *hw,
4667 struct ieee80211_vif *vif)
4668 {
4669 struct ath10k *ar = hw->priv;
4670 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
4671 struct ath10k_peer *peer;
4672 enum wmi_sta_powersave_param param;
4673 int ret = 0;
4674 u32 value;
4675 int bit;
4676 int i;
4677 u32 vdev_param;
4678
4679 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
4680
4681 mutex_lock(&ar->conf_mutex);
4682
4683 memset(arvif, 0, sizeof(*arvif));
4684 ath10k_mac_txq_init(vif->txq);
4685
4686 arvif->ar = ar;
4687 arvif->vif = vif;
4688
4689 INIT_LIST_HEAD(&arvif->list);
4690 INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work);
4691 INIT_DELAYED_WORK(&arvif->connection_loss_work,
4692 ath10k_mac_vif_sta_connection_loss_work);
4693
4694 for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
4695 arvif->bitrate_mask.control[i].legacy = 0xffffffff;
4696 memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
4697 sizeof(arvif->bitrate_mask.control[i].ht_mcs));
4698 memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
4699 sizeof(arvif->bitrate_mask.control[i].vht_mcs));
4700 }
4701
4702 if (ar->num_peers >= ar->max_num_peers) {
4703 ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n");
4704 ret = -ENOBUFS;
4705 goto err;
4706 }
4707
4708 if (ar->free_vdev_map == 0) {
4709 ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
4710 ret = -EBUSY;
4711 goto err;
4712 }
4713 bit = __ffs64(ar->free_vdev_map);
4714
4715 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n",
4716 bit, ar->free_vdev_map);
4717
4718 arvif->vdev_id = bit;
4719 arvif->vdev_subtype =
4720 ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
4721
4722 switch (vif->type) {
4723 case NL80211_IFTYPE_P2P_DEVICE:
4724 arvif->vdev_type = WMI_VDEV_TYPE_STA;
4725 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4726 (ar, WMI_VDEV_SUBTYPE_P2P_DEVICE);
4727 break;
4728 case NL80211_IFTYPE_UNSPECIFIED:
4729 case NL80211_IFTYPE_STATION:
4730 arvif->vdev_type = WMI_VDEV_TYPE_STA;
4731 if (vif->p2p)
4732 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4733 (ar, WMI_VDEV_SUBTYPE_P2P_CLIENT);
4734 break;
4735 case NL80211_IFTYPE_ADHOC:
4736 arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
4737 break;
4738 case NL80211_IFTYPE_MESH_POINT:
4739 if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) {
4740 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4741 (ar, WMI_VDEV_SUBTYPE_MESH_11S);
4742 } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
4743 ret = -EINVAL;
4744 ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n");
4745 goto err;
4746 }
4747 arvif->vdev_type = WMI_VDEV_TYPE_AP;
4748 break;
4749 case NL80211_IFTYPE_AP:
4750 arvif->vdev_type = WMI_VDEV_TYPE_AP;
4751
4752 if (vif->p2p)
4753 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4754 (ar, WMI_VDEV_SUBTYPE_P2P_GO);
4755 break;
4756 case NL80211_IFTYPE_MONITOR:
4757 arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
4758 break;
4759 default:
4760 WARN_ON(1);
4761 break;
4762 }
4763
4764 /* Using vdev_id as the queue number makes it very easy to do per-vif
4765  * tx queue locking. This shouldn't wrap due to interface combinations
4766  * but do a modulo for correctness' sake and to prevent using offchannel
4767  * tx queues for regular vif tx.
4768  */
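/* For example vdev_id 3 maps to cab_queue/hw_queue 3. The modulo keeps the
 * value below IEEE80211_MAX_QUEUES - 1, leaving the last hw queue alone
 * (assuming it is the one reserved for offchannel tx).
 */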
4769 vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
4770 for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
4771 vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
4772
4773 /* Some firmware revisions don't wait for beacon tx completion before
4774  * sending another SWBA event. This could lead to hardware using old
4775  * (freed) beacon data in some cases, e.g. tx credit starvation
4776  * combined with missed TBTT. This is very, very rare.
4777  *
4778  * On non-IOMMU-enabled hosts this could be a possible security issue
4779  * because the hw could beacon some random data over the air. On
4780  * IOMMU-enabled hosts DMAR faults would occur in most cases and the
4781  * target device would crash.
4782  *
4783  * Since there are no beacon tx completions (neither implicit nor
4784  * explicit) propagated to the host, the only workaround is to allocate
4785  * a DMA-coherent buffer for the lifetime of a vif and use it for all
4786  * beacon tx commands. The worst case is that some beacons may become
4787  * corrupted, e.g. have garbled IEs or an out-of-date TIM bitmap.
4788  */
4789 if (vif->type == NL80211_IFTYPE_ADHOC ||
4790 vif->type == NL80211_IFTYPE_MESH_POINT ||
4791 vif->type == NL80211_IFTYPE_AP) {
4792 arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
4793 IEEE80211_MAX_FRAME_LEN,
4794 &arvif->beacon_paddr,
4795 GFP_ATOMIC);
4796 if (!arvif->beacon_buf) {
4797 ret = -ENOMEM;
4798 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
4799 ret);
4800 goto err;
4801 }
4802 }
4803 if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
4804 arvif->nohwcrypt = true;
4805
4806 if (arvif->nohwcrypt &&
4807     !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
         ret = -EINVAL;
4808     ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
4809     goto err;
4810 }
4811
4812 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
4813 arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
4814 arvif->beacon_buf ? "single-buf" : "per-skb");
4815
4816 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
4817 arvif->vdev_subtype, vif->addr);
4818 if (ret) {
4819 ath10k_warn(ar, "failed to create WMI vdev %i: %d\n",
4820 arvif->vdev_id, ret);
4821 goto err;
4822 }
4823
4824 ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
4825 list_add(&arvif->list, &ar->arvifs);
4826
4827 /* It makes no sense to have firmware do keepalives. mac80211 already
4828 * takes care of this with idle connection polling.
4829 */
4830 ret = ath10k_mac_vif_disable_keepalive(arvif);
4831 if (ret) {
4832 ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n",
4833 arvif->vdev_id, ret);
4834 goto err_vdev_delete;
4835 }
4836
4837 arvif->def_wep_key_idx = -1;
4838
4839 vdev_param = ar->wmi.vdev_param->tx_encap_type;
4840 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
4841 ATH10K_HW_TXRX_NATIVE_WIFI);
4842 /* 10.X firmware does not support this VDEV parameter. Do not warn */
4843 if (ret && ret != -EOPNOTSUPP) {
4844 ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
4845 arvif->vdev_id, ret);
4846 goto err_vdev_delete;
4847 }
4848
4849 /* Configuring the number of spatial streams for a monitor interface
4850  * causes a target assert in qca9888 and qca6174.
4851  */
4852 if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) {
4853 u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
4854
4855 vdev_param = ar->wmi.vdev_param->nss;
4856 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
4857 nss);
4858 if (ret) {
4859 ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n",
4860 arvif->vdev_id, ar->cfg_tx_chainmask, nss,
4861 ret);
4862 goto err_vdev_delete;
4863 }
4864 }
4865
4866 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
4867 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
4868 ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id,
4869 vif->addr, WMI_PEER_TYPE_DEFAULT);
4870 if (ret) {
4871 ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
4872 arvif->vdev_id, ret);
4873 goto err_vdev_delete;
4874 }
4875
4876 spin_lock_bh(&ar->data_lock);
4877
4878 peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr);
4879 if (!peer) {
4880 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
4881 vif->addr, arvif->vdev_id);
4882 spin_unlock_bh(&ar->data_lock);
4883 ret = -ENOENT;
4884 goto err_peer_delete;
4885 }
4886
4887 arvif->peer_id = find_first_bit(peer->peer_ids,
4888 ATH10K_MAX_NUM_PEER_IDS);
4889
4890 spin_unlock_bh(&ar->data_lock);
4891 } else {
4892 arvif->peer_id = HTT_INVALID_PEERID;
4893 }
4894
4895 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
4896 ret = ath10k_mac_set_kickout(arvif);
4897 if (ret) {
4898 ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
4899 arvif->vdev_id, ret);
4900 goto err_peer_delete;
4901 }
4902 }
4903
4904 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
4905 param = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
4906 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
4907 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
4908 param, value);
4909 if (ret) {
4910 ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n",
4911 arvif->vdev_id, ret);
4912 goto err_peer_delete;
4913 }
4914
4915 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
4916 if (ret) {
4917 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
4918 arvif->vdev_id, ret);
4919 goto err_peer_delete;
4920 }
4921
4922 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
4923 if (ret) {
4924 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
4925 arvif->vdev_id, ret);
4926 goto err_peer_delete;
4927 }
4928 }
4929
4930 ret = ath10k_mac_set_txbf_conf(arvif);
4931 if (ret) {
4932 ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n",
4933 arvif->vdev_id, ret);
4934 goto err_peer_delete;
4935 }
4936
4937 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
4938 if (ret) {
4939 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
4940 arvif->vdev_id, ret);
4941 goto err_peer_delete;
4942 }
4943
4944 arvif->txpower = vif->bss_conf.txpower;
4945 ret = ath10k_mac_txpower_recalc(ar);
4946 if (ret) {
4947 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
4948 goto err_peer_delete;
4949 }
4950
4951 if (vif->type == NL80211_IFTYPE_MONITOR) {
4952 ar->monitor_arvif = arvif;
4953 ret = ath10k_monitor_recalc(ar);
4954 if (ret) {
4955 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
4956 goto err_peer_delete;
4957 }
4958 }
4959
4960 spin_lock_bh(&ar->htt.tx_lock);
4961 if (!ar->tx_paused)
4962 ieee80211_wake_queue(ar->hw, arvif->vdev_id);
4963 spin_unlock_bh(&ar->htt.tx_lock);
4964
4965 mutex_unlock(&ar->conf_mutex);
4966 return 0;
4967
4968 err_peer_delete:
4969 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
4970 arvif->vdev_type == WMI_VDEV_TYPE_IBSS)
4971 ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
4972
4973 err_vdev_delete:
4974 ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
4975 ar->free_vdev_map |= 1LL << arvif->vdev_id;
4976 list_del(&arvif->list);
4977
4978 err:
4979 if (arvif->beacon_buf) {
4980 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
4981 arvif->beacon_buf, arvif->beacon_paddr);
4982 arvif->beacon_buf = NULL;
4983 }
4984
4985 mutex_unlock(&ar->conf_mutex);
4986
4987 return ret;
4988 }
4989
4990 static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif)
4991 {
4992 int i;
4993
4994 for (i = 0; i < BITS_PER_LONG; i++)
4995 ath10k_mac_vif_tx_unlock(arvif, i);
4996 }
4997
4998 static void ath10k_remove_interface(struct ieee80211_hw *hw,
4999 struct ieee80211_vif *vif)
5000 {
5001 struct ath10k *ar = hw->priv;
5002 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5003 struct ath10k_peer *peer;
5004 int ret;
5005 int i;
5006
5007 cancel_work_sync(&arvif->ap_csa_work);
5008 cancel_delayed_work_sync(&arvif->connection_loss_work);
5009
5010 mutex_lock(&ar->conf_mutex);
5011
5012 spin_lock_bh(&ar->data_lock);
5013 ath10k_mac_vif_beacon_cleanup(arvif);
5014 spin_unlock_bh(&ar->data_lock);
5015
5016 ret = ath10k_spectral_vif_stop(arvif);
5017 if (ret)
5018 ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n",
5019 arvif->vdev_id, ret);
5020
5021 ar->free_vdev_map |= 1LL << arvif->vdev_id;
5022 list_del(&arvif->list);
5023
5024 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5025 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5026 ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
5027 vif->addr);
5028 if (ret)
5029 ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n",
5030 arvif->vdev_id, ret);
5031
5032 kfree(arvif->u.ap.noa_data);
5033 }
5034
5035 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
5036 arvif->vdev_id);
5037
5038 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
5039 if (ret)
5040 ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
5041 arvif->vdev_id, ret);
5042
5043 /* Some firmware revisions don't notify the host about self-peer removal
5044  * until after the associated vdev is deleted.
5045  */
5046 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5047 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5048 ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
5049 vif->addr);
5050 if (ret)
5051 ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n",
5052 arvif->vdev_id, ret);
5053
5054 spin_lock_bh(&ar->data_lock);
5055 ar->num_peers--;
5056 spin_unlock_bh(&ar->data_lock);
5057 }
5058
5059 spin_lock_bh(&ar->data_lock);
5060 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
5061 peer = ar->peer_map[i];
5062 if (!peer)
5063 continue;
5064
5065 if (peer->vif == vif) {
5066 ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n",
5067 vif->addr, arvif->vdev_id);
5068 peer->vif = NULL;
5069 }
5070 }
5071 spin_unlock_bh(&ar->data_lock);
5072
5073 ath10k_peer_cleanup(ar, arvif->vdev_id);
5074 ath10k_mac_txq_unref(ar, vif->txq);
5075
5076 if (vif->type == NL80211_IFTYPE_MONITOR) {
5077 ar->monitor_arvif = NULL;
5078 ret = ath10k_monitor_recalc(ar);
5079 if (ret)
5080 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5081 }
5082
5083 spin_lock_bh(&ar->htt.tx_lock);
5084 ath10k_mac_vif_tx_unlock_all(arvif);
5085 spin_unlock_bh(&ar->htt.tx_lock);
5086
5087 ath10k_mac_txq_unref(ar, vif->txq);
5088
5089 mutex_unlock(&ar->conf_mutex);
5090 }
5091
5092 /*
5093 * FIXME: Has to be verified.
5094 */
5095 #define SUPPORTED_FILTERS \
5096 (FIF_ALLMULTI | \
5097 FIF_CONTROL | \
5098 FIF_PSPOLL | \
5099 FIF_OTHER_BSS | \
5100 FIF_BCN_PRBRESP_PROMISC | \
5101 FIF_PROBE_REQ | \
5102 FIF_FCSFAIL)
5103
5104 static void ath10k_configure_filter(struct ieee80211_hw *hw,
5105 unsigned int changed_flags,
5106 unsigned int *total_flags,
5107 u64 multicast)
5108 {
5109 struct ath10k *ar = hw->priv;
5110 int ret;
5111
5112 mutex_lock(&ar->conf_mutex);
5113
5114 changed_flags &= SUPPORTED_FILTERS;
5115 *total_flags &= SUPPORTED_FILTERS;
5116 ar->filter_flags = *total_flags;
5117
5118 ret = ath10k_monitor_recalc(ar);
5119 if (ret)
5120 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5121
5122 mutex_unlock(&ar->conf_mutex);
5123 }
5124
5125 static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
5126 struct ieee80211_vif *vif,
5127 struct ieee80211_bss_conf *info,
5128 u32 changed)
5129 {
5130 struct ath10k *ar = hw->priv;
5131 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5132 int ret = 0;
5133 u32 vdev_param, pdev_param, slottime, preamble;
5134
5135 mutex_lock(&ar->conf_mutex);
5136
5137 if (changed & BSS_CHANGED_IBSS)
5138 ath10k_control_ibss(arvif, info, vif->addr);
5139
5140 if (changed & BSS_CHANGED_BEACON_INT) {
5141 arvif->beacon_interval = info->beacon_int;
5142 vdev_param = ar->wmi.vdev_param->beacon_interval;
5143 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5144 arvif->beacon_interval);
5145 ath10k_dbg(ar, ATH10K_DBG_MAC,
5146 "mac vdev %d beacon_interval %d\n",
5147 arvif->vdev_id, arvif->beacon_interval);
5148
5149 if (ret)
5150 ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n",
5151 arvif->vdev_id, ret);
5152 }
5153
5154 if (changed & BSS_CHANGED_BEACON) {
5155 ath10k_dbg(ar, ATH10K_DBG_MAC,
5156 "vdev %d set beacon tx mode to staggered\n",
5157 arvif->vdev_id);
5158
5159 pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
5160 ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
5161 WMI_BEACON_STAGGERED_MODE);
5162 if (ret)
5163 ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n",
5164 arvif->vdev_id, ret);
5165
5166 ret = ath10k_mac_setup_bcn_tmpl(arvif);
5167 if (ret)
5168 ath10k_warn(ar, "failed to update beacon template: %d\n",
5169 ret);
5170
5171 if (ieee80211_vif_is_mesh(vif)) {
5172 /* mesh doesn't use SSID but firmware needs it */
5173 strncpy(arvif->u.ap.ssid, "mesh",
5174 sizeof(arvif->u.ap.ssid));
5175 arvif->u.ap.ssid_len = 4;
5176 }
5177 }
5178
5179 if (changed & BSS_CHANGED_AP_PROBE_RESP) {
5180 ret = ath10k_mac_setup_prb_tmpl(arvif);
5181 if (ret)
5182 ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n",
5183 arvif->vdev_id, ret);
5184 }
5185
5186 if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
5187 arvif->dtim_period = info->dtim_period;
5188
5189 ath10k_dbg(ar, ATH10K_DBG_MAC,
5190 "mac vdev %d dtim_period %d\n",
5191 arvif->vdev_id, arvif->dtim_period);
5192
5193 vdev_param = ar->wmi.vdev_param->dtim_period;
5194 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5195 arvif->dtim_period);
5196 if (ret)
5197 ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n",
5198 arvif->vdev_id, ret);
5199 }
5200
5201 if (changed & BSS_CHANGED_SSID &&
5202 vif->type == NL80211_IFTYPE_AP) {
5203 arvif->u.ap.ssid_len = info->ssid_len;
5204 if (info->ssid_len)
5205 memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
5206 arvif->u.ap.hidden_ssid = info->hidden_ssid;
5207 }
5208
5209 if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
5210 ether_addr_copy(arvif->bssid, info->bssid);
5211
5212 if (changed & BSS_CHANGED_BEACON_ENABLED)
5213 ath10k_control_beaconing(arvif, info);
5214
5215 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
5216 arvif->use_cts_prot = info->use_cts_prot;
5217 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
5218 arvif->vdev_id, info->use_cts_prot);
5219
5220 ret = ath10k_recalc_rtscts_prot(arvif);
5221 if (ret)
5222 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
5223 arvif->vdev_id, ret);
5224
5225 vdev_param = ar->wmi.vdev_param->protection_mode;
5226 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5227 info->use_cts_prot ? 1 : 0);
5228 if (ret)
5229 ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n",
5230 info->use_cts_prot, arvif->vdev_id, ret);
5231 }
5232
5233 if (changed & BSS_CHANGED_ERP_SLOT) {
5234 if (info->use_short_slot)
5235 slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
5236
5237 else
5238 slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
5239
5240 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
5241 arvif->vdev_id, slottime);
5242
5243 vdev_param = ar->wmi.vdev_param->slot_time;
5244 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5245 slottime);
5246 if (ret)
5247 ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n",
5248 arvif->vdev_id, ret);
5249 }
5250
5251 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
5252 if (info->use_short_preamble)
5253 preamble = WMI_VDEV_PREAMBLE_SHORT;
5254 else
5255 preamble = WMI_VDEV_PREAMBLE_LONG;
5256
5257 ath10k_dbg(ar, ATH10K_DBG_MAC,
5258 "mac vdev %d preamble %d\n",
5259 arvif->vdev_id, preamble);
5260
5261 vdev_param = ar->wmi.vdev_param->preamble;
5262 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5263 preamble);
5264 if (ret)
5265 ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n",
5266 arvif->vdev_id, ret);
5267 }
5268
5269 if (changed & BSS_CHANGED_ASSOC) {
5270 if (info->assoc) {
5271 /* Workaround: Make sure monitor vdev is not running
5272 * when associating to prevent some firmware revisions
5273 * (e.g. 10.1 and 10.2) from crashing.
5274 */
5275 if (ar->monitor_started)
5276 ath10k_monitor_stop(ar);
5277 ath10k_bss_assoc(hw, vif, info);
5278 ath10k_monitor_recalc(ar);
5279 } else {
5280 ath10k_bss_disassoc(hw, vif);
5281 }
5282 }
5283
5284 if (changed & BSS_CHANGED_TXPOWER) {
5285 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n",
5286 arvif->vdev_id, info->txpower);
5287
5288 arvif->txpower = info->txpower;
5289 ret = ath10k_mac_txpower_recalc(ar);
5290 if (ret)
5291 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
5292 }
5293
5294 if (changed & BSS_CHANGED_PS) {
5295 arvif->ps = vif->bss_conf.ps;
5296
5297 ret = ath10k_config_ps(ar);
5298 if (ret)
5299 ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n",
5300 arvif->vdev_id, ret);
5301 }
5302
5303 mutex_unlock(&ar->conf_mutex);
5304 }
5305
5306 static int ath10k_hw_scan(struct ieee80211_hw *hw,
5307 struct ieee80211_vif *vif,
5308 struct ieee80211_scan_request *hw_req)
5309 {
5310 struct ath10k *ar = hw->priv;
5311 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5312 struct cfg80211_scan_request *req = &hw_req->req;
5313 struct wmi_start_scan_arg arg;
5314 int ret = 0;
5315 int i;
5316
5317 mutex_lock(&ar->conf_mutex);
5318
5319 spin_lock_bh(&ar->data_lock);
5320 switch (ar->scan.state) {
5321 case ATH10K_SCAN_IDLE:
5322 reinit_completion(&ar->scan.started);
5323 reinit_completion(&ar->scan.completed);
5324 ar->scan.state = ATH10K_SCAN_STARTING;
5325 ar->scan.is_roc = false;
5326 ar->scan.vdev_id = arvif->vdev_id;
5327 ret = 0;
5328 break;
5329 case ATH10K_SCAN_STARTING:
5330 case ATH10K_SCAN_RUNNING:
5331 case ATH10K_SCAN_ABORTING:
5332 ret = -EBUSY;
5333 break;
5334 }
5335 spin_unlock_bh(&ar->data_lock);
5336
5337 if (ret)
5338 goto exit;
5339
5340 memset(&arg, 0, sizeof(arg));
5341 ath10k_wmi_start_scan_init(ar, &arg);
5342 arg.vdev_id = arvif->vdev_id;
5343 arg.scan_id = ATH10K_SCAN_ID;
5344
5345 if (req->ie_len) {
5346 arg.ie_len = req->ie_len;
5347 memcpy(arg.ie, req->ie, arg.ie_len);
5348 }
5349
5350 if (req->n_ssids) {
5351 arg.n_ssids = req->n_ssids;
5352 for (i = 0; i < arg.n_ssids; i++) {
5353 arg.ssids[i].len = req->ssids[i].ssid_len;
5354 arg.ssids[i].ssid = req->ssids[i].ssid;
5355 }
5356 } else {
5357 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
5358 }
5359
5360 if (req->n_channels) {
5361 arg.n_channels = req->n_channels;
5362 for (i = 0; i < arg.n_channels; i++)
5363 arg.channels[i] = req->channels[i]->center_freq;
5364 }
5365
5366 ret = ath10k_start_scan(ar, &arg);
5367 if (ret) {
5368 ath10k_warn(ar, "failed to start hw scan: %d\n", ret);
5369 spin_lock_bh(&ar->data_lock);
5370 ar->scan.state = ATH10K_SCAN_IDLE;
5371 spin_unlock_bh(&ar->data_lock);
5372 }
5373
5374 /* Add a 200ms margin to account for event/command processing */
5375 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
5376 msecs_to_jiffies(arg.max_scan_time +
5377 200));
5378
5379 exit:
5380 mutex_unlock(&ar->conf_mutex);
5381 return ret;
5382 }
5383
5384 static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
5385 struct ieee80211_vif *vif)
5386 {
5387 struct ath10k *ar = hw->priv;
5388
5389 mutex_lock(&ar->conf_mutex);
5390 ath10k_scan_abort(ar);
5391 mutex_unlock(&ar->conf_mutex);
5392
5393 cancel_delayed_work_sync(&ar->scan.timeout);
5394 }
5395
5396 static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
5397 struct ath10k_vif *arvif,
5398 enum set_key_cmd cmd,
5399 struct ieee80211_key_conf *key)
5400 {
5401 u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid;
5402 int ret;
5403
5404 /* 10.1 firmware branch requires default key index to be set to group
5405 * key index after installing it. Otherwise FW/HW Txes corrupted
5406 * frames with multi-vif APs. This is not required for main firmware
5407 * branch (e.g. 636).
5408 *
5409 * This is also needed for 636 fw for IBSS-RSN to work more reliably.
5410 *
5411 * FIXME: It remains unknown if this is required for multi-vif STA
5412 * interfaces on 10.1.
5413 */
5414
5415 if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
5416 arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
5417 return;
5418
5419 if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
5420 return;
5421
5422 if (key->cipher == WLAN_CIPHER_SUITE_WEP104)
5423 return;
5424
5425 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
5426 return;
5427
5428 if (cmd != SET_KEY)
5429 return;
5430
5431 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5432 key->keyidx);
5433 if (ret)
5434 ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n",
5435 arvif->vdev_id, ret);
5436 }
5437
5438 static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
5439 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
5440 struct ieee80211_key_conf *key)
5441 {
5442 struct ath10k *ar = hw->priv;
5443 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5444 struct ath10k_peer *peer;
5445 const u8 *peer_addr;
5446 bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
5447 key->cipher == WLAN_CIPHER_SUITE_WEP104;
5448 int ret = 0;
5449 int ret2;
5450 u32 flags = 0;
5451 u32 flags2;
5452
5453 /* this one needs to be done in software */
5454 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
5455 return 1;
5456
5457 if (arvif->nohwcrypt)
5458 return 1;
5459
5460 if (key->keyidx > WMI_MAX_KEY_INDEX)
5461 return -ENOSPC;
5462
5463 mutex_lock(&ar->conf_mutex);
5464
5465 if (sta)
5466 peer_addr = sta->addr;
5467 else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
5468 peer_addr = vif->bss_conf.bssid;
5469 else
5470 peer_addr = vif->addr;
5471
5472 key->hw_key_idx = key->keyidx;
5473
5474 if (is_wep) {
5475 if (cmd == SET_KEY)
5476 arvif->wep_keys[key->keyidx] = key;
5477 else
5478 arvif->wep_keys[key->keyidx] = NULL;
5479 }
5480
5481 /* The peer should not disappear midway (unless FW goes awry) since
5482  * we already hold conf_mutex. We just make sure it's there now. */
5483 spin_lock_bh(&ar->data_lock);
5484 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
5485 spin_unlock_bh(&ar->data_lock);
5486
5487 if (!peer) {
5488 if (cmd == SET_KEY) {
5489 ath10k_warn(ar, "failed to install key for non-existent peer %pM\n",
5490 peer_addr);
5491 ret = -EOPNOTSUPP;
5492 goto exit;
5493 } else {
5494 /* if the peer doesn't exist there is no key to disable
5495 * anymore */
5496 goto exit;
5497 }
5498 }
5499
5500 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
5501 flags |= WMI_KEY_PAIRWISE;
5502 else
5503 flags |= WMI_KEY_GROUP;
5504
5505 if (is_wep) {
5506 if (cmd == DISABLE_KEY)
5507 ath10k_clear_vdev_key(arvif, key);
5508
5509 /* When WEP keys are uploaded it's possible that there are
5510 * stations associated already (e.g. when merging) without any
5511 * keys. Static WEP needs an explicit per-peer key upload.
5512 */
5513 if (vif->type == NL80211_IFTYPE_ADHOC &&
5514 cmd == SET_KEY)
5515 ath10k_mac_vif_update_wep_key(arvif, key);
5516
5517 /* 802.1x never sets the def_wep_key_idx so each set_key()
5518 * call changes default tx key.
5519 *
5520 * Static WEP sets def_wep_key_idx via .set_default_unicast_key
5521 * after first set_key().
5522 */
5523 if (cmd == SET_KEY && arvif->def_wep_key_idx == -1)
5524 flags |= WMI_KEY_TX_USAGE;
5525 }
5526
5527 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
5528 if (ret) {
5529 WARN_ON(ret > 0);
5530 ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
5531 arvif->vdev_id, peer_addr, ret);
5532 goto exit;
5533 }
5534
5535 /* mac80211 sets static WEP keys as groupwise while firmware requires
5536 * them to be installed twice as both pairwise and groupwise.
5537 */
5538 if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) {
5539 flags2 = flags;
5540 flags2 &= ~WMI_KEY_GROUP;
5541 flags2 |= WMI_KEY_PAIRWISE;
5542
5543 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
5544 if (ret) {
5545 WARN_ON(ret > 0);
5546 ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
5547 arvif->vdev_id, peer_addr, ret);
5548 ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
5549 peer_addr, flags);
5550 if (ret2) {
5551 WARN_ON(ret2 > 0);
5552 ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
5553 arvif->vdev_id, peer_addr, ret2);
5554 }
5555 goto exit;
5556 }
5557 }
5558
5559 ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
5560
5561 spin_lock_bh(&ar->data_lock);
5562 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
5563 if (peer && cmd == SET_KEY)
5564 peer->keys[key->keyidx] = key;
5565 else if (peer && cmd == DISABLE_KEY)
5566 peer->keys[key->keyidx] = NULL;
5567 else if (peer == NULL)
5568 /* impossible unless FW goes crazy */
5569 ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr);
5570 spin_unlock_bh(&ar->data_lock);
5571
5572 exit:
5573 mutex_unlock(&ar->conf_mutex);
5574 return ret;
5575 }
5576
5577 static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
5578 struct ieee80211_vif *vif,
5579 int keyidx)
5580 {
5581 struct ath10k *ar = hw->priv;
5582 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5583 int ret;
5584
5585 mutex_lock(&arvif->ar->conf_mutex);
5586
5587 if (arvif->ar->state != ATH10K_STATE_ON)
5588 goto unlock;
5589
5590 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
5591 arvif->vdev_id, keyidx);
5592
5593 ret = ath10k_wmi_vdev_set_param(arvif->ar,
5594 arvif->vdev_id,
5595 arvif->ar->wmi.vdev_param->def_keyid,
5596 keyidx);
5597
5598 if (ret) {
5599 ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
5600 arvif->vdev_id,
5601 ret);
5602 goto unlock;
5603 }
5604
5605 arvif->def_wep_key_idx = keyidx;
5606
5607 unlock:
5608 mutex_unlock(&arvif->ar->conf_mutex);
5609 }
5610
5611 static void ath10k_sta_rc_update_wk(struct work_struct *wk)
5612 {
5613 struct ath10k *ar;
5614 struct ath10k_vif *arvif;
5615 struct ath10k_sta *arsta;
5616 struct ieee80211_sta *sta;
5617 struct cfg80211_chan_def def;
5618 enum nl80211_band band;
5619 const u8 *ht_mcs_mask;
5620 const u16 *vht_mcs_mask;
5621 u32 changed, bw, nss, smps;
5622 int err;
5623
5624 arsta = container_of(wk, struct ath10k_sta, update_wk);
5625 sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
5626 arvif = arsta->arvif;
5627 ar = arvif->ar;
5628
5629 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
5630 return;
5631
5632 band = def.chan->band;
5633 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
5634 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
5635
5636 spin_lock_bh(&ar->data_lock);
5637
5638 changed = arsta->changed;
5639 arsta->changed = 0;
5640
5641 bw = arsta->bw;
5642 nss = arsta->nss;
5643 smps = arsta->smps;
5644
5645 spin_unlock_bh(&ar->data_lock);
5646
5647 mutex_lock(&ar->conf_mutex);
5648
5649 nss = max_t(u32, 1, nss);
5650 nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask),
5651 ath10k_mac_max_vht_nss(vht_mcs_mask)));
5652
5653 if (changed & IEEE80211_RC_BW_CHANGED) {
5654 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
5655 sta->addr, bw);
5656
5657 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5658 WMI_PEER_CHAN_WIDTH, bw);
5659 if (err)
5660 ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n",
5661 sta->addr, bw, err);
5662 }
5663
5664 if (changed & IEEE80211_RC_NSS_CHANGED) {
5665 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
5666 sta->addr, nss);
5667
5668 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5669 WMI_PEER_NSS, nss);
5670 if (err)
5671 ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n",
5672 sta->addr, nss, err);
5673 }
5674
5675 if (changed & IEEE80211_RC_SMPS_CHANGED) {
5676 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
5677 sta->addr, smps);
5678
5679 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5680 WMI_PEER_SMPS_STATE, smps);
5681 if (err)
5682 ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n",
5683 sta->addr, smps, err);
5684 }
5685
5686 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
5687 changed & IEEE80211_RC_NSS_CHANGED) {
5688 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
5689 sta->addr);
5690
5691 err = ath10k_station_assoc(ar, arvif->vif, sta, true);
5692 if (err)
5693 ath10k_warn(ar, "failed to reassociate station: %pM\n",
5694 sta->addr);
5695 }
5696
5697 mutex_unlock(&ar->conf_mutex);
5698 }
5699
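/* On a STA vdev a non-TDLS peer (i.e. the AP we associate with) is not
 * counted against the station limit; only TDLS peers, and peers on
 * AP/IBSS vdevs, consume entries.
 */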
5700 static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif,
5701 struct ieee80211_sta *sta)
5702 {
5703 struct ath10k *ar = arvif->ar;
5704
5705 lockdep_assert_held(&ar->conf_mutex);
5706
5707 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
5708 return 0;
5709
5710 if (ar->num_stations >= ar->max_num_stations)
5711 return -ENOBUFS;
5712
5713 ar->num_stations++;
5714
5715 return 0;
5716 }
5717
5718 static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif,
5719 struct ieee80211_sta *sta)
5720 {
5721 struct ath10k *ar = arvif->ar;
5722
5723 lockdep_assert_held(&ar->conf_mutex);
5724
5725 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
5726 return;
5727
5728 ar->num_stations--;
5729 }
5730
5731 struct ath10k_mac_tdls_iter_data {
5732 u32 num_tdls_stations;
5733 struct ieee80211_vif *curr_vif;
5734 };
5735
5736 static void ath10k_mac_tdls_vif_stations_count_iter(void *data,
5737 struct ieee80211_sta *sta)
5738 {
5739 struct ath10k_mac_tdls_iter_data *iter_data = data;
5740 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
5741 struct ieee80211_vif *sta_vif = arsta->arvif->vif;
5742
5743 if (sta->tdls && sta_vif == iter_data->curr_vif)
5744 iter_data->num_tdls_stations++;
5745 }
5746
5747 static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
5748 struct ieee80211_vif *vif)
5749 {
5750 struct ath10k_mac_tdls_iter_data data = {};
5751
5752 data.curr_vif = vif;
5753
5754 ieee80211_iterate_stations_atomic(hw,
5755 ath10k_mac_tdls_vif_stations_count_iter,
5756 &data);
5757 return data.num_tdls_stations;
5758 }
5759
5760 static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac,
5761 struct ieee80211_vif *vif)
5762 {
5763 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5764 int *num_tdls_vifs = data;
5765
5766 if (vif->type != NL80211_IFTYPE_STATION)
5767 return;
5768
5769 if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0)
5770 (*num_tdls_vifs)++;
5771 }
5772
5773 static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw)
5774 {
5775 int num_tdls_vifs = 0;
5776
5777 ieee80211_iterate_active_interfaces_atomic(hw,
5778 IEEE80211_IFACE_ITER_NORMAL,
5779 ath10k_mac_tdls_vifs_count_iter,
5780 &num_tdls_vifs);
5781 return num_tdls_vifs;
5782 }
5783
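/* mac80211 .sta_state() callback. Handles peer creation on
 * NOTEXIST -> NONE, peer deletion on NONE -> NOTEXIST, association on
 * AUTH -> ASSOC (AP/mesh/IBSS), disassociation on ASSOC -> AUTH, and the
 * TDLS-specific ASSOC -> AUTHORIZED transition.
 */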
5784 static int ath10k_sta_state(struct ieee80211_hw *hw,
5785 struct ieee80211_vif *vif,
5786 struct ieee80211_sta *sta,
5787 enum ieee80211_sta_state old_state,
5788 enum ieee80211_sta_state new_state)
5789 {
5790 struct ath10k *ar = hw->priv;
5791 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5792 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
5793 struct ath10k_peer *peer;
5794 int ret = 0;
5795 int i;
5796
5797 if (old_state == IEEE80211_STA_NOTEXIST &&
5798 new_state == IEEE80211_STA_NONE) {
5799 memset(arsta, 0, sizeof(*arsta));
5800 arsta->arvif = arvif;
5801 INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
5802
5803 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
5804 ath10k_mac_txq_init(sta->txq[i]);
5805 }
5806
5807 /* cancel must be done outside the mutex to avoid deadlock */
5808 if ((old_state == IEEE80211_STA_NONE &&
5809 new_state == IEEE80211_STA_NOTEXIST))
5810 cancel_work_sync(&arsta->update_wk);
5811
5812 mutex_lock(&ar->conf_mutex);
5813
5814 if (old_state == IEEE80211_STA_NOTEXIST &&
5815 new_state == IEEE80211_STA_NONE) {
5816 /*
5817 * New station addition.
5818 */
5819 enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
5820 u32 num_tdls_stations;
5821 u32 num_tdls_vifs;
5822
5823 ath10k_dbg(ar, ATH10K_DBG_MAC,
5824 "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
5825 arvif->vdev_id, sta->addr,
5826 ar->num_stations + 1, ar->max_num_stations,
5827 ar->num_peers + 1, ar->max_num_peers);
5828
5829 ret = ath10k_mac_inc_num_stations(arvif, sta);
5830 if (ret) {
5831 ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
5832 ar->max_num_stations);
5833 goto exit;
5834 }
5835
5836 if (sta->tdls)
5837 peer_type = WMI_PEER_TYPE_TDLS;
5838
5839 ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
5840 sta->addr, peer_type);
5841 if (ret) {
5842 ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
5843 sta->addr, arvif->vdev_id, ret);
5844 ath10k_mac_dec_num_stations(arvif, sta);
5845 goto exit;
5846 }
5847
5848 spin_lock_bh(&ar->data_lock);
5849
5850 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
5851 if (!peer) {
5852 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
5853 vif->addr, arvif->vdev_id);
5854 spin_unlock_bh(&ar->data_lock);
5855 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5856 ath10k_mac_dec_num_stations(arvif, sta);
5857 ret = -ENOENT;
5858 goto exit;
5859 }
5860
5861 arsta->peer_id = find_first_bit(peer->peer_ids,
5862 ATH10K_MAX_NUM_PEER_IDS);
5863
5864 spin_unlock_bh(&ar->data_lock);
5865
5866 if (!sta->tdls)
5867 goto exit;
5868
5869 num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
5870 num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
5871
5872 if (num_tdls_vifs >= ar->max_num_tdls_vdevs &&
5873 num_tdls_stations == 0) {
5874 ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
5875 arvif->vdev_id, ar->max_num_tdls_vdevs);
5876 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5877 ath10k_mac_dec_num_stations(arvif, sta);
5878 ret = -ENOBUFS;
5879 goto exit;
5880 }
5881
5882 if (num_tdls_stations == 0) {
5883 /* This is the first tdls peer in current vif */
5884 enum wmi_tdls_state state = WMI_TDLS_ENABLE_ACTIVE;
5885
5886 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
5887 state);
5888 if (ret) {
5889 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
5890 arvif->vdev_id, ret);
5891 ath10k_peer_delete(ar, arvif->vdev_id,
5892 sta->addr);
5893 ath10k_mac_dec_num_stations(arvif, sta);
5894 goto exit;
5895 }
5896 }
5897
5898 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
5899 WMI_TDLS_PEER_STATE_PEERING);
5900 if (ret) {
5901 ath10k_warn(ar,
5902 "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n",
5903 sta->addr, arvif->vdev_id, ret);
5904 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5905 ath10k_mac_dec_num_stations(arvif, sta);
5906
5907 if (num_tdls_stations != 0)
5908 goto exit;
5909 ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
5910 WMI_TDLS_DISABLE);
5911 }
5912 } else if ((old_state == IEEE80211_STA_NONE &&
5913 new_state == IEEE80211_STA_NOTEXIST)) {
5914 /*
5915 * Existing station deletion.
5916 */
5917 ath10k_dbg(ar, ATH10K_DBG_MAC,
5918 "mac vdev %d peer delete %pM (sta gone)\n",
5919 arvif->vdev_id, sta->addr);
5920
5921 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5922 if (ret)
5923 ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
5924 sta->addr, arvif->vdev_id, ret);
5925
5926 ath10k_mac_dec_num_stations(arvif, sta);
5927
5928 spin_lock_bh(&ar->data_lock);
5929 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
5930 peer = ar->peer_map[i];
5931 if (!peer)
5932 continue;
5933
5934 if (peer->sta == sta) {
5935 ath10k_warn(ar, "found sta peer %pM entry on vdev %i after it was supposedly removed\n",
5936 sta->addr, arvif->vdev_id);
5937 peer->sta = NULL;
5938 }
5939 }
5940 spin_unlock_bh(&ar->data_lock);
5941
5942 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
5943 ath10k_mac_txq_unref(ar, sta->txq[i]);
5944
5945 if (!sta->tdls)
5946 goto exit;
5947
5948 if (ath10k_mac_tdls_vif_stations_count(hw, vif))
5949 goto exit;
5950
5951 /* This was the last tdls peer in current vif */
5952 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
5953 WMI_TDLS_DISABLE);
5954 if (ret) {
5955 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
5956 arvif->vdev_id, ret);
5957 }
5958 } else if (old_state == IEEE80211_STA_AUTH &&
5959 new_state == IEEE80211_STA_ASSOC &&
5960 (vif->type == NL80211_IFTYPE_AP ||
5961 vif->type == NL80211_IFTYPE_MESH_POINT ||
5962 vif->type == NL80211_IFTYPE_ADHOC)) {
5963 /*
5964 * New association.
5965 */
5966 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n",
5967 sta->addr);
5968
5969 ret = ath10k_station_assoc(ar, vif, sta, false);
5970 if (ret)
5971 ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
5972 sta->addr, arvif->vdev_id, ret);
5973 } else if (old_state == IEEE80211_STA_ASSOC &&
5974 new_state == IEEE80211_STA_AUTHORIZED &&
5975 sta->tdls) {
5976 /*
5977 * Tdls station authorized.
5978 */
5979 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n",
5980 sta->addr);
5981
5982 ret = ath10k_station_assoc(ar, vif, sta, false);
5983 if (ret) {
5984 ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n",
5985 sta->addr, arvif->vdev_id, ret);
5986 goto exit;
5987 }
5988
5989 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
5990 WMI_TDLS_PEER_STATE_CONNECTED);
5991 if (ret)
5992 ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n",
5993 sta->addr, arvif->vdev_id, ret);
5994 } else if (old_state == IEEE80211_STA_ASSOC &&
5995 new_state == IEEE80211_STA_AUTH &&
5996 (vif->type == NL80211_IFTYPE_AP ||
5997 vif->type == NL80211_IFTYPE_MESH_POINT ||
5998 vif->type == NL80211_IFTYPE_ADHOC)) {
5999 /*
6000 * Disassociation.
6001 */
6002 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
6003 sta->addr);
6004
6005 ret = ath10k_station_disassoc(ar, vif, sta);
6006 if (ret)
6007 ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n",
6008 sta->addr, arvif->vdev_id, ret);
6009 }
6010 exit:
6011 mutex_unlock(&ar->conf_mutex);
6012 return ret;
6013 }
6014
6015 static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
6016 u16 ac, bool enable)
6017 {
6018 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6019 struct wmi_sta_uapsd_auto_trig_arg arg = {};
6020 u32 prio = 0, acc = 0;
6021 u32 value = 0;
6022 int ret = 0;
6023
6024 lockdep_assert_held(&ar->conf_mutex);
6025
6026 if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
6027 return 0;
6028
6029 switch (ac) {
6030 case IEEE80211_AC_VO:
6031 value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
6032 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
6033 prio = 7;
6034 acc = 3;
6035 break;
6036 case IEEE80211_AC_VI:
6037 value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
6038 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
6039 prio = 5;
6040 acc = 2;
6041 break;
6042 case IEEE80211_AC_BE:
6043 value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
6044 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
6045 prio = 2;
6046 acc = 1;
6047 break;
6048 case IEEE80211_AC_BK:
6049 value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
6050 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
6051 prio = 0;
6052 acc = 0;
6053 break;
6054 }
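/* At this point @value holds the delivery + trigger enable bits for the
 * selected AC, @acc the firmware WMM AC index (0 = BK ... 3 = VO) and
 * @prio the matching 802.1D user priority, e.g. IEEE80211_AC_VO maps to
 * acc 3 / prio 7.
 */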
6055
6056 if (enable)
6057 arvif->u.sta.uapsd |= value;
6058 else
6059 arvif->u.sta.uapsd &= ~value;
6060
6061 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6062 WMI_STA_PS_PARAM_UAPSD,
6063 arvif->u.sta.uapsd);
6064 if (ret) {
6065 ath10k_warn(ar, "failed to set uapsd params: %d\n", ret);
6066 goto exit;
6067 }
6068
6069 if (arvif->u.sta.uapsd)
6070 value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
6071 else
6072 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
6073
6074 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6075 WMI_STA_PS_PARAM_RX_WAKE_POLICY,
6076 value);
6077 if (ret)
6078 ath10k_warn(ar, "failed to set rx wake param: %d\n", ret);
6079
6080 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
6081 if (ret) {
6082 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
6083 arvif->vdev_id, ret);
6084 return ret;
6085 }
6086
6087 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
6088 if (ret) {
6089 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
6090 arvif->vdev_id, ret);
6091 return ret;
6092 }
6093
6094 if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) ||
6095 test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) {
6096 /* Only userspace can make an educated decision on when to send a
6097 * trigger frame. The following effectively disables U-APSD
6098 * autotrigger in firmware (which is enabled by default
6099 * provided the autotrigger service is available).
6100 */
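/* Presumably the zero service interval combined with the maximum
 * suspend and delay intervals below is what keeps the firmware from
 * generating trigger frames on its own for this AC.
 */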
6101
6102 arg.wmm_ac = acc;
6103 arg.user_priority = prio;
6104 arg.service_interval = 0;
6105 arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
6106 arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
6107
6108 ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id,
6109 arvif->bssid, &arg, 1);
6110 if (ret) {
6111 ath10k_warn(ar, "failed to set uapsd auto trigger %d\n",
6112 ret);
6113 return ret;
6114 }
6115 }
6116
6117 exit:
6118 return ret;
6119 }
6120
6121 static int ath10k_conf_tx(struct ieee80211_hw *hw,
6122 struct ieee80211_vif *vif, u16 ac,
6123 const struct ieee80211_tx_queue_params *params)
6124 {
6125 struct ath10k *ar = hw->priv;
6126 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6127 struct wmi_wmm_params_arg *p = NULL;
6128 int ret;
6129
6130 mutex_lock(&ar->conf_mutex);
6131
6132 switch (ac) {
6133 case IEEE80211_AC_VO:
6134 p = &arvif->wmm_params.ac_vo;
6135 break;
6136 case IEEE80211_AC_VI:
6137 p = &arvif->wmm_params.ac_vi;
6138 break;
6139 case IEEE80211_AC_BE:
6140 p = &arvif->wmm_params.ac_be;
6141 break;
6142 case IEEE80211_AC_BK:
6143 p = &arvif->wmm_params.ac_bk;
6144 break;
6145 }
6146
6147 if (WARN_ON(!p)) {
6148 ret = -EINVAL;
6149 goto exit;
6150 }
6151
6152 p->cwmin = params->cw_min;
6153 p->cwmax = params->cw_max;
6154 p->aifs = params->aifs;
6155
6156 /*
6157 * The channel time duration programmed in the HW is in absolute
6158 * microseconds, while mac80211 gives the txop in units of
6159 * 32 microseconds.
6160 */
6161 p->txop = params->txop * 32;
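/* e.g. a txop of 94 units from mac80211 is programmed into the
 * hardware as 94 * 32 = 3008 microseconds.
 */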
6162
6163 if (ar->wmi.ops->gen_vdev_wmm_conf) {
6164 ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id,
6165 &arvif->wmm_params);
6166 if (ret) {
6167 ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n",
6168 arvif->vdev_id, ret);
6169 goto exit;
6170 }
6171 } else {
6172 /* This won't work well with multi-interface cases but it's
6173 * better than nothing.
6174 */
6175 ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params);
6176 if (ret) {
6177 ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
6178 goto exit;
6179 }
6180 }
6181
6182 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
6183 if (ret)
6184 ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret);
6185
6186 exit:
6187 mutex_unlock(&ar->conf_mutex);
6188 return ret;
6189 }
6190
6191 #define ATH10K_ROC_TIMEOUT_HZ (2 * HZ)
6192
6193 static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
6194 struct ieee80211_vif *vif,
6195 struct ieee80211_channel *chan,
6196 int duration,
6197 enum ieee80211_roc_type type)
6198 {
6199 struct ath10k *ar = hw->priv;
6200 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6201 struct wmi_start_scan_arg arg;
6202 int ret = 0;
6203 u32 scan_time_msec;
6204
6205 mutex_lock(&ar->conf_mutex);
6206
6207 spin_lock_bh(&ar->data_lock);
6208 switch (ar->scan.state) {
6209 case ATH10K_SCAN_IDLE:
6210 reinit_completion(&ar->scan.started);
6211 reinit_completion(&ar->scan.completed);
6212 reinit_completion(&ar->scan.on_channel);
6213 ar->scan.state = ATH10K_SCAN_STARTING;
6214 ar->scan.is_roc = true;
6215 ar->scan.vdev_id = arvif->vdev_id;
6216 ar->scan.roc_freq = chan->center_freq;
6217 ar->scan.roc_notify = true;
6218 ret = 0;
6219 break;
6220 case ATH10K_SCAN_STARTING:
6221 case ATH10K_SCAN_RUNNING:
6222 case ATH10K_SCAN_ABORTING:
6223 ret = -EBUSY;
6224 break;
6225 }
6226 spin_unlock_bh(&ar->data_lock);
6227
6228 if (ret)
6229 goto exit;
6230
6231 scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
6232
6233 memset(&arg, 0, sizeof(arg));
6234 ath10k_wmi_start_scan_init(ar, &arg);
6235 arg.vdev_id = arvif->vdev_id;
6236 arg.scan_id = ATH10K_SCAN_ID;
6237 arg.n_channels = 1;
6238 arg.channels[0] = chan->center_freq;
6239 arg.dwell_time_active = scan_time_msec;
6240 arg.dwell_time_passive = scan_time_msec;
6241 arg.max_scan_time = scan_time_msec;
6242 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
6243 arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
6244 arg.burst_duration_ms = duration;
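/* The off-channel period is implemented as a passive single-channel
 * scan. Dwell and max scan times are set to twice the maximum allowed
 * ROC duration so the firmware does not end the scan on its own; the
 * requested duration is enforced by the scan timeout work queued below.
 */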
6245
6246 ret = ath10k_start_scan(ar, &arg);
6247 if (ret) {
6248 ath10k_warn(ar, "failed to start roc scan: %d\n", ret);
6249 spin_lock_bh(&ar->data_lock);
6250 ar->scan.state = ATH10K_SCAN_IDLE;
6251 spin_unlock_bh(&ar->data_lock);
6252 goto exit;
6253 }
6254
6255 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
6256 if (ret == 0) {
6257 ath10k_warn(ar, "failed to switch to channel for roc scan\n");
6258
6259 ret = ath10k_scan_stop(ar);
6260 if (ret)
6261 ath10k_warn(ar, "failed to stop scan: %d\n", ret);
6262
6263 ret = -ETIMEDOUT;
6264 goto exit;
6265 }
6266
6267 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
6268 msecs_to_jiffies(duration));
6269
6270 ret = 0;
6271 exit:
6272 mutex_unlock(&ar->conf_mutex);
6273 return ret;
6274 }
6275
6276 static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
6277 {
6278 struct ath10k *ar = hw->priv;
6279
6280 mutex_lock(&ar->conf_mutex);
6281
6282 spin_lock_bh(&ar->data_lock);
6283 ar->scan.roc_notify = false;
6284 spin_unlock_bh(&ar->data_lock);
6285
6286 ath10k_scan_abort(ar);
6287
6288 mutex_unlock(&ar->conf_mutex);
6289
6290 cancel_delayed_work_sync(&ar->scan.timeout);
6291
6292 return 0;
6293 }
6294
6295 /*
6296 * Both RTS and Fragmentation threshold are interface-specific
6297 * in ath10k, but device-specific in mac80211.
6298 */
6299
6300 static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
6301 {
6302 struct ath10k *ar = hw->priv;
6303 struct ath10k_vif *arvif;
6304 int ret = 0;
6305
6306 mutex_lock(&ar->conf_mutex);
6307 list_for_each_entry(arvif, &ar->arvifs, list) {
6308 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
6309 arvif->vdev_id, value);
6310
6311 ret = ath10k_mac_set_rts(arvif, value);
6312 if (ret) {
6313 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
6314 arvif->vdev_id, ret);
6315 break;
6316 }
6317 }
6318 mutex_unlock(&ar->conf_mutex);
6319
6320 return ret;
6321 }
6322
6323 static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
6324 {
6325 /* Even though there's a WMI enum for fragmentation threshold, no known
6326 * firmware actually implements it. Moreover it is not possible to leave
6327 * frame fragmentation to mac80211 because firmware clears the "more
6328 * fragments" bit in frame control making it impossible for remote
6329 * devices to reassemble frames.
6330 *
6331 * Hence implement a dummy callback just to say fragmentation isn't
6332 * supported. This effectively prevents mac80211 from doing frame
6333 * fragmentation in software.
6334 */
6335 return -EOPNOTSUPP;
6336 }
6337
6338 static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
6339 u32 queues, bool drop)
6340 {
6341 struct ath10k *ar = hw->priv;
6342 bool skip;
6343 long time_left;
6344
6345 /* mac80211 doesn't care if we really xmit queued frames or not;
6346 * we'll collect those frames either way if we stop/delete vdevs. */
6347 if (drop)
6348 return;
6349
6350 mutex_lock(&ar->conf_mutex);
6351
6352 if (ar->state == ATH10K_STATE_WEDGED)
6353 goto skip;
6354
6355 time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
6356 bool empty;
6357
6358 spin_lock_bh(&ar->htt.tx_lock);
6359 empty = (ar->htt.num_pending_tx == 0);
6360 spin_unlock_bh(&ar->htt.tx_lock);
6361
6362 skip = (ar->state == ATH10K_STATE_WEDGED) ||
6363 test_bit(ATH10K_FLAG_CRASH_FLUSH,
6364 &ar->dev_flags);
6365
6366 (empty || skip);
6367 }), ATH10K_FLUSH_TIMEOUT_HZ);
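/* The wait above completes once the HTT tx queue has drained (no
 * pending tx descriptors) or the device is wedged / crash-flushed,
 * whichever happens first, bounded by ATH10K_FLUSH_TIMEOUT_HZ.
 */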
6368
6369 if (time_left == 0 || skip)
6370 ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
6371 skip, ar->state, time_left);
6372
6373 skip:
6374 mutex_unlock(&ar->conf_mutex);
6375 }
6376
6377 /* TODO: Implement this function properly
6378 * For now it is needed to reply to Probe Requests in IBSS mode.
6379 * Probably we need this information from FW.
6380 */
6381 static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
6382 {
6383 return 1;
6384 }
6385
6386 static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
6387 enum ieee80211_reconfig_type reconfig_type)
6388 {
6389 struct ath10k *ar = hw->priv;
6390
6391 if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
6392 return;
6393
6394 mutex_lock(&ar->conf_mutex);
6395
6396 /* If device failed to restart it will be in a different state, e.g.
6397 * ATH10K_STATE_WEDGED */
6398 if (ar->state == ATH10K_STATE_RESTARTED) {
6399 ath10k_info(ar, "device successfully recovered\n");
6400 ar->state = ATH10K_STATE_ON;
6401 ieee80211_wake_queues(ar->hw);
6402 }
6403
6404 mutex_unlock(&ar->conf_mutex);
6405 }
6406
6407 static void
6408 ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
6409 struct ieee80211_channel *channel)
6410 {
6411 int ret;
6412 enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR;
6413
6414 lockdep_assert_held(&ar->conf_mutex);
6415
6416 if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) ||
6417 (ar->rx_channel != channel))
6418 return;
6419
6420 if (ar->scan.state != ATH10K_SCAN_IDLE) {
6421 ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning..\n");
6422 return;
6423 }
6424
6425 reinit_completion(&ar->bss_survey_done);
6426
6427 ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type);
6428 if (ret) {
6429 ath10k_warn(ar, "failed to send pdev bss chan info request\n");
6430 return;
6431 }
6432
6433 ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
6434 if (!ret) {
6435 ath10k_warn(ar, "bss channel survey timed out\n");
6436 return;
6437 }
6438 }
6439
6440 static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
6441 struct survey_info *survey)
6442 {
6443 struct ath10k *ar = hw->priv;
6444 struct ieee80211_supported_band *sband;
6445 struct survey_info *ar_survey = &ar->survey[idx];
6446 int ret = 0;
6447
6448 mutex_lock(&ar->conf_mutex);
6449
6450 sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
6451 if (sband && idx >= sband->n_channels) {
6452 idx -= sband->n_channels;
6453 sband = NULL;
6454 }
6455
6456 if (!sband)
6457 sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
6458
6459 if (!sband || idx >= sband->n_channels) {
6460 ret = -ENOENT;
6461 goto exit;
6462 }
6463
6464 ath10k_mac_update_bss_chan_survey(ar, survey->channel);
6465
6466 spin_lock_bh(&ar->data_lock);
6467 memcpy(survey, ar_survey, sizeof(*survey));
6468 spin_unlock_bh(&ar->data_lock);
6469
6470 survey->channel = &sband->channels[idx];
6471
6472 if (ar->rx_channel == survey->channel)
6473 survey->filled |= SURVEY_INFO_IN_USE;
6474
6475 exit:
6476 mutex_unlock(&ar->conf_mutex);
6477 return ret;
6478 }
6479
6480 static bool
6481 ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
6482 enum nl80211_band band,
6483 const struct cfg80211_bitrate_mask *mask)
6484 {
6485 int num_rates = 0;
6486 int i;
6487
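/* Count every set bit across the legacy, HT and VHT parts of the mask;
 * exactly one set bit overall means a single fixed rate was requested.
 */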
6488 num_rates += hweight32(mask->control[band].legacy);
6489
6490 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
6491 num_rates += hweight8(mask->control[band].ht_mcs[i]);
6492
6493 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
6494 num_rates += hweight16(mask->control[band].vht_mcs[i]);
6495
6496 return num_rates == 1;
6497 }
6498
6499 static bool
6500 ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
6501 enum nl80211_band band,
6502 const struct cfg80211_bitrate_mask *mask,
6503 int *nss)
6504 {
6505 struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
6506 u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
6507 u8 ht_nss_mask = 0;
6508 u8 vht_nss_mask = 0;
6509 int i;
6510
6511 if (mask->control[band].legacy)
6512 return false;
6513
6514 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
6515 if (mask->control[band].ht_mcs[i] == 0)
6516 continue;
6517 else if (mask->control[band].ht_mcs[i] ==
6518 sband->ht_cap.mcs.rx_mask[i])
6519 ht_nss_mask |= BIT(i);
6520 else
6521 return false;
6522 }
6523
6524 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6525 if (mask->control[band].vht_mcs[i] == 0)
6526 continue;
6527 else if (mask->control[band].vht_mcs[i] ==
6528 ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
6529 vht_nss_mask |= BIT(i);
6530 else
6531 return false;
6532 }
6533
6534 if (ht_nss_mask != vht_nss_mask)
6535 return false;
6536
6537 if (ht_nss_mask == 0)
6538 return false;
6539
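/* The nss mask must be contiguous starting from bit 0 (0b1, 0b11,
 * 0b111, ...); fls() of such a mask then yields the single nss value
 * selected by the user.
 */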
6540 if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
6541 return false;
6542
6543 *nss = fls(ht_nss_mask);
6544
6545 return true;
6546 }
6547
6548 static int
6549 ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
6550 enum nl80211_band band,
6551 const struct cfg80211_bitrate_mask *mask,
6552 u8 *rate, u8 *nss)
6553 {
6554 struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
6555 int rate_idx;
6556 int i;
6557 u16 bitrate;
6558 u8 preamble;
6559 u8 hw_rate;
6560
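/* The WMI fixed rate code built below packs the preamble type into
 * bits [7:6], (nss - 1) into bits [5:4] and the hw rate / MCS index
 * into the low bits.
 */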
6561 if (hweight32(mask->control[band].legacy) == 1) {
6562 rate_idx = ffs(mask->control[band].legacy) - 1;
6563
6564 hw_rate = sband->bitrates[rate_idx].hw_value;
6565 bitrate = sband->bitrates[rate_idx].bitrate;
6566
6567 if (ath10k_mac_bitrate_is_cck(bitrate))
6568 preamble = WMI_RATE_PREAMBLE_CCK;
6569 else
6570 preamble = WMI_RATE_PREAMBLE_OFDM;
6571
6572 *nss = 1;
6573 *rate = preamble << 6 |
6574 (*nss - 1) << 4 |
6575 hw_rate << 0;
6576
6577 return 0;
6578 }
6579
6580 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
6581 if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
6582 *nss = i + 1;
6583 *rate = WMI_RATE_PREAMBLE_HT << 6 |
6584 (*nss - 1) << 4 |
6585 (ffs(mask->control[band].ht_mcs[i]) - 1);
6586
6587 return 0;
6588 }
6589 }
6590
6591 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6592 if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
6593 *nss = i + 1;
6594 *rate = WMI_RATE_PREAMBLE_VHT << 6 |
6595 (*nss - 1) << 4 |
6596 (ffs(mask->control[band].vht_mcs[i]) - 1);
6597
6598 return 0;
6599 }
6600 }
6601
6602 return -EINVAL;
6603 }
6604
6605 static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
6606 u8 rate, u8 nss, u8 sgi, u8 ldpc)
6607 {
6608 struct ath10k *ar = arvif->ar;
6609 u32 vdev_param;
6610 int ret;
6611
6612 lockdep_assert_held(&ar->conf_mutex);
6613
6614 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
6615 arvif->vdev_id, rate, nss, sgi);
6616
6617 vdev_param = ar->wmi.vdev_param->fixed_rate;
6618 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate);
6619 if (ret) {
6620 ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
6621 rate, ret);
6622 return ret;
6623 }
6624
6625 vdev_param = ar->wmi.vdev_param->nss;
6626 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss);
6627 if (ret) {
6628 ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret);
6629 return ret;
6630 }
6631
6632 vdev_param = ar->wmi.vdev_param->sgi;
6633 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi);
6634 if (ret) {
6635 ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret);
6636 return ret;
6637 }
6638
6639 vdev_param = ar->wmi.vdev_param->ldpc;
6640 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc);
6641 if (ret) {
6642 ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret);
6643 return ret;
6644 }
6645
6646 return 0;
6647 }
6648
6649 static bool
6650 ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
6651 enum nl80211_band band,
6652 const struct cfg80211_bitrate_mask *mask)
6653 {
6654 int i;
6655 u16 vht_mcs;
6656
6657 /* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible
6658 * to express all VHT MCS rate masks. Effectively only the following
6659 * ranges can be used: none, 0-7, 0-8 and 0-9.
6660 */
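/* i.e. for each nss the only accepted VHT masks are 0x0 (none),
 * 0xff (MCS 0-7), 0x1ff (MCS 0-8) and 0x3ff (MCS 0-9).
 */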
6661 for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
6662 vht_mcs = mask->control[band].vht_mcs[i];
6663
6664 switch (vht_mcs) {
6665 case 0:
6666 case BIT(8) - 1:
6667 case BIT(9) - 1:
6668 case BIT(10) - 1:
6669 break;
6670 default:
6671 ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n");
6672 return false;
6673 }
6674 }
6675
6676 return true;
6677 }
6678
6679 static void ath10k_mac_set_bitrate_mask_iter(void *data,
6680 struct ieee80211_sta *sta)
6681 {
6682 struct ath10k_vif *arvif = data;
6683 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
6684 struct ath10k *ar = arvif->ar;
6685
6686 if (arsta->arvif != arvif)
6687 return;
6688
6689 spin_lock_bh(&ar->data_lock);
6690 arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
6691 spin_unlock_bh(&ar->data_lock);
6692
6693 ieee80211_queue_work(ar->hw, &arsta->update_wk);
6694 }
6695
6696 static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
6697 struct ieee80211_vif *vif,
6698 const struct cfg80211_bitrate_mask *mask)
6699 {
6700 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6701 struct cfg80211_chan_def def;
6702 struct ath10k *ar = arvif->ar;
6703 enum nl80211_band band;
6704 const u8 *ht_mcs_mask;
6705 const u16 *vht_mcs_mask;
6706 u8 rate;
6707 u8 nss;
6708 u8 sgi;
6709 u8 ldpc;
6710 int single_nss;
6711 int ret;
6712
6713 if (ath10k_mac_vif_chan(vif, &def))
6714 return -EPERM;
6715
6716 band = def.chan->band;
6717 ht_mcs_mask = mask->control[band].ht_mcs;
6718 vht_mcs_mask = mask->control[band].vht_mcs;
6719 ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
6720
6721 sgi = mask->control[band].gi;
6722 if (sgi == NL80211_TXRATE_FORCE_LGI)
6723 return -EINVAL;
6724
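/* Three cases are handled below: a mask that selects a single fixed
 * rate, a mask that selects a single nss (rate left unfixed), and an
 * arbitrary mask which is instead applied per-station via the
 * iterator.
 */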
6725 if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) {
6726 ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
6727 &rate, &nss);
6728 if (ret) {
6729 ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n",
6730 arvif->vdev_id, ret);
6731 return ret;
6732 }
6733 } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask,
6734 &single_nss)) {
6735 rate = WMI_FIXED_RATE_NONE;
6736 nss = single_nss;
6737 } else {
6738 rate = WMI_FIXED_RATE_NONE;
6739 nss = min(ar->num_rf_chains,
6740 max(ath10k_mac_max_ht_nss(ht_mcs_mask),
6741 ath10k_mac_max_vht_nss(vht_mcs_mask)));
6742
6743 if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask))
6744 return -EINVAL;
6745
6746 mutex_lock(&ar->conf_mutex);
6747
6748 arvif->bitrate_mask = *mask;
6749 ieee80211_iterate_stations_atomic(ar->hw,
6750 ath10k_mac_set_bitrate_mask_iter,
6751 arvif);
6752
6753 mutex_unlock(&ar->conf_mutex);
6754 }
6755
6756 mutex_lock(&ar->conf_mutex);
6757
6758 ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
6759 if (ret) {
6760 ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n",
6761 arvif->vdev_id, ret);
6762 goto exit;
6763 }
6764
6765 exit:
6766 mutex_unlock(&ar->conf_mutex);
6767
6768 return ret;
6769 }
6770
6771 static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
6772 struct ieee80211_vif *vif,
6773 struct ieee80211_sta *sta,
6774 u32 changed)
6775 {
6776 struct ath10k *ar = hw->priv;
6777 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
6778 u32 bw, smps;
6779
6780 spin_lock_bh(&ar->data_lock);
6781
6782 ath10k_dbg(ar, ATH10K_DBG_MAC,
6783 "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
6784 sta->addr, changed, sta->bandwidth, sta->rx_nss,
6785 sta->smps_mode);
6786
6787 if (changed & IEEE80211_RC_BW_CHANGED) {
6788 bw = WMI_PEER_CHWIDTH_20MHZ;
6789
6790 switch (sta->bandwidth) {
6791 case IEEE80211_STA_RX_BW_20:
6792 bw = WMI_PEER_CHWIDTH_20MHZ;
6793 break;
6794 case IEEE80211_STA_RX_BW_40:
6795 bw = WMI_PEER_CHWIDTH_40MHZ;
6796 break;
6797 case IEEE80211_STA_RX_BW_80:
6798 bw = WMI_PEER_CHWIDTH_80MHZ;
6799 break;
6800 case IEEE80211_STA_RX_BW_160:
6801 ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n",
6802 sta->bandwidth, sta->addr);
6803 bw = WMI_PEER_CHWIDTH_20MHZ;
6804 break;
6805 }
6806
6807 arsta->bw = bw;
6808 }
6809
6810 if (changed & IEEE80211_RC_NSS_CHANGED)
6811 arsta->nss = sta->rx_nss;
6812
6813 if (changed & IEEE80211_RC_SMPS_CHANGED) {
6814 smps = WMI_PEER_SMPS_PS_NONE;
6815
6816 switch (sta->smps_mode) {
6817 case IEEE80211_SMPS_AUTOMATIC:
6818 case IEEE80211_SMPS_OFF:
6819 smps = WMI_PEER_SMPS_PS_NONE;
6820 break;
6821 case IEEE80211_SMPS_STATIC:
6822 smps = WMI_PEER_SMPS_STATIC;
6823 break;
6824 case IEEE80211_SMPS_DYNAMIC:
6825 smps = WMI_PEER_SMPS_DYNAMIC;
6826 break;
6827 case IEEE80211_SMPS_NUM_MODES:
6828 ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n",
6829 sta->smps_mode, sta->addr);
6830 smps = WMI_PEER_SMPS_PS_NONE;
6831 break;
6832 }
6833
6834 arsta->smps = smps;
6835 }
6836
6837 arsta->changed |= changed;
6838
6839 spin_unlock_bh(&ar->data_lock);
6840
6841 ieee80211_queue_work(hw, &arsta->update_wk);
6842 }
6843
6844 static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
6845 {
6846 /*
6847 * FIXME: Return 0 for time being. Need to figure out whether FW
6848 * has the API to fetch 64-bit local TSF
6849 */
6850
6851 return 0;
6852 }
6853
6854 static void ath10k_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
6855 u64 tsf)
6856 {
6857 struct ath10k *ar = hw->priv;
6858 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6859 u32 tsf_offset, vdev_param = ar->wmi.vdev_param->set_tsf;
6860 int ret;
6861
6862 /* Workaround:
6863 *
6864 * Given tsf argument is entire TSF value, but firmware accepts
6865 * only TSF offset to current TSF.
6866 *
6867 * get_tsf function is used to get offset value, however since
6868 * ath10k_get_tsf is not implemented properly, it will return 0 always.
6869 * Luckily all the callers of set_tsf, as of now, also rely on the
6870 * get_tsf function to build the entire tsf value (get_tsf() + tsf_delta),
6871 * so the final tsf offset value passed to firmware is arithmetically correct.
6872 */
6873 tsf_offset = tsf - ath10k_get_tsf(hw, vif);
6874 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
6875 vdev_param, tsf_offset);
6876 if (ret && ret != -EOPNOTSUPP)
6877 ath10k_warn(ar, "failed to set tsf offset: %d\n", ret);
6878 }
6879
6880 static int ath10k_ampdu_action(struct ieee80211_hw *hw,
6881 struct ieee80211_vif *vif,
6882 struct ieee80211_ampdu_params *params)
6883 {
6884 struct ath10k *ar = hw->priv;
6885 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6886 struct ieee80211_sta *sta = params->sta;
6887 enum ieee80211_ampdu_mlme_action action = params->action;
6888 u16 tid = params->tid;
6889
6890 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
6891 arvif->vdev_id, sta->addr, tid, action);
6892
6893 switch (action) {
6894 case IEEE80211_AMPDU_RX_START:
6895 case IEEE80211_AMPDU_RX_STOP:
6896 /* HTT AddBa/DelBa events trigger mac80211 Rx BA session
6897 * creation/removal. Do we need to verify this?
6898 */
6899 return 0;
6900 case IEEE80211_AMPDU_TX_START:
6901 case IEEE80211_AMPDU_TX_STOP_CONT:
6902 case IEEE80211_AMPDU_TX_STOP_FLUSH:
6903 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
6904 case IEEE80211_AMPDU_TX_OPERATIONAL:
6905 /* Firmware offloads Tx aggregation entirely so deny mac80211
6906 * Tx aggregation requests.
6907 */
6908 return -EOPNOTSUPP;
6909 }
6910
6911 return -EINVAL;
6912 }
6913
6914 static void
6915 ath10k_mac_update_rx_channel(struct ath10k *ar,
6916 struct ieee80211_chanctx_conf *ctx,
6917 struct ieee80211_vif_chanctx_switch *vifs,
6918 int n_vifs)
6919 {
6920 struct cfg80211_chan_def *def = NULL;
6921
6922 /* Both locks are required because ar->rx_channel is modified. This
6923 * allows readers to hold either lock.
6924 */
6925 lockdep_assert_held(&ar->conf_mutex);
6926 lockdep_assert_held(&ar->data_lock);
6927
6928 WARN_ON(ctx && vifs);
6929 WARN_ON(vifs && n_vifs != 1);
6930
6931 /* FIXME: Sort of an optimization and a workaround. Peers and vifs are
6932 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
6933 * ppdu on Rx may reduce performance on low-end systems. It should be
6934 * possible to make tables/hashmaps to speed the lookup up (be wary of
6935 * cpu data cache lines though regarding sizes) but to keep the initial
6936 * implementation simple and less intrusive, fall back to the slow lookup
6937 * only for multi-channel cases. Single-channel cases will continue to
6938 * use the old channel derivation and thus performance should not be
6939 * affected much.
6940 */
6941 rcu_read_lock();
6942 if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) {
6943 ieee80211_iter_chan_contexts_atomic(ar->hw,
6944 ath10k_mac_get_any_chandef_iter,
6945 &def);
6946
6947 if (vifs)
6948 def = &vifs[0].new_ctx->def;
6949
6950 ar->rx_channel = def->chan;
6951 } else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) ||
6952 (ctx && (ar->state == ATH10K_STATE_RESTARTED))) {
6953 /* During driver restart due to firmware assert, since mac80211
6954 * already has a valid channel context for the given radio, channel
6955 * context iteration returns num_chanctx > 0. So fix up rx_channel
6956 * while the restart is in progress.
6957 */
6958 ar->rx_channel = ctx->def.chan;
6959 } else {
6960 ar->rx_channel = NULL;
6961 }
6962 rcu_read_unlock();
6963 }
6964
6965 static void
6966 ath10k_mac_update_vif_chan(struct ath10k *ar,
6967 struct ieee80211_vif_chanctx_switch *vifs,
6968 int n_vifs)
6969 {
6970 struct ath10k_vif *arvif;
6971 int ret;
6972 int i;
6973
6974 lockdep_assert_held(&ar->conf_mutex);
6975
6976 /* First stop monitor interface. Some FW versions crash if there's a
6977 * lone monitor interface.
6978 */
6979 if (ar->monitor_started)
6980 ath10k_monitor_stop(ar);
6981
6982 for (i = 0; i < n_vifs; i++) {
6983 arvif = ath10k_vif_to_arvif(vifs[i].vif);
6984
6985 ath10k_dbg(ar, ATH10K_DBG_MAC,
6986 "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
6987 arvif->vdev_id,
6988 vifs[i].old_ctx->def.chan->center_freq,
6989 vifs[i].new_ctx->def.chan->center_freq,
6990 vifs[i].old_ctx->def.width,
6991 vifs[i].new_ctx->def.width);
6992
6993 if (WARN_ON(!arvif->is_started))
6994 continue;
6995
6996 if (WARN_ON(!arvif->is_up))
6997 continue;
6998
6999 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
7000 if (ret) {
7001 ath10k_warn(ar, "failed to down vdev %d: %d\n",
7002 arvif->vdev_id, ret);
7003 continue;
7004 }
7005 }
7006
7007 /* All relevant vdevs are downed and associated channel resources
7008 * should be available for the channel switch now.
7009 */
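/* Each vdev is now restarted on its new channel definition and brought
 * back up; beacon and probe response templates are refreshed first
 * since they may carry channel-dependent information.
 */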
7010
7011 spin_lock_bh(&ar->data_lock);
7012 ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs);
7013 spin_unlock_bh(&ar->data_lock);
7014
7015 for (i = 0; i < n_vifs; i++) {
7016 arvif = ath10k_vif_to_arvif(vifs[i].vif);
7017
7018 if (WARN_ON(!arvif->is_started))
7019 continue;
7020
7021 if (WARN_ON(!arvif->is_up))
7022 continue;
7023
7024 ret = ath10k_mac_setup_bcn_tmpl(arvif);
7025 if (ret)
7026 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
7027 ret);
7028
7029 ret = ath10k_mac_setup_prb_tmpl(arvif);
7030 if (ret)
7031 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
7032 ret);
7033
7034 ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
7035 if (ret) {
7036 ath10k_warn(ar, "failed to restart vdev %d: %d\n",
7037 arvif->vdev_id, ret);
7038 continue;
7039 }
7040
7041 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
7042 arvif->bssid);
7043 if (ret) {
7044 ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
7045 arvif->vdev_id, ret);
7046 continue;
7047 }
7048 }
7049
7050 ath10k_monitor_recalc(ar);
7051 }
7052
7053 static int
7054 ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
7055 struct ieee80211_chanctx_conf *ctx)
7056 {
7057 struct ath10k *ar = hw->priv;
7058
7059 ath10k_dbg(ar, ATH10K_DBG_MAC,
7060 "mac chanctx add freq %hu width %d ptr %p\n",
7061 ctx->def.chan->center_freq, ctx->def.width, ctx);
7062
7063 mutex_lock(&ar->conf_mutex);
7064
7065 spin_lock_bh(&ar->data_lock);
7066 ath10k_mac_update_rx_channel(ar, ctx, NULL, 0);
7067 spin_unlock_bh(&ar->data_lock);
7068
7069 ath10k_recalc_radar_detection(ar);
7070 ath10k_monitor_recalc(ar);
7071
7072 mutex_unlock(&ar->conf_mutex);
7073
7074 return 0;
7075 }
7076
7077 static void
7078 ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
7079 struct ieee80211_chanctx_conf *ctx)
7080 {
7081 struct ath10k *ar = hw->priv;
7082
7083 ath10k_dbg(ar, ATH10K_DBG_MAC,
7084 "mac chanctx remove freq %hu width %d ptr %p\n",
7085 ctx->def.chan->center_freq, ctx->def.width, ctx);
7086
7087 mutex_lock(&ar->conf_mutex);
7088
7089 spin_lock_bh(&ar->data_lock);
7090 ath10k_mac_update_rx_channel(ar, NULL, NULL, 0);
7091 spin_unlock_bh(&ar->data_lock);
7092
7093 ath10k_recalc_radar_detection(ar);
7094 ath10k_monitor_recalc(ar);
7095
7096 mutex_unlock(&ar->conf_mutex);
7097 }
7098
7099 struct ath10k_mac_change_chanctx_arg {
7100 struct ieee80211_chanctx_conf *ctx;
7101 struct ieee80211_vif_chanctx_switch *vifs;
7102 int n_vifs;
7103 int next_vif;
7104 };
7105
7106 static void
7107 ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
7108 struct ieee80211_vif *vif)
7109 {
7110 struct ath10k_mac_change_chanctx_arg *arg = data;
7111
7112 if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx)
7113 return;
7114
7115 arg->n_vifs++;
7116 }
7117
7118 static void
7119 ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
7120 struct ieee80211_vif *vif)
7121 {
7122 struct ath10k_mac_change_chanctx_arg *arg = data;
7123 struct ieee80211_chanctx_conf *ctx;
7124
7125 ctx = rcu_access_pointer(vif->chanctx_conf);
7126 if (ctx != arg->ctx)
7127 return;
7128
7129 if (WARN_ON(arg->next_vif == arg->n_vifs))
7130 return;
7131
7132 arg->vifs[arg->next_vif].vif = vif;
7133 arg->vifs[arg->next_vif].old_ctx = ctx;
7134 arg->vifs[arg->next_vif].new_ctx = ctx;
7135 arg->next_vif++;
7136 }
7137
7138 static void
7139 ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
7140 struct ieee80211_chanctx_conf *ctx,
7141 u32 changed)
7142 {
7143 struct ath10k *ar = hw->priv;
7144 struct ath10k_mac_change_chanctx_arg arg = { .ctx = ctx };
7145
7146 mutex_lock(&ar->conf_mutex);
7147
7148 ath10k_dbg(ar, ATH10K_DBG_MAC,
7149 "mac chanctx change freq %hu width %d ptr %p changed %x\n",
7150 ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
7151
7152 /* This shouldn't really happen because channel switching should use
7153 * switch_vif_chanctx().
7154 */
7155 if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
7156 goto unlock;
7157
7158 if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) {
7159 ieee80211_iterate_active_interfaces_atomic(
7160 hw,
7161 IEEE80211_IFACE_ITER_NORMAL,
7162 ath10k_mac_change_chanctx_cnt_iter,
7163 &arg);
7164 if (arg.n_vifs == 0)
7165 goto radar;
7166
7167 arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]),
7168 GFP_KERNEL);
7169 if (!arg.vifs)
7170 goto radar;
7171
7172 ieee80211_iterate_active_interfaces_atomic(
7173 hw,
7174 IEEE80211_IFACE_ITER_NORMAL,
7175 ath10k_mac_change_chanctx_fill_iter,
7176 &arg);
7177 ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
7178 kfree(arg.vifs);
7179 }
7180
7181 radar:
7182 ath10k_recalc_radar_detection(ar);
7183
7184 /* FIXME: How to configure Rx chains properly? */
7185
7186 /* No other actions are actually necessary. Firmware maintains channel
7187 * definitions per vdev internally and there's no host-side channel
7188 * context abstraction to configure, e.g. channel width.
7189 */
7190
7191 unlock:
7192 mutex_unlock(&ar->conf_mutex);
7193 }
7194
7195 static int
7196 ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
7197 struct ieee80211_vif *vif,
7198 struct ieee80211_chanctx_conf *ctx)
7199 {
7200 struct ath10k *ar = hw->priv;
7201 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7202 int ret;
7203
7204 mutex_lock(&ar->conf_mutex);
7205
7206 ath10k_dbg(ar, ATH10K_DBG_MAC,
7207 "mac chanctx assign ptr %p vdev_id %i\n",
7208 ctx, arvif->vdev_id);
7209
7210 if (WARN_ON(arvif->is_started)) {
7211 mutex_unlock(&ar->conf_mutex);
7212 return -EBUSY;
7213 }
7214
7215 ret = ath10k_vdev_start(arvif, &ctx->def);
7216 if (ret) {
7217 ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n",
7218 arvif->vdev_id, vif->addr,
7219 ctx->def.chan->center_freq, ret);
7220 goto err;
7221 }
7222
7223 arvif->is_started = true;
7224
7225 ret = ath10k_mac_vif_setup_ps(arvif);
7226 if (ret) {
7227 ath10k_warn(ar, "failed to update vdev %i ps: %d\n",
7228 arvif->vdev_id, ret);
7229 goto err_stop;
7230 }
7231
7232 if (vif->type == NL80211_IFTYPE_MONITOR) {
7233 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
7234 if (ret) {
7235 ath10k_warn(ar, "failed to up monitor vdev %i: %d\n",
7236 arvif->vdev_id, ret);
7237 goto err_stop;
7238 }
7239
7240 arvif->is_up = true;
7241 }
7242
7243 mutex_unlock(&ar->conf_mutex);
7244 return 0;
7245
7246 err_stop:
7247 ath10k_vdev_stop(arvif);
7248 arvif->is_started = false;
7249 ath10k_mac_vif_setup_ps(arvif);
7250
7251 err:
7252 mutex_unlock(&ar->conf_mutex);
7253 return ret;
7254 }
7255
7256 static void
7257 ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
7258 struct ieee80211_vif *vif,
7259 struct ieee80211_chanctx_conf *ctx)
7260 {
7261 struct ath10k *ar = hw->priv;
7262 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7263 int ret;
7264
7265 mutex_lock(&ar->conf_mutex);
7266
7267 ath10k_dbg(ar, ATH10K_DBG_MAC,
7268 "mac chanctx unassign ptr %p vdev_id %i\n",
7269 ctx, arvif->vdev_id);
7270
7271 WARN_ON(!arvif->is_started);
7272
7273 if (vif->type == NL80211_IFTYPE_MONITOR) {
7274 WARN_ON(!arvif->is_up);
7275
7276 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
7277 if (ret)
7278 ath10k_warn(ar, "failed to down monitor vdev %i: %d\n",
7279 arvif->vdev_id, ret);
7280
7281 arvif->is_up = false;
7282 }
7283
7284 ret = ath10k_vdev_stop(arvif);
7285 if (ret)
7286 ath10k_warn(ar, "failed to stop vdev %i: %d\n",
7287 arvif->vdev_id, ret);
7288
7289 arvif->is_started = false;
7290
7291 mutex_unlock(&ar->conf_mutex);
7292 }
7293
7294 static int
7295 ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
7296 struct ieee80211_vif_chanctx_switch *vifs,
7297 int n_vifs,
7298 enum ieee80211_chanctx_switch_mode mode)
7299 {
7300 struct ath10k *ar = hw->priv;
7301
7302 mutex_lock(&ar->conf_mutex);
7303
7304 ath10k_dbg(ar, ATH10K_DBG_MAC,
7305 "mac chanctx switch n_vifs %d mode %d\n",
7306 n_vifs, mode);
7307 ath10k_mac_update_vif_chan(ar, vifs, n_vifs);
7308
7309 mutex_unlock(&ar->conf_mutex);
7310 return 0;
7311 }
7312
7313 static const struct ieee80211_ops ath10k_ops = {
7314 .tx = ath10k_mac_op_tx,
7315 .wake_tx_queue = ath10k_mac_op_wake_tx_queue,
7316 .start = ath10k_start,
7317 .stop = ath10k_stop,
7318 .config = ath10k_config,
7319 .add_interface = ath10k_add_interface,
7320 .remove_interface = ath10k_remove_interface,
7321 .configure_filter = ath10k_configure_filter,
7322 .bss_info_changed = ath10k_bss_info_changed,
7323 .hw_scan = ath10k_hw_scan,
7324 .cancel_hw_scan = ath10k_cancel_hw_scan,
7325 .set_key = ath10k_set_key,
7326 .set_default_unicast_key = ath10k_set_default_unicast_key,
7327 .sta_state = ath10k_sta_state,
7328 .conf_tx = ath10k_conf_tx,
7329 .remain_on_channel = ath10k_remain_on_channel,
7330 .cancel_remain_on_channel = ath10k_cancel_remain_on_channel,
7331 .set_rts_threshold = ath10k_set_rts_threshold,
7332 .set_frag_threshold = ath10k_mac_op_set_frag_threshold,
7333 .flush = ath10k_flush,
7334 .tx_last_beacon = ath10k_tx_last_beacon,
7335 .set_antenna = ath10k_set_antenna,
7336 .get_antenna = ath10k_get_antenna,
7337 .reconfig_complete = ath10k_reconfig_complete,
7338 .get_survey = ath10k_get_survey,
7339 .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask,
7340 .sta_rc_update = ath10k_sta_rc_update,
7341 .get_tsf = ath10k_get_tsf,
7342 .set_tsf = ath10k_set_tsf,
7343 .ampdu_action = ath10k_ampdu_action,
7344 .get_et_sset_count = ath10k_debug_get_et_sset_count,
7345 .get_et_stats = ath10k_debug_get_et_stats,
7346 .get_et_strings = ath10k_debug_get_et_strings,
7347 .add_chanctx = ath10k_mac_op_add_chanctx,
7348 .remove_chanctx = ath10k_mac_op_remove_chanctx,
7349 .change_chanctx = ath10k_mac_op_change_chanctx,
7350 .assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx,
7351 .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx,
7352 .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx,
7353
7354 CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
7355
7356 #ifdef CONFIG_PM
7357 .suspend = ath10k_wow_op_suspend,
7358 .resume = ath10k_wow_op_resume,
7359 #endif
7360 #ifdef CONFIG_MAC80211_DEBUGFS
7361 .sta_add_debugfs = ath10k_sta_add_debugfs,
7362 #endif
7363 };
7364
7365 #define CHAN2G(_channel, _freq, _flags) { \
7366 .band = NL80211_BAND_2GHZ, \
7367 .hw_value = (_channel), \
7368 .center_freq = (_freq), \
7369 .flags = (_flags), \
7370 .max_antenna_gain = 0, \
7371 .max_power = 30, \
7372 }
7373
7374 #define CHAN5G(_channel, _freq, _flags) { \
7375 .band = NL80211_BAND_5GHZ, \
7376 .hw_value = (_channel), \
7377 .center_freq = (_freq), \
7378 .flags = (_flags), \
7379 .max_antenna_gain = 0, \
7380 .max_power = 30, \
7381 }
7382
7383 static const struct ieee80211_channel ath10k_2ghz_channels[] = {
7384 CHAN2G(1, 2412, 0),
7385 CHAN2G(2, 2417, 0),
7386 CHAN2G(3, 2422, 0),
7387 CHAN2G(4, 2427, 0),
7388 CHAN2G(5, 2432, 0),
7389 CHAN2G(6, 2437, 0),
7390 CHAN2G(7, 2442, 0),
7391 CHAN2G(8, 2447, 0),
7392 CHAN2G(9, 2452, 0),
7393 CHAN2G(10, 2457, 0),
7394 CHAN2G(11, 2462, 0),
7395 CHAN2G(12, 2467, 0),
7396 CHAN2G(13, 2472, 0),
7397 CHAN2G(14, 2484, 0),
7398 };
7399
7400 static const struct ieee80211_channel ath10k_5ghz_channels[] = {
7401 CHAN5G(36, 5180, 0),
7402 CHAN5G(40, 5200, 0),
7403 CHAN5G(44, 5220, 0),
7404 CHAN5G(48, 5240, 0),
7405 CHAN5G(52, 5260, 0),
7406 CHAN5G(56, 5280, 0),
7407 CHAN5G(60, 5300, 0),
7408 CHAN5G(64, 5320, 0),
7409 CHAN5G(100, 5500, 0),
7410 CHAN5G(104, 5520, 0),
7411 CHAN5G(108, 5540, 0),
7412 CHAN5G(112, 5560, 0),
7413 CHAN5G(116, 5580, 0),
7414 CHAN5G(120, 5600, 0),
7415 CHAN5G(124, 5620, 0),
7416 CHAN5G(128, 5640, 0),
7417 CHAN5G(132, 5660, 0),
7418 CHAN5G(136, 5680, 0),
7419 CHAN5G(140, 5700, 0),
7420 CHAN5G(144, 5720, 0),
7421 CHAN5G(149, 5745, 0),
7422 CHAN5G(153, 5765, 0),
7423 CHAN5G(157, 5785, 0),
7424 CHAN5G(161, 5805, 0),
7425 CHAN5G(165, 5825, 0),
7426 };
7427
7428 struct ath10k *ath10k_mac_create(size_t priv_size)
7429 {
7430 struct ieee80211_hw *hw;
7431 struct ath10k *ar;
7432
7433 hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, &ath10k_ops);
7434 if (!hw)
7435 return NULL;
7436
7437 ar = hw->priv;
7438 ar->hw = hw;
7439
7440 return ar;
7441 }
7442
7443 void ath10k_mac_destroy(struct ath10k *ar)
7444 {
7445 ieee80211_free_hw(ar->hw);
7446 }
7447
7448 static const struct ieee80211_iface_limit ath10k_if_limits[] = {
7449 {
7450 .max = 8,
7451 .types = BIT(NL80211_IFTYPE_STATION)
7452 | BIT(NL80211_IFTYPE_P2P_CLIENT)
7453 },
7454 {
7455 .max = 3,
7456 .types = BIT(NL80211_IFTYPE_P2P_GO)
7457 },
7458 {
7459 .max = 1,
7460 .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
7461 },
7462 {
7463 .max = 7,
7464 .types = BIT(NL80211_IFTYPE_AP)
7465 #ifdef CONFIG_MAC80211_MESH
7466 | BIT(NL80211_IFTYPE_MESH_POINT)
7467 #endif
7468 },
7469 };
7470
7471 static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
7472 {
7473 .max = 8,
7474 .types = BIT(NL80211_IFTYPE_AP)
7475 #ifdef CONFIG_MAC80211_MESH
7476 | BIT(NL80211_IFTYPE_MESH_POINT)
7477 #endif
7478 },
7479 {
7480 .max = 1,
7481 .types = BIT(NL80211_IFTYPE_STATION)
7482 },
7483 };
7484
7485 static const struct ieee80211_iface_combination ath10k_if_comb[] = {
7486 {
7487 .limits = ath10k_if_limits,
7488 .n_limits = ARRAY_SIZE(ath10k_if_limits),
7489 .max_interfaces = 8,
7490 .num_different_channels = 1,
7491 .beacon_int_infra_match = true,
7492 },
7493 };
7494
7495 static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
7496 {
7497 .limits = ath10k_10x_if_limits,
7498 .n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
7499 .max_interfaces = 8,
7500 .num_different_channels = 1,
7501 .beacon_int_infra_match = true,
7502 #ifdef CONFIG_ATH10K_DFS_CERTIFIED
7503 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
7504 BIT(NL80211_CHAN_WIDTH_20) |
7505 BIT(NL80211_CHAN_WIDTH_40) |
7506 BIT(NL80211_CHAN_WIDTH_80),
7507 #endif
7508 },
7509 };
7510
7511 static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
7512 {
7513 .max = 2,
7514 .types = BIT(NL80211_IFTYPE_STATION),
7515 },
7516 {
7517 .max = 2,
7518 .types = BIT(NL80211_IFTYPE_AP) |
7519 #ifdef CONFIG_MAC80211_MESH
7520 BIT(NL80211_IFTYPE_MESH_POINT) |
7521 #endif
7522 BIT(NL80211_IFTYPE_P2P_CLIENT) |
7523 BIT(NL80211_IFTYPE_P2P_GO),
7524 },
7525 {
7526 .max = 1,
7527 .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
7528 },
7529 };
7530
7531 static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
7532 {
7533 .max = 2,
7534 .types = BIT(NL80211_IFTYPE_STATION),
7535 },
7536 {
7537 .max = 2,
7538 .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
7539 },
7540 {
7541 .max = 1,
7542 .types = BIT(NL80211_IFTYPE_AP) |
7543 #ifdef CONFIG_MAC80211_MESH
7544 BIT(NL80211_IFTYPE_MESH_POINT) |
7545 #endif
7546 BIT(NL80211_IFTYPE_P2P_GO),
7547 },
7548 {
7549 .max = 1,
7550 .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
7551 },
7552 };
7553
7554 static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
7555 {
7556 .max = 1,
7557 .types = BIT(NL80211_IFTYPE_STATION),
7558 },
7559 {
7560 .max = 1,
7561 .types = BIT(NL80211_IFTYPE_ADHOC),
7562 },
7563 };
7564
7565 /* FIXME: This is not thoroughly tested. These combinations may over- or
7566 * underestimate hw/fw capabilities.
7567 */
7568 static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
7569 {
7570 .limits = ath10k_tlv_if_limit,
7571 .num_different_channels = 1,
7572 .max_interfaces = 4,
7573 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
7574 },
7575 {
7576 .limits = ath10k_tlv_if_limit_ibss,
7577 .num_different_channels = 1,
7578 .max_interfaces = 2,
7579 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
7580 },
7581 };
7582
7583 static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
7584 {
7585 .limits = ath10k_tlv_if_limit,
7586 .num_different_channels = 1,
7587 .max_interfaces = 4,
7588 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
7589 },
7590 {
7591 .limits = ath10k_tlv_qcs_if_limit,
7592 .num_different_channels = 2,
7593 .max_interfaces = 4,
7594 .n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
7595 },
7596 {
7597 .limits = ath10k_tlv_if_limit_ibss,
7598 .num_different_channels = 1,
7599 .max_interfaces = 2,
7600 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
7601 },
7602 };
7603
7604 static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
7605 {
7606 .max = 1,
7607 .types = BIT(NL80211_IFTYPE_STATION),
7608 },
7609 {
7610 .max = 16,
7611 .types = BIT(NL80211_IFTYPE_AP)
7612 #ifdef CONFIG_MAC80211_MESH
7613 | BIT(NL80211_IFTYPE_MESH_POINT)
7614 #endif
7615 },
7616 };
7617
7618 static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
7619 {
7620 .limits = ath10k_10_4_if_limits,
7621 .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
7622 .max_interfaces = 16,
7623 .num_different_channels = 1,
7624 .beacon_int_infra_match = true,
7625 #ifdef CONFIG_ATH10K_DFS_CERTIFIED
7626 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
7627 BIT(NL80211_CHAN_WIDTH_20) |
7628 BIT(NL80211_CHAN_WIDTH_40) |
7629 BIT(NL80211_CHAN_WIDTH_80),
7630 #endif
7631 },
7632 };
7633
7634 static void ath10k_get_arvif_iter(void *data, u8 *mac,
7635 struct ieee80211_vif *vif)
7636 {
7637 struct ath10k_vif_iter *arvif_iter = data;
7638 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
7639
7640 if (arvif->vdev_id == arvif_iter->vdev_id)
7641 arvif_iter->arvif = arvif;
7642 }
7643
7644 struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
7645 {
7646 struct ath10k_vif_iter arvif_iter;
7647 u32 flags;
7648
7649 memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
7650 arvif_iter.vdev_id = vdev_id;
7651
7652 flags = IEEE80211_IFACE_ITER_RESUME_ALL;
7653 ieee80211_iterate_active_interfaces_atomic(ar->hw,
7654 flags,
7655 ath10k_get_arvif_iter,
7656 &arvif_iter);
7657 if (!arvif_iter.arvif) {
7658 ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id);
7659 return NULL;
7660 }
7661
7662 return arvif_iter.arvif;
7663 }
7664
7665 int ath10k_mac_register(struct ath10k *ar)
7666 {
7667 static const u32 cipher_suites[] = {
7668 WLAN_CIPHER_SUITE_WEP40,
7669 WLAN_CIPHER_SUITE_WEP104,
7670 WLAN_CIPHER_SUITE_TKIP,
7671 WLAN_CIPHER_SUITE_CCMP,
7672 WLAN_CIPHER_SUITE_AES_CMAC,
7673 };
7674 struct ieee80211_supported_band *band;
7675 void *channels;
7676 int ret;
7677
7678 SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
7679
7680 SET_IEEE80211_DEV(ar->hw, ar->dev);
7681
7682 BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
7683 ARRAY_SIZE(ath10k_5ghz_channels)) !=
7684 ATH10K_NUM_CHANS);
7685
7686 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
7687 channels = kmemdup(ath10k_2ghz_channels,
7688 sizeof(ath10k_2ghz_channels),
7689 GFP_KERNEL);
7690 if (!channels) {
7691 ret = -ENOMEM;
7692 goto err_free;
7693 }
7694
7695 band = &ar->mac.sbands[NL80211_BAND_2GHZ];
7696 band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
7697 band->channels = channels;
7698 band->n_bitrates = ath10k_g_rates_size;
7699 band->bitrates = ath10k_g_rates;
7700
7701 ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
7702 }
7703
7704 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
7705 channels = kmemdup(ath10k_5ghz_channels,
7706 sizeof(ath10k_5ghz_channels),
7707 GFP_KERNEL);
7708 if (!channels) {
7709 ret = -ENOMEM;
7710 goto err_free;
7711 }
7712
7713 band = &ar->mac.sbands[NL80211_BAND_5GHZ];
7714 band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
7715 band->channels = channels;
7716 band->n_bitrates = ath10k_a_rates_size;
7717 band->bitrates = ath10k_a_rates;
7718 ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
7719 }
7720
7721 ath10k_mac_setup_ht_vht_cap(ar);
7722
7723 ar->hw->wiphy->interface_modes =
7724 BIT(NL80211_IFTYPE_STATION) |
7725 BIT(NL80211_IFTYPE_AP) |
7726 BIT(NL80211_IFTYPE_MESH_POINT);
7727
7728 ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
7729 ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;
7730
7731 if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
7732 ar->hw->wiphy->interface_modes |=
7733 BIT(NL80211_IFTYPE_P2P_DEVICE) |
7734 BIT(NL80211_IFTYPE_P2P_CLIENT) |
7735 BIT(NL80211_IFTYPE_P2P_GO);
7736
7737 ieee80211_hw_set(ar->hw, SIGNAL_DBM);
7738 ieee80211_hw_set(ar->hw, SUPPORTS_PS);
7739 ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
7740 ieee80211_hw_set(ar->hw, MFP_CAPABLE);
7741 ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
7742 ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
7743 ieee80211_hw_set(ar->hw, AP_LINK_PS);
7744 ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
7745 ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
7746 ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
7747 ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
7748 ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
7749 ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
7750 ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
7751
7752 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
7753 ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
7754
7755 ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
7756 ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
7757
7758 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
7759 ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
7760
7761 if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
7762 ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
7763 ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
7764 }
7765
7766 ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
7767 ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
7768
7769 ar->hw->vif_data_size = sizeof(struct ath10k_vif);
7770 ar->hw->sta_data_size = sizeof(struct ath10k_sta);
7771 ar->hw->txq_data_size = sizeof(struct ath10k_txq);
7772
7773 ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
7774
7775 if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
7776 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
7777
7778 /* Firmware delivers WPS/P2P Probe Request frames to the driver so
7779 * that userspace (e.g. wpa_supplicant/hostapd) can generate
7780 * correct Probe Responses. This is more of a hack advert..
7781 */
7782 ar->hw->wiphy->probe_resp_offload |=
7783 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
7784 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
7785 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
7786 }
7787
7788 if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map))
7789 ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
7790
7791 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
7792 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
7793 ar->hw->wiphy->max_remain_on_channel_duration = 5000;
7794
7795 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
7796 ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
7797 NL80211_FEATURE_AP_SCAN;
7798
7799 ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
7800
7801 ret = ath10k_wow_init(ar);
7802 if (ret) {
7803 ath10k_warn(ar, "failed to init wow: %d\n", ret);
7804 goto err_free;
7805 }
7806
7807 wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
7808
7809 /*
7810 * On LL hardware the queues are managed entirely by the FW,
7811 * so we only advertise to mac80211 that we can do the queues thing.
7812 */
7813 ar->hw->queues = IEEE80211_MAX_QUEUES;
7814
7815 /* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
7816 * something that vdev_ids can't reach so that we don't stop the queue
7817 * accidentally.
7818 */
7819 ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
7820
7821 switch (ar->running_fw->fw_file.wmi_op_version) {
7822 case ATH10K_FW_WMI_OP_VERSION_MAIN:
7823 ar->hw->wiphy->iface_combinations = ath10k_if_comb;
7824 ar->hw->wiphy->n_iface_combinations =
7825 ARRAY_SIZE(ath10k_if_comb);
7826 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
7827 break;
7828 case ATH10K_FW_WMI_OP_VERSION_TLV:
7829 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
7830 ar->hw->wiphy->iface_combinations =
7831 ath10k_tlv_qcs_if_comb;
7832 ar->hw->wiphy->n_iface_combinations =
7833 ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
7834 } else {
7835 ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
7836 ar->hw->wiphy->n_iface_combinations =
7837 ARRAY_SIZE(ath10k_tlv_if_comb);
7838 }
7839 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
7840 break;
7841 case ATH10K_FW_WMI_OP_VERSION_10_1:
7842 case ATH10K_FW_WMI_OP_VERSION_10_2:
7843 case ATH10K_FW_WMI_OP_VERSION_10_2_4:
7844 ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
7845 ar->hw->wiphy->n_iface_combinations =
7846 ARRAY_SIZE(ath10k_10x_if_comb);
7847 break;
7848 case ATH10K_FW_WMI_OP_VERSION_10_4:
7849 ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
7850 ar->hw->wiphy->n_iface_combinations =
7851 ARRAY_SIZE(ath10k_10_4_if_comb);
7852 break;
7853 case ATH10K_FW_WMI_OP_VERSION_UNSET:
7854 case ATH10K_FW_WMI_OP_VERSION_MAX:
7855 WARN_ON(1);
7856 ret = -EINVAL;
7857 goto err_free;
7858 }
7859
7860 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
7861 ar->hw->netdev_features = NETIF_F_HW_CSUM;
7862
7863 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
7864 /* Init ath dfs pattern detector */
7865 ar->ath_common.debug_mask = ATH_DBG_DFS;
7866 ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
7867 NL80211_DFS_UNSET);
7868
7869 if (!ar->dfs_detector)
7870 ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
7871 }
7872
7873 ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
7874 ath10k_reg_notifier);
7875 if (ret) {
7876 ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
7877 goto err_dfs_detector_exit;
7878 }
7879
7880 ar->hw->wiphy->cipher_suites = cipher_suites;
7881 ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
7882
7883 ret = ieee80211_register_hw(ar->hw);
7884 if (ret) {
7885 ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
7886 goto err_dfs_detector_exit;
7887 }
7888
7889 if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
7890 ret = regulatory_hint(ar->hw->wiphy,
7891 ar->ath_common.regulatory.alpha2);
7892 if (ret)
7893 goto err_unregister;
7894 }
7895
7896 return 0;
7897
7898 err_unregister:
7899 ieee80211_unregister_hw(ar->hw);
7900
7901 err_dfs_detector_exit:
7902 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
7903 ar->dfs_detector->exit(ar->dfs_detector);
7904
7905 err_free:
7906 kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
7907 kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
7908
7909 SET_IEEE80211_DEV(ar->hw, NULL);
7910 return ret;
7911 }
7912
7913 void ath10k_mac_unregister(struct ath10k *ar)
7914 {
7915 ieee80211_unregister_hw(ar->hw);
7916
7917 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
7918 ar->dfs_detector->exit(ar->dfs_detector);
7919
7920 kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
7921 kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
7922
7923 SET_IEEE80211_DEV(ar->hw, NULL);
7924 }