iwlwifi: split the drivers for agn and legacy devices 3945/4965
[deliverable/linux.git] / drivers / net / wireless / iwlegacy / iwl-core.c
1 /******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <linux/etherdevice.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
34 #include <net/mac80211.h>
35
36 #include "iwl-eeprom.h"
37 #include "iwl-dev.h"
38 #include "iwl-debug.h"
39 #include "iwl-core.h"
40 #include "iwl-io.h"
41 #include "iwl-power.h"
42 #include "iwl-sta.h"
43 #include "iwl-helpers.h"
44
45
46 MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
47 MODULE_VERSION(IWLWIFI_VERSION);
48 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
49 MODULE_LICENSE("GPL");
50
51 /*
52 * set bt_coex_active to true, uCode will do kill/defer
53 * every time the priority line is asserted (BT is sending signals on the
54 * priority line in the PCIx).
55 * set bt_coex_active to false, uCode will ignore the BT activity and
56 * perform the normal operation
57 *
58 * User might experience transmit issue on some platform due to WiFi/BT
59 * co-exist problem. The possible behaviors are:
60 * Able to scan and finding all the available AP
61 * Not able to associate with any AP
62 * On those platforms, WiFi communication can be restored by set
63 * "bt_coex_active" module parameter to "false"
64 *
65 * default: bt_coex_active = true (BT_COEX_ENABLE)
66 */
67 bool bt_coex_active = true;
68 EXPORT_SYMBOL_GPL(bt_coex_active);
69 module_param(bt_coex_active, bool, S_IRUGO);
70 MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
71
/* Runtime debug-level bitmask, shared with the 3945/4965 hw modules. */
u32 iwl_debug_level;
EXPORT_SYMBOL(iwl_debug_level);

/* Link-layer broadcast address (all-ones), exported for the hw modules. */
const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
EXPORT_SYMBOL(iwl_bcast_addr);
77
78
79 /* This function both allocates and initializes hw and priv. */
80 struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg)
81 {
82 struct iwl_priv *priv;
83 /* mac80211 allocates memory for this device instance, including
84 * space for this driver's private structure */
85 struct ieee80211_hw *hw;
86
87 hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
88 cfg->ops->ieee80211_ops);
89 if (hw == NULL) {
90 pr_err("%s: Can not allocate network device\n",
91 cfg->name);
92 goto out;
93 }
94
95 priv = hw->priv;
96 priv->hw = hw;
97
98 out:
99 return hw;
100 }
101 EXPORT_SYMBOL(iwl_legacy_alloc_all);
102
#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
/*
 * iwl_legacy_init_ht_hw_capab - fill mac80211 HT capabilities for one band
 *
 * Populates @ht_info (capability bits, A-MPDU parameters and MCS masks)
 * from the device's hw_params chain counts and module parameters.
 */
static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv,
			      struct ieee80211_sta_ht_cap *ht_info,
			      enum ieee80211_band band)
{
	u16 max_bit_rate = 0;
	u8 rx_chains_num = priv->hw_params.rx_chains_num;
	u8 tx_chains_num = priv->hw_params.tx_chains_num;

	/* Start from a clean slate before OR-ing capability bits in. */
	ht_info->cap = 0;
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));

	ht_info->ht_supported = true;

	/* Short GI in 20 MHz is always advertised. */
	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
	max_bit_rate = MAX_BIT_RATE_20_MHZ;
	if (priv->hw_params.ht40_channel & BIT(band)) {
		/* HT40 capable on this band: 40 MHz width, short GI in
		 * 40 MHz, and MCS 32 (bit 0 of rx_mask[4]). */
		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
		ht_info->mcs.rx_mask[4] = 0x01;
		max_bit_rate = MAX_BIT_RATE_40_MHZ;
	}

	if (priv->cfg->mod_params->amsdu_size_8K)
		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;

	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;

	/* One MCS byte (MCS 0-7) per available Rx chain. */
	ht_info->mcs.rx_mask[0] = 0xFF;
	if (rx_chains_num >= 2)
		ht_info->mcs.rx_mask[1] = 0xFF;
	if (rx_chains_num >= 3)
		ht_info->mcs.rx_mask[2] = 0xFF;

	/* Highest supported Rx data rate */
	max_bit_rate *= rx_chains_num;
	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);

	/* Tx MCS capabilities */
	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	if (tx_chains_num != rx_chains_num) {
		/* Tx differs from Rx: report Tx stream count explicitly
		 * (field is encoded as streams - 1). */
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}
}
152
/**
 * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based from eeprom
 *
 * Builds the per-band channel and bitrate tables in priv->bands[] from
 * the EEPROM-derived priv->channel_info[] array.  Allocates
 * priv->ieee_channels and priv->ieee_rates (undone by
 * iwl_legacy_free_geos()) and sets STATUS_GEO_CONFIGURED on success.
 *
 * Return: 0 on success (or when already configured), -ENOMEM on
 * allocation failure.
 */
int iwl_legacy_init_geos(struct iwl_priv *priv)
{
	struct iwl_channel_info *ch;
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *channels;
	struct ieee80211_channel *geo_ch;
	struct ieee80211_rate *rates;
	int i = 0;

	/* n_bitrates is only set here, so non-zero means already done. */
	if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
	    priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
		IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
		set_bit(STATUS_GEO_CONFIGURED, &priv->status);
		return 0;
	}

	channels = kzalloc(sizeof(struct ieee80211_channel) *
			priv->channel_count, GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
			GFP_KERNEL);
	if (!rates) {
		kfree(channels);
		return -ENOMEM;
	}

	/* 5.2GHz channels start after the 2.4GHz channels */
	sband = &priv->bands[IEEE80211_BAND_5GHZ];
	sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
	/* just OFDM */
	sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;

	if (priv->cfg->sku & IWL_SKU_N)
		iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
						IEEE80211_BAND_5GHZ);

	sband = &priv->bands[IEEE80211_BAND_2GHZ];
	sband->channels = channels;
	/* OFDM & CCK */
	sband->bitrates = rates;
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY;

	if (priv->cfg->sku & IWL_SKU_N)
		iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
						IEEE80211_BAND_2GHZ);

	priv->ieee_channels = channels;
	priv->ieee_rates = rates;

	/* Translate every valid EEPROM channel into a mac80211 channel. */
	for (i = 0; i < priv->channel_count; i++) {
		ch = &priv->channel_info[i];

		if (!iwl_legacy_is_channel_valid(ch))
			continue;

		if (iwl_legacy_is_channel_a_band(ch))
			sband = &priv->bands[IEEE80211_BAND_5GHZ];
		else
			sband = &priv->bands[IEEE80211_BAND_2GHZ];

		geo_ch = &sband->channels[sband->n_channels++];

		geo_ch->center_freq =
			ieee80211_channel_to_frequency(ch->channel, ch->band);
		geo_ch->max_power = ch->max_power_avg;
		geo_ch->max_antenna_gain = 0xff;
		geo_ch->hw_value = ch->channel;

		/* NOTE(review): always true here -- the !valid case was
		 * skipped by the continue above, so the else branch below
		 * is dead code. */
		if (iwl_legacy_is_channel_valid(ch)) {
			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
				geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;

			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
				geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;

			if (ch->flags & EEPROM_CHANNEL_RADAR)
				geo_ch->flags |= IEEE80211_CHAN_RADAR;

			geo_ch->flags |= ch->ht40_extension_channel;

			/* Track the highest regulatory Tx power seen. */
			if (ch->max_power_avg > priv->tx_power_device_lmt)
				priv->tx_power_device_lmt = ch->max_power_avg;
		} else {
			geo_ch->flags |= IEEE80211_CHAN_DISABLED;
		}

		IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
				ch->channel, geo_ch->center_freq,
				iwl_legacy_is_channel_a_band(ch) ? "5.2" : "2.4",
				geo_ch->flags & IEEE80211_CHAN_DISABLED ?
				"restricted" : "valid",
				geo_ch->flags);
	}

	/* SKU claims 5 GHz but EEPROM lists no 5 GHz channels: trust the
	 * EEPROM and drop the A-band capability bit. */
	if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
	     priv->cfg->sku & IWL_SKU_A) {
		IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
			"Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
			priv->pci_dev->device,
			priv->pci_dev->subsystem_device);
		priv->cfg->sku &= ~IWL_SKU_A;
	}

	IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
		   priv->bands[IEEE80211_BAND_2GHZ].n_channels,
		   priv->bands[IEEE80211_BAND_5GHZ].n_channels);

	set_bit(STATUS_GEO_CONFIGURED, &priv->status);

	return 0;
}
EXPORT_SYMBOL(iwl_legacy_init_geos);
271
272 /*
273 * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos
274 */
275 void iwl_legacy_free_geos(struct iwl_priv *priv)
276 {
277 kfree(priv->ieee_channels);
278 kfree(priv->ieee_rates);
279 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
280 }
281 EXPORT_SYMBOL(iwl_legacy_free_geos);
282
283 static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv,
284 enum ieee80211_band band,
285 u16 channel, u8 extension_chan_offset)
286 {
287 const struct iwl_channel_info *ch_info;
288
289 ch_info = iwl_legacy_get_channel_info(priv, band, channel);
290 if (!iwl_legacy_is_channel_valid(ch_info))
291 return false;
292
293 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
294 return !(ch_info->ht40_extension_channel &
295 IEEE80211_CHAN_NO_HT40PLUS);
296 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
297 return !(ch_info->ht40_extension_channel &
298 IEEE80211_CHAN_NO_HT40MINUS);
299
300 return false;
301 }
302
/*
 * iwl_legacy_is_ht40_tx_allowed - may we transmit with 40 MHz width?
 * @ht_cap: peer station HT capabilities, or NULL to skip the peer check
 *
 * Requires HT + 40 MHz enabled on the context, the (optional) peer to
 * support HT, HT40 not disabled via debugfs, and the current control
 * channel to permit the configured extension-channel offset.
 */
bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
			    struct iwl_rxon_context *ctx,
			    struct ieee80211_sta_ht_cap *ht_cap)
{
	if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
		return false;

	/*
	 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
	 * the bit will not set if it is pure 40MHz case
	 */
	if (ht_cap && !ht_cap->ht_supported)
		return false;

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
	/* debugfs knob to force HT40 off for testing */
	if (priv->disable_ht40)
		return false;
#endif

	return iwl_legacy_is_channel_extension(priv, priv->band,
			le16_to_cpu(ctx->staging.channel),
			ctx->ht.extension_chan_offset);
}
EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed);
327
328 static u16 iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
329 {
330 u16 new_val;
331 u16 beacon_factor;
332
333 /*
334 * If mac80211 hasn't given us a beacon interval, program
335 * the default into the device.
336 */
337 if (!beacon_val)
338 return DEFAULT_BEACON_INTERVAL;
339
340 /*
341 * If the beacon interval we obtained from the peer
342 * is too large, we'll have to wake up more often
343 * (and in IBSS case, we'll beacon too much)
344 *
345 * For example, if max_beacon_val is 4096, and the
346 * requested beacon interval is 7000, we'll have to
347 * use 3500 to be able to wake up on the beacons.
348 *
349 * This could badly influence beacon detection stats.
350 */
351
352 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
353 new_val = beacon_val / beacon_factor;
354
355 if (!new_val)
356 new_val = max_beacon_val;
357
358 return new_val;
359 }
360
/*
 * iwl_legacy_send_rxon_timing - program beacon/DTIM timing into the uCode
 *
 * Fills ctx->timing from the current mac80211 configuration (clamping
 * the beacon interval to what the device supports) and sends the RXON
 * timing host command.  Caller must hold priv->mutex.
 *
 * Return: result of iwl_legacy_send_cmd_pdu() (0 on success).
 */
int
iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = ctx->vif;

	conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);

	lockdep_assert_held(&priv->mutex);

	memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));

	ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	/* No vif yet (e.g. during early setup): treat interval as unknown. */
	beacon_int = vif ? vif->bss_conf.beacon_int : 0;

	/*
	 * TODO: For IBSS we need to get atim_window from mac80211,
	 * for now just always use 0
	 */
	ctx->timing.atim_window = 0;

	/* Default/clamp the interval to the device-supported maximum. */
	beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int,
			priv->hw_params.max_beacon_itrvl * TIME_UNIT);
	ctx->timing.beacon_interval = cpu_to_le16(beacon_int);

	tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	/* Time remaining until the next beacon boundary on the TSF. */
	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	/* A DTIM period of 0 is invalid; fall back to 1 (every beacon). */
	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;

	IWL_DEBUG_ASSOC(priv,
			"beacon interval %d beacon timer %d beacon tim %d\n",
			le16_to_cpu(ctx->timing.beacon_interval),
			le32_to_cpu(ctx->timing.beacon_init_val),
			le16_to_cpu(ctx->timing.atim_window));

	return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
					sizeof(ctx->timing), &ctx->timing);
}
EXPORT_SYMBOL(iwl_legacy_send_rxon_timing);
408
409 void
410 iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
411 struct iwl_rxon_context *ctx,
412 int hw_decrypt)
413 {
414 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
415
416 if (hw_decrypt)
417 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
418 else
419 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
420
421 }
422 EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto);
423
424 /* validate RXON structure is valid */
425 int
426 iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
427 {
428 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
429 bool error = false;
430
431 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
432 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
433 IWL_WARN(priv, "check 2.4G: wrong narrow\n");
434 error = true;
435 }
436 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
437 IWL_WARN(priv, "check 2.4G: wrong radar\n");
438 error = true;
439 }
440 } else {
441 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
442 IWL_WARN(priv, "check 5.2G: not short slot!\n");
443 error = true;
444 }
445 if (rxon->flags & RXON_FLG_CCK_MSK) {
446 IWL_WARN(priv, "check 5.2G: CCK!\n");
447 error = true;
448 }
449 }
450 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
451 IWL_WARN(priv, "mac/bssid mcast!\n");
452 error = true;
453 }
454
455 /* make sure basic rates 6Mbps and 1Mbps are supported */
456 if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
457 (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
458 IWL_WARN(priv, "neither 1 nor 6 are basic\n");
459 error = true;
460 }
461
462 if (le16_to_cpu(rxon->assoc_id) > 2007) {
463 IWL_WARN(priv, "aid > 2007\n");
464 error = true;
465 }
466
467 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
468 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
469 IWL_WARN(priv, "CCK and short slot\n");
470 error = true;
471 }
472
473 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
474 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
475 IWL_WARN(priv, "CCK and auto detect");
476 error = true;
477 }
478
479 if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
480 RXON_FLG_TGG_PROTECT_MSK)) ==
481 RXON_FLG_TGG_PROTECT_MSK) {
482 IWL_WARN(priv, "TGg but no auto-detect\n");
483 error = true;
484 }
485
486 if (error)
487 IWL_WARN(priv, "Tuning to channel %d\n",
488 le16_to_cpu(rxon->channel));
489
490 if (error) {
491 IWL_ERR(priv, "Invalid RXON\n");
492 return -EINVAL;
493 }
494 return 0;
495 }
496 EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd);
497
/**
 * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
 * @priv: staging_rxon is compared to active_rxon
 *
 * If the RXON structure is changing enough to require a new tune,
 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
 */
int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
			   struct iwl_rxon_context *ctx)
{
	const struct iwl_legacy_rxon_cmd *staging = &ctx->staging;
	const struct iwl_legacy_rxon_cmd *active = &ctx->active;

	/* Return 1 (logging the stringified condition) when @cond holds. */
#define CHK(cond)							\
	if ((cond)) {							\
		IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n");	\
		return 1;						\
	}

	/* Same idea, but compares two values for inequality. */
#define CHK_NEQ(c1, c2)						\
	if ((c1) != (c2)) {					\
		IWL_DEBUG_INFO(priv, "need full RXON - "	\
			       #c1 " != " #c2 " - %d != %d\n",	\
			       (c1), (c2));			\
		return 1;					\
	}

	/* These items are only settable from the full RXON command */
	CHK(!iwl_legacy_is_associated_ctx(ctx));
	CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
	CHK(compare_ether_addr(staging->node_addr, active->node_addr));
	CHK(compare_ether_addr(staging->wlap_bssid_addr,
				active->wlap_bssid_addr));
	CHK_NEQ(staging->dev_type, active->dev_type);
	CHK_NEQ(staging->channel, active->channel);
	CHK_NEQ(staging->air_propagation, active->air_propagation);
	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
		active->ofdm_ht_single_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
		active->ofdm_ht_dual_stream_basic_rates);
	CHK_NEQ(staging->assoc_id, active->assoc_id);

	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
	 * be updated with the RXON_ASSOC command -- however only some
	 * flag transitions are allowed using RXON_ASSOC */

	/* Check if we are not switching bands */
	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
		active->flags & RXON_FLG_BAND_24G_MSK);

	/* Check if we are switching association toggle */
	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
		active->filter_flags & RXON_FILTER_ASSOC_MSK);

#undef CHK
#undef CHK_NEQ

	return 0;
}
EXPORT_SYMBOL(iwl_legacy_full_rxon_required);
559
560 u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
561 struct iwl_rxon_context *ctx)
562 {
563 /*
564 * Assign the lowest rate -- should really get this from
565 * the beacon skb from mac80211.
566 */
567 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
568 return IWL_RATE_1M_PLCP;
569 else
570 return IWL_RATE_6M_PLCP;
571 }
572 EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp);
573
/*
 * _iwl_legacy_set_rxon_ht - translate HT configuration into staging RXON flags
 *
 * Sets the HT protection mode, channel-width mode (legacy / mixed /
 * pure 40) and control-channel location bits for one context, then
 * lets the hw-specific code refresh the Rx chain selection.
 */
static void _iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
			     struct iwl_ht_config *ht_conf,
			     struct iwl_rxon_context *ctx)
{
	struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;

	if (!ctx->ht.enabled) {
		/* HT off: strip every HT-related RXON flag and bail out. */
		rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
			RXON_FLG_HT40_PROT_MSK |
			RXON_FLG_HT_PROT_MSK);
		return;
	}

	rxon->flags |= cpu_to_le32(ctx->ht.protection <<
					RXON_FLG_HT_OPERATING_MODE_POS);

	/* Set up channel bandwidth:
	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
	/* clear the HT channel mode before set the mode */
	rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
	if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, NULL)) {
		/* pure ht40 */
		if (ctx->ht.protection ==
				IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
			/* Note: control channel is opposite of extension channel */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
					~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |=
					RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			}
		} else {
			/* Note: control channel is opposite of extension channel */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
					~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |=
					RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
			default:
				/* channel location only valid if in Mixed mode */
				IWL_ERR(priv,
					"invalid extension channel offset\n");
				break;
			}
		}
	} else {
		/* HT40 not permitted: stay in legacy 20 MHz mode. */
		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
	}

	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);

	IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
			"extension channel offset 0x%x\n",
			le32_to_cpu(rxon->flags), ctx->ht.protection,
			ctx->ht.extension_chan_offset);
}
645
/* Apply @ht_conf to the staging RXON of every active context. */
void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
{
	struct iwl_rxon_context *ctx;

	for_each_context(priv, ctx)
		_iwl_legacy_set_rxon_ht(priv, ht_conf, ctx);
}
EXPORT_SYMBOL(iwl_legacy_set_rxon_ht);
654
/* Return valid, unused, channel for a passive scan to reset the RF */
/*
 * Walks priv->channel_info[] looking for a valid channel on @band that
 * no context is currently tuned to; returns its channel number, or 0 if
 * the table holds none.
 * NOTE(review): assumes 2.4 GHz channels occupy indices [0, 14) of
 * channel_info with 5 GHz entries following -- confirm against the
 * EEPROM parsing order.  Also, if the final candidate inspected is
 * invalid, its number is still returned -- verify callers tolerate that.
 */
u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
			       enum ieee80211_band band)
{
	const struct iwl_channel_info *ch_info;
	int i;
	u8 channel = 0;
	u8 min, max;
	struct iwl_rxon_context *ctx;

	if (band == IEEE80211_BAND_5GHZ) {
		min = 14;
		max = priv->channel_count;
	} else {
		min = 0;
		max = 14;
	}

	for (i = min; i < max; i++) {
		bool busy = false;

		/* busy when any context is already tuned to this channel */
		for_each_context(priv, ctx) {
			busy = priv->channel_info[i].channel ==
				le16_to_cpu(ctx->staging.channel);
			if (busy)
				break;
		}

		if (busy)
			continue;

		channel = priv->channel_info[i].channel;
		ch_info = iwl_legacy_get_channel_info(priv, band, channel);
		if (iwl_legacy_is_channel_valid(ch_info))
			break;
	}

	return channel;
}
EXPORT_SYMBOL(iwl_legacy_get_single_channel_number);
695
696 /**
697 * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON
698 * @ch: requested channel as a pointer to struct ieee80211_channel
699
700 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
701 * in the staging RXON flag structure based on the ch->band
702 */
703 int
704 iwl_legacy_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
705 struct iwl_rxon_context *ctx)
706 {
707 enum ieee80211_band band = ch->band;
708 u16 channel = ch->hw_value;
709
710 if ((le16_to_cpu(ctx->staging.channel) == channel) &&
711 (priv->band == band))
712 return 0;
713
714 ctx->staging.channel = cpu_to_le16(channel);
715 if (band == IEEE80211_BAND_5GHZ)
716 ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
717 else
718 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
719
720 priv->band = band;
721
722 IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
723
724 return 0;
725 }
726 EXPORT_SYMBOL(iwl_legacy_set_rxon_channel);
727
728 void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
729 struct iwl_rxon_context *ctx,
730 enum ieee80211_band band,
731 struct ieee80211_vif *vif)
732 {
733 if (band == IEEE80211_BAND_5GHZ) {
734 ctx->staging.flags &=
735 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
736 | RXON_FLG_CCK_MSK);
737 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
738 } else {
739 /* Copied from iwl_post_associate() */
740 if (vif && vif->bss_conf.use_short_slot)
741 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
742 else
743 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
744
745 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
746 ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
747 ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
748 }
749 }
750 EXPORT_SYMBOL(iwl_legacy_set_flags_for_band);
751
/*
 * initialize rxon structure with default values from eeprom
 *
 * Resets ctx->staging and fills it from the interface type, the EEPROM
 * channel table, and default basic-rate masks.  May also update
 * priv->band to match the chosen channel.
 */
void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
				   struct iwl_rxon_context *ctx)
{
	const struct iwl_channel_info *ch_info;

	memset(&ctx->staging, 0, sizeof(ctx->staging));

	/* dev_type and filter flags depend on the interface type, if any. */
	if (!ctx->vif) {
		ctx->staging.dev_type = ctx->unused_devtype;
	} else
	switch (ctx->vif->type) {

	case NL80211_IFTYPE_STATION:
		ctx->staging.dev_type = ctx->station_devtype;
		ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	case NL80211_IFTYPE_ADHOC:
		ctx->staging.dev_type = ctx->ibss_devtype;
		ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
		ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
						  RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	default:
		IWL_ERR(priv, "Unsupported interface type %d\n",
			ctx->vif->type);
		break;
	}

#if 0
	/* TODO: Figure out when short_preamble would be set and cache from
	 * that */
	if (!hw_to_local(priv->hw)->short_preamble)
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif

	/* Fall back to the first known channel if the active one is gone. */
	ch_info = iwl_legacy_get_channel_info(priv, priv->band,
				       le16_to_cpu(ctx->active.channel));

	if (!ch_info)
		ch_info = &priv->channel_info[0];

	ctx->staging.channel = cpu_to_le16(ch_info->channel);
	priv->band = ch_info->band;

	iwl_legacy_set_flags_for_band(priv, ctx, priv->band, ctx->vif);

	ctx->staging.ofdm_basic_rates =
	    (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
	ctx->staging.cck_basic_rates =
	    (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;

	/* clear both MIX and PURE40 mode flag */
	ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
					RXON_FLG_CHANNEL_MODE_PURE_40);
	if (ctx->vif)
		memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);

	ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
	ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
}
EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config);
820
/*
 * iwl_legacy_set_rate - rebuild the active-rate bitmap for the current band
 *
 * Collects the rates mac80211 advertises for priv->band into
 * priv->active_rate, and resets every context's staging basic-rate
 * masks to the driver defaults.
 */
void iwl_legacy_set_rate(struct iwl_priv *priv)
{
	const struct ieee80211_supported_band *hw = NULL;
	struct ieee80211_rate *rate;
	struct iwl_rxon_context *ctx;
	int i;

	hw = iwl_get_hw_mode(priv, priv->band);
	if (!hw) {
		IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
		return;
	}

	priv->active_rate = 0;

	/* One bit per supported legacy rate, indexed by hw_value. */
	for (i = 0; i < hw->n_bitrates; i++) {
		rate = &(hw->bitrates[i]);
		if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
			priv->active_rate |= (1 << rate->hw_value);
	}

	IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);

	for_each_context(priv, ctx) {
		ctx->staging.cck_basic_rates =
		    (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;

		ctx->staging.ofdm_basic_rates =
		   (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
	}
}
EXPORT_SYMBOL(iwl_legacy_set_rate);
853
/*
 * iwl_legacy_chswitch_done - finish a pending channel switch
 * @is_success: outcome reported up to mac80211
 */
void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
{
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (priv->switch_rxon.switch_in_progress) {
		ieee80211_chswitch_done(ctx->vif, is_success);
		/* clear the in-progress marker under the mutex */
		mutex_lock(&priv->mutex);
		priv->switch_rxon.switch_in_progress = false;
		mutex_unlock(&priv->mutex);
	}
}
EXPORT_SYMBOL(iwl_legacy_chswitch_done);
869
/*
 * iwl_legacy_rx_csa - handle the uCode channel-switch-announcement notification
 *
 * On success the new channel is committed to both the active and staging
 * RXON commands; either way the pending switch is completed via
 * iwl_legacy_chswitch_done().
 */
void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_csa_notification *csa = &(pkt->u.csa_notif);

	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	/* cast drops const-ness: the active RXON is updated in place here */
	struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;

	if (priv->switch_rxon.switch_in_progress) {
		/* success requires status == 0 and the expected channel */
		if (!le32_to_cpu(csa->status) &&
		    (csa->channel == priv->switch_rxon.channel)) {
			rxon->channel = csa->channel;
			ctx->staging.channel = csa->channel;
			IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
			      le16_to_cpu(csa->channel));
			iwl_legacy_chswitch_done(priv, true);
		} else {
			IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
			      le16_to_cpu(csa->channel));
			iwl_legacy_chswitch_done(priv, false);
		}
	}
}
EXPORT_SYMBOL(iwl_legacy_rx_csa);
894
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Dump every field of the staging RXON command to the radio debug log. */
void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;

	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
				le16_to_cpu(rxon->channel));
	IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
				le32_to_cpu(rxon->filter_flags));
	IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
	IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
			rxon->ofdm_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
				rxon->cck_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
	IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
				le16_to_cpu(rxon->assoc_id));
}
EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd);
#endif
/**
 * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card
 *
 * Marks the firmware as failed, dumps diagnostic logs, wakes any waiter
 * on the command queue, and (if enabled and not shutting down) queues a
 * restart of the adapter.
 */
void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
{
	/* Set the FW error flag -- cleared on iwl_down */
	set_bit(STATUS_FW_ERROR, &priv->status);

	/* Cancel currently queued command. */
	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);

	IWL_ERR(priv, "Loaded firmware version: %s\n",
		priv->hw->wiphy->fw_version);

	/* Dump error and event logs via the hw-specific callbacks. */
	priv->cfg->ops->lib->dump_nic_error_log(priv);
	if (priv->cfg->ops->lib->dump_fh)
		priv->cfg->ops->lib->dump_fh(priv, NULL, false);
	priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS)
		iwl_legacy_print_rx_config_cmd(priv,
					&priv->contexts[IWL_RXON_CTX_BSS]);
#endif

	wake_up_interruptible(&priv->wait_command_queue);

	/* Keep the restart process from trying to send host
	 * commands by clearing the INIT status bit */
	clear_bit(STATUS_READY, &priv->status);

	if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
			  "Restarting adapter due to uCode error.\n");

		/* restart is optional, gated by the restart_fw module param */
		if (priv->cfg->mod_params->restart_fw)
			queue_work(priv->workqueue, &priv->restart);
	}
}
EXPORT_SYMBOL(iwl_legacy_irq_handle_error);
959
960 static int iwl_legacy_apm_stop_master(struct iwl_priv *priv)
961 {
962 int ret = 0;
963
964 /* stop device's busmaster DMA activity */
965 iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
966
967 ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
968 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
969 if (ret)
970 IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
971
972 IWL_DEBUG_INFO(priv, "stop master\n");
973
974 return ret;
975 }
976
/*
 * iwl_legacy_apm_stop - stop DMA, reset the device, drop to low power
 *
 * Counterpart of iwl_legacy_apm_init(): leaves the adapter in the
 * uninitialized (D0U*) state.
 */
void iwl_legacy_apm_stop(struct iwl_priv *priv)
{
	IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");

	/* Stop device's DMA activity */
	iwl_legacy_apm_stop_master(priv);

	/* Reset the entire device */
	iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	/* give the reset time to take effect before touching CSR again */
	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
				CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
EXPORT_SYMBOL(iwl_legacy_apm_stop);
997
998
/*
 * iwl_legacy_apm_init - start up NIC's basic functionality after reset
 * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop()).
 *
 * Applies platform/PCIe workarounds, moves the adapter to D0A (powered-up
 * active), waits for the internal clocks to stabilize, then enables the
 * DMA (and BSM, where used) clocks.
 *
 * NOTE: This does not load uCode nor start the embedded processor.
 * Returns 0 on success, or the negative iwl_poll_bit() result if the
 * MAC clock never becomes ready.
 */
int iwl_legacy_apm_init(struct iwl_priv *priv)
{
	int ret = 0;
	u16 lctl;

	IWL_DEBUG_INFO(priv, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
			CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
			CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_legacy_set_bit(priv, CSR_DBG_HPET_MEM_REG,
			CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 * NOTE: This is no-op for 3945 (non-existent bit)
	 */
	iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	/*
	 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 * costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 * power savings, even without L1.
	 */
	if (priv->cfg->base_params->set_l0s) {
		lctl = iwl_legacy_pcie_link_ctl(priv);
		if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
					PCI_CFG_LINK_CTRL_VAL_L1_EN) {
			/* L1-ASPM enabled; disable(!) L0S  */
			iwl_legacy_set_bit(priv, CSR_GIO_REG,
					CSR_GIO_REG_VAL_L0S_ENABLED);
			IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
		} else {
			/* L1-ASPM disabled; enable(!) L0S */
			iwl_legacy_clear_bit(priv, CSR_GIO_REG,
					CSR_GIO_REG_VAL_L0S_ENABLED);
			IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
		}
	}

	/* Configure analog phase-lock-loop before activating to D0A */
	if (priv->cfg->base_params->pll_cfg_val)
		iwl_legacy_set_bit(priv, CSR_ANA_PLL_CFG,
				priv->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_legacy_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
	 * BSM (Bootstrap State Machine) is only in 3945 and 4965.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	if (priv->cfg->base_params->use_bsm)
		iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
			APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
	else
		iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
			APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_legacy_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

out:
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_apm_init);
1111
1112
1113 int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1114 {
1115 int ret;
1116 s8 prev_tx_power;
1117
1118 lockdep_assert_held(&priv->mutex);
1119
1120 if (priv->tx_power_user_lmt == tx_power && !force)
1121 return 0;
1122
1123 if (!priv->cfg->ops->lib->send_tx_power)
1124 return -EOPNOTSUPP;
1125
1126 if (tx_power < IWL4965_TX_POWER_TARGET_POWER_MIN) {
1127 IWL_WARN(priv,
1128 "Requested user TXPOWER %d below lower limit %d.\n",
1129 tx_power,
1130 IWL4965_TX_POWER_TARGET_POWER_MIN);
1131 return -EINVAL;
1132 }
1133
1134 if (tx_power > priv->tx_power_device_lmt) {
1135 IWL_WARN(priv,
1136 "Requested user TXPOWER %d above upper limit %d.\n",
1137 tx_power, priv->tx_power_device_lmt);
1138 return -EINVAL;
1139 }
1140
1141 if (!iwl_legacy_is_ready_rf(priv))
1142 return -EIO;
1143
1144 /* scan complete use tx_power_next, need to be updated */
1145 priv->tx_power_next = tx_power;
1146 if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
1147 IWL_DEBUG_INFO(priv, "Deferring tx power set while scanning\n");
1148 return 0;
1149 }
1150
1151 prev_tx_power = priv->tx_power_user_lmt;
1152 priv->tx_power_user_lmt = tx_power;
1153
1154 ret = priv->cfg->ops->lib->send_tx_power(priv);
1155
1156 /* if fail to set tx_power, restore the orig. tx power */
1157 if (ret) {
1158 priv->tx_power_user_lmt = prev_tx_power;
1159 priv->tx_power_next = prev_tx_power;
1160 }
1161 return ret;
1162 }
1163 EXPORT_SYMBOL(iwl_legacy_set_tx_power);
1164
1165 void iwl_legacy_send_bt_config(struct iwl_priv *priv)
1166 {
1167 struct iwl_bt_cmd bt_cmd = {
1168 .lead_time = BT_LEAD_TIME_DEF,
1169 .max_kill = BT_MAX_KILL_DEF,
1170 .kill_ack_mask = 0,
1171 .kill_cts_mask = 0,
1172 };
1173
1174 if (!bt_coex_active)
1175 bt_cmd.flags = BT_COEX_DISABLE;
1176 else
1177 bt_cmd.flags = BT_COEX_ENABLE;
1178
1179 IWL_DEBUG_INFO(priv, "BT coex %s\n",
1180 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
1181
1182 if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1183 sizeof(struct iwl_bt_cmd), &bt_cmd))
1184 IWL_ERR(priv, "failed to send BT Coex Config\n");
1185 }
1186 EXPORT_SYMBOL(iwl_legacy_send_bt_config);
1187
1188 int iwl_legacy_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
1189 {
1190 struct iwl_statistics_cmd statistics_cmd = {
1191 .configuration_flags =
1192 clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
1193 };
1194
1195 if (flags & CMD_ASYNC)
1196 return iwl_legacy_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
1197 sizeof(struct iwl_statistics_cmd),
1198 &statistics_cmd, NULL);
1199 else
1200 return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
1201 sizeof(struct iwl_statistics_cmd),
1202 &statistics_cmd);
1203 }
1204 EXPORT_SYMBOL(iwl_legacy_send_statistics_request);
1205
/*
 * Rx handler: PM_SLEEP_NOTIFICATION.
 * Debug-only: prints the reported sleep mode and wakeup source; a no-op
 * unless CONFIG_IWLWIFI_LEGACY_DEBUG is enabled.
 */
void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
		sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif);
1217
1218 void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
1219 struct iwl_rx_mem_buffer *rxb)
1220 {
1221 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1222 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
1223 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
1224 "notification for %s:\n", len,
1225 iwl_legacy_get_cmd_string(pkt->hdr.cmd));
1226 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
1227 }
1228 EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif);
1229
/*
 * Rx handler: REPLY_ERROR.
 * Log the details of a uCode error response (type, offending command,
 * sequence number and extra error info).
 */
void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
			struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
		"seq 0x%04X ser 0x%08X\n",
		le32_to_cpu(pkt->u.err_resp.error_type),
		iwl_legacy_get_cmd_string(pkt->u.err_resp.cmd_id),
		pkt->u.err_resp.cmd_id,
		le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
		le32_to_cpu(pkt->u.err_resp.error_info));
}
EXPORT_SYMBOL(iwl_legacy_rx_reply_error);
1244
/* Reset the driver's accumulated interrupt statistics counters. */
void iwl_legacy_clear_isr_stats(struct iwl_priv *priv)
{
	memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
}
1249
1250 int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
1251 const struct ieee80211_tx_queue_params *params)
1252 {
1253 struct iwl_priv *priv = hw->priv;
1254 struct iwl_rxon_context *ctx;
1255 unsigned long flags;
1256 int q;
1257
1258 IWL_DEBUG_MAC80211(priv, "enter\n");
1259
1260 if (!iwl_legacy_is_ready_rf(priv)) {
1261 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
1262 return -EIO;
1263 }
1264
1265 if (queue >= AC_NUM) {
1266 IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
1267 return 0;
1268 }
1269
1270 q = AC_NUM - 1 - queue;
1271
1272 spin_lock_irqsave(&priv->lock, flags);
1273
1274 for_each_context(priv, ctx) {
1275 ctx->qos_data.def_qos_parm.ac[q].cw_min =
1276 cpu_to_le16(params->cw_min);
1277 ctx->qos_data.def_qos_parm.ac[q].cw_max =
1278 cpu_to_le16(params->cw_max);
1279 ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
1280 ctx->qos_data.def_qos_parm.ac[q].edca_txop =
1281 cpu_to_le16((params->txop * 32));
1282
1283 ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
1284 }
1285
1286 spin_unlock_irqrestore(&priv->lock, flags);
1287
1288 IWL_DEBUG_MAC80211(priv, "leave\n");
1289 return 0;
1290 }
1291 EXPORT_SYMBOL(iwl_legacy_mac_conf_tx);
1292
1293 int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw)
1294 {
1295 struct iwl_priv *priv = hw->priv;
1296
1297 return priv->ibss_manager == IWL_IBSS_MANAGER;
1298 }
1299 EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon);
1300
1301 static int
1302 iwl_legacy_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1303 {
1304 iwl_legacy_connection_init_rx_config(priv, ctx);
1305
1306 if (priv->cfg->ops->hcmd->set_rxon_chain)
1307 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
1308
1309 return iwl_legacy_commit_rxon(priv, ctx);
1310 }
1311
1312 static int iwl_legacy_setup_interface(struct iwl_priv *priv,
1313 struct iwl_rxon_context *ctx)
1314 {
1315 struct ieee80211_vif *vif = ctx->vif;
1316 int err;
1317
1318 lockdep_assert_held(&priv->mutex);
1319
1320 /*
1321 * This variable will be correct only when there's just
1322 * a single context, but all code using it is for hardware
1323 * that supports only one context.
1324 */
1325 priv->iw_mode = vif->type;
1326
1327 ctx->is_active = true;
1328
1329 err = iwl_legacy_set_mode(priv, ctx);
1330 if (err) {
1331 if (!ctx->always_active)
1332 ctx->is_active = false;
1333 return err;
1334 }
1335
1336 return 0;
1337 }
1338
1339 int
1340 iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1341 {
1342 struct iwl_priv *priv = hw->priv;
1343 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1344 struct iwl_rxon_context *tmp, *ctx = NULL;
1345 int err;
1346
1347 IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
1348 vif->type, vif->addr);
1349
1350 mutex_lock(&priv->mutex);
1351
1352 if (!iwl_legacy_is_ready_rf(priv)) {
1353 IWL_WARN(priv, "Try to add interface when device not ready\n");
1354 err = -EINVAL;
1355 goto out;
1356 }
1357
1358 for_each_context(priv, tmp) {
1359 u32 possible_modes =
1360 tmp->interface_modes | tmp->exclusive_interface_modes;
1361
1362 if (tmp->vif) {
1363 /* check if this busy context is exclusive */
1364 if (tmp->exclusive_interface_modes &
1365 BIT(tmp->vif->type)) {
1366 err = -EINVAL;
1367 goto out;
1368 }
1369 continue;
1370 }
1371
1372 if (!(possible_modes & BIT(vif->type)))
1373 continue;
1374
1375 /* have maybe usable context w/o interface */
1376 ctx = tmp;
1377 break;
1378 }
1379
1380 if (!ctx) {
1381 err = -EOPNOTSUPP;
1382 goto out;
1383 }
1384
1385 vif_priv->ctx = ctx;
1386 ctx->vif = vif;
1387
1388 err = iwl_legacy_setup_interface(priv, ctx);
1389 if (!err)
1390 goto out;
1391
1392 ctx->vif = NULL;
1393 priv->iw_mode = NL80211_IFTYPE_STATION;
1394 out:
1395 mutex_unlock(&priv->mutex);
1396
1397 IWL_DEBUG_MAC80211(priv, "leave\n");
1398 return err;
1399 }
1400 EXPORT_SYMBOL(iwl_legacy_mac_add_interface);
1401
1402 static void iwl_legacy_teardown_interface(struct iwl_priv *priv,
1403 struct ieee80211_vif *vif,
1404 bool mode_change)
1405 {
1406 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
1407
1408 lockdep_assert_held(&priv->mutex);
1409
1410 if (priv->scan_vif == vif) {
1411 iwl_legacy_scan_cancel_timeout(priv, 200);
1412 iwl_legacy_force_scan_end(priv);
1413 }
1414
1415 if (!mode_change) {
1416 iwl_legacy_set_mode(priv, ctx);
1417 if (!ctx->always_active)
1418 ctx->is_active = false;
1419 }
1420 }
1421
1422 void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
1423 struct ieee80211_vif *vif)
1424 {
1425 struct iwl_priv *priv = hw->priv;
1426 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
1427
1428 IWL_DEBUG_MAC80211(priv, "enter\n");
1429
1430 mutex_lock(&priv->mutex);
1431
1432 WARN_ON(ctx->vif != vif);
1433 ctx->vif = NULL;
1434
1435 iwl_legacy_teardown_interface(priv, vif, false);
1436
1437 memset(priv->bssid, 0, ETH_ALEN);
1438 mutex_unlock(&priv->mutex);
1439
1440 IWL_DEBUG_MAC80211(priv, "leave\n");
1441
1442 }
1443 EXPORT_SYMBOL(iwl_legacy_mac_remove_interface);
1444
1445 int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv)
1446 {
1447 if (!priv->txq)
1448 priv->txq = kzalloc(
1449 sizeof(struct iwl_tx_queue) *
1450 priv->cfg->base_params->num_of_queues,
1451 GFP_KERNEL);
1452 if (!priv->txq) {
1453 IWL_ERR(priv, "Not enough memory for txq\n");
1454 return -ENOMEM;
1455 }
1456 return 0;
1457 }
1458 EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem);
1459
/* Free the Tx queue array allocated by iwl_legacy_alloc_txq_mem(). */
void iwl_legacy_txq_mem(struct iwl_priv *priv)
{
	kfree(priv->txq);
	/* clear the pointer so a later alloc/free cycle is safe */
	priv->txq = NULL;
}
EXPORT_SYMBOL(iwl_legacy_txq_mem);
1466
1467 #ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
1468
1469 #define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
1470
1471 void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
1472 {
1473 priv->tx_traffic_idx = 0;
1474 priv->rx_traffic_idx = 0;
1475 if (priv->tx_traffic)
1476 memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
1477 if (priv->rx_traffic)
1478 memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
1479 }
1480
1481 int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
1482 {
1483 u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
1484
1485 if (iwl_debug_level & IWL_DL_TX) {
1486 if (!priv->tx_traffic) {
1487 priv->tx_traffic =
1488 kzalloc(traffic_size, GFP_KERNEL);
1489 if (!priv->tx_traffic)
1490 return -ENOMEM;
1491 }
1492 }
1493 if (iwl_debug_level & IWL_DL_RX) {
1494 if (!priv->rx_traffic) {
1495 priv->rx_traffic =
1496 kzalloc(traffic_size, GFP_KERNEL);
1497 if (!priv->rx_traffic)
1498 return -ENOMEM;
1499 }
1500 }
1501 iwl_legacy_reset_traffic_log(priv);
1502 return 0;
1503 }
1504 EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem);
1505
1506 void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
1507 {
1508 kfree(priv->tx_traffic);
1509 priv->tx_traffic = NULL;
1510
1511 kfree(priv->rx_traffic);
1512 priv->rx_traffic = NULL;
1513 }
1514 EXPORT_SYMBOL(iwl_legacy_free_traffic_mem);
1515
1516 void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
1517 u16 length, struct ieee80211_hdr *header)
1518 {
1519 __le16 fc;
1520 u16 len;
1521
1522 if (likely(!(iwl_debug_level & IWL_DL_TX)))
1523 return;
1524
1525 if (!priv->tx_traffic)
1526 return;
1527
1528 fc = header->frame_control;
1529 if (ieee80211_is_data(fc)) {
1530 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
1531 ? IWL_TRAFFIC_ENTRY_SIZE : length;
1532 memcpy((priv->tx_traffic +
1533 (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
1534 header, len);
1535 priv->tx_traffic_idx =
1536 (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1537 }
1538 }
1539 EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame);
1540
1541 void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
1542 u16 length, struct ieee80211_hdr *header)
1543 {
1544 __le16 fc;
1545 u16 len;
1546
1547 if (likely(!(iwl_debug_level & IWL_DL_RX)))
1548 return;
1549
1550 if (!priv->rx_traffic)
1551 return;
1552
1553 fc = header->frame_control;
1554 if (ieee80211_is_data(fc)) {
1555 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
1556 ? IWL_TRAFFIC_ENTRY_SIZE : length;
1557 memcpy((priv->rx_traffic +
1558 (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
1559 header, len);
1560 priv->rx_traffic_idx =
1561 (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1562 }
1563 }
1564 EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame);
1565
/*
 * Map a MANAGEMENT_* traffic-stats index to its name (for debugfs
 * output); returns "UNKNOWN" for any other value.
 */
const char *iwl_legacy_get_mgmt_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(MANAGEMENT_ASSOC_REQ);
	IWL_CMD(MANAGEMENT_ASSOC_RESP);
	IWL_CMD(MANAGEMENT_REASSOC_REQ);
	IWL_CMD(MANAGEMENT_REASSOC_RESP);
	IWL_CMD(MANAGEMENT_PROBE_REQ);
	IWL_CMD(MANAGEMENT_PROBE_RESP);
	IWL_CMD(MANAGEMENT_BEACON);
	IWL_CMD(MANAGEMENT_ATIM);
	IWL_CMD(MANAGEMENT_DISASSOC);
	IWL_CMD(MANAGEMENT_AUTH);
	IWL_CMD(MANAGEMENT_DEAUTH);
	IWL_CMD(MANAGEMENT_ACTION);
	default:
		return "UNKNOWN";

	}
}
1586
/*
 * Map a CONTROL_* traffic-stats index to its name (for debugfs output);
 * returns "UNKNOWN" for any other value.
 */
const char *iwl_legacy_get_ctrl_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(CONTROL_BACK_REQ);
	IWL_CMD(CONTROL_BACK);
	IWL_CMD(CONTROL_PSPOLL);
	IWL_CMD(CONTROL_RTS);
	IWL_CMD(CONTROL_CTS);
	IWL_CMD(CONTROL_ACK);
	IWL_CMD(CONTROL_CFEND);
	IWL_CMD(CONTROL_CFENDACK);
	default:
		return "UNKNOWN";

	}
}
1603
1604 void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv)
1605 {
1606 memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
1607 memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
1608 }
1609
/*
 * iwl_legacy_update_stats - account one Tx or Rx frame in the traffic stats
 *
 * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is defined, this records all MGMT,
 * CTRL and DATA packets for both Tx and Rx; use debugfs to display the
 * tx/rx statistics.
 * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is not defined, no MGMT or CTRL
 * information is recorded, but DATA packets are still counted because
 * iwl-led.c needs to control LED blinking based on the number of tx
 * and rx data frames.
 */
void
iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
{
	struct traffic_stats *stats;

	/* pick the direction-specific accumulator */
	if (is_tx)
		stats = &priv->tx_stats;
	else
		stats = &priv->rx_stats;

	if (ieee80211_is_mgmt(fc)) {
		/* classify management frames by subtype */
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
			stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
			stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
			stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
			stats->mgmt[MANAGEMENT_PROBE_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
			stats->mgmt[MANAGEMENT_PROBE_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BEACON):
			stats->mgmt[MANAGEMENT_BEACON]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ATIM):
			stats->mgmt[MANAGEMENT_ATIM]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
			stats->mgmt[MANAGEMENT_DISASSOC]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
			stats->mgmt[MANAGEMENT_AUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
			stats->mgmt[MANAGEMENT_DEAUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACTION):
			stats->mgmt[MANAGEMENT_ACTION]++;
			break;
		}
	} else if (ieee80211_is_ctl(fc)) {
		/* classify control frames by subtype */
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
			stats->ctrl[CONTROL_BACK_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BACK):
			stats->ctrl[CONTROL_BACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
			stats->ctrl[CONTROL_PSPOLL]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_RTS):
			stats->ctrl[CONTROL_RTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CTS):
			stats->ctrl[CONTROL_CTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACK):
			stats->ctrl[CONTROL_ACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFEND):
			stats->ctrl[CONTROL_CFEND]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
			stats->ctrl[CONTROL_CFENDACK]++;
			break;
		}
	} else {
		/* data */
		stats->data_cnt++;
		stats->data_bytes += len;
	}
}
EXPORT_SYMBOL(iwl_legacy_update_stats);
1704 #endif
1705
1706 static void _iwl_legacy_force_rf_reset(struct iwl_priv *priv)
1707 {
1708 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1709 return;
1710
1711 if (!iwl_legacy_is_any_associated(priv)) {
1712 IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
1713 return;
1714 }
1715 /*
1716 * There is no easy and better way to force reset the radio,
1717 * the only known method is switching channel which will force to
1718 * reset and tune the radio.
1719 * Use internal short scan (single channel) operation to should
1720 * achieve this objective.
1721 * Driver should reset the radio when number of consecutive missed
1722 * beacon, or any other uCode error condition detected.
1723 */
1724 IWL_DEBUG_INFO(priv, "perform radio reset.\n");
1725 iwl_legacy_internal_short_hw_scan(priv);
1726 }
1727
1728
/*
 * iwl_legacy_force_reset - perform an RF or firmware reset
 * @mode:     IWL_RF_RESET or IWL_FW_RESET
 * @external: true when the request comes from outside the driver
 *            (e.g. debugfs); external requests skip the rate limit and
 *            the fw_restart module-parameter check
 *
 * Returns 0 on success, -EINVAL for a bad mode or pending shutdown, or
 * -EAGAIN when an internal request falls inside the rate-limit window.
 */
int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external)
{
	struct iwl_force_reset *force_reset;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return -EINVAL;

	if (mode >= IWL_MAX_FORCE_RESET) {
		IWL_DEBUG_INFO(priv, "invalid reset request.\n");
		return -EINVAL;
	}
	force_reset = &priv->force_reset[mode];
	force_reset->reset_request_count++;
	if (!external) {
		/* rate-limit internal requests: reject when the last reset
		 * was less than reset_duration jiffies ago */
		if (force_reset->last_force_reset_jiffies &&
		    time_after(force_reset->last_force_reset_jiffies +
		    force_reset->reset_duration, jiffies)) {
			IWL_DEBUG_INFO(priv, "force reset rejected\n");
			force_reset->reset_reject_count++;
			return -EAGAIN;
		}
	}
	force_reset->reset_success_count++;
	force_reset->last_force_reset_jiffies = jiffies;
	IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
	switch (mode) {
	case IWL_RF_RESET:
		_iwl_legacy_force_rf_reset(priv);
		break;
	case IWL_FW_RESET:
		/*
		 * If the request is external (e.g. debugfs), always perform
		 * it regardless of the module parameter setting.
		 * If the request is internal (uCode error or driver-detected
		 * failure), the fw_restart module parameter must be checked
		 * before performing the firmware reload.
		 */
		if (!external && !priv->cfg->mod_params->restart_fw) {
			IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
				       "module parameter setting\n");
			break;
		}
		IWL_ERR(priv, "On demand firmware reload\n");
		/* Set the FW error flag -- cleared on iwl_down */
		set_bit(STATUS_FW_ERROR, &priv->status);
		wake_up_interruptible(&priv->wait_command_queue);
		/*
		 * Keep the restart process from trying to send host
		 * commands by clearing the INIT status bit
		 */
		clear_bit(STATUS_READY, &priv->status);
		queue_work(priv->workqueue, &priv->restart);
		break;
	}
	return 0;
}
1786
1787 int
1788 iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
1789 struct ieee80211_vif *vif,
1790 enum nl80211_iftype newtype, bool newp2p)
1791 {
1792 struct iwl_priv *priv = hw->priv;
1793 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
1794 struct iwl_rxon_context *tmp;
1795 u32 interface_modes;
1796 int err;
1797
1798 newtype = ieee80211_iftype_p2p(newtype, newp2p);
1799
1800 mutex_lock(&priv->mutex);
1801
1802 interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
1803
1804 if (!(interface_modes & BIT(newtype))) {
1805 err = -EBUSY;
1806 goto out;
1807 }
1808
1809 if (ctx->exclusive_interface_modes & BIT(newtype)) {
1810 for_each_context(priv, tmp) {
1811 if (ctx == tmp)
1812 continue;
1813
1814 if (!tmp->vif)
1815 continue;
1816
1817 /*
1818 * The current mode switch would be exclusive, but
1819 * another context is active ... refuse the switch.
1820 */
1821 err = -EBUSY;
1822 goto out;
1823 }
1824 }
1825
1826 /* success */
1827 iwl_legacy_teardown_interface(priv, vif, true);
1828 vif->type = newtype;
1829 err = iwl_legacy_setup_interface(priv, ctx);
1830 WARN_ON(err);
1831 /*
1832 * We've switched internally, but submitting to the
1833 * device may have failed for some reason. Mask this
1834 * error, because otherwise mac80211 will not switch
1835 * (and set the interface type back) and we'll be
1836 * out of sync with it.
1837 */
1838 err = 0;
1839
1840 out:
1841 mutex_unlock(&priv->mutex);
1842 return err;
1843 }
1844 EXPORT_SYMBOL(iwl_legacy_mac_change_interface);
1845
1846 /*
1847 * On every watchdog tick we check (latest) time stamp. If it does not
1848 * change during timeout period and queue is not empty we reset firmware.
1849 */
1850 static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt)
1851 {
1852 struct iwl_tx_queue *txq = &priv->txq[cnt];
1853 struct iwl_queue *q = &txq->q;
1854 unsigned long timeout;
1855 int ret;
1856
1857 if (q->read_ptr == q->write_ptr) {
1858 txq->time_stamp = jiffies;
1859 return 0;
1860 }
1861
1862 timeout = txq->time_stamp +
1863 msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
1864
1865 if (time_after(jiffies, timeout)) {
1866 IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
1867 q->id, priv->cfg->base_params->wd_timeout);
1868 ret = iwl_legacy_force_reset(priv, IWL_FW_RESET, false);
1869 return (ret == -EAGAIN) ? 0 : 1;
1870 }
1871
1872 return 0;
1873 }
1874
/*
 * Making the watchdog tick a quarter of the timeout ensures we will
 * discover a hung queue between 1.0x and 1.25x the timeout period.
 */
#define IWL_WD_TICK(timeout) ((timeout) / 4)
1880
1881 /*
1882 * Watchdog timer callback, we check each tx queue for stuck, if if hung
1883 * we reset the firmware. If everything is fine just rearm the timer.
1884 */
1885 void iwl_legacy_bg_watchdog(unsigned long data)
1886 {
1887 struct iwl_priv *priv = (struct iwl_priv *)data;
1888 int cnt;
1889 unsigned long timeout;
1890
1891 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1892 return;
1893
1894 timeout = priv->cfg->base_params->wd_timeout;
1895 if (timeout == 0)
1896 return;
1897
1898 /* monitor and check for stuck cmd queue */
1899 if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue))
1900 return;
1901
1902 /* monitor and check for other stuck queues */
1903 if (iwl_legacy_is_any_associated(priv)) {
1904 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
1905 /* skip as we already checked the command queue */
1906 if (cnt == priv->cmd_queue)
1907 continue;
1908 if (iwl_legacy_check_stuck_queue(priv, cnt))
1909 return;
1910 }
1911 }
1912
1913 mod_timer(&priv->watchdog, jiffies +
1914 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1915 }
1916 EXPORT_SYMBOL(iwl_legacy_bg_watchdog);
1917
1918 void iwl_legacy_setup_watchdog(struct iwl_priv *priv)
1919 {
1920 unsigned int timeout = priv->cfg->base_params->wd_timeout;
1921
1922 if (timeout)
1923 mod_timer(&priv->watchdog,
1924 jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
1925 else
1926 del_timer(&priv->watchdog);
1927 }
1928 EXPORT_SYMBOL(iwl_legacy_setup_watchdog);
1929
/*
 * Extended beacon time format:
 * a time in usec is converted into a 32-bit value in extended:internal
 * format — the extended (high) part is the beacon count, the internal
 * (low) part is the time in usec within one beacon interval.
 */
u32
iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
					u32 usec, u32 beacon_interval)
{
	u32 quot;
	u32 rem;
	u32 interval = beacon_interval * TIME_UNIT;

	if (!interval || !usec)
		return 0;

	/* beacon count, masked to the bits available above tsf_bits */
	quot = (usec / interval) &
		(iwl_legacy_beacon_time_mask_high(priv,
		priv->hw_params.beacon_time_tsf_bits) >>
		priv->hw_params.beacon_time_tsf_bits);
	/* usec remainder within one interval, masked to the low tsf_bits */
	rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv,
				priv->hw_params.beacon_time_tsf_bits);

	return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
}
EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons);
1957
/*
 * Add two values in the extended beacon time format (see
 * iwl_legacy_usecs_to_beacons()): high bits hold the beacon count, low
 * bits the usec offset within one beacon interval.
 *
 * base is usually what we get from ucode with each received frame,
 * the same as HW timer counter counting down
 */
__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
			   u32 addon, u32 beacon_interval)
{
	u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv,
					priv->hw_params.beacon_time_tsf_bits);
	u32 addon_low = addon & iwl_legacy_beacon_time_mask_low(priv,
					priv->hw_params.beacon_time_tsf_bits);
	u32 interval = beacon_interval * TIME_UNIT;
	u32 res = (base & iwl_legacy_beacon_time_mask_high(priv,
				priv->hw_params.beacon_time_tsf_bits)) +
				(addon & iwl_legacy_beacon_time_mask_high(priv,
				priv->hw_params.beacon_time_tsf_bits));

	/* propagate a carry into the beacon count when the low parts wrap */
	if (base_low > addon_low)
		res += base_low - addon_low;
	else if (base_low < addon_low) {
		res += interval + base_low - addon_low;
		res += (1 << priv->hw_params.beacon_time_tsf_bits);
	} else
		res += (1 << priv->hw_params.beacon_time_tsf_bits);

	return cpu_to_le32(res);
}
EXPORT_SYMBOL(iwl_legacy_add_beacon_time);
1985
1986 #ifdef CONFIG_PM
1987
/* PM callback: system suspend — make sure the device's DMA is halted. */
int iwl_legacy_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct iwl_priv *priv = pci_get_drvdata(pdev);

	/*
	 * On suspend mac80211 calls iwl_mac_stop() first, but that path
	 * cannot know who the caller is and therefore never invokes
	 * apm_ops.stop() to stop DMA.  Call apm stop here explicitly to
	 * make sure DMA really is halted.
	 */
	iwl_legacy_apm_stop(priv);

	return 0;
}
EXPORT_SYMBOL(iwl_legacy_pci_suspend);
2005
2006 int iwl_legacy_pci_resume(struct device *device)
2007 {
2008 struct pci_dev *pdev = to_pci_dev(device);
2009 struct iwl_priv *priv = pci_get_drvdata(pdev);
2010 bool hw_rfkill = false;
2011
2012 /*
2013 * We disable the RETRY_TIMEOUT register (0x41) to keep
2014 * PCI Tx retries from interfering with C3 CPU state.
2015 */
2016 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
2017
2018 iwl_legacy_enable_interrupts(priv);
2019
2020 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
2021 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
2022 hw_rfkill = true;
2023
2024 if (hw_rfkill)
2025 set_bit(STATUS_RF_KILL_HW, &priv->status);
2026 else
2027 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2028
2029 wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
2030
2031 return 0;
2032 }
2033 EXPORT_SYMBOL(iwl_legacy_pci_resume);
2034
/*
 * PM ops table: legacy devices treat freeze/poweroff exactly like
 * suspend, and thaw/restore exactly like resume.
 */
const struct dev_pm_ops iwl_legacy_pm_ops = {
	.suspend = iwl_legacy_pci_suspend,
	.resume = iwl_legacy_pci_resume,
	.freeze = iwl_legacy_pci_suspend,
	.thaw = iwl_legacy_pci_resume,
	.poweroff = iwl_legacy_pci_suspend,
	.restore = iwl_legacy_pci_resume,
};
EXPORT_SYMBOL(iwl_legacy_pm_ops);
2044
2045 #endif /* CONFIG_PM */
2046
/*
 * Rebuild and asynchronously send a context's default QoS (EDCA)
 * parameters to the uCode.  A no-op while shutting down or when the
 * context is not active.
 */
static void
iwl_legacy_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (!ctx->is_active)
		return;

	/* rebuild the flags from the context's current state */
	ctx->qos_data.def_qos_parm.qos_flags = 0;

	if (ctx->qos_data.qos_active)
		ctx->qos_data.def_qos_parm.qos_flags |=
			QOS_PARAM_FLG_UPDATE_EDCA_MSK;

	if (ctx->ht.enabled)
		ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;

	IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
		      ctx->qos_data.qos_active,
		      ctx->qos_data.def_qos_parm.qos_flags);

	iwl_legacy_send_cmd_pdu_async(priv, ctx->qos_cmd,
				sizeof(struct iwl_qosparam_cmd),
				&ctx->qos_data.def_qos_parm, NULL);
}
2073
/**
 * iwl_legacy_mac_config - mac80211 config callback
 *
 * Handles IEEE80211_CONF_CHANGE_* notifications: channel and HT40 setup
 * for every RXON context, SM PS / RX-chain recalculation, power-save,
 * and TX-power updates.  Staged RXON changes are committed at the end
 * unless a scan is active (mac80211 calls back with changed == 0 once
 * the scan finishes, at which point the deferred channel change lands).
 */
int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	struct iwl_priv *priv = hw->priv;
	const struct iwl_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = conf->channel;
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
	struct iwl_rxon_context *ctx;
	unsigned long flags = 0;
	int ret = 0;
	u16 ch;
	int scan_active = 0;
	/* per-context: HT enable state flipped, so QoS cmd must be resent */
	bool ht_changed[NUM_IWL_RXON_CTX] = {};

	if (WARN_ON(!priv->cfg->ops->legacy))
		return -EOPNOTSUPP;

	mutex_lock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
					channel->hw_value, changed);

	if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
			test_bit(STATUS_SCANNING, &priv->status))) {
		scan_active = 1;
		IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
	}

	if (changed & (IEEE80211_CONF_CHANGE_SMPS |
		       IEEE80211_CONF_CHANGE_CHANNEL)) {
		/* mac80211 uses static for non-HT which is what we want */
		priv->current_ht_config.smps = conf->smps_mode;

		/*
		 * Recalculate chain counts.
		 *
		 * If monitor mode is enabled then mac80211 will
		 * set up the SM PS mode to OFF if an HT channel is
		 * configured.
		 */
		if (priv->cfg->ops->hcmd->set_rxon_chain)
			for_each_context(priv, ctx)
				priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
	}

	/* during scanning mac80211 will delay channel setting until
	 * scan finish with changed = 0
	 */
	if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
		if (scan_active)
			goto set_ch_out;

		ch = channel->hw_value;
		ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
		if (!iwl_legacy_is_channel_valid(ch_info)) {
			IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		/* priv->lock protects the staging RXON and HT state below */
		spin_lock_irqsave(&priv->lock, flags);

		for_each_context(priv, ctx) {
			/* Configure HT40 channels */
			if (ctx->ht.enabled != conf_is_ht(conf)) {
				ctx->ht.enabled = conf_is_ht(conf);
				ht_changed[ctx->ctxid] = true;
			}
			if (ctx->ht.enabled) {
				if (conf_is_ht40_minus(conf)) {
					ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_BELOW;
					ctx->ht.is_40mhz = true;
				} else if (conf_is_ht40_plus(conf)) {
					ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
					ctx->ht.is_40mhz = true;
				} else {
					ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_NONE;
					ctx->ht.is_40mhz = false;
				}
			} else
				ctx->ht.is_40mhz = false;

			/*
			 * Default to no protection. Protection mode will
			 * later be set from BSS config in iwl_ht_conf
			 */
			ctx->ht.protection =
				IEEE80211_HT_OP_MODE_PROTECTION_NONE;

			/* if we are switching from ht to 2.4 clear flags
			 * from any ht related info since 2.4 does not
			 * support ht */
			if ((le16_to_cpu(ctx->staging.channel) != ch))
				ctx->staging.flags = 0;

			iwl_legacy_set_rxon_channel(priv, channel, ctx);
			iwl_legacy_set_rxon_ht(priv, ht_conf);

			iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
						      ctx->vif);
		}

		spin_unlock_irqrestore(&priv->lock, flags);

		if (priv->cfg->ops->legacy->update_bcast_stations)
			ret =
			priv->cfg->ops->legacy->update_bcast_stations(priv);

 set_ch_out:
		/* The list of supported rates and rate mask can be different
		 * for each band; since the band may have changed, reset
		 * the rate mask to what mac80211 lists */
		iwl_legacy_set_rate(priv);
	}

	if (changed & (IEEE80211_CONF_CHANGE_PS |
			IEEE80211_CONF_CHANGE_IDLE)) {
		ret = iwl_legacy_power_update_mode(priv, false);
		if (ret)
			IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
			priv->tx_power_user_lmt, conf->power_level);

		iwl_legacy_set_tx_power(priv, conf->power_level, false);
	}

	if (!iwl_legacy_is_ready(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		goto out;
	}

	if (scan_active)
		goto out;

	/* commit any staged RXON change; resend QoS where HT flipped */
	for_each_context(priv, ctx) {
		if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
			iwl_legacy_commit_rxon(priv, ctx);
		else
			IWL_DEBUG_INFO(priv,
				"Not re-sending same RXON configuration.\n");
		if (ht_changed[ctx->ctxid])
			iwl_legacy_update_qos(priv, ctx);
	}

out:
	IWL_DEBUG_MAC80211(priv, "leave\n");
	mutex_unlock(&priv->mutex);
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_mac_config);
2233
2234 void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw)
2235 {
2236 struct iwl_priv *priv = hw->priv;
2237 unsigned long flags;
2238 /* IBSS can only be the IWL_RXON_CTX_BSS context */
2239 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2240
2241 if (WARN_ON(!priv->cfg->ops->legacy))
2242 return;
2243
2244 mutex_lock(&priv->mutex);
2245 IWL_DEBUG_MAC80211(priv, "enter\n");
2246
2247 spin_lock_irqsave(&priv->lock, flags);
2248 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
2249 spin_unlock_irqrestore(&priv->lock, flags);
2250
2251 spin_lock_irqsave(&priv->lock, flags);
2252
2253 /* new association get rid of ibss beacon skb */
2254 if (priv->beacon_skb)
2255 dev_kfree_skb(priv->beacon_skb);
2256
2257 priv->beacon_skb = NULL;
2258
2259 priv->timestamp = 0;
2260
2261 spin_unlock_irqrestore(&priv->lock, flags);
2262
2263 iwl_legacy_scan_cancel_timeout(priv, 100);
2264 if (!iwl_legacy_is_ready_rf(priv)) {
2265 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2266 mutex_unlock(&priv->mutex);
2267 return;
2268 }
2269
2270 /* we are restarting association process
2271 * clear RXON_FILTER_ASSOC_MSK bit
2272 */
2273 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2274 iwl_legacy_commit_rxon(priv, ctx);
2275
2276 iwl_legacy_set_rate(priv);
2277
2278 mutex_unlock(&priv->mutex);
2279
2280 IWL_DEBUG_MAC80211(priv, "leave\n");
2281 }
2282 EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
2283
2284 static void iwl_legacy_ht_conf(struct iwl_priv *priv,
2285 struct ieee80211_vif *vif)
2286 {
2287 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2288 struct ieee80211_sta *sta;
2289 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
2290 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
2291
2292 IWL_DEBUG_ASSOC(priv, "enter:\n");
2293
2294 if (!ctx->ht.enabled)
2295 return;
2296
2297 ctx->ht.protection =
2298 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
2299 ctx->ht.non_gf_sta_present =
2300 !!(bss_conf->ht_operation_mode &
2301 IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
2302
2303 ht_conf->single_chain_sufficient = false;
2304
2305 switch (vif->type) {
2306 case NL80211_IFTYPE_STATION:
2307 rcu_read_lock();
2308 sta = ieee80211_find_sta(vif, bss_conf->bssid);
2309 if (sta) {
2310 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2311 int maxstreams;
2312
2313 maxstreams = (ht_cap->mcs.tx_params &
2314 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
2315 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
2316 maxstreams += 1;
2317
2318 if ((ht_cap->mcs.rx_mask[1] == 0) &&
2319 (ht_cap->mcs.rx_mask[2] == 0))
2320 ht_conf->single_chain_sufficient = true;
2321 if (maxstreams <= 1)
2322 ht_conf->single_chain_sufficient = true;
2323 } else {
2324 /*
2325 * If at all, this can only happen through a race
2326 * when the AP disconnects us while we're still
2327 * setting up the connection, in that case mac80211
2328 * will soon tell us about that.
2329 */
2330 ht_conf->single_chain_sufficient = true;
2331 }
2332 rcu_read_unlock();
2333 break;
2334 case NL80211_IFTYPE_ADHOC:
2335 ht_conf->single_chain_sufficient = true;
2336 break;
2337 default:
2338 break;
2339 }
2340
2341 IWL_DEBUG_ASSOC(priv, "leave\n");
2342 }
2343
2344 static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv,
2345 struct ieee80211_vif *vif)
2346 {
2347 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
2348
2349 /*
2350 * inform the ucode that there is no longer an
2351 * association and that no more packets should be
2352 * sent
2353 */
2354 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2355 ctx->staging.assoc_id = 0;
2356 iwl_legacy_commit_rxon(priv, ctx);
2357 }
2358
2359 static void iwl_legacy_beacon_update(struct ieee80211_hw *hw,
2360 struct ieee80211_vif *vif)
2361 {
2362 struct iwl_priv *priv = hw->priv;
2363 unsigned long flags;
2364 __le64 timestamp;
2365 struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
2366
2367 if (!skb)
2368 return;
2369
2370 IWL_DEBUG_MAC80211(priv, "enter\n");
2371
2372 lockdep_assert_held(&priv->mutex);
2373
2374 if (!priv->beacon_ctx) {
2375 IWL_ERR(priv, "update beacon but no beacon context!\n");
2376 dev_kfree_skb(skb);
2377 return;
2378 }
2379
2380 spin_lock_irqsave(&priv->lock, flags);
2381
2382 if (priv->beacon_skb)
2383 dev_kfree_skb(priv->beacon_skb);
2384
2385 priv->beacon_skb = skb;
2386
2387 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
2388 priv->timestamp = le64_to_cpu(timestamp);
2389
2390 IWL_DEBUG_MAC80211(priv, "leave\n");
2391 spin_unlock_irqrestore(&priv->lock, flags);
2392
2393 if (!iwl_legacy_is_ready_rf(priv)) {
2394 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
2395 return;
2396 }
2397
2398 priv->cfg->ops->legacy->post_associate(priv);
2399 }
2400
/**
 * iwl_legacy_mac_bss_info_changed - mac80211 bss_info_changed callback
 *
 * Applies BSS_CHANGED_* notifications to the context bound to @vif:
 * QoS activation, beacon-context tracking, BSSID, ERP preamble/CTS
 * protection flags, HT configuration and (de)association, committing
 * RXON changes to the uCode as needed.
 */
void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct ieee80211_bss_conf *bss_conf,
				     u32 changes)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
	int ret;

	if (WARN_ON(!priv->cfg->ops->legacy))
		return;

	IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);

	if (!iwl_legacy_is_alive(priv))
		return;

	mutex_lock(&priv->mutex);

	if (changes & BSS_CHANGED_QOS) {
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);
		ctx->qos_data.qos_active = bss_conf->qos;
		iwl_legacy_update_qos(priv, ctx);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		/*
		 * the add_interface code must make sure we only ever
		 * have a single interface that could be beaconing at
		 * any time.
		 */
		if (vif->bss_conf.enable_beacon)
			priv->beacon_ctx = ctx;
		else
			priv->beacon_ctx = NULL;
	}

	if (changes & BSS_CHANGED_BSSID) {
		IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);

		/*
		 * If there is currently a HW scan going on in the
		 * background then we need to cancel it else the RXON
		 * below/in post_associate will fail.
		 */
		if (iwl_legacy_scan_cancel_timeout(priv, 100)) {
			IWL_WARN(priv,
				"Aborted scan still in progress after 100ms\n");
			IWL_DEBUG_MAC80211(priv,
				"leaving - scan abort failed.\n");
			mutex_unlock(&priv->mutex);
			return;
		}

		/* mac80211 only sets assoc when in STATION mode */
		if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
			memcpy(ctx->staging.bssid_addr,
			       bss_conf->bssid, ETH_ALEN);

			/* currently needed in a few places */
			memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
		} else {
			ctx->staging.filter_flags &=
				~RXON_FILTER_ASSOC_MSK;
		}

	}

	/*
	 * This needs to be after setting the BSSID in case
	 * mac80211 decides to do both changes at once because
	 * it will invoke post_associate.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
		iwl_legacy_beacon_update(hw, vif);

	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
		IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
				   bss_conf->use_short_preamble);
		if (bss_conf->use_short_preamble)
			ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	}

	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
		IWL_DEBUG_MAC80211(priv,
			"ERP_CTS %d\n", bss_conf->use_cts_prot);
		/* TGG protection only applies on the 2.4 GHz band */
		if (bss_conf->use_cts_prot &&
			(priv->band != IEEE80211_BAND_5GHZ))
			ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
		if (bss_conf->use_cts_prot)
			ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
		else
			ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
	}

	if (changes & BSS_CHANGED_BASIC_RATES) {
		/* XXX use this information
		 *
		 * To do that, remove code from iwl_legacy_set_rate() and put something
		 * like this here:
		 *
		if (A-band)
			ctx->staging.ofdm_basic_rates =
				bss_conf->basic_rates;
		else
			ctx->staging.ofdm_basic_rates =
				bss_conf->basic_rates >> 4;
			ctx->staging.cck_basic_rates =
				bss_conf->basic_rates & 0xF;
		 */
	}

	if (changes & BSS_CHANGED_HT) {
		iwl_legacy_ht_conf(priv, vif);

		if (priv->cfg->ops->hcmd->set_rxon_chain)
			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
	}

	if (changes & BSS_CHANGED_ASSOC) {
		IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
		if (bss_conf->assoc) {
			priv->timestamp = bss_conf->timestamp;

			if (!iwl_legacy_is_rfkill(priv))
				priv->cfg->ops->legacy->post_associate(priv);
		} else
			iwl_legacy_set_no_assoc(priv, vif);
	}

	/*
	 * Any other change while associated with a valid AID: push the
	 * staging RXON via RXON_ASSOC and mirror it into ctx->active.
	 */
	if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) {
		IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
				   changes);
		ret = iwl_legacy_send_rxon_assoc(priv, ctx);
		if (!ret) {
			/* Sync active_rxon with latest change. */
			memcpy((void *)&ctx->active,
				&ctx->staging,
				sizeof(struct iwl_legacy_rxon_cmd));
		}
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		if (vif->bss_conf.enable_beacon) {
			memcpy(ctx->staging.bssid_addr,
			       bss_conf->bssid, ETH_ALEN);
			memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
			priv->cfg->ops->legacy->config_ap(priv);
		} else
			iwl_legacy_set_no_assoc(priv, vif);
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
							bss_conf->ibss_joined);
		if (ret)
			IWL_ERR(priv, "failed to %s IBSS station %pM\n",
				bss_conf->ibss_joined ? "add" : "remove",
				bss_conf->bssid);
	}

	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
2574
/*
 * iwl_legacy_isr - hardware interrupt handler shared by 3945 and 4965
 *
 * Masks (without acking) the NIC's interrupt sources, reads the pending
 * CSR_INT / CSR_FH_INT_STATUS causes, and defers real servicing to the
 * device's irq_tasklet, which re-enables interrupts when done.  Returns
 * IRQ_NONE for spurious or shared-line interrupts with nothing pending.
 */
irqreturn_t iwl_legacy_isr(int irq, void *data)
{
	struct iwl_priv *priv = data;
	u32 inta, inta_mask;
	u32 inta_fh;
	unsigned long flags;
	if (!priv)
		return IRQ_NONE;

	spin_lock_irqsave(&priv->lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
	iwl_write32(priv, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(priv, CSR_INT);
	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta && !inta_fh) {
		IWL_DEBUG_ISR(priv,
			"Ignore interrupt, inta == 0, inta_fh == 0\n");
		goto none;
	}

	/* all-ones / 0xa5a5a5a* reads indicate the device dropped off PCI */
	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

	IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
		      inta, inta_mask, inta_fh);

	/* mask out the scheduler (SCD) bit before deciding to schedule */
	inta &= ~CSR_INT_BIT_SCD;

	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta || inta_fh))
		tasklet_schedule(&priv->irq_tasklet);

unplugged:
	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &priv->status))
		iwl_legacy_enable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_NONE;
}
EXPORT_SYMBOL(iwl_legacy_isr);
2635
2636 /*
2637 * iwl_legacy_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
2638 * function.
2639 */
2640 void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
2641 struct ieee80211_tx_info *info,
2642 __le16 fc, __le32 *tx_flags)
2643 {
2644 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
2645 *tx_flags |= TX_CMD_FLG_RTS_MSK;
2646 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2647 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2648
2649 if (!ieee80211_is_mgmt(fc))
2650 return;
2651
2652 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
2653 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2654 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2655 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
2656 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
2657 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2658 *tx_flags |= TX_CMD_FLG_CTS_MSK;
2659 break;
2660 }
2661 } else if (info->control.rates[0].flags &
2662 IEEE80211_TX_RC_USE_CTS_PROTECT) {
2663 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2664 *tx_flags |= TX_CMD_FLG_CTS_MSK;
2665 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2666 }
2667 }
2668 EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
This page took 0.146142 seconds and 5 git commands to generate.