ath9k: Lock config_interface() callback with a mutex
[deliverable/linux.git] / drivers / net / wireless / ath9k / main.c
1 /*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include <linux/nl80211.h>
18 #include "ath9k.h"
19
20 #define ATH_PCI_VERSION "0.1"
21
22 static char *dev_info = "ath9k";
23
24 MODULE_AUTHOR("Atheros Communications");
25 MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
26 MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
27 MODULE_LICENSE("Dual BSD/GPL");
28
29 static int modparam_nohwcrypt;
30 module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
31 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
32
/* We use the hw_value as an index into our private channel structure */

/*
 * Channel initializer macros.  Fix: CHAN2G now sets .band explicitly,
 * matching CHAN5G, so both tables are fully initialized the same way
 * and do not rely on someone else filling in the band later.
 */
#define CHAN2G(_freq, _idx)  { \
	.band = IEEE80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}

#define CHAN5G(_freq, _idx) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}
47
/* Some 2 GHz radios are actually tunable on 2312-2732
 * on 5 MHz steps, we support the channels which we know
 * we have calibration data for all cards though to make
 * this static.  hw_value is the index into the driver's
 * private channel structure (see CHAN2G above). */
static struct ieee80211_channel ath9k_2ghz_chantable[] = {
	CHAN2G(2412, 0), /* Channel 1 */
	CHAN2G(2417, 1), /* Channel 2 */
	CHAN2G(2422, 2), /* Channel 3 */
	CHAN2G(2427, 3), /* Channel 4 */
	CHAN2G(2432, 4), /* Channel 5 */
	CHAN2G(2437, 5), /* Channel 6 */
	CHAN2G(2442, 6), /* Channel 7 */
	CHAN2G(2447, 7), /* Channel 8 */
	CHAN2G(2452, 8), /* Channel 9 */
	CHAN2G(2457, 9), /* Channel 10 */
	CHAN2G(2462, 10), /* Channel 11 */
	CHAN2G(2467, 11), /* Channel 12 */
	CHAN2G(2472, 12), /* Channel 13 */
	CHAN2G(2484, 13), /* Channel 14 */
};
68
/* Some 5 GHz radios are tunable over a wider range than listed here
 * (exact limits are hardware dependent); we support only the channels
 * for which we know all cards have calibration data, which keeps this
 * table static. */
static struct ieee80211_channel ath9k_5ghz_chantable[] = {
	/* _We_ call this UNII 1 */
	CHAN5G(5180, 14), /* Channel 36 */
	CHAN5G(5200, 15), /* Channel 40 */
	CHAN5G(5220, 16), /* Channel 44 */
	CHAN5G(5240, 17), /* Channel 48 */
	/* _We_ call this UNII 2 */
	CHAN5G(5260, 18), /* Channel 52 */
	CHAN5G(5280, 19), /* Channel 56 */
	CHAN5G(5300, 20), /* Channel 60 */
	CHAN5G(5320, 21), /* Channel 64 */
	/* _We_ call this "Middle band" */
	CHAN5G(5500, 22), /* Channel 100 */
	CHAN5G(5520, 23), /* Channel 104 */
	CHAN5G(5540, 24), /* Channel 108 */
	CHAN5G(5560, 25), /* Channel 112 */
	CHAN5G(5580, 26), /* Channel 116 */
	CHAN5G(5600, 27), /* Channel 120 */
	CHAN5G(5620, 28), /* Channel 124 */
	CHAN5G(5640, 29), /* Channel 128 */
	CHAN5G(5660, 30), /* Channel 132 */
	CHAN5G(5680, 31), /* Channel 136 */
	CHAN5G(5700, 32), /* Channel 140 */
	/* _We_ call this UNII 3 */
	CHAN5G(5745, 33), /* Channel 149 */
	CHAN5G(5765, 34), /* Channel 153 */
	CHAN5G(5785, 35), /* Channel 157 */
	CHAN5G(5805, 36), /* Channel 161 */
	CHAN5G(5825, 37), /* Channel 165 */
};
103
104 static void ath_cache_conf_rate(struct ath_softc *sc,
105 struct ieee80211_conf *conf)
106 {
107 switch (conf->channel->band) {
108 case IEEE80211_BAND_2GHZ:
109 if (conf_is_ht20(conf))
110 sc->cur_rate_table =
111 sc->hw_rate_table[ATH9K_MODE_11NG_HT20];
112 else if (conf_is_ht40_minus(conf))
113 sc->cur_rate_table =
114 sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS];
115 else if (conf_is_ht40_plus(conf))
116 sc->cur_rate_table =
117 sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS];
118 else
119 sc->cur_rate_table =
120 sc->hw_rate_table[ATH9K_MODE_11G];
121 break;
122 case IEEE80211_BAND_5GHZ:
123 if (conf_is_ht20(conf))
124 sc->cur_rate_table =
125 sc->hw_rate_table[ATH9K_MODE_11NA_HT20];
126 else if (conf_is_ht40_minus(conf))
127 sc->cur_rate_table =
128 sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS];
129 else if (conf_is_ht40_plus(conf))
130 sc->cur_rate_table =
131 sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS];
132 else
133 sc->cur_rate_table =
134 sc->hw_rate_table[ATH9K_MODE_11A];
135 break;
136 default:
137 BUG_ON(1);
138 break;
139 }
140 }
141
142 static void ath_update_txpow(struct ath_softc *sc)
143 {
144 struct ath_hw *ah = sc->sc_ah;
145 u32 txpow;
146
147 if (sc->curtxpow != sc->config.txpowlimit) {
148 ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit);
149 /* read back in case value is clamped */
150 ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
151 sc->curtxpow = txpow;
152 }
153 }
154
/*
 * Translate the 802.11n D2.0 "Minimum MPDU Start Spacing" field into
 * microseconds:
 *   0: no restriction, 1: 1/4 us, 2: 1/2 us, 3: 1 us,
 *   4: 2 us, 5: 4 us, 6: 8 us, 7: 16 us.
 * Our lower layer calculations limit precision to 1 microsecond, so
 * the sub-microsecond encodings (1-3) all map to 1.  Out-of-range
 * values map to 0 (no restriction).
 */
static u8 parse_mpdudensity(u8 mpdudensity)
{
	static const u8 density_us[] = { 0, 1, 1, 1, 2, 4, 8, 16 };

	if (mpdudensity >= sizeof(density_us) / sizeof(density_us[0]))
		return 0;

	return density_us[mpdudensity];
}
189
190 static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
191 {
192 struct ath_rate_table *rate_table = NULL;
193 struct ieee80211_supported_band *sband;
194 struct ieee80211_rate *rate;
195 int i, maxrates;
196
197 switch (band) {
198 case IEEE80211_BAND_2GHZ:
199 rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
200 break;
201 case IEEE80211_BAND_5GHZ:
202 rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
203 break;
204 default:
205 break;
206 }
207
208 if (rate_table == NULL)
209 return;
210
211 sband = &sc->sbands[band];
212 rate = sc->rates[band];
213
214 if (rate_table->rate_cnt > ATH_RATE_MAX)
215 maxrates = ATH_RATE_MAX;
216 else
217 maxrates = rate_table->rate_cnt;
218
219 for (i = 0; i < maxrates; i++) {
220 rate[i].bitrate = rate_table->info[i].ratekbps / 100;
221 rate[i].hw_value = rate_table->info[i].ratecode;
222 if (rate_table->info[i].short_preamble) {
223 rate[i].hw_value_short = rate_table->info[i].ratecode |
224 rate_table->info[i].short_preamble;
225 rate[i].flags = IEEE80211_RATE_SHORT_PREAMBLE;
226 }
227 sband->n_bitrates++;
228
229 DPRINTF(sc, ATH_DBG_CONFIG, "Rate: %2dMbps, ratecode: %2d\n",
230 rate[i].bitrate / 10, rate[i].hw_value);
231 }
232 }
233
234 /*
235 * Set/change channels. If the channel is really being changed, it's done
236 * by reseting the chip. To accomplish this we must first cleanup any pending
237 * DMA, then restart stuff.
238 */
239 static int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
240 {
241 struct ath_hw *ah = sc->sc_ah;
242 bool fastcc = true, stopped;
243 struct ieee80211_hw *hw = sc->hw;
244 struct ieee80211_channel *channel = hw->conf.channel;
245 int r;
246
247 if (sc->sc_flags & SC_OP_INVALID)
248 return -EIO;
249
250 ath9k_ps_wakeup(sc);
251
252 /*
253 * This is only performed if the channel settings have
254 * actually changed.
255 *
256 * To switch channels clear any pending DMA operations;
257 * wait long enough for the RX fifo to drain, reset the
258 * hardware at the new frequency, and then re-enable
259 * the relevant bits of the h/w.
260 */
261 ath9k_hw_set_interrupts(ah, 0);
262 ath_drain_all_txq(sc, false);
263 stopped = ath_stoprecv(sc);
264
265 /* XXX: do not flush receive queue here. We don't want
266 * to flush data frames already in queue because of
267 * changing channel. */
268
269 if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
270 fastcc = false;
271
272 DPRINTF(sc, ATH_DBG_CONFIG,
273 "(%u MHz) -> (%u MHz), chanwidth: %d\n",
274 sc->sc_ah->curchan->channel,
275 channel->center_freq, sc->tx_chan_width);
276
277 spin_lock_bh(&sc->sc_resetlock);
278
279 r = ath9k_hw_reset(ah, hchan, fastcc);
280 if (r) {
281 DPRINTF(sc, ATH_DBG_FATAL,
282 "Unable to reset channel (%u Mhz) "
283 "reset status %u\n",
284 channel->center_freq, r);
285 spin_unlock_bh(&sc->sc_resetlock);
286 return r;
287 }
288 spin_unlock_bh(&sc->sc_resetlock);
289
290 sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
291 sc->sc_flags &= ~SC_OP_FULL_RESET;
292
293 if (ath_startrecv(sc) != 0) {
294 DPRINTF(sc, ATH_DBG_FATAL,
295 "Unable to restart recv logic\n");
296 return -EIO;
297 }
298
299 ath_cache_conf_rate(sc, &hw->conf);
300 ath_update_txpow(sc);
301 ath9k_hw_set_interrupts(ah, sc->imask);
302 ath9k_ps_restore(sc);
303 return 0;
304 }
305
/*
 * This routine performs the periodic noise floor calibration function
 * that is used to adjust and optimize the chip performance. This
 * takes environmental changes (location, temperature) into account.
 * When the task is complete, it reschedules itself depending on the
 * appropriate interval that was calculated.
 *
 * Runs as a timer callback; @data is the struct ath_softc pointer.
 * All timestamps below are in milliseconds (jiffies_to_msecs).
 */
static void ath_ani_calibrate(unsigned long data)
{
	struct ath_softc *sc = (struct ath_softc *)data;
	struct ath_hw *ah = sc->sc_ah;
	bool longcal = false;
	bool shortcal = false;
	bool aniflag = false;
	unsigned int timestamp = jiffies_to_msecs(jiffies);
	u32 cal_interval, short_cal_interval;

	/* APs calibrate on a shorter cadence than stations */
	short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
		ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;

	/*
	 * don't calibrate when we're scanning.
	 * we are most likely not on our home channel.
	 */
	if (sc->sc_flags & SC_OP_SCANNING)
		goto set_timer;

	/* Long calibration runs independently of short calibration. */
	if ((timestamp - sc->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
		longcal = true;
		DPRINTF(sc, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
		sc->ani.longcal_timer = timestamp;
	}

	/* Short calibration applies only while caldone is false */
	if (!sc->ani.caldone) {
		if ((timestamp - sc->ani.shortcal_timer) >= short_cal_interval) {
			shortcal = true;
			DPRINTF(sc, ATH_DBG_ANI, "shortcal @%lu\n", jiffies);
			sc->ani.shortcal_timer = timestamp;
			sc->ani.resetcal_timer = timestamp;
		}
	} else {
		/* Calibration finished: periodically re-check whether the
		 * cached calibration is still valid and restart if not */
		if ((timestamp - sc->ani.resetcal_timer) >=
		    ATH_RESTART_CALINTERVAL) {
			sc->ani.caldone = ath9k_hw_reset_calvalid(ah);
			if (sc->ani.caldone)
				sc->ani.resetcal_timer = timestamp;
		}
	}

	/* Verify whether we must check ANI */
	if ((timestamp - sc->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
		aniflag = true;
		sc->ani.checkani_timer = timestamp;
	}

	/* Skip all processing if there's nothing to do. */
	if (longcal || shortcal || aniflag) {
		/* Call ANI routine if necessary */
		if (aniflag)
			ath9k_hw_ani_monitor(ah, &sc->nodestats, ah->curchan);

		/* Perform calibration if necessary */
		if (longcal || shortcal) {
			bool iscaldone = false;

			if (ath9k_hw_calibrate(ah, ah->curchan,
					       sc->rx_chainmask, longcal,
					       &iscaldone)) {
				/* only long calibration refreshes the cached
				 * noise floor */
				if (longcal)
					sc->ani.noise_floor =
						ath9k_hw_getchan_noise(ah,
								       ah->curchan);

				DPRINTF(sc, ATH_DBG_ANI,
					"calibrate chan %u/%x nf: %d\n",
					ah->curchan->channel,
					ah->curchan->channelFlags,
					sc->ani.noise_floor);
			} else {
				DPRINTF(sc, ATH_DBG_ANY,
					"calibrate chan %u/%x failed\n",
					ah->curchan->channel,
					ah->curchan->channelFlags);
			}
			sc->ani.caldone = iscaldone;
		}
	}

set_timer:
	/*
	 * Set timer interval based on previous results.
	 * The interval must be the shortest necessary to satisfy ANI,
	 * short calibration and long calibration.
	 */
	cal_interval = ATH_LONG_CALINTERVAL;
	if (sc->sc_ah->config.enable_ani)
		cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
	if (!sc->ani.caldone)
		cal_interval = min(cal_interval, (u32)short_cal_interval);

	mod_timer(&sc->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
}
410
411 /*
412 * Update tx/rx chainmask. For legacy association,
413 * hard code chainmask to 1x1, for 11n association, use
414 * the chainmask configuration, for bt coexistence, use
415 * the chainmask configuration even in legacy mode.
416 */
417 static void ath_update_chainmask(struct ath_softc *sc, int is_ht)
418 {
419 sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
420 if (is_ht ||
421 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BT_COEX)) {
422 sc->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
423 sc->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
424 } else {
425 sc->tx_chainmask = 1;
426 sc->rx_chainmask = 1;
427 }
428
429 DPRINTF(sc, ATH_DBG_CONFIG, "tx chmask: %d, rx chmask: %d\n",
430 sc->tx_chainmask, sc->rx_chainmask);
431 }
432
433 static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
434 {
435 struct ath_node *an;
436
437 an = (struct ath_node *)sta->drv_priv;
438
439 if (sc->sc_flags & SC_OP_TXAGGR)
440 ath_tx_node_init(sc, an);
441
442 an->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
443 sta->ht_cap.ampdu_factor);
444 an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);
445 }
446
447 static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
448 {
449 struct ath_node *an = (struct ath_node *)sta->drv_priv;
450
451 if (sc->sc_flags & SC_OP_TXAGGR)
452 ath_tx_node_cleanup(sc, an);
453 }
454
455 static void ath9k_tasklet(unsigned long data)
456 {
457 struct ath_softc *sc = (struct ath_softc *)data;
458 u32 status = sc->intrstatus;
459
460 if (status & ATH9K_INT_FATAL) {
461 /* need a chip reset */
462 ath_reset(sc, false);
463 return;
464 } else {
465
466 if (status &
467 (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
468 spin_lock_bh(&sc->rx.rxflushlock);
469 ath_rx_tasklet(sc, 0);
470 spin_unlock_bh(&sc->rx.rxflushlock);
471 }
472 /* XXX: optimize this */
473 if (status & ATH9K_INT_TX)
474 ath_tx_tasklet(sc);
475 }
476
477 /* re-enable hardware interrupt */
478 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
479 }
480
/*
 * Hard interrupt handler.  Reads and acks the interrupt status, caches
 * it in sc->intrstatus for ath9k_tasklet(), and schedules the tasklet
 * for everything that needs bottom-half processing.  Returns IRQ_NONE
 * when the interrupt was not ours (shared IRQ line).
 */
irqreturn_t ath_isr(int irq, void *dev)
{
	struct ath_softc *sc = dev;
	struct ath_hw *ah = sc->sc_ah;
	enum ath9k_int status;
	bool sched = false;

	/* single-pass do/while(0): lets early exits use 'return' while
	 * keeping the post-loop debug/schedule code in one place */
	do {
		if (sc->sc_flags & SC_OP_INVALID) {
			/*
			 * The hardware is not ready/present, don't
			 * touch anything. Note this can happen early
			 * on if the IRQ is shared.
			 */
			return IRQ_NONE;
		}
		if (!ath9k_hw_intrpend(ah)) { /* shared irq, not for us */
			return IRQ_NONE;
		}

		/*
		 * Figure out the reason(s) for the interrupt. Note
		 * that the hal returns a pseudo-ISR that may include
		 * bits we haven't explicitly enabled so we mask the
		 * value to insure we only process bits we requested.
		 */
		ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */

		status &= sc->imask; /* discard unasked-for bits */

		/*
		 * If there are no status bits set, then this interrupt was not
		 * for me (should have been caught above).
		 */
		if (!status)
			return IRQ_NONE;

		sc->intrstatus = status;
		ath9k_ps_wakeup(sc);

		if (status & ATH9K_INT_FATAL) {
			/* need a chip reset */
			sched = true;
		} else if (status & ATH9K_INT_RXORN) {
			/* need a chip reset */
			sched = true;
		} else {
			if (status & ATH9K_INT_SWBA) {
				/* schedule a tasklet for beacon handling */
				tasklet_schedule(&sc->bcon_tasklet);
			}
			if (status & ATH9K_INT_RXEOL) {
				/*
				 * NB: the hardware should re-read the link when
				 *     RXE bit is written, but it doesn't work
				 *     at least on older hardware revs.
				 */
				sched = true;
			}

			if (status & ATH9K_INT_TXURN)
				/* bump tx trigger level */
				ath9k_hw_updatetxtriglevel(ah, true);
			/* XXX: optimize this */
			if (status & ATH9K_INT_RX)
				sched = true;
			if (status & ATH9K_INT_TX)
				sched = true;
			if (status & ATH9K_INT_BMISS)
				sched = true;
			/* carrier sense timeout */
			if (status & ATH9K_INT_CST)
				sched = true;
			if (status & ATH9K_INT_MIB) {
				/*
				 * Disable interrupts until we service the MIB
				 * interrupt; otherwise it will continue to
				 * fire.
				 */
				ath9k_hw_set_interrupts(ah, 0);
				/*
				 * Let the hal handle the event. We assume
				 * it will clear whatever condition caused
				 * the interrupt.
				 */
				ath9k_hw_procmibevent(ah, &sc->nodestats);
				ath9k_hw_set_interrupts(ah, sc->imask);
			}
			if (status & ATH9K_INT_TIM_TIMER) {
				if (!(ah->caps.hw_caps &
				      ATH9K_HW_CAP_AUTOSLEEP)) {
					/* Clear RxAbort bit so that we can
					 * receive frames */
					ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
					ath9k_hw_setrxabort(ah, 0);
					sched = true;
					sc->sc_flags |= SC_OP_WAIT_FOR_BEACON;
				}
			}
			if (status & ATH9K_INT_TSFOOR) {
				/* FIXME: Handle this interrupt for power save */
				sched = true;
			}
		}
		ath9k_ps_restore(sc);
	} while (0);

	ath_debug_stat_interrupt(sc, status);

	if (sched) {
		/* turn off every interrupt except SWBA */
		ath9k_hw_set_interrupts(ah, (sc->imask & ATH9K_INT_SWBA));
		tasklet_schedule(&sc->intr_tq);
	}

	return IRQ_HANDLED;
}
598
599 static u32 ath_get_extchanmode(struct ath_softc *sc,
600 struct ieee80211_channel *chan,
601 enum nl80211_channel_type channel_type)
602 {
603 u32 chanmode = 0;
604
605 switch (chan->band) {
606 case IEEE80211_BAND_2GHZ:
607 switch(channel_type) {
608 case NL80211_CHAN_NO_HT:
609 case NL80211_CHAN_HT20:
610 chanmode = CHANNEL_G_HT20;
611 break;
612 case NL80211_CHAN_HT40PLUS:
613 chanmode = CHANNEL_G_HT40PLUS;
614 break;
615 case NL80211_CHAN_HT40MINUS:
616 chanmode = CHANNEL_G_HT40MINUS;
617 break;
618 }
619 break;
620 case IEEE80211_BAND_5GHZ:
621 switch(channel_type) {
622 case NL80211_CHAN_NO_HT:
623 case NL80211_CHAN_HT20:
624 chanmode = CHANNEL_A_HT20;
625 break;
626 case NL80211_CHAN_HT40PLUS:
627 chanmode = CHANNEL_A_HT40PLUS;
628 break;
629 case NL80211_CHAN_HT40MINUS:
630 chanmode = CHANNEL_A_HT40MINUS;
631 break;
632 }
633 break;
634 default:
635 break;
636 }
637
638 return chanmode;
639 }
640
641 static int ath_keyset(struct ath_softc *sc, u16 keyix,
642 struct ath9k_keyval *hk, const u8 mac[ETH_ALEN])
643 {
644 bool status;
645
646 status = ath9k_hw_set_keycache_entry(sc->sc_ah,
647 keyix, hk, mac);
648
649 return status != false;
650 }
651
/*
 * Program a TKIP key (data key plus TX/RX MIC keys) into the key cache.
 *
 * @key points at the mac80211 key material laid out as
 * data-key || TX MIC || RX MIC (see the NL80211_TKIP_DATA_OFFSET_*
 * constants).  @addr == NULL means a group key; @authenticator then
 * selects whether the TX or RX MIC is installed for both directions.
 * Returns non-zero on success, 0 on failure.
 */
static int ath_setkey_tkip(struct ath_softc *sc, u16 keyix, const u8 *key,
			   struct ath9k_keyval *hk, const u8 *addr,
			   bool authenticator)
{
	const u8 *key_rxmic;
	const u8 *key_txmic;

	key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
	key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;

	if (addr == NULL) {
		/* Group key installation */
		if (authenticator) {
			memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
			memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_mic));
		} else {
			memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
			memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic));
		}
		return ath_keyset(sc, keyix, hk, addr);
	}
	if (!sc->splitmic) {
		/*
		 * data key goes at first index,
		 * the hal handles the MIC keys at index+64.
		 */
		memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
		memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
		return ath_keyset(sc, keyix, hk, addr);
	}
	/*
	 * TX key goes at first index, RX key at +32.
	 * The hal handles the MIC keys at index+64.
	 */
	memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
	if (!ath_keyset(sc, keyix, hk, NULL)) {
		/* Txmic entry failed. No need to proceed further */
		DPRINTF(sc, ATH_DBG_KEYCACHE,
			"Setting TX MIC Key Failed\n");
		return 0;
	}

	memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
	/* XXX delete tx key on failure? */
	return ath_keyset(sc, keyix + 32, hk, addr);
}
698
699 static int ath_reserve_key_cache_slot_tkip(struct ath_softc *sc)
700 {
701 int i;
702
703 for (i = IEEE80211_WEP_NKID; i < sc->keymax / 2; i++) {
704 if (test_bit(i, sc->keymap) ||
705 test_bit(i + 64, sc->keymap))
706 continue; /* At least one part of TKIP key allocated */
707 if (sc->splitmic &&
708 (test_bit(i + 32, sc->keymap) ||
709 test_bit(i + 64 + 32, sc->keymap)))
710 continue; /* At least one part of TKIP key allocated */
711
712 /* Found a free slot for a TKIP key */
713 return i;
714 }
715 return -1;
716 }
717
/*
 * Find a key cache slot for a non-TKIP key.  Prefers slots whose TKIP
 * companion entries (+32/+64/+64+32) are already occupied — those slots
 * could never be used for TKIP anyway — so full TKIP-capable quads are
 * kept free as long as possible.  Returns the slot index or -1.
 */
static int ath_reserve_key_cache_slot(struct ath_softc *sc)
{
	int i;

	/* First, try to find slots that would not be available for TKIP. */
	if (sc->splitmic) {
		/* split-MIC: a TKIP key consumes i, i+32, i+64, i+64+32;
		 * pick any free member of a quad that is partially used */
		for (i = IEEE80211_WEP_NKID; i < sc->keymax / 4; i++) {
			if (!test_bit(i, sc->keymap) &&
			    (test_bit(i + 32, sc->keymap) ||
			     test_bit(i + 64, sc->keymap) ||
			     test_bit(i + 64 + 32, sc->keymap)))
				return i;
			if (!test_bit(i + 32, sc->keymap) &&
			    (test_bit(i, sc->keymap) ||
			     test_bit(i + 64, sc->keymap) ||
			     test_bit(i + 64 + 32, sc->keymap)))
				return i + 32;
			if (!test_bit(i + 64, sc->keymap) &&
			    (test_bit(i , sc->keymap) ||
			     test_bit(i + 32, sc->keymap) ||
			     test_bit(i + 64 + 32, sc->keymap)))
				return i + 64;
			if (!test_bit(i + 64 + 32, sc->keymap) &&
			    (test_bit(i, sc->keymap) ||
			     test_bit(i + 32, sc->keymap) ||
			     test_bit(i + 64, sc->keymap)))
				return i + 64 + 32;
		}
	} else {
		/* combined MIC: a TKIP key consumes i and i+64 only */
		for (i = IEEE80211_WEP_NKID; i < sc->keymax / 2; i++) {
			if (!test_bit(i, sc->keymap) &&
			    test_bit(i + 64, sc->keymap))
				return i;
			if (test_bit(i, sc->keymap) &&
			    !test_bit(i + 64, sc->keymap))
				return i + 64;
		}
	}

	/* No partially used TKIP slots, pick any available slot */
	for (i = IEEE80211_WEP_NKID; i < sc->keymax; i++) {
		/* Do not allow slots that could be needed for TKIP group keys
		 * to be used. This limitation could be removed if we know that
		 * TKIP will not be used. */
		if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
			continue;
		if (sc->splitmic) {
			if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
				continue;
			if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
				continue;
		}

		if (!test_bit(i, sc->keymap))
			return i; /* Found a free slot for a key */
	}

	/* No free slot found */
	return -1;
}
778
/*
 * Install @key into the hardware key cache.
 *
 * Group (broadcast) keys keep their mac80211 key index; pairwise keys
 * get a slot reserved from the key cache bitmap.  On success, returns
 * the hardware key cache index used (>= 0) and marks the slot (and any
 * TKIP companion slots) in sc->keymap; on failure returns a negative
 * errno.
 */
static int ath_key_config(struct ath_softc *sc,
			  struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta,
			  struct ieee80211_key_conf *key)
{
	struct ath9k_keyval hk;
	const u8 *mac = NULL;
	int ret = 0;
	int idx;

	memset(&hk, 0, sizeof(hk));

	/* map mac80211 cipher to hardware cipher type */
	switch (key->alg) {
	case ALG_WEP:
		hk.kv_type = ATH9K_CIPHER_WEP;
		break;
	case ALG_TKIP:
		hk.kv_type = ATH9K_CIPHER_TKIP;
		break;
	case ALG_CCMP:
		hk.kv_type = ATH9K_CIPHER_AES_CCM;
		break;
	default:
		return -EOPNOTSUPP;
	}

	hk.kv_len = key->keylen;
	memcpy(hk.kv_val, key->key, key->keylen);

	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		/* For now, use the default keys for broadcast keys. This may
		 * need to change with virtual interfaces. */
		idx = key->keyidx;
	} else if (key->keyidx) {
		/* NOTE(review): this inner 'vif' shadows the function
		 * parameter and is taken from sc->vifs[0] instead —
		 * presumably equivalent while only one vif exists, but
		 * verify before adding multi-vif support. */
		struct ieee80211_vif *vif;

		if (WARN_ON(!sta))
			return -EOPNOTSUPP;
		mac = sta->addr;

		vif = sc->vifs[0];
		if (vif->type != NL80211_IFTYPE_AP) {
			/* Only keyidx 0 should be used with unicast key, but
			 * allow this for client mode for now. */
			idx = key->keyidx;
		} else
			return -EIO;
	} else {
		if (WARN_ON(!sta))
			return -EOPNOTSUPP;
		mac = sta->addr;

		/* pairwise key with index 0: reserve a free cache slot */
		if (key->alg == ALG_TKIP)
			idx = ath_reserve_key_cache_slot_tkip(sc);
		else
			idx = ath_reserve_key_cache_slot(sc);
		if (idx < 0)
			return -ENOSPC; /* no free key cache entries */
	}

	if (key->alg == ALG_TKIP)
		ret = ath_setkey_tkip(sc, idx, key->key, &hk, mac,
				      vif->type == NL80211_IFTYPE_AP);
	else
		ret = ath_keyset(sc, idx, &hk, mac);

	if (!ret)
		return -EIO;

	/* mark the slot (and its TKIP companions) as in use */
	set_bit(idx, sc->keymap);
	if (key->alg == ALG_TKIP) {
		set_bit(idx + 64, sc->keymap);
		if (sc->splitmic) {
			set_bit(idx + 32, sc->keymap);
			set_bit(idx + 64 + 32, sc->keymap);
		}
	}

	return idx;
}
859
860 static void ath_key_delete(struct ath_softc *sc, struct ieee80211_key_conf *key)
861 {
862 ath9k_hw_keyreset(sc->sc_ah, key->hw_key_idx);
863 if (key->hw_key_idx < IEEE80211_WEP_NKID)
864 return;
865
866 clear_bit(key->hw_key_idx, sc->keymap);
867 if (key->alg != ALG_TKIP)
868 return;
869
870 clear_bit(key->hw_key_idx + 64, sc->keymap);
871 if (sc->splitmic) {
872 clear_bit(key->hw_key_idx + 32, sc->keymap);
873 clear_bit(key->hw_key_idx + 64 + 32, sc->keymap);
874 }
875 }
876
877 static void setup_ht_cap(struct ath_softc *sc,
878 struct ieee80211_sta_ht_cap *ht_info)
879 {
880 #define ATH9K_HT_CAP_MAXRXAMPDU_65536 0x3 /* 2 ^ 16 */
881 #define ATH9K_HT_CAP_MPDUDENSITY_8 0x6 /* 8 usec */
882
883 ht_info->ht_supported = true;
884 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
885 IEEE80211_HT_CAP_SM_PS |
886 IEEE80211_HT_CAP_SGI_40 |
887 IEEE80211_HT_CAP_DSSSCCK40;
888
889 ht_info->ampdu_factor = ATH9K_HT_CAP_MAXRXAMPDU_65536;
890 ht_info->ampdu_density = ATH9K_HT_CAP_MPDUDENSITY_8;
891
892 /* set up supported mcs set */
893 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
894
895 switch(sc->rx_chainmask) {
896 case 1:
897 ht_info->mcs.rx_mask[0] = 0xff;
898 break;
899 case 3:
900 case 5:
901 case 7:
902 default:
903 ht_info->mcs.rx_mask[0] = 0xff;
904 ht_info->mcs.rx_mask[1] = 0xff;
905 break;
906 }
907
908 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
909 }
910
/*
 * Handle an association state change for @vif: on association, store
 * the AID, (re)configure beacons, reset RSSI statistics and start the
 * ANI calibration timer; on disassociation, just clear the AID.
 */
static void ath9k_bss_assoc_info(struct ath_softc *sc,
				 struct ieee80211_vif *vif,
				 struct ieee80211_bss_conf *bss_conf)
{
	struct ath_vif *avp = (void *)vif->drv_priv;

	if (bss_conf->assoc) {
		DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info ASSOC %d, bssid: %pM\n",
			bss_conf->aid, sc->curbssid);

		/* New association, store aid */
		if (avp->av_opmode == NL80211_IFTYPE_STATION) {
			sc->curaid = bss_conf->aid;
			ath9k_hw_write_associd(sc);
		}

		/* Configure the beacon */
		ath_beacon_config(sc, 0);

		/* Reset rssi stats */
		sc->nodestats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
		sc->nodestats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
		sc->nodestats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
		sc->nodestats.ns_avgtxrate = ATH_RATE_DUMMY_MARKER;

		/* Start ANI */
		mod_timer(&sc->ani.timer,
			  jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
	} else {
		DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info DISSOC\n");
		sc->curaid = 0;
	}
}
944
945 /********************************/
946 /* LED functions */
947 /********************************/
948
949 static void ath_led_blink_work(struct work_struct *work)
950 {
951 struct ath_softc *sc = container_of(work, struct ath_softc,
952 ath_led_blink_work.work);
953
954 if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
955 return;
956 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN,
957 (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
958
959 queue_delayed_work(sc->hw->workqueue, &sc->ath_led_blink_work,
960 (sc->sc_flags & SC_OP_LED_ON) ?
961 msecs_to_jiffies(sc->led_off_duration) :
962 msecs_to_jiffies(sc->led_on_duration));
963
964 sc->led_on_duration =
965 max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25);
966 sc->led_off_duration =
967 max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10);
968 sc->led_on_cnt = sc->led_off_cnt = 0;
969 if (sc->sc_flags & SC_OP_LED_ON)
970 sc->sc_flags &= ~SC_OP_LED_ON;
971 else
972 sc->sc_flags |= SC_OP_LED_ON;
973 }
974
/*
 * LED class brightness callback.  The assoc and radio LEDs drive the
 * shared GPIO pin directly (active low); the tx/rx "LEDs" only bump
 * activity counters that ath_led_blink_work() folds into the blink
 * durations.
 */
static void ath_led_brightness(struct led_classdev *led_cdev,
			       enum led_brightness brightness)
{
	struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
	struct ath_softc *sc = led->sc;

	switch (brightness) {
	case LED_OFF:
		if (led->led_type == ATH_LED_ASSOC ||
		    led->led_type == ATH_LED_RADIO) {
			/* radio off parks the pin high (LED off);
			 * assoc off leaves it low */
			ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN,
					  (led->led_type == ATH_LED_RADIO));
			sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
			if (led->led_type == ATH_LED_RADIO)
				sc->sc_flags &= ~SC_OP_LED_ON;
		} else {
			sc->led_off_cnt++;
		}
		break;
	case LED_FULL:
		if (led->led_type == ATH_LED_ASSOC) {
			sc->sc_flags |= SC_OP_LED_ASSOCIATED;
			/* kick off the blink cycle immediately */
			queue_delayed_work(sc->hw->workqueue,
					   &sc->ath_led_blink_work, 0);
		} else if (led->led_type == ATH_LED_RADIO) {
			ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 0);
			sc->sc_flags |= SC_OP_LED_ON;
		} else {
			sc->led_on_cnt++;
		}
		break;
	default:
		break;
	}
}
1010
1011 static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
1012 char *trigger)
1013 {
1014 int ret;
1015
1016 led->sc = sc;
1017 led->led_cdev.name = led->name;
1018 led->led_cdev.default_trigger = trigger;
1019 led->led_cdev.brightness_set = ath_led_brightness;
1020
1021 ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
1022 if (ret)
1023 DPRINTF(sc, ATH_DBG_FATAL,
1024 "Failed to register led:%s", led->name);
1025 else
1026 led->registered = 1;
1027 return ret;
1028 }
1029
1030 static void ath_unregister_led(struct ath_led *led)
1031 {
1032 if (led->registered) {
1033 led_classdev_unregister(&led->led_cdev);
1034 led->registered = 0;
1035 }
1036 }
1037
/*
 * Tear down all LEDs: stop the blink work first so it cannot requeue,
 * then unregister each LED and park the (active low) GPIO pin high.
 */
static void ath_deinit_leds(struct ath_softc *sc)
{
	cancel_delayed_work_sync(&sc->ath_led_blink_work);
	ath_unregister_led(&sc->assoc_led);
	sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
	ath_unregister_led(&sc->tx_led);
	ath_unregister_led(&sc->rx_led);
	ath_unregister_led(&sc->radio_led);
	/* LED off, active low */
	ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
}
1048
1049 static void ath_init_leds(struct ath_softc *sc)
1050 {
1051 char *trigger;
1052 int ret;
1053
1054 /* Configure gpio 1 for output */
1055 ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN,
1056 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1057 /* LED off, active low */
1058 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
1059
1060 INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
1061
1062 trigger = ieee80211_get_radio_led_name(sc->hw);
1063 snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
1064 "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
1065 ret = ath_register_led(sc, &sc->radio_led, trigger);
1066 sc->radio_led.led_type = ATH_LED_RADIO;
1067 if (ret)
1068 goto fail;
1069
1070 trigger = ieee80211_get_assoc_led_name(sc->hw);
1071 snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
1072 "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
1073 ret = ath_register_led(sc, &sc->assoc_led, trigger);
1074 sc->assoc_led.led_type = ATH_LED_ASSOC;
1075 if (ret)
1076 goto fail;
1077
1078 trigger = ieee80211_get_tx_led_name(sc->hw);
1079 snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
1080 "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
1081 ret = ath_register_led(sc, &sc->tx_led, trigger);
1082 sc->tx_led.led_type = ATH_LED_TX;
1083 if (ret)
1084 goto fail;
1085
1086 trigger = ieee80211_get_rx_led_name(sc->hw);
1087 snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
1088 "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
1089 ret = ath_register_led(sc, &sc->rx_led, trigger);
1090 sc->rx_led.led_type = ATH_LED_RX;
1091 if (ret)
1092 goto fail;
1093
1094 return;
1095
1096 fail:
1097 ath_deinit_leds(sc);
1098 }
1099
1100 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1101
1102 /*******************/
1103 /* Rfkill */
1104 /*******************/
1105
1106 static void ath_radio_enable(struct ath_softc *sc)
1107 {
1108 struct ath_hw *ah = sc->sc_ah;
1109 struct ieee80211_channel *channel = sc->hw->conf.channel;
1110 int r;
1111
1112 ath9k_ps_wakeup(sc);
1113 spin_lock_bh(&sc->sc_resetlock);
1114
1115 r = ath9k_hw_reset(ah, ah->curchan, false);
1116
1117 if (r) {
1118 DPRINTF(sc, ATH_DBG_FATAL,
1119 "Unable to reset channel %u (%uMhz) ",
1120 "reset status %u\n",
1121 channel->center_freq, r);
1122 }
1123 spin_unlock_bh(&sc->sc_resetlock);
1124
1125 ath_update_txpow(sc);
1126 if (ath_startrecv(sc) != 0) {
1127 DPRINTF(sc, ATH_DBG_FATAL,
1128 "Unable to restart recv logic\n");
1129 return;
1130 }
1131
1132 if (sc->sc_flags & SC_OP_BEACONS)
1133 ath_beacon_config(sc, ATH_IF_ID_ANY); /* restart beacons */
1134
1135 /* Re-Enable interrupts */
1136 ath9k_hw_set_interrupts(ah, sc->imask);
1137
1138 /* Enable LED */
1139 ath9k_hw_cfg_output(ah, ATH_LED_PIN,
1140 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1141 ath9k_hw_set_gpio(ah, ATH_LED_PIN, 0);
1142
1143 ieee80211_wake_queues(sc->hw);
1144 ath9k_ps_restore(sc);
1145 }
1146
1147 static void ath_radio_disable(struct ath_softc *sc)
1148 {
1149 struct ath_hw *ah = sc->sc_ah;
1150 struct ieee80211_channel *channel = sc->hw->conf.channel;
1151 int r;
1152
1153 ath9k_ps_wakeup(sc);
1154 ieee80211_stop_queues(sc->hw);
1155
1156 /* Disable LED */
1157 ath9k_hw_set_gpio(ah, ATH_LED_PIN, 1);
1158 ath9k_hw_cfg_gpio_input(ah, ATH_LED_PIN);
1159
1160 /* Disable interrupts */
1161 ath9k_hw_set_interrupts(ah, 0);
1162
1163 ath_drain_all_txq(sc, false); /* clear pending tx frames */
1164 ath_stoprecv(sc); /* turn off frame recv */
1165 ath_flushrecv(sc); /* flush recv queue */
1166
1167 spin_lock_bh(&sc->sc_resetlock);
1168 r = ath9k_hw_reset(ah, ah->curchan, false);
1169 if (r) {
1170 DPRINTF(sc, ATH_DBG_FATAL,
1171 "Unable to reset channel %u (%uMhz) "
1172 "reset status %u\n",
1173 channel->center_freq, r);
1174 }
1175 spin_unlock_bh(&sc->sc_resetlock);
1176
1177 ath9k_hw_phy_disable(ah);
1178 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
1179 ath9k_ps_restore(sc);
1180 }
1181
1182 static bool ath_is_rfkill_set(struct ath_softc *sc)
1183 {
1184 struct ath_hw *ah = sc->sc_ah;
1185
1186 return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
1187 ah->rfkill_polarity;
1188 }
1189
/* h/w rfkill poll function */
static void ath_rfkill_poll(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    rf_kill.rfkill_poll.work);
	bool radio_on;

	/* Device is gone/shut down; do not touch hardware or rearm. */
	if (sc->sc_flags & SC_OP_INVALID)
		return;

	radio_on = !ath_is_rfkill_set(sc);

	/*
	 * enable/disable radio only when there is a
	 * state change in RF switch
	 */
	if (radio_on == !!(sc->sc_flags & SC_OP_RFKILL_HW_BLOCKED)) {
		enum rfkill_state state;

		if (sc->sc_flags & SC_OP_RFKILL_SW_BLOCKED) {
			/* s/w block is in effect: only report the new state,
			 * the radio stays off either way. */
			state = radio_on ? RFKILL_STATE_SOFT_BLOCKED
				: RFKILL_STATE_HARD_BLOCKED;
		} else if (radio_on) {
			ath_radio_enable(sc);
			state = RFKILL_STATE_UNBLOCKED;
		} else {
			ath_radio_disable(sc);
			state = RFKILL_STATE_HARD_BLOCKED;
		}

		/* Mirror the switch position into the driver flags. */
		if (state == RFKILL_STATE_HARD_BLOCKED)
			sc->sc_flags |= SC_OP_RFKILL_HW_BLOCKED;
		else
			sc->sc_flags &= ~SC_OP_RFKILL_HW_BLOCKED;

		/* Tell the rfkill core about the state we decided on. */
		rfkill_force_state(sc->rf_kill.rfkill, state);
	}

	/* Re-arm the periodic poll of the h/w switch. */
	queue_delayed_work(sc->hw->workqueue, &sc->rf_kill.rfkill_poll,
			   msecs_to_jiffies(ATH_RFKILL_POLL_INTERVAL));
}
1231
1232 /* s/w rfkill handler */
1233 static int ath_sw_toggle_radio(void *data, enum rfkill_state state)
1234 {
1235 struct ath_softc *sc = data;
1236
1237 switch (state) {
1238 case RFKILL_STATE_SOFT_BLOCKED:
1239 if (!(sc->sc_flags & (SC_OP_RFKILL_HW_BLOCKED |
1240 SC_OP_RFKILL_SW_BLOCKED)))
1241 ath_radio_disable(sc);
1242 sc->sc_flags |= SC_OP_RFKILL_SW_BLOCKED;
1243 return 0;
1244 case RFKILL_STATE_UNBLOCKED:
1245 if ((sc->sc_flags & SC_OP_RFKILL_SW_BLOCKED)) {
1246 sc->sc_flags &= ~SC_OP_RFKILL_SW_BLOCKED;
1247 if (sc->sc_flags & SC_OP_RFKILL_HW_BLOCKED) {
1248 DPRINTF(sc, ATH_DBG_FATAL, "Can't turn on the"
1249 "radio as it is disabled by h/w\n");
1250 return -EPERM;
1251 }
1252 ath_radio_enable(sc);
1253 }
1254 return 0;
1255 default:
1256 return -EINVAL;
1257 }
1258 }
1259
1260 /* Init s/w rfkill */
1261 static int ath_init_sw_rfkill(struct ath_softc *sc)
1262 {
1263 sc->rf_kill.rfkill = rfkill_allocate(wiphy_dev(sc->hw->wiphy),
1264 RFKILL_TYPE_WLAN);
1265 if (!sc->rf_kill.rfkill) {
1266 DPRINTF(sc, ATH_DBG_FATAL, "Failed to allocate rfkill\n");
1267 return -ENOMEM;
1268 }
1269
1270 snprintf(sc->rf_kill.rfkill_name, sizeof(sc->rf_kill.rfkill_name),
1271 "ath9k-%s::rfkill", wiphy_name(sc->hw->wiphy));
1272 sc->rf_kill.rfkill->name = sc->rf_kill.rfkill_name;
1273 sc->rf_kill.rfkill->data = sc;
1274 sc->rf_kill.rfkill->toggle_radio = ath_sw_toggle_radio;
1275 sc->rf_kill.rfkill->state = RFKILL_STATE_UNBLOCKED;
1276 sc->rf_kill.rfkill->user_claim_unsupported = 1;
1277
1278 return 0;
1279 }
1280
1281 /* Deinitialize rfkill */
1282 static void ath_deinit_rfkill(struct ath_softc *sc)
1283 {
1284 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1285 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
1286
1287 if (sc->sc_flags & SC_OP_RFKILL_REGISTERED) {
1288 rfkill_unregister(sc->rf_kill.rfkill);
1289 sc->sc_flags &= ~SC_OP_RFKILL_REGISTERED;
1290 sc->rf_kill.rfkill = NULL;
1291 }
1292 }
1293
/* Kick off h/w switch polling and perform the one-time registration with
 * the rfkill core.  On registration failure the whole device is torn down
 * (ath_cleanup) and -EIO is returned; otherwise 0. */
static int ath_start_rfkill_poll(struct ath_softc *sc)
{
	/* Start polling the h/w switch immediately if RF silence is supported. */
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
		queue_delayed_work(sc->hw->workqueue,
				   &sc->rf_kill.rfkill_poll, 0);

	/* Register only once; the flag guards repeated calls. */
	if (!(sc->sc_flags & SC_OP_RFKILL_REGISTERED)) {
		if (rfkill_register(sc->rf_kill.rfkill)) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"Unable to register rfkill\n");
			rfkill_free(sc->rf_kill.rfkill);

			/* Deinitialize the device */
			ath_cleanup(sc);
			return -EIO;
		} else {
			sc->sc_flags |= SC_OP_RFKILL_REGISTERED;
		}
	}

	return 0;
}
1316 #endif /* CONFIG_RFKILL */
1317
/* Full device teardown: detach driver state, release the IRQ, undo the
 * bus-level setup and finally free the mac80211 hw structure.  The steps
 * run in reverse order of setup. */
void ath_cleanup(struct ath_softc *sc)
{
	ath_detach(sc);
	free_irq(sc->irq, sc);
	ath_bus_cleanup(sc);
	ieee80211_free_hw(sc->hw);
}
1325
/* Detach the driver from the hardware: unregister from mac80211 and rfkill,
 * stop LEDs and tasklets, release TX/RX state and detach the HAL.  The chip
 * is woken first so register accesses during teardown are safe. */
void ath_detach(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;
	int i = 0;

	ath9k_ps_wakeup(sc);

	DPRINTF(sc, ATH_DBG_CONFIG, "Detach ATH hw\n");

#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
	ath_deinit_rfkill(sc);
#endif
	ath_deinit_leds(sc);

	ieee80211_unregister_hw(hw);
	ath_rx_cleanup(sc);
	ath_tx_cleanup(sc);

	/* No more interrupts/beacons may be processed after this point. */
	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	/* If the device is still live, make sure it is awake for hw_detach. */
	if (!(sc->sc_flags & SC_OP_INVALID))
		ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);

	/* cleanup tx queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	ath9k_hw_detach(sc->sc_ah);
	ath9k_exit_debug(sc);
	ath9k_ps_restore(sc);
}
1359
/* One-time software/hardware initialization: attach the HAL, reset the key
 * cache, set up regulatory data, rate tables, TX queues, crypto capabilities
 * and channel/band structures.  Returns 0 on success or a negative errno;
 * on failure everything allocated so far is unwound. */
static int ath_init(u16 devid, struct ath_softc *sc)
{
	struct ath_hw *ah = NULL;
	int status;
	int error = 0, i;
	int csz = 0;

	/* XXX: hardware will not be ready until ath_open() being called */
	sc->sc_flags |= SC_OP_INVALID;

	/* debugfs is best-effort; failure is only logged. */
	if (ath9k_init_debug(sc) < 0)
		printk(KERN_ERR "Unable to create debugfs files\n");

	spin_lock_init(&sc->sc_resetlock);
	mutex_init(&sc->mutex);
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(sc, &csz);
	/* XXX assert csz is non-zero */
	sc->cachelsz = csz << 2;	/* convert to bytes */

	ah = ath9k_hw_attach(devid, sc, &status);
	if (ah == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to attach hardware; HAL status %d\n", status);
		error = -ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;

	/* Get the hardware key cache size. */
	sc->keymax = ah->caps.keycache_size;
	if (sc->keymax > ATH_KEYMAX) {
		DPRINTF(sc, ATH_DBG_KEYCACHE,
			"Warning, using only %u entries in %u key cache\n",
			ATH_KEYMAX, sc->keymax);
		sc->keymax = ATH_KEYMAX;
	}

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->keymax; i++)
		ath9k_hw_keyreset(ah, (u16) i);

	/* NOTE(review): error is still 0 here, so this failure path
	 * returns 0 through "bad" — confirm whether regd failure should
	 * set an errno. */
	if (ath9k_regd_init(sc->sc_ah))
		goto bad;

	/* default to MONITOR mode */
	sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;

	/* Setup rate tables */

	ath_rate_attach(sc);
	ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
	ath_setup_rates(sc, IEEE80211_BAND_5GHZ);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles reseting
	 * these queues at the needed time.
	 */
	sc->beacon.beaconq = ath_beaconq_setup(ah);
	if (sc->beacon.beaconq == -1) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to setup a beacon xmit queue\n");
		error = -EIO;
		goto bad2;
	}
	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
	if (sc->beacon.cabq == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to setup CAB xmit queue\n");
		error = -EIO;
		goto bad2;
	}

	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	/* Mark all AC -> h/w queue mappings as unassigned. */
	for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
		sc->tx.hwq_map[i] = -1;

	/* Setup data queues */
	/* NB: ensure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to setup xmit queue for BK traffic\n");
		error = -EIO;
		goto bad2;
	}

	if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to setup xmit queue for BE traffic\n");
		error = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to setup xmit queue for VI traffic\n");
		error = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to setup xmit queue for VO traffic\n");
		error = -EIO;
		goto bad2;
	}

	/* Initializes the noise floor to a reasonable default value.
	 * Later on this will be updated during ANI processing. */

	sc->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
	setup_timer(&sc->ani.timer, ath_ani_calibrate, (unsigned long)sc);

	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)) {
		/*
		 * Whether we should enable h/w TKIP MIC.
		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
		 * report WMM capable, so it's always safe to turn on
		 * TKIP MIC in this case.
		 */
		ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
				       0, 1, NULL);
	}

	/*
	 * Check whether the separate key cache entries
	 * are required to handle both tx+rx MIC keys.
	 * With split mic keys the number of stations is limited
	 * to 27 otherwise 59.
	 */
	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)
	    && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				      ATH9K_CIPHER_MIC, NULL)
	    && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
				      0, NULL))
		sc->splitmic = 1;

	/* turn on mcast key search if possible */
	if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
		(void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
					     1, NULL);

	sc->config.txpowlimit = ATH_TXPOWER_MAX;

	/* 11n Capabilities */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	sc->tx_chainmask = ah->caps.tx_chainmask;
	sc->rx_chainmask = ah->caps.rx_chainmask;

	ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
	sc->rx.defant = ath9k_hw_getdefantenna(ah);

	/* Use a broadcast BSSID mask until an interface is configured. */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
		memcpy(sc->bssidmask, ath_bcast_mac, ETH_ALEN);
		ATH_SET_VIF_BSSID_MASK(sc->bssidmask);
		ath9k_hw_setbssidmask(sc);
	}

	sc->beacon.slottime = ATH9K_SLOT_TIME_9;	/* default to short slot time */

	/* initialize beacon slots */
	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
		sc->beacon.bslot[i] = ATH_IF_ID_ANY;

	/* save MISC configurations */
	sc->config.swBeaconProcess = 1;

	/* setup channels and rates */

	sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
	sc->sbands[IEEE80211_BAND_2GHZ].bitrates =
		sc->rates[IEEE80211_BAND_2GHZ];
	sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
	sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
		ARRAY_SIZE(ath9k_2ghz_chantable);

	/* 5 GHz band only when the hardware supports 11a. */
	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
		sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
			sc->rates[IEEE80211_BAND_5GHZ];
		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
			ARRAY_SIZE(ath9k_5ghz_chantable);
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BT_COEX)
		ath9k_hw_btcoex_enable(sc->sc_ah);

	return 0;
bad2:
	/* cleanup tx queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
bad:
	if (ah)
		ath9k_hw_detach(ah);
	ath9k_exit_debug(sc);

	return error;
}
1579
1580 int ath_attach(u16 devid, struct ath_softc *sc)
1581 {
1582 struct ieee80211_hw *hw = sc->hw;
1583 const struct ieee80211_regdomain *regd;
1584 int error = 0, i;
1585
1586 DPRINTF(sc, ATH_DBG_CONFIG, "Attach ATH hw\n");
1587
1588 error = ath_init(devid, sc);
1589 if (error != 0)
1590 return error;
1591
1592 /* get mac address from hardware and set in mac80211 */
1593
1594 SET_IEEE80211_PERM_ADDR(hw, sc->sc_ah->macaddr);
1595
1596 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
1597 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1598 IEEE80211_HW_SIGNAL_DBM |
1599 IEEE80211_HW_AMPDU_AGGREGATION |
1600 IEEE80211_HW_SUPPORTS_PS |
1601 IEEE80211_HW_PS_NULLFUNC_STACK;
1602
1603 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
1604 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
1605
1606 hw->wiphy->interface_modes =
1607 BIT(NL80211_IFTYPE_AP) |
1608 BIT(NL80211_IFTYPE_STATION) |
1609 BIT(NL80211_IFTYPE_ADHOC);
1610
1611 hw->wiphy->reg_notifier = ath9k_reg_notifier;
1612 hw->wiphy->strict_regulatory = true;
1613
1614 hw->queues = 4;
1615 hw->max_rates = 4;
1616 hw->channel_change_time = 5000;
1617 hw->max_rate_tries = ATH_11N_TXMAXTRY;
1618 hw->sta_data_size = sizeof(struct ath_node);
1619 hw->vif_data_size = sizeof(struct ath_vif);
1620
1621 hw->rate_control_algorithm = "ath9k_rate_control";
1622
1623 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
1624 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
1625 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
1626 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
1627 }
1628
1629 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &sc->sbands[IEEE80211_BAND_2GHZ];
1630 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
1631 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1632 &sc->sbands[IEEE80211_BAND_5GHZ];
1633
1634 /* initialize tx/rx engine */
1635 error = ath_tx_init(sc, ATH_TXBUF);
1636 if (error != 0)
1637 goto error_attach;
1638
1639 error = ath_rx_init(sc, ATH_RXBUF);
1640 if (error != 0)
1641 goto error_attach;
1642
1643 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1644 /* Initialze h/w Rfkill */
1645 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1646 INIT_DELAYED_WORK(&sc->rf_kill.rfkill_poll, ath_rfkill_poll);
1647
1648 /* Initialize s/w rfkill */
1649 error = ath_init_sw_rfkill(sc);
1650 if (error)
1651 goto error_attach;
1652 #endif
1653
1654 if (ath9k_is_world_regd(sc->sc_ah)) {
1655 /* Anything applied here (prior to wiphy registration) gets
1656 * saved on the wiphy orig_* parameters */
1657 regd = ath9k_world_regdomain(sc->sc_ah);
1658 hw->wiphy->custom_regulatory = true;
1659 hw->wiphy->strict_regulatory = false;
1660 } else {
1661 /* This gets applied in the case of the absense of CRDA,
1662 * it's our own custom world regulatory domain, similar to
1663 * cfg80211's but we enable passive scanning */
1664 regd = ath9k_default_world_regdomain();
1665 }
1666 wiphy_apply_custom_regulatory(hw->wiphy, regd);
1667 ath9k_reg_apply_radar_flags(hw->wiphy);
1668 ath9k_reg_apply_world_flags(hw->wiphy, REGDOM_SET_BY_INIT);
1669
1670 error = ieee80211_register_hw(hw);
1671
1672 if (!ath9k_is_world_regd(sc->sc_ah)) {
1673 error = regulatory_hint(hw->wiphy,
1674 sc->sc_ah->regulatory.alpha2);
1675 if (error)
1676 goto error_attach;
1677 }
1678
1679 /* Initialize LED control */
1680 ath_init_leds(sc);
1681
1682
1683 return 0;
1684
1685 error_attach:
1686 /* cleanup tx queues */
1687 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1688 if (ATH_TXQ_SETUP(sc, i))
1689 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1690
1691 ath9k_hw_detach(sc->sc_ah);
1692 ath9k_exit_debug(sc);
1693
1694 return error;
1695 }
1696
/* Soft-reset the chip in place: quiesce TX/RX, reset on the current channel,
 * restart receive, restore tx power/beacons/interrupts.  If retry_tx, pending
 * frames are re-queued instead of dropped and the tx queues are rescheduled.
 * Returns the ath9k_hw_reset() status (0 on success). */
int ath_reset(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_hw *hw = sc->hw;
	int r;

	/* Silence the chip and flush everything in flight before reset. */
	ath9k_hw_set_interrupts(ah, 0);
	ath_drain_all_txq(sc, retry_tx);
	ath_stoprecv(sc);
	ath_flushrecv(sc);

	spin_lock_bh(&sc->sc_resetlock);
	r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
	if (r)
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to reset hardware; reset status %u\n", r);
	spin_unlock_bh(&sc->sc_resetlock);

	if (ath_startrecv(sc) != 0)
		DPRINTF(sc, ATH_DBG_FATAL, "Unable to start recv logic\n");

	/*
	 * We may be doing a reset in response to a request
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath_cache_conf_rate(sc, &hw->conf);

	ath_update_txpow(sc);

	if (sc->sc_flags & SC_OP_BEACONS)
		ath_beacon_config(sc, ATH_IF_ID_ANY);	/* restart beacons */

	ath9k_hw_set_interrupts(ah, sc->imask);

	/* Kick every configured tx queue so re-queued frames go out. */
	if (retry_tx) {
		int i;
		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				spin_lock_bh(&sc->tx.txq[i].axq_lock);
				ath_txq_schedule(sc, &sc->tx.txq[i]);
				spin_unlock_bh(&sc->tx.txq[i].axq_lock);
			}
		}
	}

	return r;
}
1745
1746 /*
1747 * This function will allocate both the DMA descriptor structure, and the
1748 * buffers it contains. These are used to contain the descriptors used
1749 * by the system.
1750 */
1751 int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
1752 struct list_head *head, const char *name,
1753 int nbuf, int ndesc)
1754 {
1755 #define DS2PHYS(_dd, _ds) \
1756 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
1757 #define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
1758 #define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
1759
1760 struct ath_desc *ds;
1761 struct ath_buf *bf;
1762 int i, bsize, error;
1763
1764 DPRINTF(sc, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
1765 name, nbuf, ndesc);
1766
1767 /* ath_desc must be a multiple of DWORDs */
1768 if ((sizeof(struct ath_desc) % 4) != 0) {
1769 DPRINTF(sc, ATH_DBG_FATAL, "ath_desc not DWORD aligned\n");
1770 ASSERT((sizeof(struct ath_desc) % 4) == 0);
1771 error = -ENOMEM;
1772 goto fail;
1773 }
1774
1775 dd->dd_name = name;
1776 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
1777
1778 /*
1779 * Need additional DMA memory because we can't use
1780 * descriptors that cross the 4K page boundary. Assume
1781 * one skipped descriptor per 4K page.
1782 */
1783 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1784 u32 ndesc_skipped =
1785 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
1786 u32 dma_len;
1787
1788 while (ndesc_skipped) {
1789 dma_len = ndesc_skipped * sizeof(struct ath_desc);
1790 dd->dd_desc_len += dma_len;
1791
1792 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
1793 };
1794 }
1795
1796 /* allocate descriptors */
1797 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
1798 &dd->dd_desc_paddr, GFP_ATOMIC);
1799 if (dd->dd_desc == NULL) {
1800 error = -ENOMEM;
1801 goto fail;
1802 }
1803 ds = dd->dd_desc;
1804 DPRINTF(sc, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
1805 dd->dd_name, ds, (u32) dd->dd_desc_len,
1806 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
1807
1808 /* allocate buffers */
1809 bsize = sizeof(struct ath_buf) * nbuf;
1810 bf = kmalloc(bsize, GFP_KERNEL);
1811 if (bf == NULL) {
1812 error = -ENOMEM;
1813 goto fail2;
1814 }
1815 memset(bf, 0, bsize);
1816 dd->dd_bufptr = bf;
1817
1818 INIT_LIST_HEAD(head);
1819 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
1820 bf->bf_desc = ds;
1821 bf->bf_daddr = DS2PHYS(dd, ds);
1822
1823 if (!(sc->sc_ah->caps.hw_caps &
1824 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1825 /*
1826 * Skip descriptor addresses which can cause 4KB
1827 * boundary crossing (addr + length) with a 32 dword
1828 * descriptor fetch.
1829 */
1830 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
1831 ASSERT((caddr_t) bf->bf_desc <
1832 ((caddr_t) dd->dd_desc +
1833 dd->dd_desc_len));
1834
1835 ds += ndesc;
1836 bf->bf_desc = ds;
1837 bf->bf_daddr = DS2PHYS(dd, ds);
1838 }
1839 }
1840 list_add_tail(&bf->list, head);
1841 }
1842 return 0;
1843 fail2:
1844 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
1845 dd->dd_desc_paddr);
1846 fail:
1847 memset(dd, 0, sizeof(*dd));
1848 return error;
1849 #undef ATH_DESC_4KB_BOUND_CHECK
1850 #undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
1851 #undef DS2PHYS
1852 }
1853
/* Undo ath_descdma_setup(): free the coherent descriptor memory and the
 * ath_buf array, empty the caller's list and zero the bookkeeping. */
void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}
1865
1866 int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
1867 {
1868 int qnum;
1869
1870 switch (queue) {
1871 case 0:
1872 qnum = sc->tx.hwq_map[ATH9K_WME_AC_VO];
1873 break;
1874 case 1:
1875 qnum = sc->tx.hwq_map[ATH9K_WME_AC_VI];
1876 break;
1877 case 2:
1878 qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];
1879 break;
1880 case 3:
1881 qnum = sc->tx.hwq_map[ATH9K_WME_AC_BK];
1882 break;
1883 default:
1884 qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];
1885 break;
1886 }
1887
1888 return qnum;
1889 }
1890
1891 int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
1892 {
1893 int qnum;
1894
1895 switch (queue) {
1896 case ATH9K_WME_AC_VO:
1897 qnum = 0;
1898 break;
1899 case ATH9K_WME_AC_VI:
1900 qnum = 1;
1901 break;
1902 case ATH9K_WME_AC_BE:
1903 qnum = 2;
1904 break;
1905 case ATH9K_WME_AC_BK:
1906 qnum = 3;
1907 break;
1908 default:
1909 qnum = -1;
1910 break;
1911 }
1912
1913 return qnum;
1914 }
1915
/* XXX: Remove me once we don't depend on ath9k_channel for all
 * this redundant data */
/* Sync an ath9k_channel with the channel/HT settings currently held in
 * hw->conf, including the driver's cached tx channel width. */
static void ath9k_update_ichannel(struct ath_softc *sc,
				  struct ath9k_channel *ichan)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_channel *chan = hw->conf.channel;
	struct ieee80211_conf *conf = &hw->conf;

	ichan->channel = chan->center_freq;
	ichan->chan = chan;

	/* Base (non-HT) mode: G on 2.4 GHz, A on 5 GHz. */
	if (chan->band == IEEE80211_BAND_2GHZ) {
		ichan->chanmode = CHANNEL_G;
		ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM;
	} else {
		ichan->chanmode = CHANNEL_A;
		ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
	}

	sc->tx_chan_width = ATH9K_HT_MACMODE_20;

	/* HT overrides the base chanmode; HT40 also widens the MAC mode. */
	if (conf_is_ht(conf)) {
		if (conf_is_ht40(conf))
			sc->tx_chan_width = ATH9K_HT_MACMODE_2040;

		ichan->chanmode = ath_get_extchanmode(sc, chan,
						      conf->channel_type);
	}
}
1946
1947 /**********************/
1948 /* mac80211 callbacks */
1949 /**********************/
1950
/* mac80211 start() callback: bring the hardware out of reset on the
 * configured channel, start receive, program the interrupt mask and wake
 * the queues.  Serialized against other callbacks by sc->mutex.
 * Returns 0 or a negative errno. */
static int ath9k_start(struct ieee80211_hw *hw)
{
	struct ath_softc *sc = hw->priv;
	struct ieee80211_channel *curchan = hw->conf.channel;
	struct ath9k_channel *init_channel;
	int r, pos;

	DPRINTF(sc, ATH_DBG_CONFIG, "Starting driver with "
		"initial channel: %d MHz\n", curchan->center_freq);

	mutex_lock(&sc->mutex);

	/* setup initial channel */

	/* hw_value indexes the driver's private channel table. */
	pos = curchan->hw_value;

	init_channel = &sc->sc_ah->channels[pos];
	ath9k_update_ichannel(sc, init_channel);

	/* Reset SERDES registers */
	ath9k_hw_configpcipowersave(sc->sc_ah, 0);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	spin_lock_bh(&sc->sc_resetlock);
	r = ath9k_hw_reset(sc->sc_ah, init_channel, false);
	if (r) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to reset hardware; reset status %u "
			"(freq %u MHz)\n", r,
			curchan->center_freq);
		spin_unlock_bh(&sc->sc_resetlock);
		goto mutex_unlock;
	}
	spin_unlock_bh(&sc->sc_resetlock);

	/*
	 * This is needed only to setup initial state
	 * but it's best done after a reset.
	 */
	ath_update_txpow(sc);

	/*
	 * Setup the hardware after reset:
	 * The receive engine is set going.
	 * Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to start recv logic\n");
		r = -EIO;
		goto mutex_unlock;
	}

	/* Setup our intr mask. */
	sc->imask = ATH9K_INT_RX | ATH9K_INT_TX
		| ATH9K_INT_RXEOL | ATH9K_INT_RXORN
		| ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_GTT)
		sc->imask |= ATH9K_INT_GTT;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
		sc->imask |= ATH9K_INT_CST;

	ath_cache_conf_rate(sc, &hw->conf);

	/* Device is now usable. */
	sc->sc_flags &= ~SC_OP_INVALID;

	/* Disable BMISS interrupt when we're not associated */
	sc->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
	ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);

	ieee80211_wake_queues(sc->hw);

#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
	r = ath_start_rfkill_poll(sc);
#endif

mutex_unlock:
	mutex_unlock(&sc->mutex);

	return r;
}
2042
/* mac80211 tx() callback: assign a sequence number if requested, 4-byte
 * align the payload after the 802.11 header, pick a tx queue and hand the
 * frame to ath_tx_start().  The skb is consumed on every return path
 * except the headroom-failure case (see NOTE below). */
static int ath9k_tx(struct ieee80211_hw *hw,
		    struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath_softc *sc = hw->priv;
	struct ath_tx_control txctl;
	int hdrlen, padsize;

	memset(&txctl, 0, sizeof(struct ath_tx_control));

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		/* Keep the fragment bits, replace the sequence bits. */
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	if (hdrlen & 3) {
		padsize = hdrlen % 4;
		/* NOTE(review): this path returns without freeing the skb,
		 * unlike the exit label below — confirm whether that leaks. */
		if (skb_headroom(skb) < padsize)
			return -1;
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, hdrlen);
	}

	/* Check if a tx queue is available */

	txctl.txq = ath_test_get_txq(sc, skb);
	if (!txctl.txq)
		goto exit;

	DPRINTF(sc, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);

	if (ath_tx_start(sc, skb, &txctl) != 0) {
		DPRINTF(sc, ATH_DBG_XMIT, "TX failed\n");
		goto exit;
	}

	return 0;
exit:
	/* Frame could not be queued: drop it here. */
	dev_kfree_skb_any(skb);
	return 0;
}
2094
/* mac80211 stop() callback: silence interrupts, drain TX/RX, stop the
 * rfkill poller, disable the chip and put it into PCI power save, then
 * mark the device invalid.  Serialized by sc->mutex. */
static void ath9k_stop(struct ieee80211_hw *hw)
{
	struct ath_softc *sc = hw->priv;

	if (sc->sc_flags & SC_OP_INVALID) {
		DPRINTF(sc, ATH_DBG_ANY, "Device not present\n");
		return;
	}

	mutex_lock(&sc->mutex);

	ieee80211_stop_queues(sc->hw);

	/* make sure h/w will not generate any interrupt
	 * before setting the invalid flag. */
	ath9k_hw_set_interrupts(sc->sc_ah, 0);

	/* NOTE(review): the early return above already guarantees this
	 * condition is true here; the else branch looks unreachable. */
	if (!(sc->sc_flags & SC_OP_INVALID)) {
		ath_drain_all_txq(sc, false);
		ath_stoprecv(sc);
		ath9k_hw_phy_disable(sc->sc_ah);
	} else
		sc->rx.rxlink = NULL;

#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
		cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
#endif
	/* disable HAL and put h/w to sleep */
	ath9k_hw_disable(sc->sc_ah);
	ath9k_hw_configpcipowersave(sc->sc_ah, 1);

	sc->sc_flags |= SC_OP_INVALID;

	mutex_unlock(&sc->mutex);

	DPRINTF(sc, ATH_DBG_CONFIG, "Driver halt\n");
}
2133
/* mac80211 add_interface() callback: accept a single STA/ADHOC/AP vif,
 * record it, program the device opmode and the interrupt mask for that
 * mode, and start ANI for AP operation.  Serialized by sc->mutex.
 * Returns 0, -ENOBUFS (a vif already exists) or -EOPNOTSUPP. */
static int ath9k_add_interface(struct ieee80211_hw *hw,
			       struct ieee80211_if_init_conf *conf)
{
	struct ath_softc *sc = hw->priv;
	struct ath_vif *avp = (void *)conf->vif->drv_priv;
	enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;

	/* Support only vif for now */

	if (sc->nvifs)
		return -ENOBUFS;

	mutex_lock(&sc->mutex);

	switch (conf->type) {
	case NL80211_IFTYPE_STATION:
		ic_opmode = NL80211_IFTYPE_STATION;
		break;
	case NL80211_IFTYPE_ADHOC:
		ic_opmode = NL80211_IFTYPE_ADHOC;
		break;
	case NL80211_IFTYPE_AP:
		ic_opmode = NL80211_IFTYPE_AP;
		break;
	default:
		DPRINTF(sc, ATH_DBG_FATAL,
			"Interface type %d not yet supported\n", conf->type);
		mutex_unlock(&sc->mutex);
		return -EOPNOTSUPP;
	}

	DPRINTF(sc, ATH_DBG_CONFIG, "Attach a VIF of type: %d\n", ic_opmode);

	/* Set the VIF opmode */
	avp->av_opmode = ic_opmode;
	avp->av_bslot = -1;		/* no beacon slot assigned yet */

	if (ic_opmode == NL80211_IFTYPE_AP) {
		ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
		sc->sc_flags |= SC_OP_TSF_RESET;
	}

	sc->vifs[0] = conf->vif;
	sc->nvifs++;

	/* Set the device opmode */
	sc->sc_ah->opmode = ic_opmode;

	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if ((conf->type == NL80211_IFTYPE_STATION) ||
	    (conf->type == NL80211_IFTYPE_ADHOC)) {
		if (ath9k_hw_phycounters(sc->sc_ah))
			sc->imask |= ATH9K_INT_MIB;
		sc->imask |= ATH9K_INT_TSFOOR;
	}

	/*
	 * Some hardware processes the TIM IE and fires an
	 * interrupt when the TIM bit is set.  For hardware
	 * that does, if not overridden by configuration,
	 * enable the TIM interrupt when operating as station.
	 */
	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
	    (conf->type == NL80211_IFTYPE_STATION) &&
	    !sc->config.swBeaconProcess)
		sc->imask |= ATH9K_INT_TIM;

	ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);

	if (conf->type == NL80211_IFTYPE_AP) {
		/* TODO: is this a suitable place to start ANI for AP mode? */
		/* Start ANI */
		mod_timer(&sc->ani.timer,
			  jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
	}

	mutex_unlock(&sc->mutex);

	return 0;
}
2217
2218 static void ath9k_remove_interface(struct ieee80211_hw *hw,
2219 struct ieee80211_if_init_conf *conf)
2220 {
2221 struct ath_softc *sc = hw->priv;
2222 struct ath_vif *avp = (void *)conf->vif->drv_priv;
2223
2224 DPRINTF(sc, ATH_DBG_CONFIG, "Detach Interface\n");
2225
2226 mutex_lock(&sc->mutex);
2227
2228 /* Stop ANI */
2229 del_timer_sync(&sc->ani.timer);
2230
2231 /* Reclaim beacon resources */
2232 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP ||
2233 sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) {
2234 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2235 ath_beacon_return(sc, avp);
2236 }
2237
2238 sc->sc_flags &= ~SC_OP_BEACONS;
2239
2240 sc->vifs[0] = NULL;
2241 sc->nvifs--;
2242
2243 mutex_unlock(&sc->mutex);
2244 }
2245
/*
 * mac80211 config callback: apply the hw->conf settings flagged in
 * 'changed' - power-save transitions, channel switches, TX power limit
 * and beacon-interval updates.  Everything runs under sc->mutex.
 *
 * Returns 0 on success, -EINVAL if the hardware rejects a channel set.
 */
static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
{
	struct ath_softc *sc = hw->priv;
	struct ieee80211_conf *conf = &hw->conf;

	mutex_lock(&sc->mutex);

	if (changed & IEEE80211_CONF_CHANGE_PS) {
		if (conf->flags & IEEE80211_CONF_PS) {
			/* Entering PS: enable the TIM timer interrupt
			 * first, then abort RX and drop the chip into
			 * network sleep - this order matters. */
			if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
				sc->imask |= ATH9K_INT_TIM_TIMER;
				ath9k_hw_set_interrupts(sc->sc_ah,
						sc->imask);
			}
			ath9k_hw_setrxabort(sc->sc_ah, 1);
			ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
		} else {
			/* Leaving PS: wake the chip before touching RX,
			 * then turn the TIM timer interrupt back off. */
			ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
			ath9k_hw_setrxabort(sc->sc_ah, 0);
			sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON;
			if (sc->imask & ATH9K_INT_TIM_TIMER) {
				sc->imask &= ~ATH9K_INT_TIM_TIMER;
				ath9k_hw_set_interrupts(sc->sc_ah,
						sc->imask);
			}
		}
	}

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		struct ieee80211_channel *curchan = hw->conf.channel;
		/* hw_value is this driver's index into ah->channels */
		int pos = curchan->hw_value;

		DPRINTF(sc, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
			curchan->center_freq);

		/* XXX: remove me eventualy */
		ath9k_update_ichannel(sc, &sc->sc_ah->channels[pos]);

		ath_update_chainmask(sc, conf_is_ht(conf));

		if (ath_set_channel(sc, &sc->sc_ah->channels[pos]) < 0) {
			DPRINTF(sc, ATH_DBG_FATAL, "Unable to set channel\n");
			mutex_unlock(&sc->mutex);
			return -EINVAL;
		}
	}

	/* mac80211 supplies dBm; the hardware limit is in half-dBm units */
	if (changed & IEEE80211_CONF_CHANGE_POWER)
		sc->config.txpowlimit = 2 * conf->power_level;

	/*
	 * The HW TSF has to be reset when the beacon interval changes.
	 * We set the flag here, and ath_beacon_config_ap() would take this
	 * into account when it gets called through the subsequent
	 * config_interface() call - with IFCC_BEACON in the changed field.
	 */

	if (changed & IEEE80211_CONF_CHANGE_BEACON_INTERVAL)
		sc->sc_flags |= SC_OP_TSF_RESET;

	mutex_unlock(&sc->mutex);

	return 0;
}
2310
/*
 * mac80211 config_interface callback: apply per-VIF changes (BSSID,
 * beacon allocation/enable, opmode transition to AP) and refresh the
 * key-cache MAC binding.  Serialized against the other callbacks with
 * sc->mutex.
 *
 * Returns 0 on success or the error from beacon allocation.
 */
static int ath9k_config_interface(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  struct ieee80211_if_conf *conf)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_vif *avp = (void *)vif->drv_priv;
	u32 rfilt = 0;
	int error, i;

	mutex_lock(&sc->mutex);

	/* TODO: Need to decide which hw opmode to use for multi-interface
	 * cases */
	if (vif->type == NL80211_IFTYPE_AP &&
	    ah->opmode != NL80211_IFTYPE_AP) {
		/* Transitioning into AP mode: first park the hardware in
		 * station mode with our own MAC as BSSID, then request a
		 * full reset so the new opmode takes effect cleanly. */
		ah->opmode = NL80211_IFTYPE_STATION;
		ath9k_hw_setopmode(ah);
		memcpy(sc->curbssid, sc->sc_ah->macaddr, ETH_ALEN);
		sc->curaid = 0;
		ath9k_hw_write_associd(sc);
		/* Request full reset to get hw opmode changed properly */
		sc->sc_flags |= SC_OP_FULL_RESET;
	}

	if ((conf->changed & IEEE80211_IFCC_BSSID) &&
	    !is_zero_ether_addr(conf->bssid)) {
		switch (vif->type) {
		case NL80211_IFTYPE_STATION:
		case NL80211_IFTYPE_ADHOC:
			/* Set BSSID */
			memcpy(sc->curbssid, conf->bssid, ETH_ALEN);
			sc->curaid = 0;
			ath9k_hw_write_associd(sc);

			/* Set aggregation protection mode parameters */
			sc->config.ath_aggr_prot = 0;

			/* NOTE(review): rfilt is never assigned before
			 * this print, so it always logs 0 here. */
			DPRINTF(sc, ATH_DBG_CONFIG,
				"RX filter 0x%x bssid %pM aid 0x%x\n",
				rfilt, sc->curbssid, sc->curaid);

			/* need to reconfigure the beacon */
			sc->sc_flags &= ~SC_OP_BEACONS ;

			break;
		default:
			break;
		}
	}

	/* Beacons are set up only for the beaconing opmodes (IBSS/AP) */
	if ((vif->type == NL80211_IFTYPE_ADHOC) ||
	    (vif->type == NL80211_IFTYPE_AP)) {
		if ((conf->changed & IEEE80211_IFCC_BEACON) ||
		    (conf->changed & IEEE80211_IFCC_BEACON_ENABLED &&
		     conf->enable_beacon)) {
			/*
			 * Allocate and setup the beacon frame.
			 *
			 * Stop any previous beacon DMA. This may be
			 * necessary, for example, when an ibss merge
			 * causes reconfiguration; we may be called
			 * with beacon transmission active.
			 */
			ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

			error = ath_beacon_alloc(sc, 0);
			if (error != 0) {
				mutex_unlock(&sc->mutex);
				return error;
			}

			ath_beacon_config(sc, 0);
		}
	}

	/* Check for WLAN_CAPABILITY_PRIVACY ? */
	/* Rebind any valid WEP default keys to the (new) BSSID */
	if ((avp->av_opmode != NL80211_IFTYPE_STATION)) {
		for (i = 0; i < IEEE80211_WEP_NKID; i++)
			if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i))
				ath9k_hw_keysetmac(sc->sc_ah,
						   (u16)i,
						   sc->curbssid);
	}

	/* Only legacy IBSS for now */
	if (vif->type == NL80211_IFTYPE_ADHOC)
		ath_update_chainmask(sc, 0);

	mutex_unlock(&sc->mutex);

	return 0;
}
2404
2405 #define SUPPORTED_FILTERS \
2406 (FIF_PROMISC_IN_BSS | \
2407 FIF_ALLMULTI | \
2408 FIF_CONTROL | \
2409 FIF_OTHER_BSS | \
2410 FIF_BCN_PRBRESP_PROMISC | \
2411 FIF_FCSFAIL)
2412
2413 /* FIXME: sc->sc_full_reset ? */
2414 static void ath9k_configure_filter(struct ieee80211_hw *hw,
2415 unsigned int changed_flags,
2416 unsigned int *total_flags,
2417 int mc_count,
2418 struct dev_mc_list *mclist)
2419 {
2420 struct ath_softc *sc = hw->priv;
2421 u32 rfilt;
2422
2423 changed_flags &= SUPPORTED_FILTERS;
2424 *total_flags &= SUPPORTED_FILTERS;
2425
2426 sc->rx.rxfilter = *total_flags;
2427 rfilt = ath_calcrxfilter(sc);
2428 ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
2429
2430 DPRINTF(sc, ATH_DBG_CONFIG, "Set HW RX filter: 0x%x\n", sc->rx.rxfilter);
2431 }
2432
2433 static void ath9k_sta_notify(struct ieee80211_hw *hw,
2434 struct ieee80211_vif *vif,
2435 enum sta_notify_cmd cmd,
2436 struct ieee80211_sta *sta)
2437 {
2438 struct ath_softc *sc = hw->priv;
2439
2440 switch (cmd) {
2441 case STA_NOTIFY_ADD:
2442 ath_node_attach(sc, sta);
2443 break;
2444 case STA_NOTIFY_REMOVE:
2445 ath_node_detach(sc, sta);
2446 break;
2447 default:
2448 break;
2449 }
2450 }
2451
2452 static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
2453 const struct ieee80211_tx_queue_params *params)
2454 {
2455 struct ath_softc *sc = hw->priv;
2456 struct ath9k_tx_queue_info qi;
2457 int ret = 0, qnum;
2458
2459 if (queue >= WME_NUM_AC)
2460 return 0;
2461
2462 mutex_lock(&sc->mutex);
2463
2464 qi.tqi_aifs = params->aifs;
2465 qi.tqi_cwmin = params->cw_min;
2466 qi.tqi_cwmax = params->cw_max;
2467 qi.tqi_burstTime = params->txop;
2468 qnum = ath_get_hal_qnum(queue, sc);
2469
2470 DPRINTF(sc, ATH_DBG_CONFIG,
2471 "Configure tx [queue/halq] [%d/%d], "
2472 "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
2473 queue, qnum, params->aifs, params->cw_min,
2474 params->cw_max, params->txop);
2475
2476 ret = ath_txq_update(sc, qnum, &qi);
2477 if (ret)
2478 DPRINTF(sc, ATH_DBG_FATAL, "TXQ Update failed\n");
2479
2480 mutex_unlock(&sc->mutex);
2481
2482 return ret;
2483 }
2484
2485 static int ath9k_set_key(struct ieee80211_hw *hw,
2486 enum set_key_cmd cmd,
2487 struct ieee80211_vif *vif,
2488 struct ieee80211_sta *sta,
2489 struct ieee80211_key_conf *key)
2490 {
2491 struct ath_softc *sc = hw->priv;
2492 int ret = 0;
2493
2494 if (modparam_nohwcrypt)
2495 return -ENOSPC;
2496
2497 mutex_lock(&sc->mutex);
2498 ath9k_ps_wakeup(sc);
2499 DPRINTF(sc, ATH_DBG_KEYCACHE, "Set HW Key\n");
2500
2501 switch (cmd) {
2502 case SET_KEY:
2503 ret = ath_key_config(sc, vif, sta, key);
2504 if (ret >= 0) {
2505 key->hw_key_idx = ret;
2506 /* push IV and Michael MIC generation to stack */
2507 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
2508 if (key->alg == ALG_TKIP)
2509 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
2510 if (sc->sc_ah->sw_mgmt_crypto && key->alg == ALG_CCMP)
2511 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
2512 ret = 0;
2513 }
2514 break;
2515 case DISABLE_KEY:
2516 ath_key_delete(sc, key);
2517 break;
2518 default:
2519 ret = -EINVAL;
2520 }
2521
2522 ath9k_ps_restore(sc);
2523 mutex_unlock(&sc->mutex);
2524
2525 return ret;
2526 }
2527
2528 static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2529 struct ieee80211_vif *vif,
2530 struct ieee80211_bss_conf *bss_conf,
2531 u32 changed)
2532 {
2533 struct ath_softc *sc = hw->priv;
2534
2535 mutex_lock(&sc->mutex);
2536
2537 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
2538 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
2539 bss_conf->use_short_preamble);
2540 if (bss_conf->use_short_preamble)
2541 sc->sc_flags |= SC_OP_PREAMBLE_SHORT;
2542 else
2543 sc->sc_flags &= ~SC_OP_PREAMBLE_SHORT;
2544 }
2545
2546 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
2547 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
2548 bss_conf->use_cts_prot);
2549 if (bss_conf->use_cts_prot &&
2550 hw->conf.channel->band != IEEE80211_BAND_5GHZ)
2551 sc->sc_flags |= SC_OP_PROTECT_ENABLE;
2552 else
2553 sc->sc_flags &= ~SC_OP_PROTECT_ENABLE;
2554 }
2555
2556 if (changed & BSS_CHANGED_ASSOC) {
2557 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
2558 bss_conf->assoc);
2559 ath9k_bss_assoc_info(sc, vif, bss_conf);
2560 }
2561
2562 mutex_unlock(&sc->mutex);
2563 }
2564
2565 static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
2566 {
2567 u64 tsf;
2568 struct ath_softc *sc = hw->priv;
2569
2570 mutex_lock(&sc->mutex);
2571 tsf = ath9k_hw_gettsf64(sc->sc_ah);
2572 mutex_unlock(&sc->mutex);
2573
2574 return tsf;
2575 }
2576
2577 static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
2578 {
2579 struct ath_softc *sc = hw->priv;
2580
2581 mutex_lock(&sc->mutex);
2582 ath9k_hw_settsf64(sc->sc_ah, tsf);
2583 mutex_unlock(&sc->mutex);
2584 }
2585
2586 static void ath9k_reset_tsf(struct ieee80211_hw *hw)
2587 {
2588 struct ath_softc *sc = hw->priv;
2589
2590 mutex_lock(&sc->mutex);
2591 ath9k_hw_reset_tsf(sc->sc_ah);
2592 mutex_unlock(&sc->mutex);
2593 }
2594
2595 static int ath9k_ampdu_action(struct ieee80211_hw *hw,
2596 enum ieee80211_ampdu_mlme_action action,
2597 struct ieee80211_sta *sta,
2598 u16 tid, u16 *ssn)
2599 {
2600 struct ath_softc *sc = hw->priv;
2601 int ret = 0;
2602
2603 switch (action) {
2604 case IEEE80211_AMPDU_RX_START:
2605 if (!(sc->sc_flags & SC_OP_RXAGGR))
2606 ret = -ENOTSUPP;
2607 break;
2608 case IEEE80211_AMPDU_RX_STOP:
2609 break;
2610 case IEEE80211_AMPDU_TX_START:
2611 ret = ath_tx_aggr_start(sc, sta, tid, ssn);
2612 if (ret < 0)
2613 DPRINTF(sc, ATH_DBG_FATAL,
2614 "Unable to start TX aggregation\n");
2615 else
2616 ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid);
2617 break;
2618 case IEEE80211_AMPDU_TX_STOP:
2619 ret = ath_tx_aggr_stop(sc, sta, tid);
2620 if (ret < 0)
2621 DPRINTF(sc, ATH_DBG_FATAL,
2622 "Unable to stop TX aggregation\n");
2623
2624 ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
2625 break;
2626 case IEEE80211_AMPDU_TX_RESUME:
2627 ath_tx_aggr_resume(sc, sta, tid);
2628 break;
2629 default:
2630 DPRINTF(sc, ATH_DBG_FATAL, "Unknown AMPDU action\n");
2631 }
2632
2633 return ret;
2634 }
2635
2636 static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
2637 {
2638 struct ath_softc *sc = hw->priv;
2639
2640 mutex_lock(&sc->mutex);
2641 sc->sc_flags |= SC_OP_SCANNING;
2642 mutex_unlock(&sc->mutex);
2643 }
2644
2645 static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
2646 {
2647 struct ath_softc *sc = hw->priv;
2648
2649 mutex_lock(&sc->mutex);
2650 sc->sc_flags &= ~SC_OP_SCANNING;
2651 mutex_unlock(&sc->mutex);
2652 }
2653
/* mac80211 callback table exported to the bus-attach code */
struct ieee80211_ops ath9k_ops = {
	.tx 		    = ath9k_tx,
	.start 		    = ath9k_start,
	.stop 		    = ath9k_stop,
	.add_interface 	    = ath9k_add_interface,
	.remove_interface   = ath9k_remove_interface,
	.config 	    = ath9k_config,
	.config_interface   = ath9k_config_interface,
	.configure_filter   = ath9k_configure_filter,
	.sta_notify         = ath9k_sta_notify,
	.conf_tx 	    = ath9k_conf_tx,
	.bss_info_changed   = ath9k_bss_info_changed,
	.set_key            = ath9k_set_key,
	.get_tsf 	    = ath9k_get_tsf,
	.set_tsf 	    = ath9k_set_tsf,
	.reset_tsf 	    = ath9k_reset_tsf,
	.ampdu_action       = ath9k_ampdu_action,
	.sw_scan_start      = ath9k_sw_scan_start,
	.sw_scan_complete   = ath9k_sw_scan_complete,
};
2674
/* MAC/baseband silicon revision -> human-readable chip name */
static struct {
	u32 version;
	const char * name;
} ath_mac_bb_names[] = {
	{ AR_SREV_VERSION_5416_PCI, 	"5416" },
	{ AR_SREV_VERSION_5416_PCIE, 	"5418" },
	{ AR_SREV_VERSION_9100, 	"9100" },
	{ AR_SREV_VERSION_9160, 	"9160" },
	{ AR_SREV_VERSION_9280, 	"9280" },
	{ AR_SREV_VERSION_9285, 	"9285" }
};
2686
/* RF silicon major revision -> human-readable radio name */
static struct {
	u16 version;
	const char * name;
} ath_rf_names[] = {
	/* NOTE(review): revision 0 also maps to "5133" - presumably the
	 * default for parts that report no RF revision; confirm. */
	{ 0,				"5133" },
	{ AR_RAD5133_SREV_MAJOR, 	"5133" },
	{ AR_RAD5122_SREV_MAJOR, 	"5122" },
	{ AR_RAD2133_SREV_MAJOR, 	"2133" },
	{ AR_RAD2122_SREV_MAJOR, 	"2122" }
};
2697
2698 /*
2699 * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
2700 */
2701 const char *
2702 ath_mac_bb_name(u32 mac_bb_version)
2703 {
2704 int i;
2705
2706 for (i=0; i<ARRAY_SIZE(ath_mac_bb_names); i++) {
2707 if (ath_mac_bb_names[i].version == mac_bb_version) {
2708 return ath_mac_bb_names[i].name;
2709 }
2710 }
2711
2712 return "????";
2713 }
2714
2715 /*
2716 * Return the RF name. "????" is returned if the RF is unknown.
2717 */
2718 const char *
2719 ath_rf_name(u16 rf_version)
2720 {
2721 int i;
2722
2723 for (i=0; i<ARRAY_SIZE(ath_rf_names); i++) {
2724 if (ath_rf_names[i].version == rf_version) {
2725 return ath_rf_names[i].name;
2726 }
2727 }
2728
2729 return "????";
2730 }
2731
/*
 * Module init: register the rate-control algorithm first, then the PCI
 * and AHB bus glue.  Each failure path unwinds everything registered
 * before it (goto cleanup chain).
 */
static int __init ath9k_init(void)
{
	int error;

	/* Register rate control algorithm */
	error = ath_rate_control_register();
	if (error != 0) {
		printk(KERN_ERR
			"ath9k: Unable to register rate control "
			"algorithm: %d\n",
			error);
		goto err_out;
	}

	error = ath_pci_init();
	if (error < 0) {
		/* NOTE(review): this message also fires when
		 * ath_pci_init() fails for other reasons, and the real
		 * error code is replaced by -ENODEV - confirm intent. */
		printk(KERN_ERR
			"ath9k: No PCI devices found, driver not installed.\n");
		error = -ENODEV;
		goto err_rate_unregister;
	}

	error = ath_ahb_init();
	if (error < 0) {
		error = -ENODEV;
		goto err_pci_exit;
	}

	return 0;

 err_pci_exit:
	ath_pci_exit();

 err_rate_unregister:
	ath_rate_control_unregister();
 err_out:
	return error;
}
module_init(ath9k_init);
2771
/* Module exit: tear down the bus glue in reverse registration order */
static void __exit ath9k_exit(void)
{
	ath_ahb_exit();
	ath_pci_exit();
	ath_rate_control_unregister();
	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);
/* (removed: cgit page-generation footer - not part of the source file) */