ath9k: Remove all the useless ah_ variable prefixes
drivers/net/wireless/ath9k/main.c
1 /*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include <linux/nl80211.h>
18 #include "ath9k.h"
19
20 #define ATH_PCI_VERSION "0.1"
21
22 static char *dev_info = "ath9k";
23
24 MODULE_AUTHOR("Atheros Communications");
25 MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
26 MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
27 MODULE_LICENSE("Dual BSD/GPL");
28
29 /* We use the hw_value as an index into our private channel structure */
30
31 #define CHAN2G(_freq, _idx) { \
32 .center_freq = (_freq), \
33 .hw_value = (_idx), \
34 .max_power = 30, \
35 }
36
37 #define CHAN5G(_freq, _idx) { \
38 .band = IEEE80211_BAND_5GHZ, \
39 .center_freq = (_freq), \
40 .hw_value = (_idx), \
41 .max_power = 30, \
42 }
43
44 /* Some 2 GHz radios are actually tunable from 2312 to 2732 MHz
45 * in 5 MHz steps, but we only support the channels for which
46 * we know we have calibration data on all cards, so this
47 * table can stay static */
48 static struct ieee80211_channel ath9k_2ghz_chantable[] = {
49 CHAN2G(2412, 0), /* Channel 1 */
50 CHAN2G(2417, 1), /* Channel 2 */
51 CHAN2G(2422, 2), /* Channel 3 */
52 CHAN2G(2427, 3), /* Channel 4 */
53 CHAN2G(2432, 4), /* Channel 5 */
54 CHAN2G(2437, 5), /* Channel 6 */
55 CHAN2G(2442, 6), /* Channel 7 */
56 CHAN2G(2447, 7), /* Channel 8 */
57 CHAN2G(2452, 8), /* Channel 9 */
58 CHAN2G(2457, 9), /* Channel 10 */
59 CHAN2G(2462, 10), /* Channel 11 */
60 CHAN2G(2467, 11), /* Channel 12 */
61 CHAN2G(2472, 12), /* Channel 13 */
62 CHAN2G(2484, 13), /* Channel 14 */
63 };
64
65 /* Some 5 GHz radios are actually tunable on XXXX-YYYY
66 * in 5 MHz steps, but we only support the channels for which
67 * we know we have calibration data on all cards, so this
68 * table can stay static */
69 static struct ieee80211_channel ath9k_5ghz_chantable[] = {
70 /* _We_ call this UNII 1 */
71 CHAN5G(5180, 14), /* Channel 36 */
72 CHAN5G(5200, 15), /* Channel 40 */
73 CHAN5G(5220, 16), /* Channel 44 */
74 CHAN5G(5240, 17), /* Channel 48 */
75 /* _We_ call this UNII 2 */
76 CHAN5G(5260, 18), /* Channel 52 */
77 CHAN5G(5280, 19), /* Channel 56 */
78 CHAN5G(5300, 20), /* Channel 60 */
79 CHAN5G(5320, 21), /* Channel 64 */
80 /* _We_ call this "Middle band" */
81 CHAN5G(5500, 22), /* Channel 100 */
82 CHAN5G(5520, 23), /* Channel 104 */
83 CHAN5G(5540, 24), /* Channel 108 */
84 CHAN5G(5560, 25), /* Channel 112 */
85 CHAN5G(5580, 26), /* Channel 116 */
86 CHAN5G(5600, 27), /* Channel 120 */
87 CHAN5G(5620, 28), /* Channel 124 */
88 CHAN5G(5640, 29), /* Channel 128 */
89 CHAN5G(5660, 30), /* Channel 132 */
90 CHAN5G(5680, 31), /* Channel 136 */
91 CHAN5G(5700, 32), /* Channel 140 */
92 /* _We_ call this UNII 3 */
93 CHAN5G(5745, 33), /* Channel 149 */
94 CHAN5G(5765, 34), /* Channel 153 */
95 CHAN5G(5785, 35), /* Channel 157 */
96 CHAN5G(5805, 36), /* Channel 161 */
97 CHAN5G(5825, 37), /* Channel 165 */
98 };
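/*
 * Note: the hw_value indices run consecutively across both tables; they are
 * used in ath9k_start() to index the ath9k_channel array in
 * sc->sc_ah->channels.
 */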
99
100 static void ath_cache_conf_rate(struct ath_softc *sc,
101 struct ieee80211_conf *conf)
102 {
103 switch (conf->channel->band) {
104 case IEEE80211_BAND_2GHZ:
105 if (conf_is_ht20(conf))
106 sc->cur_rate_table =
107 sc->hw_rate_table[ATH9K_MODE_11NG_HT20];
108 else if (conf_is_ht40_minus(conf))
109 sc->cur_rate_table =
110 sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS];
111 else if (conf_is_ht40_plus(conf))
112 sc->cur_rate_table =
113 sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS];
114 else
115 sc->cur_rate_table =
116 sc->hw_rate_table[ATH9K_MODE_11G];
117 break;
118 case IEEE80211_BAND_5GHZ:
119 if (conf_is_ht20(conf))
120 sc->cur_rate_table =
121 sc->hw_rate_table[ATH9K_MODE_11NA_HT20];
122 else if (conf_is_ht40_minus(conf))
123 sc->cur_rate_table =
124 sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS];
125 else if (conf_is_ht40_plus(conf))
126 sc->cur_rate_table =
127 sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS];
128 else
129 sc->cur_rate_table =
130 sc->hw_rate_table[ATH9K_MODE_11A];
131 break;
132 default:
133 BUG_ON(1);
134 break;
135 }
136 }
137
138 static void ath_update_txpow(struct ath_softc *sc)
139 {
140 struct ath_hw *ah = sc->sc_ah;
141 u32 txpow;
142
143 if (sc->curtxpow != sc->config.txpowlimit) {
144 ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit);
145 /* read back in case value is clamped */
146 ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
147 sc->curtxpow = txpow;
148 }
149 }
150
151 static u8 parse_mpdudensity(u8 mpdudensity)
152 {
153 /*
154 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
155 * 0 for no restriction
156 * 1 for 1/4 us
157 * 2 for 1/2 us
158 * 3 for 1 us
159 * 4 for 2 us
160 * 5 for 4 us
161 * 6 for 8 us
162 * 7 for 16 us
163 */
164 switch (mpdudensity) {
165 case 0:
166 return 0;
167 case 1:
168 case 2:
169 case 3:
170 /* Our lower layer calculations limit our precision to
171 1 microsecond */
172 return 1;
173 case 4:
174 return 2;
175 case 5:
176 return 4;
177 case 6:
178 return 8;
179 case 7:
180 return 16;
181 default:
182 return 0;
183 }
184 }
185
186 static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
187 {
188 struct ath_rate_table *rate_table = NULL;
189 struct ieee80211_supported_band *sband;
190 struct ieee80211_rate *rate;
191 int i, maxrates;
192
193 switch (band) {
194 case IEEE80211_BAND_2GHZ:
195 rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
196 break;
197 case IEEE80211_BAND_5GHZ:
198 rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
199 break;
200 default:
201 break;
202 }
203
204 if (rate_table == NULL)
205 return;
206
207 sband = &sc->sbands[band];
208 rate = sc->rates[band];
209
210 if (rate_table->rate_cnt > ATH_RATE_MAX)
211 maxrates = ATH_RATE_MAX;
212 else
213 maxrates = rate_table->rate_cnt;
214
215 for (i = 0; i < maxrates; i++) {
216 rate[i].bitrate = rate_table->info[i].ratekbps / 100;
217 rate[i].hw_value = rate_table->info[i].ratecode;
218 if (rate_table->info[i].short_preamble) {
219 rate[i].hw_value_short = rate_table->info[i].ratecode |
220 rate_table->info[i].short_preamble;
221 rate[i].flags = IEEE80211_RATE_SHORT_PREAMBLE;
222 }
223 sband->n_bitrates++;
224
225 DPRINTF(sc, ATH_DBG_CONFIG, "Rate: %2dMbps, ratecode: %2d\n",
226 rate[i].bitrate / 10, rate[i].hw_value);
227 }
228 }
229
230 /*
231 * Set/change channels. If the channel is really being changed, it's done
232 * by resetting the chip. To accomplish this we must first clean up any pending
233 * DMA, then restart everything.
234 */
235 static int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
236 {
237 struct ath_hw *ah = sc->sc_ah;
238 bool fastcc = true, stopped;
239 struct ieee80211_hw *hw = sc->hw;
240 struct ieee80211_channel *channel = hw->conf.channel;
241 int r;
242
243 if (sc->sc_flags & SC_OP_INVALID)
244 return -EIO;
245
246 ath9k_ps_wakeup(sc);
247
248 /*
249 * This is only performed if the channel settings have
250 * actually changed.
251 *
252 * To switch channels clear any pending DMA operations;
253 * wait long enough for the RX fifo to drain, reset the
254 * hardware at the new frequency, and then re-enable
255 * the relevant bits of the h/w.
256 */
257 ath9k_hw_set_interrupts(ah, 0);
258 ath_drain_all_txq(sc, false);
259 stopped = ath_stoprecv(sc);
260
261 /* XXX: do not flush receive queue here. We don't want
262 * to flush data frames already in queue because of
263 * changing channel. */
264
265 if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
266 fastcc = false;
267
268 DPRINTF(sc, ATH_DBG_CONFIG,
269 "(%u MHz) -> (%u MHz), chanwidth: %d\n",
270 sc->sc_ah->curchan->channel,
271 channel->center_freq, sc->tx_chan_width);
272
273 spin_lock_bh(&sc->sc_resetlock);
274
275 r = ath9k_hw_reset(ah, hchan, fastcc);
276 if (r) {
277 DPRINTF(sc, ATH_DBG_FATAL,
278 "Unable to reset channel (%u Mhz) "
279 "reset status %u\n",
280 channel->center_freq, r);
281 spin_unlock_bh(&sc->sc_resetlock);
282 return r;
283 }
284 spin_unlock_bh(&sc->sc_resetlock);
285
286 sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
287 sc->sc_flags &= ~SC_OP_FULL_RESET;
288
289 if (ath_startrecv(sc) != 0) {
290 DPRINTF(sc, ATH_DBG_FATAL,
291 "Unable to restart recv logic\n");
292 return -EIO;
293 }
294
295 ath_cache_conf_rate(sc, &hw->conf);
296 ath_update_txpow(sc);
297 ath9k_hw_set_interrupts(ah, sc->imask);
298 ath9k_ps_restore(sc);
299 return 0;
300 }
301
302 /*
303 * This routine performs the periodic noise floor calibration function
304 * that is used to adjust and optimize the chip performance. This
305 * takes environmental changes (location, temperature) into account.
306 * When the task is complete, it reschedules itself depending on the
307 * appropriate interval that was calculated.
308 */
309 static void ath_ani_calibrate(unsigned long data)
310 {
311 struct ath_softc *sc;
312 struct ath_hw *ah;
313 bool longcal = false;
314 bool shortcal = false;
315 bool aniflag = false;
316 unsigned int timestamp = jiffies_to_msecs(jiffies);
317 u32 cal_interval;
318
319 sc = (struct ath_softc *)data;
320 ah = sc->sc_ah;
321
322 /*
323 * don't calibrate when we're scanning.
324 * we are most likely not on our home channel.
325 */
326 if (sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC)
327 return;
328
329 /* Long calibration runs independently of short calibration. */
330 if ((timestamp - sc->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
331 longcal = true;
332 DPRINTF(sc, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
333 sc->ani.longcal_timer = timestamp;
334 }
335
336 /* Short calibration applies only while caldone is false */
337 if (!sc->ani.caldone) {
338 if ((timestamp - sc->ani.shortcal_timer) >=
339 ATH_SHORT_CALINTERVAL) {
340 shortcal = true;
341 DPRINTF(sc, ATH_DBG_ANI, "shortcal @%lu\n", jiffies);
342 sc->ani.shortcal_timer = timestamp;
343 sc->ani.resetcal_timer = timestamp;
344 }
345 } else {
346 if ((timestamp - sc->ani.resetcal_timer) >=
347 ATH_RESTART_CALINTERVAL) {
348 sc->ani.caldone = ath9k_hw_reset_calvalid(ah);
349 if (sc->ani.caldone)
350 sc->ani.resetcal_timer = timestamp;
351 }
352 }
353
354 /* Verify whether we must check ANI */
355 if ((timestamp - sc->ani.checkani_timer) >=
356 ATH_ANI_POLLINTERVAL) {
357 aniflag = true;
358 sc->ani.checkani_timer = timestamp;
359 }
360
361 /* Skip all processing if there's nothing to do. */
362 if (longcal || shortcal || aniflag) {
363 /* Call ANI routine if necessary */
364 if (aniflag)
365 ath9k_hw_ani_monitor(ah, &sc->nodestats,
366 ah->curchan);
367
368 /* Perform calibration if necessary */
369 if (longcal || shortcal) {
370 bool iscaldone = false;
371
372 if (ath9k_hw_calibrate(ah, ah->curchan,
373 sc->rx_chainmask, longcal,
374 &iscaldone)) {
375 if (longcal)
376 sc->ani.noise_floor =
377 ath9k_hw_getchan_noise(ah,
378 ah->curchan);
379
380 DPRINTF(sc, ATH_DBG_ANI,
381 "calibrate chan %u/%x nf: %d\n",
382 ah->curchan->channel,
383 ah->curchan->channelFlags,
384 sc->ani.noise_floor);
385 } else {
386 DPRINTF(sc, ATH_DBG_ANY,
387 "calibrate chan %u/%x failed\n",
388 ah->curchan->channel,
389 ah->curchan->channelFlags);
390 }
391 sc->ani.caldone = iscaldone;
392 }
393 }
394
395 /*
396 * Set timer interval based on previous results.
397 * The interval must be the shortest necessary to satisfy ANI,
398 * short calibration and long calibration.
399 */
400 cal_interval = ATH_LONG_CALINTERVAL;
401 if (sc->sc_ah->config.enable_ani)
402 cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
403 if (!sc->ani.caldone)
404 cal_interval = min(cal_interval, (u32)ATH_SHORT_CALINTERVAL);
405
406 mod_timer(&sc->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
407 }
408
409 /*
410 * Update tx/rx chainmask. For legacy association,
411 * hard code chainmask to 1x1, for 11n association, use
412 * the chainmask configuration, for bt coexistence, use
413 * the chainmask configuration even in legacy mode.
414 */
415 static void ath_update_chainmask(struct ath_softc *sc, int is_ht)
416 {
417 sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
418 if (is_ht ||
419 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BT_COEX)) {
420 sc->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
421 sc->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
422 } else {
423 sc->tx_chainmask = 1;
424 sc->rx_chainmask = 1;
425 }
426
427 DPRINTF(sc, ATH_DBG_CONFIG, "tx chmask: %d, rx chmask: %d\n",
428 sc->tx_chainmask, sc->rx_chainmask);
429 }
430
431 static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
432 {
433 struct ath_node *an;
434
435 an = (struct ath_node *)sta->drv_priv;
436
437 if (sc->sc_flags & SC_OP_TXAGGR)
438 ath_tx_node_init(sc, an);
439
440 an->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
441 sta->ht_cap.ampdu_factor);
442 an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);
443 }
444
445 static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
446 {
447 struct ath_node *an = (struct ath_node *)sta->drv_priv;
448
449 if (sc->sc_flags & SC_OP_TXAGGR)
450 ath_tx_node_cleanup(sc, an);
451 }
452
453 static void ath9k_tasklet(unsigned long data)
454 {
455 struct ath_softc *sc = (struct ath_softc *)data;
456 u32 status = sc->intrstatus;
457
458 if (status & ATH9K_INT_FATAL) {
459 /* need a chip reset */
460 ath_reset(sc, false);
461 return;
462 } else {
463
464 if (status &
465 (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
466 spin_lock_bh(&sc->rx.rxflushlock);
467 ath_rx_tasklet(sc, 0);
468 spin_unlock_bh(&sc->rx.rxflushlock);
469 }
470 /* XXX: optimize this */
471 if (status & ATH9K_INT_TX)
472 ath_tx_tasklet(sc);
473 }
474
475 /* re-enable hardware interrupt */
476 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
477 }
478
479 irqreturn_t ath_isr(int irq, void *dev)
480 {
481 struct ath_softc *sc = dev;
482 struct ath_hw *ah = sc->sc_ah;
483 enum ath9k_int status;
484 bool sched = false;
485
486 do {
487 if (sc->sc_flags & SC_OP_INVALID) {
488 /*
489 * The hardware is not ready/present, don't
490 * touch anything. Note this can happen early
491 * on if the IRQ is shared.
492 */
493 return IRQ_NONE;
494 }
495 if (!ath9k_hw_intrpend(ah)) { /* shared irq, not for us */
496 return IRQ_NONE;
497 }
498
499 /*
500 * Figure out the reason(s) for the interrupt. Note
501 * that the hal returns a pseudo-ISR that may include
502 * bits we haven't explicitly enabled so we mask the
503 * value to ensure we only process bits we requested.
504 */
505 ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
506
507 status &= sc->imask; /* discard unasked-for bits */
508
509 /*
510 * If there are no status bits set, then this interrupt was not
511 * for me (should have been caught above).
512 */
513 if (!status)
514 return IRQ_NONE;
515
516 sc->intrstatus = status;
517
518 if (status & ATH9K_INT_FATAL) {
519 /* need a chip reset */
520 sched = true;
521 } else if (status & ATH9K_INT_RXORN) {
522 /* need a chip reset */
523 sched = true;
524 } else {
525 if (status & ATH9K_INT_SWBA) {
526 /* schedule a tasklet for beacon handling */
527 tasklet_schedule(&sc->bcon_tasklet);
528 }
529 if (status & ATH9K_INT_RXEOL) {
530 /*
531 * NB: the hardware should re-read the link when
532 * RXE bit is written, but it doesn't work
533 * at least on older hardware revs.
534 */
535 sched = true;
536 }
537
538 if (status & ATH9K_INT_TXURN)
539 /* bump tx trigger level */
540 ath9k_hw_updatetxtriglevel(ah, true);
541 /* XXX: optimize this */
542 if (status & ATH9K_INT_RX)
543 sched = true;
544 if (status & ATH9K_INT_TX)
545 sched = true;
546 if (status & ATH9K_INT_BMISS)
547 sched = true;
548 /* carrier sense timeout */
549 if (status & ATH9K_INT_CST)
550 sched = true;
551 if (status & ATH9K_INT_MIB) {
552 /*
553 * Disable interrupts until we service the MIB
554 * interrupt; otherwise it will continue to
555 * fire.
556 */
557 ath9k_hw_set_interrupts(ah, 0);
558 /*
559 * Let the hal handle the event. We assume
560 * it will clear whatever condition caused
561 * the interrupt.
562 */
563 ath9k_hw_procmibevent(ah, &sc->nodestats);
564 ath9k_hw_set_interrupts(ah, sc->imask);
565 }
566 if (status & ATH9K_INT_TIM_TIMER) {
567 if (!(ah->caps.hw_caps &
568 ATH9K_HW_CAP_AUTOSLEEP)) {
569 /* Clear RxAbort bit so that we can
570 * receive frames */
571 ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
572 ath9k_hw_setrxabort(ah, 0);
573 sched = true;
574 sc->sc_flags |= SC_OP_WAIT_FOR_BEACON;
575 }
576 }
577 }
578 } while (0);
579
580 ath_debug_stat_interrupt(sc, status);
581
582 if (sched) {
583 /* turn off every interrupt except SWBA */
584 ath9k_hw_set_interrupts(ah, (sc->imask & ATH9K_INT_SWBA));
585 tasklet_schedule(&sc->intr_tq);
586 }
587
588 return IRQ_HANDLED;
589 }
590
591 static u32 ath_get_extchanmode(struct ath_softc *sc,
592 struct ieee80211_channel *chan,
593 enum nl80211_channel_type channel_type)
594 {
595 u32 chanmode = 0;
596
597 switch (chan->band) {
598 case IEEE80211_BAND_2GHZ:
599 switch(channel_type) {
600 case NL80211_CHAN_NO_HT:
601 case NL80211_CHAN_HT20:
602 chanmode = CHANNEL_G_HT20;
603 break;
604 case NL80211_CHAN_HT40PLUS:
605 chanmode = CHANNEL_G_HT40PLUS;
606 break;
607 case NL80211_CHAN_HT40MINUS:
608 chanmode = CHANNEL_G_HT40MINUS;
609 break;
610 }
611 break;
612 case IEEE80211_BAND_5GHZ:
613 switch(channel_type) {
614 case NL80211_CHAN_NO_HT:
615 case NL80211_CHAN_HT20:
616 chanmode = CHANNEL_A_HT20;
617 break;
618 case NL80211_CHAN_HT40PLUS:
619 chanmode = CHANNEL_A_HT40PLUS;
620 break;
621 case NL80211_CHAN_HT40MINUS:
622 chanmode = CHANNEL_A_HT40MINUS;
623 break;
624 }
625 break;
626 default:
627 break;
628 }
629
630 return chanmode;
631 }
632
633 static int ath_keyset(struct ath_softc *sc, u16 keyix,
634 struct ath9k_keyval *hk, const u8 mac[ETH_ALEN])
635 {
636 bool status;
637
638 status = ath9k_hw_set_keycache_entry(sc->sc_ah,
639 keyix, hk, mac, false);
640
641 return status != false;
642 }
643
644 static int ath_setkey_tkip(struct ath_softc *sc, u16 keyix, const u8 *key,
645 struct ath9k_keyval *hk,
646 const u8 *addr)
647 {
648 const u8 *key_rxmic;
649 const u8 *key_txmic;
650
651 key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
652 key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
653
654 if (addr == NULL) {
655 /* Group key installation */
656 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
657 return ath_keyset(sc, keyix, hk, addr);
658 }
659 if (!sc->splitmic) {
660 /*
661 * data key goes at first index,
662 * the hal handles the MIC keys at index+64.
663 */
664 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
665 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
666 return ath_keyset(sc, keyix, hk, addr);
667 }
668 /*
669 * TX key goes at first index, RX key at +32.
670 * The hal handles the MIC keys at index+64.
671 */
672 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
673 if (!ath_keyset(sc, keyix, hk, NULL)) {
674 /* Txmic entry failed. No need to proceed further */
675 DPRINTF(sc, ATH_DBG_KEYCACHE,
676 "Setting TX MIC Key Failed\n");
677 return 0;
678 }
679
680 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
681 /* XXX delete tx key on failure? */
682 return ath_keyset(sc, keyix + 32, hk, addr);
683 }
684
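/*
 * Key cache slot bookkeeping: for a TKIP key at index i, bit i in sc->keymap
 * tracks the data/TX key and bit i + 64 its MIC entry; on hardware that needs
 * split MIC keys (sc->splitmic), the RX key and its MIC additionally occupy
 * bits i + 32 and i + 64 + 32. The helpers below search for an index whose
 * whole group of bits is still free.
 */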
685 static int ath_reserve_key_cache_slot_tkip(struct ath_softc *sc)
686 {
687 int i;
688
689 for (i = IEEE80211_WEP_NKID; i < sc->keymax / 2; i++) {
690 if (test_bit(i, sc->keymap) ||
691 test_bit(i + 64, sc->keymap))
692 continue; /* At least one part of TKIP key allocated */
693 if (sc->splitmic &&
694 (test_bit(i + 32, sc->keymap) ||
695 test_bit(i + 64 + 32, sc->keymap)))
696 continue; /* At least one part of TKIP key allocated */
697
698 /* Found a free slot for a TKIP key */
699 return i;
700 }
701 return -1;
702 }
703
704 static int ath_reserve_key_cache_slot(struct ath_softc *sc)
705 {
706 int i;
707
708 /* First, try to find slots that would not be available for TKIP. */
709 if (sc->splitmic) {
710 for (i = IEEE80211_WEP_NKID; i < sc->keymax / 4; i++) {
711 if (!test_bit(i, sc->keymap) &&
712 (test_bit(i + 32, sc->keymap) ||
713 test_bit(i + 64, sc->keymap) ||
714 test_bit(i + 64 + 32, sc->keymap)))
715 return i;
716 if (!test_bit(i + 32, sc->keymap) &&
717 (test_bit(i, sc->keymap) ||
718 test_bit(i + 64, sc->keymap) ||
719 test_bit(i + 64 + 32, sc->keymap)))
720 return i + 32;
721 if (!test_bit(i + 64, sc->keymap) &&
722 (test_bit(i , sc->keymap) ||
723 test_bit(i + 32, sc->keymap) ||
724 test_bit(i + 64 + 32, sc->keymap)))
725 return i + 64;
726 if (!test_bit(i + 64 + 32, sc->keymap) &&
727 (test_bit(i, sc->keymap) ||
728 test_bit(i + 32, sc->keymap) ||
729 test_bit(i + 64, sc->keymap)))
730 return i + 64 + 32;
731 }
732 } else {
733 for (i = IEEE80211_WEP_NKID; i < sc->keymax / 2; i++) {
734 if (!test_bit(i, sc->keymap) &&
735 test_bit(i + 64, sc->keymap))
736 return i;
737 if (test_bit(i, sc->keymap) &&
738 !test_bit(i + 64, sc->keymap))
739 return i + 64;
740 }
741 }
742
743 /* No partially used TKIP slots, pick any available slot */
744 for (i = IEEE80211_WEP_NKID; i < sc->keymax; i++) {
745 /* Do not allow slots that could be needed for TKIP group keys
746 * to be used. This limitation could be removed if we know that
747 * TKIP will not be used. */
748 if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
749 continue;
750 if (sc->splitmic) {
751 if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
752 continue;
753 if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
754 continue;
755 }
756
757 if (!test_bit(i, sc->keymap))
758 return i; /* Found a free slot for a key */
759 }
760
761 /* No free slot found */
762 return -1;
763 }
764
765 static int ath_key_config(struct ath_softc *sc,
766 struct ieee80211_sta *sta,
767 struct ieee80211_key_conf *key)
768 {
769 struct ath9k_keyval hk;
770 const u8 *mac = NULL;
771 int ret = 0;
772 int idx;
773
774 memset(&hk, 0, sizeof(hk));
775
776 switch (key->alg) {
777 case ALG_WEP:
778 hk.kv_type = ATH9K_CIPHER_WEP;
779 break;
780 case ALG_TKIP:
781 hk.kv_type = ATH9K_CIPHER_TKIP;
782 break;
783 case ALG_CCMP:
784 hk.kv_type = ATH9K_CIPHER_AES_CCM;
785 break;
786 default:
787 return -EOPNOTSUPP;
788 }
789
790 hk.kv_len = key->keylen;
791 memcpy(hk.kv_val, key->key, key->keylen);
792
793 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
794 /* For now, use the default keys for broadcast keys. This may
795 * need to change with virtual interfaces. */
796 idx = key->keyidx;
797 } else if (key->keyidx) {
798 struct ieee80211_vif *vif;
799
800 if (WARN_ON(!sta))
801 return -EOPNOTSUPP;
802 mac = sta->addr;
803
804 vif = sc->vifs[0];
805 if (vif->type != NL80211_IFTYPE_AP) {
806 /* Only keyidx 0 should be used with unicast key, but
807 * allow this for client mode for now. */
808 idx = key->keyidx;
809 } else
810 return -EIO;
811 } else {
812 if (WARN_ON(!sta))
813 return -EOPNOTSUPP;
814 mac = sta->addr;
815
816 if (key->alg == ALG_TKIP)
817 idx = ath_reserve_key_cache_slot_tkip(sc);
818 else
819 idx = ath_reserve_key_cache_slot(sc);
820 if (idx < 0)
821 return -ENOSPC; /* no free key cache entries */
822 }
823
824 if (key->alg == ALG_TKIP)
825 ret = ath_setkey_tkip(sc, idx, key->key, &hk, mac);
826 else
827 ret = ath_keyset(sc, idx, &hk, mac);
828
829 if (!ret)
830 return -EIO;
831
832 set_bit(idx, sc->keymap);
833 if (key->alg == ALG_TKIP) {
834 set_bit(idx + 64, sc->keymap);
835 if (sc->splitmic) {
836 set_bit(idx + 32, sc->keymap);
837 set_bit(idx + 64 + 32, sc->keymap);
838 }
839 }
840
841 return idx;
842 }
843
844 static void ath_key_delete(struct ath_softc *sc, struct ieee80211_key_conf *key)
845 {
846 ath9k_hw_keyreset(sc->sc_ah, key->hw_key_idx);
847 if (key->hw_key_idx < IEEE80211_WEP_NKID)
848 return;
849
850 clear_bit(key->hw_key_idx, sc->keymap);
851 if (key->alg != ALG_TKIP)
852 return;
853
854 clear_bit(key->hw_key_idx + 64, sc->keymap);
855 if (sc->splitmic) {
856 clear_bit(key->hw_key_idx + 32, sc->keymap);
857 clear_bit(key->hw_key_idx + 64 + 32, sc->keymap);
858 }
859 }
860
861 static void setup_ht_cap(struct ath_softc *sc,
862 struct ieee80211_sta_ht_cap *ht_info)
863 {
864 #define ATH9K_HT_CAP_MAXRXAMPDU_65536 0x3 /* 2 ^ 16 */
865 #define ATH9K_HT_CAP_MPDUDENSITY_8 0x6 /* 8 usec */
866
867 ht_info->ht_supported = true;
868 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
869 IEEE80211_HT_CAP_SM_PS |
870 IEEE80211_HT_CAP_SGI_40 |
871 IEEE80211_HT_CAP_DSSSCCK40;
872
873 ht_info->ampdu_factor = ATH9K_HT_CAP_MAXRXAMPDU_65536;
874 ht_info->ampdu_density = ATH9K_HT_CAP_MPDUDENSITY_8;
875
876 /* set up supported mcs set */
877 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
878
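/* A single RX chain can only receive one spatial stream (MCS 0-7);
 * two or more RX chains also enable the two-stream rates MCS 8-15. */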
879 switch(sc->rx_chainmask) {
880 case 1:
881 ht_info->mcs.rx_mask[0] = 0xff;
882 break;
883 case 3:
884 case 5:
885 case 7:
886 default:
887 ht_info->mcs.rx_mask[0] = 0xff;
888 ht_info->mcs.rx_mask[1] = 0xff;
889 break;
890 }
891
892 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
893 }
894
895 static void ath9k_bss_assoc_info(struct ath_softc *sc,
896 struct ieee80211_vif *vif,
897 struct ieee80211_bss_conf *bss_conf)
898 {
899 struct ath_vif *avp = (void *)vif->drv_priv;
900
901 if (bss_conf->assoc) {
902 DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info ASSOC %d, bssid: %pM\n",
903 bss_conf->aid, sc->curbssid);
904
905 /* New association, store aid */
906 if (avp->av_opmode == NL80211_IFTYPE_STATION) {
907 sc->curaid = bss_conf->aid;
908 ath9k_hw_write_associd(sc);
909 }
910
911 /* Configure the beacon */
912 ath_beacon_config(sc, 0);
913 sc->sc_flags |= SC_OP_BEACONS;
914
915 /* Reset rssi stats */
916 sc->nodestats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
917 sc->nodestats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
918 sc->nodestats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
919 sc->nodestats.ns_avgtxrate = ATH_RATE_DUMMY_MARKER;
920
921 /* Start ANI */
922 mod_timer(&sc->ani.timer,
923 jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
924
925 } else {
926 DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info DISSOC\n");
927 sc->curaid = 0;
928 }
929 }
930
931 /********************************/
932 /* LED functions */
933 /********************************/
934
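/*
 * Blink worker: toggles the LED and re-arms itself, shortening the on/off
 * periods as tx/rx activity (led_on_cnt/led_off_cnt) grows, so the blink
 * rate tracks traffic load (floored at 25/10 ms).
 */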
935 static void ath_led_blink_work(struct work_struct *work)
936 {
937 struct ath_softc *sc = container_of(work, struct ath_softc,
938 ath_led_blink_work.work);
939
940 if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
941 return;
942 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN,
943 (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
944
945 queue_delayed_work(sc->hw->workqueue, &sc->ath_led_blink_work,
946 (sc->sc_flags & SC_OP_LED_ON) ?
947 msecs_to_jiffies(sc->led_off_duration) :
948 msecs_to_jiffies(sc->led_on_duration));
949
950 sc->led_on_duration =
951 max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25);
952 sc->led_off_duration =
953 max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10);
954 sc->led_on_cnt = sc->led_off_cnt = 0;
955 if (sc->sc_flags & SC_OP_LED_ON)
956 sc->sc_flags &= ~SC_OP_LED_ON;
957 else
958 sc->sc_flags |= SC_OP_LED_ON;
959 }
960
961 static void ath_led_brightness(struct led_classdev *led_cdev,
962 enum led_brightness brightness)
963 {
964 struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
965 struct ath_softc *sc = led->sc;
966
967 switch (brightness) {
968 case LED_OFF:
969 if (led->led_type == ATH_LED_ASSOC ||
970 led->led_type == ATH_LED_RADIO) {
971 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN,
972 (led->led_type == ATH_LED_RADIO));
973 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
974 if (led->led_type == ATH_LED_RADIO)
975 sc->sc_flags &= ~SC_OP_LED_ON;
976 } else {
977 sc->led_off_cnt++;
978 }
979 break;
980 case LED_FULL:
981 if (led->led_type == ATH_LED_ASSOC) {
982 sc->sc_flags |= SC_OP_LED_ASSOCIATED;
983 queue_delayed_work(sc->hw->workqueue,
984 &sc->ath_led_blink_work, 0);
985 } else if (led->led_type == ATH_LED_RADIO) {
986 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 0);
987 sc->sc_flags |= SC_OP_LED_ON;
988 } else {
989 sc->led_on_cnt++;
990 }
991 break;
992 default:
993 break;
994 }
995 }
996
997 static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
998 char *trigger)
999 {
1000 int ret;
1001
1002 led->sc = sc;
1003 led->led_cdev.name = led->name;
1004 led->led_cdev.default_trigger = trigger;
1005 led->led_cdev.brightness_set = ath_led_brightness;
1006
1007 ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
1008 if (ret)
1009 DPRINTF(sc, ATH_DBG_FATAL,
1010 "Failed to register led:%s", led->name);
1011 else
1012 led->registered = 1;
1013 return ret;
1014 }
1015
1016 static void ath_unregister_led(struct ath_led *led)
1017 {
1018 if (led->registered) {
1019 led_classdev_unregister(&led->led_cdev);
1020 led->registered = 0;
1021 }
1022 }
1023
1024 static void ath_deinit_leds(struct ath_softc *sc)
1025 {
1026 cancel_delayed_work_sync(&sc->ath_led_blink_work);
1027 ath_unregister_led(&sc->assoc_led);
1028 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
1029 ath_unregister_led(&sc->tx_led);
1030 ath_unregister_led(&sc->rx_led);
1031 ath_unregister_led(&sc->radio_led);
1032 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
1033 }
1034
1035 static void ath_init_leds(struct ath_softc *sc)
1036 {
1037 char *trigger;
1038 int ret;
1039
1040 /* Configure gpio 1 for output */
1041 ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN,
1042 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1043 /* LED off, active low */
1044 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
1045
1046 INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
1047
1048 trigger = ieee80211_get_radio_led_name(sc->hw);
1049 snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
1050 "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
1051 ret = ath_register_led(sc, &sc->radio_led, trigger);
1052 sc->radio_led.led_type = ATH_LED_RADIO;
1053 if (ret)
1054 goto fail;
1055
1056 trigger = ieee80211_get_assoc_led_name(sc->hw);
1057 snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
1058 "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
1059 ret = ath_register_led(sc, &sc->assoc_led, trigger);
1060 sc->assoc_led.led_type = ATH_LED_ASSOC;
1061 if (ret)
1062 goto fail;
1063
1064 trigger = ieee80211_get_tx_led_name(sc->hw);
1065 snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
1066 "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
1067 ret = ath_register_led(sc, &sc->tx_led, trigger);
1068 sc->tx_led.led_type = ATH_LED_TX;
1069 if (ret)
1070 goto fail;
1071
1072 trigger = ieee80211_get_rx_led_name(sc->hw);
1073 snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
1074 "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
1075 ret = ath_register_led(sc, &sc->rx_led, trigger);
1076 sc->rx_led.led_type = ATH_LED_RX;
1077 if (ret)
1078 goto fail;
1079
1080 return;
1081
1082 fail:
1083 ath_deinit_leds(sc);
1084 }
1085
1086 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1087
1088 /*******************/
1089 /* Rfkill */
1090 /*******************/
1091
1092 static void ath_radio_enable(struct ath_softc *sc)
1093 {
1094 struct ath_hw *ah = sc->sc_ah;
1095 struct ieee80211_channel *channel = sc->hw->conf.channel;
1096 int r;
1097
1098 ath9k_ps_wakeup(sc);
1099 spin_lock_bh(&sc->sc_resetlock);
1100
1101 r = ath9k_hw_reset(ah, ah->curchan, false);
1102
1103 if (r) {
1104 DPRINTF(sc, ATH_DBG_FATAL,
1105 "Unable to reset channel %u (%uMhz) ",
1106 "reset status %u\n",
1107 channel->center_freq, r);
1108 }
1109 spin_unlock_bh(&sc->sc_resetlock);
1110
1111 ath_update_txpow(sc);
1112 if (ath_startrecv(sc) != 0) {
1113 DPRINTF(sc, ATH_DBG_FATAL,
1114 "Unable to restart recv logic\n");
1115 return;
1116 }
1117
1118 if (sc->sc_flags & SC_OP_BEACONS)
1119 ath_beacon_config(sc, ATH_IF_ID_ANY); /* restart beacons */
1120
1121 /* Re-Enable interrupts */
1122 ath9k_hw_set_interrupts(ah, sc->imask);
1123
1124 /* Enable LED */
1125 ath9k_hw_cfg_output(ah, ATH_LED_PIN,
1126 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1127 ath9k_hw_set_gpio(ah, ATH_LED_PIN, 0);
1128
1129 ieee80211_wake_queues(sc->hw);
1130 ath9k_ps_restore(sc);
1131 }
1132
1133 static void ath_radio_disable(struct ath_softc *sc)
1134 {
1135 struct ath_hw *ah = sc->sc_ah;
1136 struct ieee80211_channel *channel = sc->hw->conf.channel;
1137 int r;
1138
1139 ath9k_ps_wakeup(sc);
1140 ieee80211_stop_queues(sc->hw);
1141
1142 /* Disable LED */
1143 ath9k_hw_set_gpio(ah, ATH_LED_PIN, 1);
1144 ath9k_hw_cfg_gpio_input(ah, ATH_LED_PIN);
1145
1146 /* Disable interrupts */
1147 ath9k_hw_set_interrupts(ah, 0);
1148
1149 ath_drain_all_txq(sc, false); /* clear pending tx frames */
1150 ath_stoprecv(sc); /* turn off frame recv */
1151 ath_flushrecv(sc); /* flush recv queue */
1152
1153 spin_lock_bh(&sc->sc_resetlock);
1154 r = ath9k_hw_reset(ah, ah->curchan, false);
1155 if (r) {
1156 DPRINTF(sc, ATH_DBG_FATAL,
1157 "Unable to reset channel %u (%uMhz) "
1158 "reset status %u\n",
1159 channel->center_freq, r);
1160 }
1161 spin_unlock_bh(&sc->sc_resetlock);
1162
1163 ath9k_hw_phy_disable(ah);
1164 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
1165 ath9k_ps_restore(sc);
1166 }
1167
1168 static bool ath_is_rfkill_set(struct ath_softc *sc)
1169 {
1170 struct ath_hw *ah = sc->sc_ah;
1171
1172 return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
1173 ah->rfkill_polarity;
1174 }
1175
1176 /* h/w rfkill poll function */
1177 static void ath_rfkill_poll(struct work_struct *work)
1178 {
1179 struct ath_softc *sc = container_of(work, struct ath_softc,
1180 rf_kill.rfkill_poll.work);
1181 bool radio_on;
1182
1183 if (sc->sc_flags & SC_OP_INVALID)
1184 return;
1185
1186 radio_on = !ath_is_rfkill_set(sc);
1187
1188 /*
1189 * enable/disable radio only when there is a
1190 * state change in RF switch
1191 */
1192 if (radio_on == !!(sc->sc_flags & SC_OP_RFKILL_HW_BLOCKED)) {
1193 enum rfkill_state state;
1194
1195 if (sc->sc_flags & SC_OP_RFKILL_SW_BLOCKED) {
1196 state = radio_on ? RFKILL_STATE_SOFT_BLOCKED
1197 : RFKILL_STATE_HARD_BLOCKED;
1198 } else if (radio_on) {
1199 ath_radio_enable(sc);
1200 state = RFKILL_STATE_UNBLOCKED;
1201 } else {
1202 ath_radio_disable(sc);
1203 state = RFKILL_STATE_HARD_BLOCKED;
1204 }
1205
1206 if (state == RFKILL_STATE_HARD_BLOCKED)
1207 sc->sc_flags |= SC_OP_RFKILL_HW_BLOCKED;
1208 else
1209 sc->sc_flags &= ~SC_OP_RFKILL_HW_BLOCKED;
1210
1211 rfkill_force_state(sc->rf_kill.rfkill, state);
1212 }
1213
1214 queue_delayed_work(sc->hw->workqueue, &sc->rf_kill.rfkill_poll,
1215 msecs_to_jiffies(ATH_RFKILL_POLL_INTERVAL));
1216 }
1217
1218 /* s/w rfkill handler */
1219 static int ath_sw_toggle_radio(void *data, enum rfkill_state state)
1220 {
1221 struct ath_softc *sc = data;
1222
1223 switch (state) {
1224 case RFKILL_STATE_SOFT_BLOCKED:
1225 if (!(sc->sc_flags & (SC_OP_RFKILL_HW_BLOCKED |
1226 SC_OP_RFKILL_SW_BLOCKED)))
1227 ath_radio_disable(sc);
1228 sc->sc_flags |= SC_OP_RFKILL_SW_BLOCKED;
1229 return 0;
1230 case RFKILL_STATE_UNBLOCKED:
1231 if ((sc->sc_flags & SC_OP_RFKILL_SW_BLOCKED)) {
1232 sc->sc_flags &= ~SC_OP_RFKILL_SW_BLOCKED;
1233 if (sc->sc_flags & SC_OP_RFKILL_HW_BLOCKED) {
1234 DPRINTF(sc, ATH_DBG_FATAL, "Can't turn on the "
1235 "radio as it is disabled by h/w\n");
1236 return -EPERM;
1237 }
1238 ath_radio_enable(sc);
1239 }
1240 return 0;
1241 default:
1242 return -EINVAL;
1243 }
1244 }
1245
1246 /* Init s/w rfkill */
1247 static int ath_init_sw_rfkill(struct ath_softc *sc)
1248 {
1249 sc->rf_kill.rfkill = rfkill_allocate(wiphy_dev(sc->hw->wiphy),
1250 RFKILL_TYPE_WLAN);
1251 if (!sc->rf_kill.rfkill) {
1252 DPRINTF(sc, ATH_DBG_FATAL, "Failed to allocate rfkill\n");
1253 return -ENOMEM;
1254 }
1255
1256 snprintf(sc->rf_kill.rfkill_name, sizeof(sc->rf_kill.rfkill_name),
1257 "ath9k-%s::rfkill", wiphy_name(sc->hw->wiphy));
1258 sc->rf_kill.rfkill->name = sc->rf_kill.rfkill_name;
1259 sc->rf_kill.rfkill->data = sc;
1260 sc->rf_kill.rfkill->toggle_radio = ath_sw_toggle_radio;
1261 sc->rf_kill.rfkill->state = RFKILL_STATE_UNBLOCKED;
1262 sc->rf_kill.rfkill->user_claim_unsupported = 1;
1263
1264 return 0;
1265 }
1266
1267 /* Deinitialize rfkill */
1268 static void ath_deinit_rfkill(struct ath_softc *sc)
1269 {
1270 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1271 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
1272
1273 if (sc->sc_flags & SC_OP_RFKILL_REGISTERED) {
1274 rfkill_unregister(sc->rf_kill.rfkill);
1275 sc->sc_flags &= ~SC_OP_RFKILL_REGISTERED;
1276 sc->rf_kill.rfkill = NULL;
1277 }
1278 }
1279
1280 static int ath_start_rfkill_poll(struct ath_softc *sc)
1281 {
1282 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1283 queue_delayed_work(sc->hw->workqueue,
1284 &sc->rf_kill.rfkill_poll, 0);
1285
1286 if (!(sc->sc_flags & SC_OP_RFKILL_REGISTERED)) {
1287 if (rfkill_register(sc->rf_kill.rfkill)) {
1288 DPRINTF(sc, ATH_DBG_FATAL,
1289 "Unable to register rfkill\n");
1290 rfkill_free(sc->rf_kill.rfkill);
1291
1292 /* Deinitialize the device */
1293 ath_cleanup(sc);
1294 return -EIO;
1295 } else {
1296 sc->sc_flags |= SC_OP_RFKILL_REGISTERED;
1297 }
1298 }
1299
1300 return 0;
1301 }
1302 #endif /* CONFIG_RFKILL */
1303
1304 void ath_cleanup(struct ath_softc *sc)
1305 {
1306 ath_detach(sc);
1307 free_irq(sc->irq, sc);
1308 ath_bus_cleanup(sc);
1309 ieee80211_free_hw(sc->hw);
1310 }
1311
1312 void ath_detach(struct ath_softc *sc)
1313 {
1314 struct ieee80211_hw *hw = sc->hw;
1315 int i = 0;
1316
1317 ath9k_ps_wakeup(sc);
1318
1319 DPRINTF(sc, ATH_DBG_CONFIG, "Detach ATH hw\n");
1320
1321 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1322 ath_deinit_rfkill(sc);
1323 #endif
1324 ath_deinit_leds(sc);
1325
1326 ieee80211_unregister_hw(hw);
1327 ath_rx_cleanup(sc);
1328 ath_tx_cleanup(sc);
1329
1330 tasklet_kill(&sc->intr_tq);
1331 tasklet_kill(&sc->bcon_tasklet);
1332
1333 if (!(sc->sc_flags & SC_OP_INVALID))
1334 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
1335
1336 /* cleanup tx queues */
1337 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1338 if (ATH_TXQ_SETUP(sc, i))
1339 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1340
1341 ath9k_hw_detach(sc->sc_ah);
1342 ath9k_exit_debug(sc);
1343 ath9k_ps_restore(sc);
1344 }
1345
1346 static int ath_init(u16 devid, struct ath_softc *sc)
1347 {
1348 struct ath_hw *ah = NULL;
1349 int status;
1350 int error = 0, i;
1351 int csz = 0;
1352
1353 /* XXX: hardware will not be ready until ath_open() is called */
1354 sc->sc_flags |= SC_OP_INVALID;
1355
1356 if (ath9k_init_debug(sc) < 0)
1357 printk(KERN_ERR "Unable to create debugfs files\n");
1358
1359 spin_lock_init(&sc->sc_resetlock);
1360 mutex_init(&sc->mutex);
1361 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
1362 tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
1363 (unsigned long)sc);
1364
1365 /*
1366 * Cache line size is used to size and align various
1367 * structures used to communicate with the hardware.
1368 */
1369 ath_read_cachesize(sc, &csz);
1370 /* XXX assert csz is non-zero */
1371 sc->cachelsz = csz << 2; /* convert to bytes */
1372
1373 ah = ath9k_hw_attach(devid, sc, &status);
1374 if (ah == NULL) {
1375 DPRINTF(sc, ATH_DBG_FATAL,
1376 "Unable to attach hardware; HAL status %d\n", status);
1377 error = -ENXIO;
1378 goto bad;
1379 }
1380 sc->sc_ah = ah;
1381
1382 /* Get the hardware key cache size. */
1383 sc->keymax = ah->caps.keycache_size;
1384 if (sc->keymax > ATH_KEYMAX) {
1385 DPRINTF(sc, ATH_DBG_KEYCACHE,
1386 "Warning, using only %u entries in %u key cache\n",
1387 ATH_KEYMAX, sc->keymax);
1388 sc->keymax = ATH_KEYMAX;
1389 }
1390
1391 /*
1392 * Reset the key cache since some parts do not
1393 * reset the contents on initial power up.
1394 */
1395 for (i = 0; i < sc->keymax; i++)
1396 ath9k_hw_keyreset(ah, (u16) i);
1397
1398 if (ath9k_regd_init(sc->sc_ah))
1399 goto bad;
1400
1401 /* default to MONITOR mode */
1402 sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
1403
1404 /* Setup rate tables */
1405
1406 ath_rate_attach(sc);
1407 ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
1408 ath_setup_rates(sc, IEEE80211_BAND_5GHZ);
1409
1410 /*
1411 * Allocate hardware transmit queues: one queue for
1412 * beacon frames and one data queue for each QoS
1413 * priority. Note that the hal handles resetting
1414 * these queues at the needed time.
1415 */
1416 sc->beacon.beaconq = ath_beaconq_setup(ah);
1417 if (sc->beacon.beaconq == -1) {
1418 DPRINTF(sc, ATH_DBG_FATAL,
1419 "Unable to setup a beacon xmit queue\n");
1420 error = -EIO;
1421 goto bad2;
1422 }
1423 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
1424 if (sc->beacon.cabq == NULL) {
1425 DPRINTF(sc, ATH_DBG_FATAL,
1426 "Unable to setup CAB xmit queue\n");
1427 error = -EIO;
1428 goto bad2;
1429 }
1430
1431 sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
1432 ath_cabq_update(sc);
1433
1434 for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
1435 sc->tx.hwq_map[i] = -1;
1436
1437 /* Setup data queues */
1438 /* NB: ensure BK queue is the lowest priority h/w queue */
1439 if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
1440 DPRINTF(sc, ATH_DBG_FATAL,
1441 "Unable to setup xmit queue for BK traffic\n");
1442 error = -EIO;
1443 goto bad2;
1444 }
1445
1446 if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
1447 DPRINTF(sc, ATH_DBG_FATAL,
1448 "Unable to setup xmit queue for BE traffic\n");
1449 error = -EIO;
1450 goto bad2;
1451 }
1452 if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
1453 DPRINTF(sc, ATH_DBG_FATAL,
1454 "Unable to setup xmit queue for VI traffic\n");
1455 error = -EIO;
1456 goto bad2;
1457 }
1458 if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
1459 DPRINTF(sc, ATH_DBG_FATAL,
1460 "Unable to setup xmit queue for VO traffic\n");
1461 error = -EIO;
1462 goto bad2;
1463 }
1464
1465 /* Initializes the noise floor to a reasonable default value.
1466 * Later on this will be updated during ANI processing. */
1467
1468 sc->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
1469 setup_timer(&sc->ani.timer, ath_ani_calibrate, (unsigned long)sc);
1470
1471 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1472 ATH9K_CIPHER_TKIP, NULL)) {
1473 /*
1474 * Whether we should enable h/w TKIP MIC.
1475 * XXX: if we don't support WME TKIP MIC, then we wouldn't
1476 * report WMM capable, so it's always safe to turn on
1477 * TKIP MIC in this case.
1478 */
1479 ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
1480 0, 1, NULL);
1481 }
1482
1483 /*
1484 * Check whether the separate key cache entries
1485 * are required to handle both tx+rx MIC keys.
1486 * With split mic keys the number of stations is limited
1487 * to 27 otherwise 59.
1488 */
1489 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1490 ATH9K_CIPHER_TKIP, NULL)
1491 && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1492 ATH9K_CIPHER_MIC, NULL)
1493 && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
1494 0, NULL))
1495 sc->splitmic = 1;
1496
1497 /* turn on mcast key search if possible */
1498 if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
1499 (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
1500 1, NULL);
1501
1502 sc->config.txpowlimit = ATH_TXPOWER_MAX;
1503
1504 /* 11n Capabilities */
1505 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
1506 sc->sc_flags |= SC_OP_TXAGGR;
1507 sc->sc_flags |= SC_OP_RXAGGR;
1508 }
1509
1510 sc->tx_chainmask = ah->caps.tx_chainmask;
1511 sc->rx_chainmask = ah->caps.rx_chainmask;
1512
1513 ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
1514 sc->rx.defant = ath9k_hw_getdefantenna(ah);
1515
1516 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
1517 memcpy(sc->bssidmask, ath_bcast_mac, ETH_ALEN);
1518 ATH_SET_VIF_BSSID_MASK(sc->bssidmask);
1519 ath9k_hw_setbssidmask(sc);
1520 }
1521
1522 sc->beacon.slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
1523
1524 /* initialize beacon slots */
1525 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
1526 sc->beacon.bslot[i] = ATH_IF_ID_ANY;
1527
1528 /* save MISC configurations */
1529 sc->config.swBeaconProcess = 1;
1530
1531 /* setup channels and rates */
1532
1533 sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
1534 sc->sbands[IEEE80211_BAND_2GHZ].bitrates =
1535 sc->rates[IEEE80211_BAND_2GHZ];
1536 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
1537 sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
1538 ARRAY_SIZE(ath9k_2ghz_chantable);
1539
1540 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
1541 sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
1542 sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
1543 sc->rates[IEEE80211_BAND_5GHZ];
1544 sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
1545 sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
1546 ARRAY_SIZE(ath9k_5ghz_chantable);
1547 }
1548
1549 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BT_COEX)
1550 ath9k_hw_btcoex_enable(sc->sc_ah);
1551
1552 return 0;
1553 bad2:
1554 /* cleanup tx queues */
1555 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1556 if (ATH_TXQ_SETUP(sc, i))
1557 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1558 bad:
1559 if (ah)
1560 ath9k_hw_detach(ah);
1561
1562 return error;
1563 }
1564
1565 int ath_attach(u16 devid, struct ath_softc *sc)
1566 {
1567 struct ieee80211_hw *hw = sc->hw;
1568 int error = 0;
1569
1570 DPRINTF(sc, ATH_DBG_CONFIG, "Attach ATH hw\n");
1571
1572 error = ath_init(devid, sc);
1573 if (error != 0)
1574 return error;
1575
1576 /* get mac address from hardware and set in mac80211 */
1577
1578 SET_IEEE80211_PERM_ADDR(hw, sc->sc_ah->macaddr);
1579
1580 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
1581 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1582 IEEE80211_HW_SIGNAL_DBM |
1583 IEEE80211_HW_AMPDU_AGGREGATION |
1584 IEEE80211_HW_SUPPORTS_PS |
1585 IEEE80211_HW_PS_NULLFUNC_STACK;
1586
1587 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah))
1588 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
1589
1590 hw->wiphy->interface_modes =
1591 BIT(NL80211_IFTYPE_AP) |
1592 BIT(NL80211_IFTYPE_STATION) |
1593 BIT(NL80211_IFTYPE_ADHOC);
1594
1595 hw->wiphy->reg_notifier = ath9k_reg_notifier;
1596 hw->wiphy->strict_regulatory = true;
1597
1598 hw->queues = 4;
1599 hw->max_rates = 4;
1600 hw->max_rate_tries = ATH_11N_TXMAXTRY;
1601 hw->sta_data_size = sizeof(struct ath_node);
1602 hw->vif_data_size = sizeof(struct ath_vif);
1603
1604 hw->rate_control_algorithm = "ath9k_rate_control";
1605
1606 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
1607 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
1608 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
1609 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
1610 }
1611
1612 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &sc->sbands[IEEE80211_BAND_2GHZ];
1613 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
1614 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1615 &sc->sbands[IEEE80211_BAND_5GHZ];
1616
1617 /* initialize tx/rx engine */
1618 error = ath_tx_init(sc, ATH_TXBUF);
1619 if (error != 0)
1620 goto detach;
1621
1622 error = ath_rx_init(sc, ATH_RXBUF);
1623 if (error != 0)
1624 goto detach;
1625
1626 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1627 /* Initialize h/w rfkill */
1628 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1629 INIT_DELAYED_WORK(&sc->rf_kill.rfkill_poll, ath_rfkill_poll);
1630
1631 /* Initialize s/w rfkill */
1632 if (ath_init_sw_rfkill(sc))
1633 goto detach;
1634 #endif
1635
1636 if (ath9k_is_world_regd(sc->sc_ah)) {
1637 /* Anything applied here (prior to wiphy registration) gets
1638 * saved on the wiphy orig_* parameters */
1639 const struct ieee80211_regdomain *regd =
1640 ath9k_world_regdomain(sc->sc_ah);
1641 hw->wiphy->custom_regulatory = true;
1642 hw->wiphy->strict_regulatory = false;
1643 wiphy_apply_custom_regulatory(sc->hw->wiphy, regd);
1644 ath9k_reg_apply_radar_flags(hw->wiphy);
1645 ath9k_reg_apply_world_flags(hw->wiphy, REGDOM_SET_BY_INIT);
1646 } else {
1647 /* This gets applied in the case of the absence of CRDA;
1648 * it's our own custom world regulatory domain, similar to
1649 * cfg80211's but we enable passive scanning */
1650 const struct ieee80211_regdomain *regd =
1651 ath9k_default_world_regdomain();
1652 wiphy_apply_custom_regulatory(sc->hw->wiphy, regd);
1653 ath9k_reg_apply_radar_flags(hw->wiphy);
1654 ath9k_reg_apply_world_flags(hw->wiphy, REGDOM_SET_BY_INIT);
1655 }
1656
1657 error = ieee80211_register_hw(hw);
1658
1659 if (!ath9k_is_world_regd(sc->sc_ah))
1660 regulatory_hint(hw->wiphy, sc->sc_ah->regulatory.alpha2);
1661
1662 /* Initialize LED control */
1663 ath_init_leds(sc);
1664
1665
1666 return 0;
1667 detach:
1668 ath_detach(sc);
1669 return error;
1670 }
1671
1672 int ath_reset(struct ath_softc *sc, bool retry_tx)
1673 {
1674 struct ath_hw *ah = sc->sc_ah;
1675 struct ieee80211_hw *hw = sc->hw;
1676 int r;
1677
1678 ath9k_hw_set_interrupts(ah, 0);
1679 ath_drain_all_txq(sc, retry_tx);
1680 ath_stoprecv(sc);
1681 ath_flushrecv(sc);
1682
1683 spin_lock_bh(&sc->sc_resetlock);
1684 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
1685 if (r)
1686 DPRINTF(sc, ATH_DBG_FATAL,
1687 "Unable to reset hardware; reset status %u\n", r);
1688 spin_unlock_bh(&sc->sc_resetlock);
1689
1690 if (ath_startrecv(sc) != 0)
1691 DPRINTF(sc, ATH_DBG_FATAL, "Unable to start recv logic\n");
1692
1693 /*
1694 * We may be doing a reset in response to a request
1695 * that changes the channel so update any state that
1696 * might change as a result.
1697 */
1698 ath_cache_conf_rate(sc, &hw->conf);
1699
1700 ath_update_txpow(sc);
1701
1702 if (sc->sc_flags & SC_OP_BEACONS)
1703 ath_beacon_config(sc, ATH_IF_ID_ANY); /* restart beacons */
1704
1705 ath9k_hw_set_interrupts(ah, sc->imask);
1706
1707 if (retry_tx) {
1708 int i;
1709 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1710 if (ATH_TXQ_SETUP(sc, i)) {
1711 spin_lock_bh(&sc->tx.txq[i].axq_lock);
1712 ath_txq_schedule(sc, &sc->tx.txq[i]);
1713 spin_unlock_bh(&sc->tx.txq[i].axq_lock);
1714 }
1715 }
1716 }
1717
1718 return r;
1719 }
1720
1721 /*
1722 * This function will allocate both the DMA descriptor structure, and the
1723 * buffers it contains. These are used to contain the descriptors used
1724 * by the system.
1725 */
1726 int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
1727 struct list_head *head, const char *name,
1728 int nbuf, int ndesc)
1729 {
1730 #define DS2PHYS(_dd, _ds) \
1731 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
1732 #define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
1733 #define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
1734
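/*
 * DS2PHYS maps a descriptor's virtual address to its DMA (bus) address within
 * the coherent block; the 4KB macros flag descriptor addresses whose 32-dword
 * fetch would cross a 4KB page boundary, which parts without
 * ATH9K_HW_CAP_4KB_SPLITTRANS cannot handle, so those addresses are skipped.
 */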
1735 struct ath_desc *ds;
1736 struct ath_buf *bf;
1737 int i, bsize, error;
1738
1739 DPRINTF(sc, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
1740 name, nbuf, ndesc);
1741
1742 /* ath_desc must be a multiple of DWORDs */
1743 if ((sizeof(struct ath_desc) % 4) != 0) {
1744 DPRINTF(sc, ATH_DBG_FATAL, "ath_desc not DWORD aligned\n");
1745 ASSERT((sizeof(struct ath_desc) % 4) == 0);
1746 error = -ENOMEM;
1747 goto fail;
1748 }
1749
1750 dd->dd_name = name;
1751 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
1752
1753 /*
1754 * Need additional DMA memory because we can't use
1755 * descriptors that cross the 4K page boundary. Assume
1756 * one skipped descriptor per 4K page.
1757 */
1758 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1759 u32 ndesc_skipped =
1760 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
1761 u32 dma_len;
1762
1763 while (ndesc_skipped) {
1764 dma_len = ndesc_skipped * sizeof(struct ath_desc);
1765 dd->dd_desc_len += dma_len;
1766
1767 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
1768 }
1769 }
1770
1771 /* allocate descriptors */
1772 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
1773 &dd->dd_desc_paddr, GFP_ATOMIC);
1774 if (dd->dd_desc == NULL) {
1775 error = -ENOMEM;
1776 goto fail;
1777 }
1778 ds = dd->dd_desc;
1779 DPRINTF(sc, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
1780 dd->dd_name, ds, (u32) dd->dd_desc_len,
1781 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
1782
1783 /* allocate buffers */
1784 bsize = sizeof(struct ath_buf) * nbuf;
1785 bf = kmalloc(bsize, GFP_KERNEL);
1786 if (bf == NULL) {
1787 error = -ENOMEM;
1788 goto fail2;
1789 }
1790 memset(bf, 0, bsize);
1791 dd->dd_bufptr = bf;
1792
1793 INIT_LIST_HEAD(head);
1794 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
1795 bf->bf_desc = ds;
1796 bf->bf_daddr = DS2PHYS(dd, ds);
1797
1798 if (!(sc->sc_ah->caps.hw_caps &
1799 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1800 /*
1801 * Skip descriptor addresses which can cause 4KB
1802 * boundary crossing (addr + length) with a 32 dword
1803 * descriptor fetch.
1804 */
1805 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
1806 ASSERT((caddr_t) bf->bf_desc <
1807 ((caddr_t) dd->dd_desc +
1808 dd->dd_desc_len));
1809
1810 ds += ndesc;
1811 bf->bf_desc = ds;
1812 bf->bf_daddr = DS2PHYS(dd, ds);
1813 }
1814 }
1815 list_add_tail(&bf->list, head);
1816 }
1817 return 0;
1818 fail2:
1819 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
1820 dd->dd_desc_paddr);
1821 fail:
1822 memset(dd, 0, sizeof(*dd));
1823 return error;
1824 #undef ATH_DESC_4KB_BOUND_CHECK
1825 #undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
1826 #undef DS2PHYS
1827 }
1828
1829 void ath_descdma_cleanup(struct ath_softc *sc,
1830 struct ath_descdma *dd,
1831 struct list_head *head)
1832 {
1833 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
1834 dd->dd_desc_paddr);
1835
1836 INIT_LIST_HEAD(head);
1837 kfree(dd->dd_bufptr);
1838 memset(dd, 0, sizeof(*dd));
1839 }
1840
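/*
 * mac80211 numbers its queues 0-3 in decreasing priority (VO, VI, BE, BK).
 * ath_get_hal_qnum() maps a mac80211 queue number to the corresponding h/w
 * queue via sc->tx.hwq_map; ath_get_mac80211_qnum() performs the reverse
 * access-category to queue-number mapping.
 */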
1841 int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
1842 {
1843 int qnum;
1844
1845 switch (queue) {
1846 case 0:
1847 qnum = sc->tx.hwq_map[ATH9K_WME_AC_VO];
1848 break;
1849 case 1:
1850 qnum = sc->tx.hwq_map[ATH9K_WME_AC_VI];
1851 break;
1852 case 2:
1853 qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];
1854 break;
1855 case 3:
1856 qnum = sc->tx.hwq_map[ATH9K_WME_AC_BK];
1857 break;
1858 default:
1859 qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];
1860 break;
1861 }
1862
1863 return qnum;
1864 }
1865
1866 int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
1867 {
1868 int qnum;
1869
1870 switch (queue) {
1871 case ATH9K_WME_AC_VO:
1872 qnum = 0;
1873 break;
1874 case ATH9K_WME_AC_VI:
1875 qnum = 1;
1876 break;
1877 case ATH9K_WME_AC_BE:
1878 qnum = 2;
1879 break;
1880 case ATH9K_WME_AC_BK:
1881 qnum = 3;
1882 break;
1883 default:
1884 qnum = -1;
1885 break;
1886 }
1887
1888 return qnum;
1889 }
1890
1891 /* XXX: Remove me once we don't depend on ath9k_channel for all
1892 * this redundant data */
1893 static void ath9k_update_ichannel(struct ath_softc *sc,
1894 struct ath9k_channel *ichan)
1895 {
1896 struct ieee80211_hw *hw = sc->hw;
1897 struct ieee80211_channel *chan = hw->conf.channel;
1898 struct ieee80211_conf *conf = &hw->conf;
1899
1900 ichan->channel = chan->center_freq;
1901 ichan->chan = chan;
1902
1903 if (chan->band == IEEE80211_BAND_2GHZ) {
1904 ichan->chanmode = CHANNEL_G;
1905 ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM;
1906 } else {
1907 ichan->chanmode = CHANNEL_A;
1908 ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
1909 }
1910
1911 sc->tx_chan_width = ATH9K_HT_MACMODE_20;
1912
1913 if (conf_is_ht(conf)) {
1914 if (conf_is_ht40(conf))
1915 sc->tx_chan_width = ATH9K_HT_MACMODE_2040;
1916
1917 ichan->chanmode = ath_get_extchanmode(sc, chan,
1918 conf->channel_type);
1919 }
1920 }
1921
1922 /**********************/
1923 /* mac80211 callbacks */
1924 /**********************/
1925
1926 static int ath9k_start(struct ieee80211_hw *hw)
1927 {
1928 struct ath_softc *sc = hw->priv;
1929 struct ieee80211_channel *curchan = hw->conf.channel;
1930 struct ath9k_channel *init_channel;
1931 int r, pos;
1932
1933 DPRINTF(sc, ATH_DBG_CONFIG, "Starting driver with "
1934 "initial channel: %d MHz\n", curchan->center_freq);
1935
1936 mutex_lock(&sc->mutex);
1937
1938 /* setup initial channel */
1939
1940 pos = curchan->hw_value;
1941
1942 init_channel = &sc->sc_ah->channels[pos];
1943 ath9k_update_ichannel(sc, init_channel);
1944
1945 /* Reset SERDES registers */
1946 ath9k_hw_configpcipowersave(sc->sc_ah, 0);
1947
1948 /*
1949 * The basic interface to setting the hardware in a good
1950 * state is ``reset''. On return the hardware is known to
1951 * be powered up and with interrupts disabled. This must
1952 * be followed by initialization of the appropriate bits
1953 * and then setup of the interrupt mask.
1954 */
1955 spin_lock_bh(&sc->sc_resetlock);
1956 r = ath9k_hw_reset(sc->sc_ah, init_channel, false);
1957 if (r) {
1958 DPRINTF(sc, ATH_DBG_FATAL,
1959 "Unable to reset hardware; reset status %u "
1960 "(freq %u MHz)\n", r,
1961 curchan->center_freq);
1962 spin_unlock_bh(&sc->sc_resetlock);
1963 goto mutex_unlock;
1964 }
1965 spin_unlock_bh(&sc->sc_resetlock);
1966
1967 /*
1968 * This is needed only to setup initial state
1969 * but it's best done after a reset.
1970 */
1971 ath_update_txpow(sc);
1972
1973 /*
1974 * Setup the hardware after reset:
1975 * The receive engine is set going.
1976 * Frame transmit is handled entirely
1977 * in the frame output path; there's nothing to do
1978 * here except setup the interrupt mask.
1979 */
1980 if (ath_startrecv(sc) != 0) {
1981 DPRINTF(sc, ATH_DBG_FATAL,
1982 "Unable to start recv logic\n");
1983 r = -EIO;
1984 goto mutex_unlock;
1985 }
1986
1987 /* Setup our intr mask. */
1988 sc->imask = ATH9K_INT_RX | ATH9K_INT_TX
1989 | ATH9K_INT_RXEOL | ATH9K_INT_RXORN
1990 | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
1991
1992 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_GTT)
1993 sc->imask |= ATH9K_INT_GTT;
1994
1995 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
1996 sc->imask |= ATH9K_INT_CST;
1997
1998 ath_cache_conf_rate(sc, &hw->conf);
1999
2000 sc->sc_flags &= ~SC_OP_INVALID;
2001
2002 /* Disable BMISS interrupt when we're not associated */
2003 sc->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
2004 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
2005
2006 ieee80211_wake_queues(sc->hw);
2007
2008 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
2009 r = ath_start_rfkill_poll(sc);
2010 #endif
2011
2012 mutex_unlock:
2013 mutex_unlock(&sc->mutex);
2014
2015 return r;
2016 }
2017
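/*
 * mac80211 transmit entry point: assign a sequence number when asked
 * to, pad the 802.11 header out to a 4-byte boundary and hand the
 * frame to the TX path.  Frames that cannot be queued are dropped.
 */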
2018 static int ath9k_tx(struct ieee80211_hw *hw,
2019 struct sk_buff *skb)
2020 {
2021 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2022 struct ath_softc *sc = hw->priv;
2023 struct ath_tx_control txctl;
2024 int hdrlen, padsize;
2025
2026 memset(&txctl, 0, sizeof(struct ath_tx_control));
2027
2028 /*
2029 * As a temporary workaround, assign seq# here; this will likely need
2030 * to be cleaned up to work better with Beacon transmission and virtual
2031 * BSSes.
2032 */
2033 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
2034 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2035 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
2036 sc->tx.seq_no += 0x10;
2037 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
2038 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
2039 }
2040
2041 /* Add the padding after the header if this is not already done */
2042 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
2043 if (hdrlen & 3) {
2044 padsize = hdrlen % 4;
2045 if (skb_headroom(skb) < padsize)
2046 return -1;
2047 skb_push(skb, padsize);
2048 memmove(skb->data, skb->data + padsize, hdrlen);
2049 }
2050
2051 /* Check if a tx queue is available */
2052
2053 txctl.txq = ath_test_get_txq(sc, skb);
2054 if (!txctl.txq)
2055 goto exit;
2056
2057 DPRINTF(sc, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
2058
2059 if (ath_tx_start(sc, skb, &txctl) != 0) {
2060 DPRINTF(sc, ATH_DBG_XMIT, "TX failed\n");
2061 goto exit;
2062 }
2063
2064 return 0;
2065 exit:
2066 dev_kfree_skb_any(skb);
2067 return 0;
2068 }
2069
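/*
 * Shut the device down: stop the mac80211 queues, mask all interrupts,
 * drain TX/RX, disable the PHY, then disable the hardware and power
 * down the PCI SERDES before marking the driver state invalid.
 */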
2070 static void ath9k_stop(struct ieee80211_hw *hw)
2071 {
2072 struct ath_softc *sc = hw->priv;
2073
2074 if (sc->sc_flags & SC_OP_INVALID) {
2075 DPRINTF(sc, ATH_DBG_ANY, "Device not present\n");
2076 return;
2077 }
2078
2079 mutex_lock(&sc->mutex);
2080
2081 ieee80211_stop_queues(sc->hw);
2082
2083 /* make sure h/w will not generate any interrupt
2084 * before setting the invalid flag. */
2085 ath9k_hw_set_interrupts(sc->sc_ah, 0);
2086
2087 if (!(sc->sc_flags & SC_OP_INVALID)) {
2088 ath_drain_all_txq(sc, false);
2089 ath_stoprecv(sc);
2090 ath9k_hw_phy_disable(sc->sc_ah);
2091 } else
2092 sc->rx.rxlink = NULL;
2093
2094 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
2095 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
2096 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
2097 #endif
2098 /* disable HAL and put h/w to sleep */
2099 ath9k_hw_disable(sc->sc_ah);
2100 ath9k_hw_configpcipowersave(sc->sc_ah, 1);
2101
2102 sc->sc_flags |= SC_OP_INVALID;
2103
2104 mutex_unlock(&sc->mutex);
2105
2106 DPRINTF(sc, ATH_DBG_CONFIG, "Driver halt\n");
2107 }
2108
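/*
 * Attach a virtual interface.  Only a single vif is supported at the
 * moment; the device opmode is taken from the vif type, and the
 * interrupt mask is extended with MIB/TIM interrupts where the
 * hardware supports them.
 */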
2109 static int ath9k_add_interface(struct ieee80211_hw *hw,
2110 struct ieee80211_if_init_conf *conf)
2111 {
2112 struct ath_softc *sc = hw->priv;
2113 struct ath_vif *avp = (void *)conf->vif->drv_priv;
2114 enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
2115
2116 	/* Support only one vif for now */
2117
2118 if (sc->nvifs)
2119 return -ENOBUFS;
2120
2121 mutex_lock(&sc->mutex);
2122
2123 switch (conf->type) {
2124 case NL80211_IFTYPE_STATION:
2125 ic_opmode = NL80211_IFTYPE_STATION;
2126 break;
2127 case NL80211_IFTYPE_ADHOC:
2128 ic_opmode = NL80211_IFTYPE_ADHOC;
2129 break;
2130 case NL80211_IFTYPE_AP:
2131 ic_opmode = NL80211_IFTYPE_AP;
2132 break;
2133 default:
2134 DPRINTF(sc, ATH_DBG_FATAL,
2135 "Interface type %d not yet supported\n", conf->type);
		mutex_unlock(&sc->mutex);
2136 		return -EOPNOTSUPP;
2137 }
2138
2139 DPRINTF(sc, ATH_DBG_CONFIG, "Attach a VIF of type: %d\n", ic_opmode);
2140
2141 /* Set the VIF opmode */
2142 avp->av_opmode = ic_opmode;
2143 avp->av_bslot = -1;
2144
2145 if (ic_opmode == NL80211_IFTYPE_AP)
2146 ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
2147
2148 sc->vifs[0] = conf->vif;
2149 sc->nvifs++;
2150
2151 /* Set the device opmode */
2152 sc->sc_ah->opmode = ic_opmode;
2153
2154 /*
2155 * Enable MIB interrupts when there are hardware phy counters.
2156 * Note we only do this (at the moment) for station mode.
2157 */
2158 if (ath9k_hw_phycounters(sc->sc_ah) &&
2159 ((conf->type == NL80211_IFTYPE_STATION) ||
2160 (conf->type == NL80211_IFTYPE_ADHOC)))
2161 sc->imask |= ATH9K_INT_MIB;
2162 /*
2163 * Some hardware processes the TIM IE and fires an
2164 * interrupt when the TIM bit is set. For hardware
2165 * that does, if not overridden by configuration,
2166 * enable the TIM interrupt when operating as station.
2167 */
2168 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
2169 (conf->type == NL80211_IFTYPE_STATION) &&
2170 !sc->config.swBeaconProcess)
2171 sc->imask |= ATH9K_INT_TIM;
2172
2173 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
2174
2175 if (conf->type == NL80211_IFTYPE_AP) {
2176 /* TODO: is this a suitable place to start ANI for AP mode? */
2177 /* Start ANI */
2178 mod_timer(&sc->ani.timer,
2179 jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
2180 }
2181
2182 mutex_unlock(&sc->mutex);
2183
2184 return 0;
2185 }
2186
2187 static void ath9k_remove_interface(struct ieee80211_hw *hw,
2188 struct ieee80211_if_init_conf *conf)
2189 {
2190 struct ath_softc *sc = hw->priv;
2191 struct ath_vif *avp = (void *)conf->vif->drv_priv;
2192
2193 DPRINTF(sc, ATH_DBG_CONFIG, "Detach Interface\n");
2194
2195 mutex_lock(&sc->mutex);
2196
2197 /* Stop ANI */
2198 del_timer_sync(&sc->ani.timer);
2199
2200 /* Reclaim beacon resources */
2201 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP ||
2202 sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) {
2203 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2204 ath_beacon_return(sc, avp);
2205 }
2206
2207 sc->sc_flags &= ~SC_OP_BEACONS;
2208
2209 sc->vifs[0] = NULL;
2210 sc->nvifs--;
2211
2212 mutex_unlock(&sc->mutex);
2213 }
2214
2215 static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2216 {
2217 struct ath_softc *sc = hw->priv;
2218 struct ieee80211_conf *conf = &hw->conf;
2219
2220 mutex_lock(&sc->mutex);
2221
2222 if (changed & IEEE80211_CONF_CHANGE_PS) {
2223 if (conf->flags & IEEE80211_CONF_PS) {
2224 if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
2225 sc->imask |= ATH9K_INT_TIM_TIMER;
2226 ath9k_hw_set_interrupts(sc->sc_ah,
2227 sc->imask);
2228 }
2229 ath9k_hw_setrxabort(sc->sc_ah, 1);
2230 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
2231 } else {
2232 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
2233 ath9k_hw_setrxabort(sc->sc_ah, 0);
2234 sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON;
2235 if (sc->imask & ATH9K_INT_TIM_TIMER) {
2236 sc->imask &= ~ATH9K_INT_TIM_TIMER;
2237 ath9k_hw_set_interrupts(sc->sc_ah,
2238 sc->imask);
2239 }
2240 }
2241 }
2242
2243 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2244 struct ieee80211_channel *curchan = hw->conf.channel;
2245 int pos = curchan->hw_value;
2246
2247 DPRINTF(sc, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
2248 curchan->center_freq);
2249
2250 		/* XXX: remove me eventually */
2251 ath9k_update_ichannel(sc, &sc->sc_ah->channels[pos]);
2252
2253 ath_update_chainmask(sc, conf_is_ht(conf));
2254
2255 if (ath_set_channel(sc, &sc->sc_ah->channels[pos]) < 0) {
2256 DPRINTF(sc, ATH_DBG_FATAL, "Unable to set channel\n");
2257 mutex_unlock(&sc->mutex);
2258 return -EINVAL;
2259 }
2260 }
2261
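	/* mac80211 gives power_level in dBm; the doubling suggests
	 * txpowlimit is kept in half-dBm units */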
2262 if (changed & IEEE80211_CONF_CHANGE_POWER)
2263 sc->config.txpowlimit = 2 * conf->power_level;
2264
2265 mutex_unlock(&sc->mutex);
2266
2267 return 0;
2268 }
2269
2270 static int ath9k_config_interface(struct ieee80211_hw *hw,
2271 struct ieee80211_vif *vif,
2272 struct ieee80211_if_conf *conf)
2273 {
2274 struct ath_softc *sc = hw->priv;
2275 struct ath_hw *ah = sc->sc_ah;
2276 struct ath_vif *avp = (void *)vif->drv_priv;
2277 u32 rfilt = 0;
2278 int error, i;
2279
2280 /* TODO: Need to decide which hw opmode to use for multi-interface
2281 * cases */
2282 if (vif->type == NL80211_IFTYPE_AP &&
2283 ah->opmode != NL80211_IFTYPE_AP) {
2284 ah->opmode = NL80211_IFTYPE_STATION;
2285 ath9k_hw_setopmode(ah);
2286 memcpy(sc->curbssid, sc->sc_ah->macaddr, ETH_ALEN);
2287 sc->curaid = 0;
2288 ath9k_hw_write_associd(sc);
2289 /* Request full reset to get hw opmode changed properly */
2290 sc->sc_flags |= SC_OP_FULL_RESET;
2291 }
2292
2293 if ((conf->changed & IEEE80211_IFCC_BSSID) &&
2294 !is_zero_ether_addr(conf->bssid)) {
2295 switch (vif->type) {
2296 case NL80211_IFTYPE_STATION:
2297 case NL80211_IFTYPE_ADHOC:
2298 /* Set BSSID */
2299 memcpy(sc->curbssid, conf->bssid, ETH_ALEN);
2300 sc->curaid = 0;
2301 ath9k_hw_write_associd(sc);
2302
2303 /* Set aggregation protection mode parameters */
2304 sc->config.ath_aggr_prot = 0;
2305
2306 DPRINTF(sc, ATH_DBG_CONFIG,
2307 "RX filter 0x%x bssid %pM aid 0x%x\n",
2308 rfilt, sc->curbssid, sc->curaid);
2309
2310 /* need to reconfigure the beacon */
2311 			sc->sc_flags &= ~SC_OP_BEACONS;
2312
2313 break;
2314 default:
2315 break;
2316 }
2317 }
2318
2319 if ((vif->type == NL80211_IFTYPE_ADHOC) ||
2320 (vif->type == NL80211_IFTYPE_AP)) {
2321 if ((conf->changed & IEEE80211_IFCC_BEACON) ||
2322 (conf->changed & IEEE80211_IFCC_BEACON_ENABLED &&
2323 conf->enable_beacon)) {
2324 /*
2325 * Allocate and setup the beacon frame.
2326 *
2327 * Stop any previous beacon DMA. This may be
2328 * necessary, for example, when an ibss merge
2329 * causes reconfiguration; we may be called
2330 * with beacon transmission active.
2331 */
2332 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2333
2334 error = ath_beacon_alloc(sc, 0);
2335 if (error != 0)
2336 return error;
2337
2338 ath_beacon_sync(sc, 0);
2339 }
2340 }
2341
2342 /* Check for WLAN_CAPABILITY_PRIVACY ? */
2343 	if (avp->av_opmode != NL80211_IFTYPE_STATION) {
2344 for (i = 0; i < IEEE80211_WEP_NKID; i++)
2345 if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i))
2346 ath9k_hw_keysetmac(sc->sc_ah,
2347 (u16)i,
2348 sc->curbssid);
2349 }
2350
2351 /* Only legacy IBSS for now */
2352 if (vif->type == NL80211_IFTYPE_ADHOC)
2353 ath_update_chainmask(sc, 0);
2354
2355 return 0;
2356 }
2357
2358 #define SUPPORTED_FILTERS \
2359 (FIF_PROMISC_IN_BSS | \
2360 FIF_ALLMULTI | \
2361 FIF_CONTROL | \
2362 FIF_OTHER_BSS | \
2363 FIF_BCN_PRBRESP_PROMISC | \
2364 FIF_FCSFAIL)
2365
2366 /* FIXME: sc->sc_full_reset ? */
2367 static void ath9k_configure_filter(struct ieee80211_hw *hw,
2368 unsigned int changed_flags,
2369 unsigned int *total_flags,
2370 int mc_count,
2371 struct dev_mc_list *mclist)
2372 {
2373 struct ath_softc *sc = hw->priv;
2374 u32 rfilt;
2375
2376 changed_flags &= SUPPORTED_FILTERS;
2377 *total_flags &= SUPPORTED_FILTERS;
2378
2379 sc->rx.rxfilter = *total_flags;
2380 rfilt = ath_calcrxfilter(sc);
2381 ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
2382
2383 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
2384 if (*total_flags & FIF_BCN_PRBRESP_PROMISC) {
2385 memcpy(sc->curbssid, ath_bcast_mac, ETH_ALEN);
2386 sc->curaid = 0;
2387 ath9k_hw_write_associd(sc);
2388 }
2389 }
2390
2391 DPRINTF(sc, ATH_DBG_CONFIG, "Set HW RX filter: 0x%x\n", sc->rx.rxfilter);
2392 }
2393
2394 static void ath9k_sta_notify(struct ieee80211_hw *hw,
2395 struct ieee80211_vif *vif,
2396 enum sta_notify_cmd cmd,
2397 struct ieee80211_sta *sta)
2398 {
2399 struct ath_softc *sc = hw->priv;
2400
2401 switch (cmd) {
2402 case STA_NOTIFY_ADD:
2403 ath_node_attach(sc, sta);
2404 break;
2405 case STA_NOTIFY_REMOVE:
2406 ath_node_detach(sc, sta);
2407 break;
2408 default:
2409 break;
2410 }
2411 }
2412
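/*
 * Program the WME parameters (AIFS, CWmin/CWmax, TXOP) supplied by
 * mac80211 into the hardware queue backing the given access category.
 */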
2413 static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
2414 const struct ieee80211_tx_queue_params *params)
2415 {
2416 struct ath_softc *sc = hw->priv;
2417 struct ath9k_tx_queue_info qi;
2418 int ret = 0, qnum;
2419
2420 if (queue >= WME_NUM_AC)
2421 return 0;
2422
2423 mutex_lock(&sc->mutex);
2424
2425 qi.tqi_aifs = params->aifs;
2426 qi.tqi_cwmin = params->cw_min;
2427 qi.tqi_cwmax = params->cw_max;
2428 qi.tqi_burstTime = params->txop;
2429 qnum = ath_get_hal_qnum(queue, sc);
2430
2431 DPRINTF(sc, ATH_DBG_CONFIG,
2432 "Configure tx [queue/halq] [%d/%d], "
2433 "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
2434 queue, qnum, params->aifs, params->cw_min,
2435 params->cw_max, params->txop);
2436
2437 ret = ath_txq_update(sc, qnum, &qi);
2438 if (ret)
2439 DPRINTF(sc, ATH_DBG_FATAL, "TXQ Update failed\n");
2440
2441 mutex_unlock(&sc->mutex);
2442
2443 return ret;
2444 }
2445
2446 static int ath9k_set_key(struct ieee80211_hw *hw,
2447 enum set_key_cmd cmd,
2448 struct ieee80211_vif *vif,
2449 struct ieee80211_sta *sta,
2450 struct ieee80211_key_conf *key)
2451 {
2452 struct ath_softc *sc = hw->priv;
2453 int ret = 0;
2454
2455 mutex_lock(&sc->mutex);
2456 ath9k_ps_wakeup(sc);
2457 DPRINTF(sc, ATH_DBG_KEYCACHE, "Set HW Key\n");
2458
2459 switch (cmd) {
2460 case SET_KEY:
2461 ret = ath_key_config(sc, sta, key);
2462 if (ret >= 0) {
2463 key->hw_key_idx = ret;
2464 /* push IV and Michael MIC generation to stack */
2465 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
2466 if (key->alg == ALG_TKIP)
2467 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
2468 if (sc->sc_ah->sw_mgmt_crypto && key->alg == ALG_CCMP)
2469 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
2470 ret = 0;
2471 }
2472 break;
2473 case DISABLE_KEY:
2474 ath_key_delete(sc, key);
2475 break;
2476 default:
2477 ret = -EINVAL;
2478 }
2479
2480 ath9k_ps_restore(sc);
2481 mutex_unlock(&sc->mutex);
2482
2483 return ret;
2484 }
2485
2486 static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2487 struct ieee80211_vif *vif,
2488 struct ieee80211_bss_conf *bss_conf,
2489 u32 changed)
2490 {
2491 struct ath_softc *sc = hw->priv;
2492
2493 mutex_lock(&sc->mutex);
2494
2495 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
2496 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
2497 bss_conf->use_short_preamble);
2498 if (bss_conf->use_short_preamble)
2499 sc->sc_flags |= SC_OP_PREAMBLE_SHORT;
2500 else
2501 sc->sc_flags &= ~SC_OP_PREAMBLE_SHORT;
2502 }
2503
2504 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
2505 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
2506 bss_conf->use_cts_prot);
2507 if (bss_conf->use_cts_prot &&
2508 hw->conf.channel->band != IEEE80211_BAND_5GHZ)
2509 sc->sc_flags |= SC_OP_PROTECT_ENABLE;
2510 else
2511 sc->sc_flags &= ~SC_OP_PROTECT_ENABLE;
2512 }
2513
2514 if (changed & BSS_CHANGED_ASSOC) {
2515 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
2516 bss_conf->assoc);
2517 ath9k_bss_assoc_info(sc, vif, bss_conf);
2518 }
2519
2520 mutex_unlock(&sc->mutex);
2521 }
2522
2523 static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
2524 {
2525 u64 tsf;
2526 struct ath_softc *sc = hw->priv;
2527
2528 mutex_lock(&sc->mutex);
2529 tsf = ath9k_hw_gettsf64(sc->sc_ah);
2530 mutex_unlock(&sc->mutex);
2531
2532 return tsf;
2533 }
2534
2535 static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
2536 {
2537 struct ath_softc *sc = hw->priv;
2538
2539 mutex_lock(&sc->mutex);
2540 ath9k_hw_settsf64(sc->sc_ah, tsf);
2541 mutex_unlock(&sc->mutex);
2542 }
2543
2544 static void ath9k_reset_tsf(struct ieee80211_hw *hw)
2545 {
2546 struct ath_softc *sc = hw->priv;
2547
2548 mutex_lock(&sc->mutex);
2549 ath9k_hw_reset_tsf(sc->sc_ah);
2550 mutex_unlock(&sc->mutex);
2551 }
2552
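/*
 * Handle A-MPDU session start/stop requests from mac80211 for both RX
 * and TX aggregation, reporting TX completion via the BA callbacks.
 */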
2553 static int ath9k_ampdu_action(struct ieee80211_hw *hw,
2554 enum ieee80211_ampdu_mlme_action action,
2555 struct ieee80211_sta *sta,
2556 u16 tid, u16 *ssn)
2557 {
2558 struct ath_softc *sc = hw->priv;
2559 int ret = 0;
2560
2561 switch (action) {
2562 case IEEE80211_AMPDU_RX_START:
2563 if (!(sc->sc_flags & SC_OP_RXAGGR))
2564 ret = -ENOTSUPP;
2565 break;
2566 case IEEE80211_AMPDU_RX_STOP:
2567 break;
2568 case IEEE80211_AMPDU_TX_START:
2569 ret = ath_tx_aggr_start(sc, sta, tid, ssn);
2570 if (ret < 0)
2571 DPRINTF(sc, ATH_DBG_FATAL,
2572 "Unable to start TX aggregation\n");
2573 else
2574 ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid);
2575 break;
2576 case IEEE80211_AMPDU_TX_STOP:
2577 ret = ath_tx_aggr_stop(sc, sta, tid);
2578 if (ret < 0)
2579 DPRINTF(sc, ATH_DBG_FATAL,
2580 "Unable to stop TX aggregation\n");
2581
2582 ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
2583 break;
2584 case IEEE80211_AMPDU_TX_RESUME:
2585 ath_tx_aggr_resume(sc, sta, tid);
2586 break;
2587 default:
2588 DPRINTF(sc, ATH_DBG_FATAL, "Unknown AMPDU action\n");
2589 }
2590
2591 return ret;
2592 }
2593
2594 struct ieee80211_ops ath9k_ops = {
2595 .tx = ath9k_tx,
2596 .start = ath9k_start,
2597 .stop = ath9k_stop,
2598 .add_interface = ath9k_add_interface,
2599 .remove_interface = ath9k_remove_interface,
2600 .config = ath9k_config,
2601 .config_interface = ath9k_config_interface,
2602 .configure_filter = ath9k_configure_filter,
2603 .sta_notify = ath9k_sta_notify,
2604 .conf_tx = ath9k_conf_tx,
2605 .bss_info_changed = ath9k_bss_info_changed,
2606 .set_key = ath9k_set_key,
2607 .get_tsf = ath9k_get_tsf,
2608 .set_tsf = ath9k_set_tsf,
2609 .reset_tsf = ath9k_reset_tsf,
2610 .ampdu_action = ath9k_ampdu_action,
2611 };
2612
2613 static struct {
2614 u32 version;
2615 	const char *name;
2616 } ath_mac_bb_names[] = {
2617 { AR_SREV_VERSION_5416_PCI, "5416" },
2618 { AR_SREV_VERSION_5416_PCIE, "5418" },
2619 { AR_SREV_VERSION_9100, "9100" },
2620 { AR_SREV_VERSION_9160, "9160" },
2621 { AR_SREV_VERSION_9280, "9280" },
2622 { AR_SREV_VERSION_9285, "9285" }
2623 };
2624
2625 static struct {
2626 u16 version;
2627 	const char *name;
2628 } ath_rf_names[] = {
2629 { 0, "5133" },
2630 { AR_RAD5133_SREV_MAJOR, "5133" },
2631 { AR_RAD5122_SREV_MAJOR, "5122" },
2632 { AR_RAD2133_SREV_MAJOR, "2133" },
2633 { AR_RAD2122_SREV_MAJOR, "2122" }
2634 };
2635
2636 /*
2637 * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
2638 */
2639 const char *
2640 ath_mac_bb_name(u32 mac_bb_version)
2641 {
2642 int i;
2643
2644 	for (i = 0; i < ARRAY_SIZE(ath_mac_bb_names); i++) {
2645 if (ath_mac_bb_names[i].version == mac_bb_version) {
2646 return ath_mac_bb_names[i].name;
2647 }
2648 }
2649
2650 return "????";
2651 }
2652
2653 /*
2654 * Return the RF name. "????" is returned if the RF is unknown.
2655 */
2656 const char *
2657 ath_rf_name(u16 rf_version)
2658 {
2659 int i;
2660
2661 	for (i = 0; i < ARRAY_SIZE(ath_rf_names); i++) {
2662 if (ath_rf_names[i].version == rf_version) {
2663 return ath_rf_names[i].name;
2664 }
2665 }
2666
2667 return "????";
2668 }
2669
2670 static int __init ath9k_init(void)
2671 {
2672 int error;
2673
2674 /* Register rate control algorithm */
2675 error = ath_rate_control_register();
2676 if (error != 0) {
2677 printk(KERN_ERR
2678 "ath9k: Unable to register rate control "
2679 "algorithm: %d\n",
2680 error);
2681 goto err_out;
2682 }
2683
2684 error = ath_pci_init();
2685 if (error < 0) {
2686 printk(KERN_ERR
2687 "ath9k: No PCI devices found, driver not installed.\n");
2688 error = -ENODEV;
2689 goto err_rate_unregister;
2690 }
2691
2692 error = ath_ahb_init();
2693 if (error < 0) {
2694 error = -ENODEV;
2695 goto err_pci_exit;
2696 }
2697
2698 return 0;
2699
2700 err_pci_exit:
2701 ath_pci_exit();
2702
2703 err_rate_unregister:
2704 ath_rate_control_unregister();
2705 err_out:
2706 return error;
2707 }
2708 module_init(ath9k_init);
2709
2710 static void __exit ath9k_exit(void)
2711 {
2712 ath_ahb_exit();
2713 ath_pci_exit();
2714 ath_rate_control_unregister();
2715 printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
2716 }
2717 module_exit(ath9k_exit);