/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
17 #include <linux/slab.h>
21 static char *dev_info
= "ath9k";
23 MODULE_AUTHOR("Atheros Communications");
24 MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
25 MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
26 MODULE_LICENSE("Dual BSD/GPL");
28 static unsigned int ath9k_debug
= ATH_DBG_DEFAULT
;
29 module_param_named(debug
, ath9k_debug
, uint
, 0);
30 MODULE_PARM_DESC(debug
, "Debugging mask");
32 int ath9k_modparam_nohwcrypt
;
33 module_param_named(nohwcrypt
, ath9k_modparam_nohwcrypt
, int, 0444);
34 MODULE_PARM_DESC(nohwcrypt
, "Disable hardware encryption");
37 module_param_named(blink
, led_blink
, int, 0444);
38 MODULE_PARM_DESC(blink
, "Enable LED blink on activity");
40 static int ath9k_btcoex_enable
;
41 module_param_named(btcoex_enable
, ath9k_btcoex_enable
, int, 0444);
42 MODULE_PARM_DESC(btcoex_enable
, "Enable wifi-BT coexistence");
44 bool is_ath9k_unloaded
;
/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx)  { \
	.band = IEEE80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}
61 /* Some 2 GHz radios are actually tunable on 2312-2732
62 * on 5 MHz steps, we support the channels which we know
63 * we have calibration data for all cards though to make
65 static const struct ieee80211_channel ath9k_2ghz_chantable
[] = {
66 CHAN2G(2412, 0), /* Channel 1 */
67 CHAN2G(2417, 1), /* Channel 2 */
68 CHAN2G(2422, 2), /* Channel 3 */
69 CHAN2G(2427, 3), /* Channel 4 */
70 CHAN2G(2432, 4), /* Channel 5 */
71 CHAN2G(2437, 5), /* Channel 6 */
72 CHAN2G(2442, 6), /* Channel 7 */
73 CHAN2G(2447, 7), /* Channel 8 */
74 CHAN2G(2452, 8), /* Channel 9 */
75 CHAN2G(2457, 9), /* Channel 10 */
76 CHAN2G(2462, 10), /* Channel 11 */
77 CHAN2G(2467, 11), /* Channel 12 */
78 CHAN2G(2472, 12), /* Channel 13 */
79 CHAN2G(2484, 13), /* Channel 14 */
82 /* Some 5 GHz radios are actually tunable on XXXX-YYYY
83 * on 5 MHz steps, we support the channels which we know
84 * we have calibration data for all cards though to make
86 static const struct ieee80211_channel ath9k_5ghz_chantable
[] = {
87 /* _We_ call this UNII 1 */
88 CHAN5G(5180, 14), /* Channel 36 */
89 CHAN5G(5200, 15), /* Channel 40 */
90 CHAN5G(5220, 16), /* Channel 44 */
91 CHAN5G(5240, 17), /* Channel 48 */
92 /* _We_ call this UNII 2 */
93 CHAN5G(5260, 18), /* Channel 52 */
94 CHAN5G(5280, 19), /* Channel 56 */
95 CHAN5G(5300, 20), /* Channel 60 */
96 CHAN5G(5320, 21), /* Channel 64 */
97 /* _We_ call this "Middle band" */
98 CHAN5G(5500, 22), /* Channel 100 */
99 CHAN5G(5520, 23), /* Channel 104 */
100 CHAN5G(5540, 24), /* Channel 108 */
101 CHAN5G(5560, 25), /* Channel 112 */
102 CHAN5G(5580, 26), /* Channel 116 */
103 CHAN5G(5600, 27), /* Channel 120 */
104 CHAN5G(5620, 28), /* Channel 124 */
105 CHAN5G(5640, 29), /* Channel 128 */
106 CHAN5G(5660, 30), /* Channel 132 */
107 CHAN5G(5680, 31), /* Channel 136 */
108 CHAN5G(5700, 32), /* Channel 140 */
109 /* _We_ call this UNII 3 */
110 CHAN5G(5745, 33), /* Channel 149 */
111 CHAN5G(5765, 34), /* Channel 153 */
112 CHAN5G(5785, 35), /* Channel 157 */
113 CHAN5G(5805, 36), /* Channel 161 */
114 CHAN5G(5825, 37), /* Channel 165 */
117 /* Atheros hardware rate code addition for short premble */
118 #define SHPCHECK(__hw_rate, __flags) \
119 ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
121 #define RATE(_bitrate, _hw_rate, _flags) { \
122 .bitrate = (_bitrate), \
124 .hw_value = (_hw_rate), \
125 .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
128 static struct ieee80211_rate ath9k_legacy_rates
[] = {
130 RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE
),
131 RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE
),
132 RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE
),
/* Forward declaration: needed by the error path in ath9k_init_device(). */
static void ath9k_deinit_softc(struct ath_softc *sc);
146 * Read and write, they both share the same lock. We do this to serialize
147 * reads and writes on Atheros 802.11n PCI devices only. This is required
148 * as the FIFO on these devices can only accept sanely 2 requests.
151 static void ath9k_iowrite32(void *hw_priv
, u32 val
, u32 reg_offset
)
153 struct ath_hw
*ah
= (struct ath_hw
*) hw_priv
;
154 struct ath_common
*common
= ath9k_hw_common(ah
);
155 struct ath_softc
*sc
= (struct ath_softc
*) common
->priv
;
157 if (ah
->config
.serialize_regmode
== SER_REG_MODE_ON
) {
159 spin_lock_irqsave(&sc
->sc_serial_rw
, flags
);
160 iowrite32(val
, sc
->mem
+ reg_offset
);
161 spin_unlock_irqrestore(&sc
->sc_serial_rw
, flags
);
163 iowrite32(val
, sc
->mem
+ reg_offset
);
166 static unsigned int ath9k_ioread32(void *hw_priv
, u32 reg_offset
)
168 struct ath_hw
*ah
= (struct ath_hw
*) hw_priv
;
169 struct ath_common
*common
= ath9k_hw_common(ah
);
170 struct ath_softc
*sc
= (struct ath_softc
*) common
->priv
;
173 if (ah
->config
.serialize_regmode
== SER_REG_MODE_ON
) {
175 spin_lock_irqsave(&sc
->sc_serial_rw
, flags
);
176 val
= ioread32(sc
->mem
+ reg_offset
);
177 spin_unlock_irqrestore(&sc
->sc_serial_rw
, flags
);
179 val
= ioread32(sc
->mem
+ reg_offset
);
183 static const struct ath_ops ath9k_common_ops
= {
184 .read
= ath9k_ioread32
,
185 .write
= ath9k_iowrite32
,
/**************************/
/*     Initialization     */
/**************************/
192 static void setup_ht_cap(struct ath_softc
*sc
,
193 struct ieee80211_sta_ht_cap
*ht_info
)
195 struct ath_hw
*ah
= sc
->sc_ah
;
196 struct ath_common
*common
= ath9k_hw_common(ah
);
197 u8 tx_streams
, rx_streams
;
200 ht_info
->ht_supported
= true;
201 ht_info
->cap
= IEEE80211_HT_CAP_SUP_WIDTH_20_40
|
202 IEEE80211_HT_CAP_SM_PS
|
203 IEEE80211_HT_CAP_SGI_40
|
204 IEEE80211_HT_CAP_DSSSCCK40
;
206 if (sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_LDPC
)
207 ht_info
->cap
|= IEEE80211_HT_CAP_LDPC_CODING
;
209 if (sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_SGI_20
)
210 ht_info
->cap
|= IEEE80211_HT_CAP_SGI_20
;
212 ht_info
->ampdu_factor
= IEEE80211_HT_MAX_AMPDU_64K
;
213 ht_info
->ampdu_density
= IEEE80211_HT_MPDU_DENSITY_8
;
215 if (AR_SREV_9485(ah
))
217 else if (AR_SREV_9300_20_OR_LATER(ah
))
222 if (AR_SREV_9280_20_OR_LATER(ah
)) {
223 if (max_streams
>= 2)
224 ht_info
->cap
|= IEEE80211_HT_CAP_TX_STBC
;
225 ht_info
->cap
|= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT
);
228 /* set up supported mcs set */
229 memset(&ht_info
->mcs
, 0, sizeof(ht_info
->mcs
));
230 tx_streams
= ath9k_cmn_count_streams(common
->tx_chainmask
, max_streams
);
231 rx_streams
= ath9k_cmn_count_streams(common
->rx_chainmask
, max_streams
);
233 ath_dbg(common
, ATH_DBG_CONFIG
,
234 "TX streams %d, RX streams: %d\n",
235 tx_streams
, rx_streams
);
237 if (tx_streams
!= rx_streams
) {
238 ht_info
->mcs
.tx_params
|= IEEE80211_HT_MCS_TX_RX_DIFF
;
239 ht_info
->mcs
.tx_params
|= ((tx_streams
- 1) <<
240 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT
);
243 for (i
= 0; i
< rx_streams
; i
++)
244 ht_info
->mcs
.rx_mask
[i
] = 0xff;
246 ht_info
->mcs
.tx_params
|= IEEE80211_HT_MCS_TX_DEFINED
;
249 static int ath9k_reg_notifier(struct wiphy
*wiphy
,
250 struct regulatory_request
*request
)
252 struct ieee80211_hw
*hw
= wiphy_to_ieee80211_hw(wiphy
);
253 struct ath_wiphy
*aphy
= hw
->priv
;
254 struct ath_softc
*sc
= aphy
->sc
;
255 struct ath_regulatory
*reg
= ath9k_hw_regulatory(sc
->sc_ah
);
257 return ath_reg_notifier_apply(wiphy
, request
, reg
);
261 * This function will allocate both the DMA descriptor structure, and the
262 * buffers it contains. These are used to contain the descriptors used
265 int ath_descdma_setup(struct ath_softc
*sc
, struct ath_descdma
*dd
,
266 struct list_head
*head
, const char *name
,
267 int nbuf
, int ndesc
, bool is_tx
)
269 #define DS2PHYS(_dd, _ds) \
270 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
271 #define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
272 #define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
273 struct ath_common
*common
= ath9k_hw_common(sc
->sc_ah
);
276 int i
, bsize
, error
, desc_len
;
278 ath_dbg(common
, ATH_DBG_CONFIG
, "%s DMA: %u buffers %u desc/buf\n",
281 INIT_LIST_HEAD(head
);
284 desc_len
= sc
->sc_ah
->caps
.tx_desc_len
;
286 desc_len
= sizeof(struct ath_desc
);
288 /* ath_desc must be a multiple of DWORDs */
289 if ((desc_len
% 4) != 0) {
290 ath_err(common
, "ath_desc not DWORD aligned\n");
291 BUG_ON((desc_len
% 4) != 0);
296 dd
->dd_desc_len
= desc_len
* nbuf
* ndesc
;
299 * Need additional DMA memory because we can't use
300 * descriptors that cross the 4K page boundary. Assume
301 * one skipped descriptor per 4K page.
303 if (!(sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_4KB_SPLITTRANS
)) {
305 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd
->dd_desc_len
);
308 while (ndesc_skipped
) {
309 dma_len
= ndesc_skipped
* desc_len
;
310 dd
->dd_desc_len
+= dma_len
;
312 ndesc_skipped
= ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len
);
316 /* allocate descriptors */
317 dd
->dd_desc
= dma_alloc_coherent(sc
->dev
, dd
->dd_desc_len
,
318 &dd
->dd_desc_paddr
, GFP_KERNEL
);
319 if (dd
->dd_desc
== NULL
) {
323 ds
= (u8
*) dd
->dd_desc
;
324 ath_dbg(common
, ATH_DBG_CONFIG
, "%s DMA map: %p (%u) -> %llx (%u)\n",
325 name
, ds
, (u32
) dd
->dd_desc_len
,
326 ito64(dd
->dd_desc_paddr
), /*XXX*/(u32
) dd
->dd_desc_len
);
328 /* allocate buffers */
329 bsize
= sizeof(struct ath_buf
) * nbuf
;
330 bf
= kzalloc(bsize
, GFP_KERNEL
);
337 for (i
= 0; i
< nbuf
; i
++, bf
++, ds
+= (desc_len
* ndesc
)) {
339 bf
->bf_daddr
= DS2PHYS(dd
, ds
);
341 if (!(sc
->sc_ah
->caps
.hw_caps
&
342 ATH9K_HW_CAP_4KB_SPLITTRANS
)) {
344 * Skip descriptor addresses which can cause 4KB
345 * boundary crossing (addr + length) with a 32 dword
348 while (ATH_DESC_4KB_BOUND_CHECK(bf
->bf_daddr
)) {
349 BUG_ON((caddr_t
) bf
->bf_desc
>=
350 ((caddr_t
) dd
->dd_desc
+
353 ds
+= (desc_len
* ndesc
);
355 bf
->bf_daddr
= DS2PHYS(dd
, ds
);
358 list_add_tail(&bf
->list
, head
);
362 dma_free_coherent(sc
->dev
, dd
->dd_desc_len
, dd
->dd_desc
,
365 memset(dd
, 0, sizeof(*dd
));
367 #undef ATH_DESC_4KB_BOUND_CHECK
368 #undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
372 void ath9k_init_crypto(struct ath_softc
*sc
)
374 struct ath_common
*common
= ath9k_hw_common(sc
->sc_ah
);
377 /* Get the hardware key cache size. */
378 common
->keymax
= sc
->sc_ah
->caps
.keycache_size
;
379 if (common
->keymax
> ATH_KEYMAX
) {
380 ath_dbg(common
, ATH_DBG_ANY
,
381 "Warning, using only %u entries in %u key cache\n",
382 ATH_KEYMAX
, common
->keymax
);
383 common
->keymax
= ATH_KEYMAX
;
387 * Reset the key cache since some parts do not
388 * reset the contents on initial power up.
390 for (i
= 0; i
< common
->keymax
; i
++)
391 ath_hw_keyreset(common
, (u16
) i
);
394 * Check whether the separate key cache entries
395 * are required to handle both tx+rx MIC keys.
396 * With split mic keys the number of stations is limited
397 * to 27 otherwise 59.
399 if (sc
->sc_ah
->misc_mode
& AR_PCU_MIC_NEW_LOC_ENA
)
400 common
->crypt_caps
|= ATH_CRYPT_CAP_MIC_COMBINED
;
403 static int ath9k_init_btcoex(struct ath_softc
*sc
)
408 switch (sc
->sc_ah
->btcoex_hw
.scheme
) {
409 case ATH_BTCOEX_CFG_NONE
:
411 case ATH_BTCOEX_CFG_2WIRE
:
412 ath9k_hw_btcoex_init_2wire(sc
->sc_ah
);
414 case ATH_BTCOEX_CFG_3WIRE
:
415 ath9k_hw_btcoex_init_3wire(sc
->sc_ah
);
416 r
= ath_init_btcoex_timer(sc
);
419 txq
= sc
->tx
.txq_map
[WME_AC_BE
];
420 ath9k_hw_init_btcoex_hw(sc
->sc_ah
, txq
->axq_qnum
);
421 sc
->btcoex
.bt_stomp_type
= ATH_BTCOEX_STOMP_LOW
;
431 static int ath9k_init_queues(struct ath_softc
*sc
)
435 sc
->beacon
.beaconq
= ath9k_hw_beaconq_setup(sc
->sc_ah
);
436 sc
->beacon
.cabq
= ath_txq_setup(sc
, ATH9K_TX_QUEUE_CAB
, 0);
438 sc
->config
.cabqReadytime
= ATH_CABQ_READY_TIME
;
441 for (i
= 0; i
< WME_NUM_AC
; i
++)
442 sc
->tx
.txq_map
[i
] = ath_txq_setup(sc
, ATH9K_TX_QUEUE_DATA
, i
);
447 static int ath9k_init_channels_rates(struct ath_softc
*sc
)
451 BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable
) +
452 ARRAY_SIZE(ath9k_5ghz_chantable
) !=
455 if (sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_2GHZ
) {
456 channels
= kmemdup(ath9k_2ghz_chantable
,
457 sizeof(ath9k_2ghz_chantable
), GFP_KERNEL
);
461 sc
->sbands
[IEEE80211_BAND_2GHZ
].channels
= channels
;
462 sc
->sbands
[IEEE80211_BAND_2GHZ
].band
= IEEE80211_BAND_2GHZ
;
463 sc
->sbands
[IEEE80211_BAND_2GHZ
].n_channels
=
464 ARRAY_SIZE(ath9k_2ghz_chantable
);
465 sc
->sbands
[IEEE80211_BAND_2GHZ
].bitrates
= ath9k_legacy_rates
;
466 sc
->sbands
[IEEE80211_BAND_2GHZ
].n_bitrates
=
467 ARRAY_SIZE(ath9k_legacy_rates
);
470 if (sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_5GHZ
) {
471 channels
= kmemdup(ath9k_5ghz_chantable
,
472 sizeof(ath9k_5ghz_chantable
), GFP_KERNEL
);
474 if (sc
->sbands
[IEEE80211_BAND_2GHZ
].channels
)
475 kfree(sc
->sbands
[IEEE80211_BAND_2GHZ
].channels
);
479 sc
->sbands
[IEEE80211_BAND_5GHZ
].channels
= channels
;
480 sc
->sbands
[IEEE80211_BAND_5GHZ
].band
= IEEE80211_BAND_5GHZ
;
481 sc
->sbands
[IEEE80211_BAND_5GHZ
].n_channels
=
482 ARRAY_SIZE(ath9k_5ghz_chantable
);
483 sc
->sbands
[IEEE80211_BAND_5GHZ
].bitrates
=
484 ath9k_legacy_rates
+ 4;
485 sc
->sbands
[IEEE80211_BAND_5GHZ
].n_bitrates
=
486 ARRAY_SIZE(ath9k_legacy_rates
) - 4;
491 static void ath9k_init_misc(struct ath_softc
*sc
)
493 struct ath_common
*common
= ath9k_hw_common(sc
->sc_ah
);
496 setup_timer(&common
->ani
.timer
, ath_ani_calibrate
, (unsigned long)sc
);
498 sc
->config
.txpowlimit
= ATH_TXPOWER_MAX
;
500 if (sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_HT
) {
501 sc
->sc_flags
|= SC_OP_TXAGGR
;
502 sc
->sc_flags
|= SC_OP_RXAGGR
;
505 common
->tx_chainmask
= sc
->sc_ah
->caps
.tx_chainmask
;
506 common
->rx_chainmask
= sc
->sc_ah
->caps
.rx_chainmask
;
508 ath9k_hw_set_diversity(sc
->sc_ah
, true);
509 sc
->rx
.defant
= ath9k_hw_getdefantenna(sc
->sc_ah
);
511 memcpy(common
->bssidmask
, ath_bcast_mac
, ETH_ALEN
);
513 sc
->beacon
.slottime
= ATH9K_SLOT_TIME_9
;
515 for (i
= 0; i
< ARRAY_SIZE(sc
->beacon
.bslot
); i
++) {
516 sc
->beacon
.bslot
[i
] = NULL
;
517 sc
->beacon
.bslot_aphy
[i
] = NULL
;
520 if (sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_ANT_DIV_COMB
)
521 sc
->ant_comb
.count
= ATH_ANT_DIV_COMB_INIT_COUNT
;
524 static int ath9k_init_softc(u16 devid
, struct ath_softc
*sc
, u16 subsysid
,
525 const struct ath_bus_ops
*bus_ops
)
527 struct ath_hw
*ah
= NULL
;
528 struct ath_common
*common
;
532 ah
= kzalloc(sizeof(struct ath_hw
), GFP_KERNEL
);
536 ah
->hw_version
.devid
= devid
;
537 ah
->hw_version
.subsysid
= subsysid
;
540 if (!sc
->dev
->platform_data
)
541 ah
->ah_flags
|= AH_USE_EEPROM
;
543 common
= ath9k_hw_common(ah
);
544 common
->ops
= &ath9k_common_ops
;
545 common
->bus_ops
= bus_ops
;
549 common
->debug_mask
= ath9k_debug
;
550 common
->btcoex_enabled
= ath9k_btcoex_enable
== 1;
551 spin_lock_init(&common
->cc_lock
);
553 spin_lock_init(&sc
->wiphy_lock
);
554 spin_lock_init(&sc
->sc_serial_rw
);
555 spin_lock_init(&sc
->sc_pm_lock
);
556 mutex_init(&sc
->mutex
);
557 tasklet_init(&sc
->intr_tq
, ath9k_tasklet
, (unsigned long)sc
);
558 tasklet_init(&sc
->bcon_tasklet
, ath_beacon_tasklet
,
562 * Cache line size is used to size and align various
563 * structures used to communicate with the hardware.
565 ath_read_cachesize(common
, &csz
);
566 common
->cachelsz
= csz
<< 2; /* convert to bytes */
568 /* Initializes the hardware for all supported chipsets */
569 ret
= ath9k_hw_init(ah
);
573 ret
= ath9k_init_queues(sc
);
577 ret
= ath9k_init_btcoex(sc
);
581 ret
= ath9k_init_channels_rates(sc
);
585 ath9k_init_crypto(sc
);
591 for (i
= 0; i
< ATH9K_NUM_TX_QUEUES
; i
++)
592 if (ATH_TXQ_SETUP(sc
, i
))
593 ath_tx_cleanupq(sc
, &sc
->tx
.txq
[i
]);
604 static void ath9k_init_band_txpower(struct ath_softc
*sc
, int band
)
606 struct ieee80211_supported_band
*sband
;
607 struct ieee80211_channel
*chan
;
608 struct ath_hw
*ah
= sc
->sc_ah
;
609 struct ath_regulatory
*reg
= ath9k_hw_regulatory(ah
);
612 sband
= &sc
->sbands
[band
];
613 for (i
= 0; i
< sband
->n_channels
; i
++) {
614 chan
= &sband
->channels
[i
];
615 ah
->curchan
= &ah
->channels
[chan
->hw_value
];
616 ath9k_cmn_update_ichannel(ah
->curchan
, chan
, NL80211_CHAN_HT20
);
617 ath9k_hw_set_txpowerlimit(ah
, MAX_RATE_POWER
, true);
618 chan
->max_power
= reg
->max_power_level
/ 2;
622 static void ath9k_init_txpower_limits(struct ath_softc
*sc
)
624 struct ath_hw
*ah
= sc
->sc_ah
;
625 struct ath9k_channel
*curchan
= ah
->curchan
;
627 if (ah
->caps
.hw_caps
& ATH9K_HW_CAP_2GHZ
)
628 ath9k_init_band_txpower(sc
, IEEE80211_BAND_2GHZ
);
629 if (ah
->caps
.hw_caps
& ATH9K_HW_CAP_5GHZ
)
630 ath9k_init_band_txpower(sc
, IEEE80211_BAND_5GHZ
);
632 ah
->curchan
= curchan
;
635 void ath9k_set_hw_capab(struct ath_softc
*sc
, struct ieee80211_hw
*hw
)
637 struct ath_common
*common
= ath9k_hw_common(sc
->sc_ah
);
639 hw
->flags
= IEEE80211_HW_RX_INCLUDES_FCS
|
640 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING
|
641 IEEE80211_HW_SIGNAL_DBM
|
642 IEEE80211_HW_SUPPORTS_PS
|
643 IEEE80211_HW_PS_NULLFUNC_STACK
|
644 IEEE80211_HW_SPECTRUM_MGMT
|
645 IEEE80211_HW_REPORTS_TX_ACK_STATUS
;
647 if (sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_HT
)
648 hw
->flags
|= IEEE80211_HW_AMPDU_AGGREGATION
;
650 if (AR_SREV_9160_10_OR_LATER(sc
->sc_ah
) || ath9k_modparam_nohwcrypt
)
651 hw
->flags
|= IEEE80211_HW_MFP_CAPABLE
;
653 hw
->wiphy
->interface_modes
=
654 BIT(NL80211_IFTYPE_P2P_GO
) |
655 BIT(NL80211_IFTYPE_P2P_CLIENT
) |
656 BIT(NL80211_IFTYPE_AP
) |
657 BIT(NL80211_IFTYPE_WDS
) |
658 BIT(NL80211_IFTYPE_STATION
) |
659 BIT(NL80211_IFTYPE_ADHOC
) |
660 BIT(NL80211_IFTYPE_MESH_POINT
);
662 if (AR_SREV_5416(sc
->sc_ah
))
663 hw
->wiphy
->flags
&= ~WIPHY_FLAG_PS_ON_BY_DEFAULT
;
667 hw
->channel_change_time
= 5000;
668 hw
->max_listen_interval
= 10;
669 hw
->max_rate_tries
= 10;
670 hw
->sta_data_size
= sizeof(struct ath_node
);
671 hw
->vif_data_size
= sizeof(struct ath_vif
);
673 #ifdef CONFIG_ATH9K_RATE_CONTROL
674 hw
->rate_control_algorithm
= "ath9k_rate_control";
677 if (sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_2GHZ
)
678 hw
->wiphy
->bands
[IEEE80211_BAND_2GHZ
] =
679 &sc
->sbands
[IEEE80211_BAND_2GHZ
];
680 if (sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_5GHZ
)
681 hw
->wiphy
->bands
[IEEE80211_BAND_5GHZ
] =
682 &sc
->sbands
[IEEE80211_BAND_5GHZ
];
684 if (sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_HT
) {
685 if (sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_2GHZ
)
686 setup_ht_cap(sc
, &sc
->sbands
[IEEE80211_BAND_2GHZ
].ht_cap
);
687 if (sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_5GHZ
)
688 setup_ht_cap(sc
, &sc
->sbands
[IEEE80211_BAND_5GHZ
].ht_cap
);
691 SET_IEEE80211_PERM_ADDR(hw
, common
->macaddr
);
694 int ath9k_init_device(u16 devid
, struct ath_softc
*sc
, u16 subsysid
,
695 const struct ath_bus_ops
*bus_ops
)
697 struct ieee80211_hw
*hw
= sc
->hw
;
698 struct ath_wiphy
*aphy
= hw
->priv
;
699 struct ath_common
*common
;
702 struct ath_regulatory
*reg
;
704 /* Bring up device */
705 error
= ath9k_init_softc(devid
, sc
, subsysid
, bus_ops
);
710 common
= ath9k_hw_common(ah
);
711 ath9k_set_hw_capab(sc
, hw
);
713 /* Initialize regulatory */
714 error
= ath_regd_init(&common
->regulatory
, sc
->hw
->wiphy
,
719 reg
= &common
->regulatory
;
722 error
= ath_tx_init(sc
, ATH_TXBUF
);
727 error
= ath_rx_init(sc
, ATH_RXBUF
);
731 ath9k_init_txpower_limits(sc
);
733 /* Register with mac80211 */
734 error
= ieee80211_register_hw(hw
);
738 error
= ath9k_init_debug(ah
);
740 ath_err(common
, "Unable to create debugfs files\n");
744 /* Handle world regulatory */
745 if (!ath_is_world_regd(reg
)) {
746 error
= regulatory_hint(hw
->wiphy
, reg
->alpha2
);
751 INIT_WORK(&sc
->hw_check_work
, ath_hw_check
);
752 INIT_WORK(&sc
->paprd_work
, ath_paprd_calibrate
);
753 INIT_WORK(&sc
->chan_work
, ath9k_wiphy_chan_work
);
754 INIT_DELAYED_WORK(&sc
->wiphy_work
, ath9k_wiphy_work
);
755 sc
->wiphy_scheduler_int
= msecs_to_jiffies(500);
756 aphy
->last_rssi
= ATH_RSSI_DUMMY_MARKER
;
759 ath_start_rfkill_poll(sc
);
764 ieee80211_unregister_hw(hw
);
772 ath9k_deinit_softc(sc
);
/*****************************/
/*     De-Initialization     */
/*****************************/
781 static void ath9k_deinit_softc(struct ath_softc
*sc
)
785 if (sc
->sbands
[IEEE80211_BAND_2GHZ
].channels
)
786 kfree(sc
->sbands
[IEEE80211_BAND_2GHZ
].channels
);
788 if (sc
->sbands
[IEEE80211_BAND_5GHZ
].channels
)
789 kfree(sc
->sbands
[IEEE80211_BAND_5GHZ
].channels
);
791 if ((sc
->btcoex
.no_stomp_timer
) &&
792 sc
->sc_ah
->btcoex_hw
.scheme
== ATH_BTCOEX_CFG_3WIRE
)
793 ath_gen_timer_free(sc
->sc_ah
, sc
->btcoex
.no_stomp_timer
);
795 for (i
= 0; i
< ATH9K_NUM_TX_QUEUES
; i
++)
796 if (ATH_TXQ_SETUP(sc
, i
))
797 ath_tx_cleanupq(sc
, &sc
->tx
.txq
[i
]);
799 ath9k_hw_deinit(sc
->sc_ah
);
805 void ath9k_deinit_device(struct ath_softc
*sc
)
807 struct ieee80211_hw
*hw
= sc
->hw
;
812 wiphy_rfkill_stop_polling(sc
->hw
->wiphy
);
815 ath9k_ps_restore(sc
);
817 for (i
= 0; i
< sc
->num_sec_wiphy
; i
++) {
818 struct ath_wiphy
*aphy
= sc
->sec_wiphy
[i
];
821 sc
->sec_wiphy
[i
] = NULL
;
822 ieee80211_unregister_hw(aphy
->hw
);
823 ieee80211_free_hw(aphy
->hw
);
826 ieee80211_unregister_hw(hw
);
829 ath9k_deinit_softc(sc
);
830 kfree(sc
->sec_wiphy
);
833 void ath_descdma_cleanup(struct ath_softc
*sc
,
834 struct ath_descdma
*dd
,
835 struct list_head
*head
)
837 dma_free_coherent(sc
->dev
, dd
->dd_desc_len
, dd
->dd_desc
,
840 INIT_LIST_HEAD(head
);
841 kfree(dd
->dd_bufptr
);
842 memset(dd
, 0, sizeof(*dd
));
/************************/
/*     Module Hooks     */
/************************/
849 static int __init
ath9k_init(void)
853 /* Register rate control algorithm */
854 error
= ath_rate_control_register();
857 "ath9k: Unable to register rate control "
863 error
= ath_pci_init();
866 "ath9k: No PCI devices found, driver not installed.\n");
868 goto err_rate_unregister
;
871 error
= ath_ahb_init();
883 ath_rate_control_unregister();
887 module_init(ath9k_init
);
889 static void __exit
ath9k_exit(void)
891 is_ath9k_unloaded
= true;
894 ath_rate_control_unregister();
895 printk(KERN_INFO
"%s: Driver unloaded\n", dev_info
);
897 module_exit(ath9k_exit
);