2 * Atheros AR9170 driver
4 * mac80211 interaction code
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, Christian Lamparter <chunkeey@web.de>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
23 * This file incorporates work covered by the following copyright and
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
40 #include <linux/init.h>
41 #include <linux/module.h>
42 #include <linux/etherdevice.h>
43 #include <net/mac80211.h>
48 static int modparam_nohwcrypt
;
49 module_param_named(nohwcrypt
, modparam_nohwcrypt
, bool, S_IRUGO
);
50 MODULE_PARM_DESC(nohwcrypt
, "Disable hardware encryption.");
52 static int modparam_ht
;
53 module_param_named(ht
, modparam_ht
, bool, S_IRUGO
);
54 MODULE_PARM_DESC(ht
, "enable MPDU aggregation.");
56 #define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
57 .bitrate = (_bitrate), \
59 .hw_value = (_hw_rate) | (_txpidx) << 4, \
62 static struct ieee80211_rate __ar9170_ratetable
[] = {
64 RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE
),
65 RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE
),
66 RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE
),
78 #define ar9170_g_ratetable (__ar9170_ratetable + 0)
79 #define ar9170_g_ratetable_size 12
80 #define ar9170_a_ratetable (__ar9170_ratetable + 4)
81 #define ar9170_a_ratetable_size 8
84 * NB: The hw_value is used as an index into the ar9170_phy_freq_params
85 * array in phy.c so that we don't have to do frequency lookups!
87 #define CHAN(_freq, _idx) { \
88 .center_freq = (_freq), \
90 .max_power = 18, /* XXX */ \
93 static struct ieee80211_channel ar9170_2ghz_chantable
[] = {
110 static struct ieee80211_channel ar9170_5ghz_chantable
[] = {
149 #define AR9170_HT_CAP \
151 .ht_supported = true, \
152 .cap = IEEE80211_HT_CAP_MAX_AMSDU | \
153 IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
154 IEEE80211_HT_CAP_SGI_40 | \
155 IEEE80211_HT_CAP_GRN_FLD | \
156 IEEE80211_HT_CAP_DSSSCCK40 | \
157 IEEE80211_HT_CAP_SM_PS, \
159 .ampdu_density = 6, \
161 .rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, }, \
162 .rx_highest = cpu_to_le16(300), \
163 .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
167 static struct ieee80211_supported_band ar9170_band_2GHz
= {
168 .channels
= ar9170_2ghz_chantable
,
169 .n_channels
= ARRAY_SIZE(ar9170_2ghz_chantable
),
170 .bitrates
= ar9170_g_ratetable
,
171 .n_bitrates
= ar9170_g_ratetable_size
,
172 .ht_cap
= AR9170_HT_CAP
,
175 static struct ieee80211_supported_band ar9170_band_5GHz
= {
176 .channels
= ar9170_5ghz_chantable
,
177 .n_channels
= ARRAY_SIZE(ar9170_5ghz_chantable
),
178 .bitrates
= ar9170_a_ratetable
,
179 .n_bitrates
= ar9170_a_ratetable_size
,
180 .ht_cap
= AR9170_HT_CAP
,
183 static void ar9170_tx(struct ar9170
*ar
);
184 static bool ar9170_tx_ampdu(struct ar9170
*ar
);
186 static inline u16
ar9170_get_seq_h(struct ieee80211_hdr
*hdr
)
188 return le16_to_cpu(hdr
->seq_ctrl
) >> 4;
191 static inline u16
ar9170_get_seq(struct sk_buff
*skb
)
193 struct ar9170_tx_control
*txc
= (void *) skb
->data
;
194 return ar9170_get_seq_h((void *) txc
->frame_data
);
197 static inline u16
ar9170_get_tid(struct sk_buff
*skb
)
199 struct ar9170_tx_control
*txc
= (void *) skb
->data
;
200 struct ieee80211_hdr
*hdr
= (void *) txc
->frame_data
;
202 return (ieee80211_get_qos_ctl(hdr
))[0] & IEEE80211_QOS_CTL_TID_MASK
;
/*
 * Advance a 12-bit 802.11 sequence number with wrap-around.
 * The argument is fully parenthesized: without the inner parens,
 * an operand expression such as (a | b) would bind incorrectly
 * against the "+ 1" (e.g. GET_NEXT_SEQ(1 | 2) evaluated to 3, not 4).
 */
#define GET_NEXT_SEQ(seq)	(((seq) + 1) & 0x0fff)
#define GET_NEXT_SEQ_FROM_SKB(skb)	(GET_NEXT_SEQ(ar9170_get_seq(skb)))
208 #if (defined AR9170_QUEUE_DEBUG) || (defined AR9170_TXAGG_DEBUG)
209 static void ar9170_print_txheader(struct ar9170
*ar
, struct sk_buff
*skb
)
211 struct ar9170_tx_control
*txc
= (void *) skb
->data
;
212 struct ieee80211_tx_info
*txinfo
= IEEE80211_SKB_CB(skb
);
213 struct ar9170_tx_info
*arinfo
= (void *) txinfo
->rate_driver_data
;
214 struct ieee80211_hdr
*hdr
= (void *) txc
->frame_data
;
216 printk(KERN_DEBUG
"%s: => FRAME [skb:%p, q:%d, DA:[%pM] flags:%x s:%d "
217 "mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n",
218 wiphy_name(ar
->hw
->wiphy
), skb
, skb_get_queue_mapping(skb
),
219 ieee80211_get_DA(hdr
), arinfo
->flags
, ar9170_get_seq_h(hdr
),
220 le16_to_cpu(txc
->mac_control
), le32_to_cpu(txc
->phy_control
),
221 jiffies_to_msecs(arinfo
->timeout
- jiffies
));
224 static void __ar9170_dump_txqueue(struct ar9170
*ar
,
225 struct sk_buff_head
*queue
)
230 printk(KERN_DEBUG
"---[ cut here ]---\n");
231 printk(KERN_DEBUG
"%s: %d entries in queue.\n",
232 wiphy_name(ar
->hw
->wiphy
), skb_queue_len(queue
));
234 skb_queue_walk(queue
, skb
) {
235 printk(KERN_DEBUG
"index:%d => \n", i
++);
236 ar9170_print_txheader(ar
, skb
);
238 if (i
!= skb_queue_len(queue
))
239 printk(KERN_DEBUG
"WARNING: queue frame counter "
240 "mismatch %d != %d\n", skb_queue_len(queue
), i
);
241 printk(KERN_DEBUG
"---[ end ]---\n");
243 #endif /* AR9170_QUEUE_DEBUG || AR9170_TXAGG_DEBUG */
245 #ifdef AR9170_QUEUE_DEBUG
246 static void ar9170_dump_txqueue(struct ar9170
*ar
,
247 struct sk_buff_head
*queue
)
251 spin_lock_irqsave(&queue
->lock
, flags
);
252 __ar9170_dump_txqueue(ar
, queue
);
253 spin_unlock_irqrestore(&queue
->lock
, flags
);
255 #endif /* AR9170_QUEUE_DEBUG */
257 #ifdef AR9170_QUEUE_STOP_DEBUG
258 static void __ar9170_dump_txstats(struct ar9170
*ar
)
262 printk(KERN_DEBUG
"%s: QoS queue stats\n",
263 wiphy_name(ar
->hw
->wiphy
));
265 for (i
= 0; i
< __AR9170_NUM_TXQ
; i
++)
266 printk(KERN_DEBUG
"%s: queue:%d limit:%d len:%d waitack:%d "
267 " stopped:%d\n", wiphy_name(ar
->hw
->wiphy
), i
,
268 ar
->tx_stats
[i
].limit
, ar
->tx_stats
[i
].len
,
269 skb_queue_len(&ar
->tx_status
[i
]),
270 ieee80211_queue_stopped(ar
->hw
, i
));
272 #endif /* AR9170_QUEUE_STOP_DEBUG */
274 #ifdef AR9170_TXAGG_DEBUG
275 static void ar9170_dump_tx_status_ampdu(struct ar9170
*ar
)
279 spin_lock_irqsave(&ar
->tx_status_ampdu
.lock
, flags
);
280 printk(KERN_DEBUG
"%s: A-MPDU tx_status queue => \n",
281 wiphy_name(ar
->hw
->wiphy
));
282 __ar9170_dump_txqueue(ar
, &ar
->tx_status_ampdu
);
283 spin_unlock_irqrestore(&ar
->tx_status_ampdu
.lock
, flags
);
286 #endif /* AR9170_TXAGG_DEBUG */
288 /* caller must guarantee exclusive access for _bin_ queue. */
289 static void ar9170_recycle_expired(struct ar9170
*ar
,
290 struct sk_buff_head
*queue
,
291 struct sk_buff_head
*bin
)
293 struct sk_buff
*skb
, *old
= NULL
;
296 spin_lock_irqsave(&queue
->lock
, flags
);
297 while ((skb
= skb_peek(queue
))) {
298 struct ieee80211_tx_info
*txinfo
;
299 struct ar9170_tx_info
*arinfo
;
301 txinfo
= IEEE80211_SKB_CB(skb
);
302 arinfo
= (void *) txinfo
->rate_driver_data
;
304 if (time_is_before_jiffies(arinfo
->timeout
)) {
305 #ifdef AR9170_QUEUE_DEBUG
306 printk(KERN_DEBUG
"%s: [%ld > %ld] frame expired => "
307 "recycle \n", wiphy_name(ar
->hw
->wiphy
),
308 jiffies
, arinfo
->timeout
);
309 ar9170_print_txheader(ar
, skb
);
310 #endif /* AR9170_QUEUE_DEBUG */
311 __skb_unlink(skb
, queue
);
312 __skb_queue_tail(bin
, skb
);
317 if (unlikely(old
== skb
)) {
318 /* bail out - queue is shot. */
325 spin_unlock_irqrestore(&queue
->lock
, flags
);
/*
 * Report the final fate of a transmitted frame back to mac80211.
 *
 * NOTE(review): this extraction is corrupted — the switch header,
 * break statements and braces are missing interior lines; restore
 * the body from upstream before building. Comments below annotate
 * the fragments that are visible.
 */
328 static void ar9170_tx_status(struct ar9170
*ar
, struct sk_buff
*skb
,
331 struct ieee80211_tx_info
*txinfo
;
332 unsigned int retries
= 0;
/* reset mac80211's status area before filling it in */
334 txinfo
= IEEE80211_SKB_CB(skb
);
335 ieee80211_tx_info_clear_status(txinfo
);
/* RETRY and COMPLETE both count as acknowledged */
338 case AR9170_TX_STATUS_RETRY
:
340 case AR9170_TX_STATUS_COMPLETE
:
341 txinfo
->flags
|= IEEE80211_TX_STAT_ACK
;
/* FAILED: charge the maximum long-retry count, no ACK flag */
344 case AR9170_TX_STATUS_FAILED
:
345 retries
= ar
->hw
->conf
.long_frame_max_tx_count
;
/* unknown status code from firmware — log it */
349 printk(KERN_ERR
"%s: invalid tx_status response (%x).\n",
350 wiphy_name(ar
->hw
->wiphy
), tx_status
);
/* rates[0].count is attempts, i.e. retries + the first try */
354 txinfo
->status
.rates
[0].count
= retries
+ 1;
/* strip the driver's tx descriptor before handing the skb up */
355 skb_pull(skb
, sizeof(struct ar9170_tx_control
));
356 ieee80211_tx_status_irqsafe(ar
->hw
, skb
);
359 static void ar9170_tx_fake_ampdu_status(struct ar9170
*ar
)
361 struct sk_buff_head success
;
364 unsigned long queue_bitmap
= 0;
366 skb_queue_head_init(&success
);
368 while (skb_queue_len(&ar
->tx_status_ampdu
) > AR9170_NUM_TX_STATUS
)
369 __skb_queue_tail(&success
, skb_dequeue(&ar
->tx_status_ampdu
));
371 ar9170_recycle_expired(ar
, &ar
->tx_status_ampdu
, &success
);
373 #ifdef AR9170_TXAGG_DEBUG
374 printk(KERN_DEBUG
"%s: collected %d A-MPDU frames.\n",
375 wiphy_name(ar
->hw
->wiphy
), skb_queue_len(&success
));
376 __ar9170_dump_txqueue(ar
, &success
);
377 #endif /* AR9170_TXAGG_DEBUG */
379 while ((skb
= __skb_dequeue(&success
))) {
380 struct ieee80211_tx_info
*txinfo
;
382 queue_bitmap
|= BIT(skb_get_queue_mapping(skb
));
384 txinfo
= IEEE80211_SKB_CB(skb
);
385 ieee80211_tx_info_clear_status(txinfo
);
387 txinfo
->flags
|= IEEE80211_TX_STAT_ACK
;
388 txinfo
->status
.rates
[0].count
= 1;
390 skb_pull(skb
, sizeof(struct ar9170_tx_control
));
391 ieee80211_tx_status_irqsafe(ar
->hw
, skb
);
394 for_each_bit(i
, &queue_bitmap
, BITS_PER_BYTE
) {
395 #ifdef AR9170_QUEUE_STOP_DEBUG
396 printk(KERN_DEBUG
"%s: wake queue %d\n",
397 wiphy_name(ar
->hw
->wiphy
), i
);
398 __ar9170_dump_txstats(ar
);
399 #endif /* AR9170_QUEUE_STOP_DEBUG */
400 ieee80211_wake_queue(ar
->hw
, i
);
407 static void ar9170_tx_ampdu_callback(struct ar9170
*ar
, struct sk_buff
*skb
)
409 struct ieee80211_tx_info
*txinfo
= IEEE80211_SKB_CB(skb
);
410 struct ar9170_tx_info
*arinfo
= (void *) txinfo
->rate_driver_data
;
412 arinfo
->timeout
= jiffies
+
413 msecs_to_jiffies(AR9170_BA_TIMEOUT
);
415 skb_queue_tail(&ar
->tx_status_ampdu
, skb
);
416 ar9170_tx_fake_ampdu_status(ar
);
417 ar
->tx_ampdu_pending
--;
419 if (!list_empty(&ar
->tx_ampdu_list
) && !ar
->tx_ampdu_pending
)
423 void ar9170_tx_callback(struct ar9170
*ar
, struct sk_buff
*skb
)
425 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
426 struct ar9170_tx_info
*arinfo
= (void *) info
->rate_driver_data
;
427 unsigned int queue
= skb_get_queue_mapping(skb
);
430 spin_lock_irqsave(&ar
->tx_stats_lock
, flags
);
431 ar
->tx_stats
[queue
].len
--;
433 if (skb_queue_empty(&ar
->tx_pending
[queue
])) {
434 #ifdef AR9170_QUEUE_STOP_DEBUG
435 printk(KERN_DEBUG
"%s: wake queue %d\n",
436 wiphy_name(ar
->hw
->wiphy
), queue
);
437 __ar9170_dump_txstats(ar
);
438 #endif /* AR9170_QUEUE_STOP_DEBUG */
439 ieee80211_wake_queue(ar
->hw
, queue
);
441 spin_unlock_irqrestore(&ar
->tx_stats_lock
, flags
);
443 if (arinfo
->flags
& AR9170_TX_FLAG_BLOCK_ACK
) {
444 ar9170_tx_ampdu_callback(ar
, skb
);
445 } else if (arinfo
->flags
& AR9170_TX_FLAG_WAIT_FOR_ACK
) {
446 arinfo
->timeout
= jiffies
+
447 msecs_to_jiffies(AR9170_TX_TIMEOUT
);
449 skb_queue_tail(&ar
->tx_status
[queue
], skb
);
450 } else if (arinfo
->flags
& AR9170_TX_FLAG_NO_ACK
) {
451 ar9170_tx_status(ar
, skb
, AR9170_TX_STATUS_FAILED
);
453 #ifdef AR9170_QUEUE_DEBUG
454 printk(KERN_DEBUG
"%s: unsupported frame flags!\n",
455 wiphy_name(ar
->hw
->wiphy
));
456 ar9170_print_txheader(ar
, skb
);
457 #endif /* AR9170_QUEUE_DEBUG */
458 dev_kfree_skb_any(skb
);
461 if (!ar
->tx_stats
[queue
].len
&&
462 !skb_queue_empty(&ar
->tx_pending
[queue
])) {
467 static struct sk_buff
*ar9170_get_queued_skb(struct ar9170
*ar
,
469 struct sk_buff_head
*queue
,
476 * Unfortunately, the firmware does not tell to which (queued) frame
477 * this transmission status report belongs to.
479 * So we have to make risky guesses - with the scarce information
480 * the firmware provided (-> destination MAC, and phy_control) -
481 * and hope that we picked the right one...
484 spin_lock_irqsave(&queue
->lock
, flags
);
485 skb_queue_walk(queue
, skb
) {
486 struct ar9170_tx_control
*txc
= (void *) skb
->data
;
487 struct ieee80211_hdr
*hdr
= (void *) txc
->frame_data
;
490 if (mac
&& compare_ether_addr(ieee80211_get_DA(hdr
), mac
)) {
491 #ifdef AR9170_QUEUE_DEBUG
492 printk(KERN_DEBUG
"%s: skip frame => DA %pM != %pM\n",
493 wiphy_name(ar
->hw
->wiphy
), mac
,
494 ieee80211_get_DA(hdr
));
495 ar9170_print_txheader(ar
, skb
);
496 #endif /* AR9170_QUEUE_DEBUG */
500 r
= (le32_to_cpu(txc
->phy_control
) & AR9170_TX_PHY_MCS_MASK
) >>
501 AR9170_TX_PHY_MCS_SHIFT
;
503 if ((rate
!= AR9170_TX_INVALID_RATE
) && (r
!= rate
)) {
504 #ifdef AR9170_QUEUE_DEBUG
505 printk(KERN_DEBUG
"%s: skip frame => rate %d != %d\n",
506 wiphy_name(ar
->hw
->wiphy
), rate
, r
);
507 ar9170_print_txheader(ar
, skb
);
508 #endif /* AR9170_QUEUE_DEBUG */
512 __skb_unlink(skb
, queue
);
513 spin_unlock_irqrestore(&queue
->lock
, flags
);
517 #ifdef AR9170_QUEUE_DEBUG
518 printk(KERN_ERR
"%s: ESS:[%pM] does not have any "
519 "outstanding frames in queue.\n",
520 wiphy_name(ar
->hw
->wiphy
), mac
);
521 __ar9170_dump_txqueue(ar
, queue
);
522 #endif /* AR9170_QUEUE_DEBUG */
523 spin_unlock_irqrestore(&queue
->lock
, flags
);
528 static void ar9170_handle_block_ack(struct ar9170
*ar
, u16 count
, u16 r
)
531 struct ieee80211_tx_info
*txinfo
;
534 skb
= ar9170_get_queued_skb(ar
, NULL
, &ar
->tx_status_ampdu
, r
);
538 txinfo
= IEEE80211_SKB_CB(skb
);
539 ieee80211_tx_info_clear_status(txinfo
);
541 /* FIXME: maybe more ? */
542 txinfo
->status
.rates
[0].count
= 1;
544 skb_pull(skb
, sizeof(struct ar9170_tx_control
));
545 ieee80211_tx_status_irqsafe(ar
->hw
, skb
);
549 #ifdef AR9170_TXAGG_DEBUG
551 printk(KERN_DEBUG
"%s: got %d more failed mpdus, but no more "
552 "suitable frames left in tx_status queue.\n",
553 wiphy_name(ar
->hw
->wiphy
), count
);
555 ar9170_dump_tx_status_ampdu(ar
);
557 #endif /* AR9170_TXAGG_DEBUG */
561 * This worker tries to keeps an maintain tx_status queues.
562 * So we can guarantee that incoming tx_status reports are
563 * actually for a pending frame.
566 static void ar9170_tx_janitor(struct work_struct
*work
)
568 struct ar9170
*ar
= container_of(work
, struct ar9170
,
570 struct sk_buff_head waste
;
572 bool resched
= false;
574 if (unlikely(!IS_STARTED(ar
)))
577 skb_queue_head_init(&waste
);
579 for (i
= 0; i
< __AR9170_NUM_TXQ
; i
++) {
580 #ifdef AR9170_QUEUE_DEBUG
581 printk(KERN_DEBUG
"%s: garbage collector scans queue:%d\n",
582 wiphy_name(ar
->hw
->wiphy
), i
);
583 ar9170_dump_txqueue(ar
, &ar
->tx_pending
[i
]);
584 ar9170_dump_txqueue(ar
, &ar
->tx_status
[i
]);
585 #endif /* AR9170_QUEUE_DEBUG */
587 ar9170_recycle_expired(ar
, &ar
->tx_status
[i
], &waste
);
588 ar9170_recycle_expired(ar
, &ar
->tx_pending
[i
], &waste
);
589 skb_queue_purge(&waste
);
591 if (!skb_queue_empty(&ar
->tx_status
[i
]) ||
592 !skb_queue_empty(&ar
->tx_pending
[i
]))
596 ar9170_tx_fake_ampdu_status(ar
);
601 ieee80211_queue_delayed_work(ar
->hw
,
603 msecs_to_jiffies(AR9170_JANITOR_DELAY
));
606 void ar9170_handle_command_response(struct ar9170
*ar
, void *buf
, u32 len
)
608 struct ar9170_cmd_response
*cmd
= (void *) buf
;
610 if ((cmd
->type
& 0xc0) != 0xc0) {
611 ar
->callback_cmd(ar
, len
, buf
);
615 /* hardware event handlers */
619 * TX status notification:
620 * bytes: 0c c1 XX YY M1 M2 M3 M4 M5 M6 R4 R3 R2 R1 S2 S1
624 * M1-M6 is the MAC address
625 * R1-R4 is the transmit rate
626 * S1-S2 is the transmit status
630 u32 phy
= le32_to_cpu(cmd
->tx_status
.rate
);
631 u32 q
= (phy
& AR9170_TX_PHY_QOS_MASK
) >>
632 AR9170_TX_PHY_QOS_SHIFT
;
633 #ifdef AR9170_QUEUE_DEBUG
634 printk(KERN_DEBUG
"%s: recv tx_status for %pM, p:%08x, q:%d\n",
635 wiphy_name(ar
->hw
->wiphy
), cmd
->tx_status
.dst
, phy
, q
);
636 #endif /* AR9170_QUEUE_DEBUG */
638 skb
= ar9170_get_queued_skb(ar
, cmd
->tx_status
.dst
,
640 AR9170_TX_INVALID_RATE
);
644 ar9170_tx_status(ar
, skb
, le16_to_cpu(cmd
->tx_status
.status
));
652 if (ar
->vif
&& ar
->vif
->type
== NL80211_IFTYPE_AP
)
653 ieee80211_queue_work(ar
->hw
, &ar
->beacon_work
);
658 * (IBSS) beacon send notification
659 * bytes: 04 c2 XX YY B4 B3 B2 B1
663 * B1-B4 "should" be the number of send out beacons.
668 /* End of Atim Window */
672 /* BlockACK bitmap */
676 /* BlockACK events */
677 ar9170_handle_block_ack(ar
,
678 le16_to_cpu(cmd
->ba_fail_cnt
.failed
),
679 le16_to_cpu(cmd
->ba_fail_cnt
.rate
));
680 ar9170_tx_fake_ampdu_status(ar
);
684 /* Watchdog Interrupt */
688 /* retransmission issue / SIFS/EIFS collision ?! */
693 printk(KERN_DEBUG
"ar9170 FW: %.*s\n", len
- 4, (char *)buf
+ 4);
700 printk(KERN_DEBUG
"ar9170 FW: u8: %#.2x\n",
704 printk(KERN_DEBUG
"ar9170 FW: u8: %#.4x\n",
705 le16_to_cpup((__le16
*)((char *)buf
+ 4)));
708 printk(KERN_DEBUG
"ar9170 FW: u8: %#.8x\n",
709 le32_to_cpup((__le32
*)((char *)buf
+ 4)));
712 printk(KERN_DEBUG
"ar9170 FW: u8: %#.16lx\n",
713 (unsigned long)le64_to_cpup(
714 (__le64
*)((char *)buf
+ 4)));
719 print_hex_dump_bytes("ar9170 FW:", DUMP_PREFIX_NONE
,
720 (char *)buf
+ 4, len
- 4);
724 printk(KERN_INFO
"received unhandled event %x\n", cmd
->type
);
725 print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE
, buf
, len
);
730 static void ar9170_rx_reset_rx_mpdu(struct ar9170
*ar
)
732 memset(&ar
->rx_mpdu
.plcp
, 0, sizeof(struct ar9170_rx_head
));
733 ar
->rx_mpdu
.has_plcp
= false;
736 int ar9170_nag_limiter(struct ar9170
*ar
)
741 * we expect all sorts of errors in promiscuous mode.
742 * don't bother with it, it's OK!
744 if (ar
->sniffer_enabled
)
748 * only go for frequent errors! The hardware tends to
749 * do some stupid thing once in a while under load, in
750 * noisy environments or just for fun!
752 if (time_before(jiffies
, ar
->bad_hw_nagger
) && net_ratelimit())
753 print_message
= true;
755 print_message
= false;
757 /* reset threshold for "once in a while" */
758 ar
->bad_hw_nagger
= jiffies
+ HZ
/ 4;
759 return print_message
;
762 static int ar9170_rx_mac_status(struct ar9170
*ar
,
763 struct ar9170_rx_head
*head
,
764 struct ar9170_rx_macstatus
*mac
,
765 struct ieee80211_rx_status
*status
)
769 BUILD_BUG_ON(sizeof(struct ar9170_rx_head
) != 12);
770 BUILD_BUG_ON(sizeof(struct ar9170_rx_macstatus
) != 4);
773 if (error
& AR9170_RX_ERROR_MMIC
) {
774 status
->flag
|= RX_FLAG_MMIC_ERROR
;
775 error
&= ~AR9170_RX_ERROR_MMIC
;
778 if (error
& AR9170_RX_ERROR_PLCP
) {
779 status
->flag
|= RX_FLAG_FAILED_PLCP_CRC
;
780 error
&= ~AR9170_RX_ERROR_PLCP
;
782 if (!(ar
->filter_state
& FIF_PLCPFAIL
))
786 if (error
& AR9170_RX_ERROR_FCS
) {
787 status
->flag
|= RX_FLAG_FAILED_FCS_CRC
;
788 error
&= ~AR9170_RX_ERROR_FCS
;
790 if (!(ar
->filter_state
& FIF_FCSFAIL
))
794 decrypt
= ar9170_get_decrypt_type(mac
);
795 if (!(decrypt
& AR9170_RX_ENC_SOFTWARE
) &&
796 decrypt
!= AR9170_ENC_ALG_NONE
)
797 status
->flag
|= RX_FLAG_DECRYPTED
;
799 /* ignore wrong RA errors */
800 error
&= ~AR9170_RX_ERROR_WRONG_RA
;
802 if (error
& AR9170_RX_ERROR_DECRYPT
) {
803 error
&= ~AR9170_RX_ERROR_DECRYPT
;
805 * Rx decryption is done in place,
806 * the original data is lost anyway.
812 /* drop any other error frames */
813 if (unlikely(error
)) {
814 /* TODO: update netdevice's RX dropped/errors statistics */
816 if (ar9170_nag_limiter(ar
))
817 printk(KERN_DEBUG
"%s: received frame with "
818 "suspicious error code (%#x).\n",
819 wiphy_name(ar
->hw
->wiphy
), error
);
824 status
->band
= ar
->channel
->band
;
825 status
->freq
= ar
->channel
->center_freq
;
827 switch (mac
->status
& AR9170_RX_STATUS_MODULATION_MASK
) {
828 case AR9170_RX_STATUS_MODULATION_CCK
:
829 if (mac
->status
& AR9170_RX_STATUS_SHORT_PREAMBLE
)
830 status
->flag
|= RX_FLAG_SHORTPRE
;
831 switch (head
->plcp
[0]) {
833 status
->rate_idx
= 0;
836 status
->rate_idx
= 1;
839 status
->rate_idx
= 2;
842 status
->rate_idx
= 3;
845 if (ar9170_nag_limiter(ar
))
846 printk(KERN_ERR
"%s: invalid plcp cck rate "
847 "(%x).\n", wiphy_name(ar
->hw
->wiphy
),
853 case AR9170_RX_STATUS_MODULATION_OFDM
:
854 switch (head
->plcp
[0] & 0xf) {
856 status
->rate_idx
= 0;
859 status
->rate_idx
= 1;
862 status
->rate_idx
= 2;
865 status
->rate_idx
= 3;
868 status
->rate_idx
= 4;
871 status
->rate_idx
= 5;
874 status
->rate_idx
= 6;
877 status
->rate_idx
= 7;
880 if (ar9170_nag_limiter(ar
))
881 printk(KERN_ERR
"%s: invalid plcp ofdm rate "
882 "(%x).\n", wiphy_name(ar
->hw
->wiphy
),
886 if (status
->band
== IEEE80211_BAND_2GHZ
)
887 status
->rate_idx
+= 4;
890 case AR9170_RX_STATUS_MODULATION_HT
:
891 if (head
->plcp
[3] & 0x80)
892 status
->flag
|= RX_FLAG_40MHZ
;
893 if (head
->plcp
[6] & 0x80)
894 status
->flag
|= RX_FLAG_SHORT_GI
;
896 status
->rate_idx
= clamp(0, 75, head
->plcp
[6] & 0x7f);
897 status
->flag
|= RX_FLAG_HT
;
900 case AR9170_RX_STATUS_MODULATION_DUPOFDM
:
902 if (ar9170_nag_limiter(ar
))
903 printk(KERN_ERR
"%s: invalid modulation\n",
904 wiphy_name(ar
->hw
->wiphy
));
911 static void ar9170_rx_phy_status(struct ar9170
*ar
,
912 struct ar9170_rx_phystatus
*phy
,
913 struct ieee80211_rx_status
*status
)
917 BUILD_BUG_ON(sizeof(struct ar9170_rx_phystatus
) != 20);
919 for (i
= 0; i
< 3; i
++)
920 if (phy
->rssi
[i
] != 0x80)
921 status
->antenna
|= BIT(i
);
923 /* post-process RSSI */
924 for (i
= 0; i
< 7; i
++)
925 if (phy
->rssi
[i
] & 0x80)
926 phy
->rssi
[i
] = ((phy
->rssi
[i
] & 0x7f) + 1) & 0x7f;
928 /* TODO: we could do something with phy_errors */
929 status
->signal
= ar
->noise
[0] + phy
->rssi_combined
;
930 status
->noise
= ar
->noise
[0];
933 static struct sk_buff
*ar9170_rx_copy_data(u8
*buf
, int len
)
937 struct ieee80211_hdr
*hdr
= (void *) buf
;
939 if (ieee80211_is_data_qos(hdr
->frame_control
)) {
940 u8
*qc
= ieee80211_get_qos_ctl(hdr
);
941 reserved
+= NET_IP_ALIGN
;
943 if (*qc
& IEEE80211_QOS_CONTROL_A_MSDU_PRESENT
)
944 reserved
+= NET_IP_ALIGN
;
947 if (ieee80211_has_a4(hdr
->frame_control
))
948 reserved
+= NET_IP_ALIGN
;
950 reserved
= 32 + (reserved
& NET_IP_ALIGN
);
952 skb
= dev_alloc_skb(len
+ reserved
);
954 skb_reserve(skb
, reserved
);
955 memcpy(skb_put(skb
, len
), buf
, len
);
962 * If the frame alignment is right (or the kernel has
963 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there
964 * is only a single MPDU in the USB frame, then we could
965 * submit to mac80211 the SKB directly. However, since
966 * there may be multiple packets in one SKB in stream
967 * mode, and we need to observe the proper ordering,
968 * this is non-trivial.
971 static void ar9170_handle_mpdu(struct ar9170
*ar
, u8
*buf
, int len
)
973 struct ar9170_rx_head
*head
;
974 struct ar9170_rx_macstatus
*mac
;
975 struct ar9170_rx_phystatus
*phy
= NULL
;
976 struct ieee80211_rx_status status
;
980 if (unlikely(!IS_STARTED(ar
) || len
< (sizeof(*mac
))))
984 mpdu_len
= len
- sizeof(*mac
);
986 mac
= (void *)(buf
+ mpdu_len
);
987 if (unlikely(mac
->error
& AR9170_RX_ERROR_FATAL
)) {
988 /* this frame is too damaged and can't be used - drop it */
993 switch (mac
->status
& AR9170_RX_STATUS_MPDU_MASK
) {
994 case AR9170_RX_STATUS_MPDU_FIRST
:
995 /* first mpdu packet has the plcp header */
996 if (likely(mpdu_len
>= sizeof(struct ar9170_rx_head
))) {
998 memcpy(&ar
->rx_mpdu
.plcp
, (void *) buf
,
999 sizeof(struct ar9170_rx_head
));
1001 mpdu_len
-= sizeof(struct ar9170_rx_head
);
1002 buf
+= sizeof(struct ar9170_rx_head
);
1003 ar
->rx_mpdu
.has_plcp
= true;
1005 if (ar9170_nag_limiter(ar
))
1006 printk(KERN_ERR
"%s: plcp info is clipped.\n",
1007 wiphy_name(ar
->hw
->wiphy
));
1012 case AR9170_RX_STATUS_MPDU_LAST
:
1013 /* last mpdu has a extra tail with phy status information */
1015 if (likely(mpdu_len
>= sizeof(struct ar9170_rx_phystatus
))) {
1016 mpdu_len
-= sizeof(struct ar9170_rx_phystatus
);
1017 phy
= (void *)(buf
+ mpdu_len
);
1019 if (ar9170_nag_limiter(ar
))
1020 printk(KERN_ERR
"%s: frame tail is clipped.\n",
1021 wiphy_name(ar
->hw
->wiphy
));
1025 case AR9170_RX_STATUS_MPDU_MIDDLE
:
1026 /* middle mpdus are just data */
1027 if (unlikely(!ar
->rx_mpdu
.has_plcp
)) {
1028 if (!ar9170_nag_limiter(ar
))
1031 printk(KERN_ERR
"%s: rx stream did not start "
1032 "with a first_mpdu frame tag.\n",
1033 wiphy_name(ar
->hw
->wiphy
));
1038 head
= &ar
->rx_mpdu
.plcp
;
1041 case AR9170_RX_STATUS_MPDU_SINGLE
:
1042 /* single mpdu - has plcp (head) and phy status (tail) */
1043 head
= (void *) buf
;
1045 mpdu_len
-= sizeof(struct ar9170_rx_head
);
1046 mpdu_len
-= sizeof(struct ar9170_rx_phystatus
);
1048 buf
+= sizeof(struct ar9170_rx_head
);
1049 phy
= (void *)(buf
+ mpdu_len
);
1057 if (unlikely(mpdu_len
< FCS_LEN
))
1060 memset(&status
, 0, sizeof(status
));
1061 if (unlikely(ar9170_rx_mac_status(ar
, head
, mac
, &status
)))
1065 ar9170_rx_phy_status(ar
, phy
, &status
);
1067 skb
= ar9170_rx_copy_data(buf
, mpdu_len
);
1069 memcpy(IEEE80211_SKB_RXCB(skb
), &status
, sizeof(status
));
1070 ieee80211_rx_irqsafe(ar
->hw
, skb
);
1074 void ar9170_rx(struct ar9170
*ar
, struct sk_buff
*skb
)
1076 unsigned int i
, tlen
, resplen
, wlen
= 0, clen
= 0;
1083 clen
= tbuf
[1] << 8 | tbuf
[0];
1084 wlen
= ALIGN(clen
, 4);
1086 /* check if this is stream has a valid tag.*/
1087 if (tbuf
[2] != 0 || tbuf
[3] != 0x4e) {
1089 * TODO: handle the highly unlikely event that the
1090 * corrupted stream has the TAG at the right position.
1093 /* check if the frame can be repaired. */
1094 if (!ar
->rx_failover_missing
) {
1095 /* this is no "short read". */
1096 if (ar9170_nag_limiter(ar
)) {
1097 printk(KERN_ERR
"%s: missing tag!\n",
1098 wiphy_name(ar
->hw
->wiphy
));
1104 if (ar
->rx_failover_missing
> tlen
) {
1105 if (ar9170_nag_limiter(ar
)) {
1106 printk(KERN_ERR
"%s: possible multi "
1107 "stream corruption!\n",
1108 wiphy_name(ar
->hw
->wiphy
));
1114 memcpy(skb_put(ar
->rx_failover
, tlen
), tbuf
, tlen
);
1115 ar
->rx_failover_missing
-= tlen
;
1117 if (ar
->rx_failover_missing
<= 0) {
1119 * nested ar9170_rx call!
1120 * termination is guranteed, even when the
1121 * combined frame also have a element with
1125 ar
->rx_failover_missing
= 0;
1126 ar9170_rx(ar
, ar
->rx_failover
);
1128 skb_reset_tail_pointer(ar
->rx_failover
);
1129 skb_trim(ar
->rx_failover
, 0);
1135 /* check if stream is clipped */
1136 if (wlen
> tlen
- 4) {
1137 if (ar
->rx_failover_missing
) {
1138 /* TODO: handle double stream corruption. */
1139 if (ar9170_nag_limiter(ar
)) {
1140 printk(KERN_ERR
"%s: double rx stream "
1142 wiphy_name(ar
->hw
->wiphy
));
1149 * save incomplete data set.
1150 * the firmware will resend the missing bits when
1151 * the rx - descriptor comes round again.
1154 memcpy(skb_put(ar
->rx_failover
, tlen
), tbuf
, tlen
);
1155 ar
->rx_failover_missing
= clen
- tlen
;
1165 /* weird thing, but this is the same in the original driver */
1166 while (resplen
> 2 && i
< 12 &&
1167 respbuf
[0] == 0xff && respbuf
[1] == 0xff) {
1176 /* found the 6 * 0xffff marker? */
1178 ar9170_handle_command_response(ar
, respbuf
, resplen
);
1180 ar9170_handle_mpdu(ar
, respbuf
, clen
);
1184 if (net_ratelimit())
1185 printk(KERN_ERR
"%s: %d bytes of unprocessed "
1186 "data left in rx stream!\n",
1187 wiphy_name(ar
->hw
->wiphy
), tlen
);
1195 printk(KERN_ERR
"%s: damaged RX stream data [want:%d, "
1196 "data:%d, rx:%d, pending:%d ]\n",
1197 wiphy_name(ar
->hw
->wiphy
), clen
, wlen
, tlen
,
1198 ar
->rx_failover_missing
);
1200 if (ar
->rx_failover_missing
)
1201 print_hex_dump_bytes("rxbuf:", DUMP_PREFIX_OFFSET
,
1202 ar
->rx_failover
->data
,
1203 ar
->rx_failover
->len
);
1205 print_hex_dump_bytes("stream:", DUMP_PREFIX_OFFSET
,
1206 skb
->data
, skb
->len
);
1208 printk(KERN_ERR
"%s: please check your hardware and cables, if "
1209 "you see this message frequently.\n",
1210 wiphy_name(ar
->hw
->wiphy
));
1213 if (ar
->rx_failover_missing
) {
1214 skb_reset_tail_pointer(ar
->rx_failover
);
1215 skb_trim(ar
->rx_failover
, 0);
1216 ar
->rx_failover_missing
= 0;
1220 #define AR9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) \
1222 queue.aifs = ai_fs; \
1223 queue.cw_min = cwmin; \
1224 queue.cw_max = cwmax; \
1225 queue.txop = _txop; \
1228 static int ar9170_op_start(struct ieee80211_hw
*hw
)
1230 struct ar9170
*ar
= hw
->priv
;
1233 mutex_lock(&ar
->mutex
);
1235 ar
->filter_changed
= 0;
1237 /* reinitialize queues statistics */
1238 memset(&ar
->tx_stats
, 0, sizeof(ar
->tx_stats
));
1239 for (i
= 0; i
< __AR9170_NUM_TXQ
; i
++)
1240 ar
->tx_stats
[i
].limit
= AR9170_TXQ_DEPTH
;
1242 /* reset QoS defaults */
1243 AR9170_FILL_QUEUE(ar
->edcf
[0], 3, 15, 1023, 0); /* BEST EFFORT*/
1244 AR9170_FILL_QUEUE(ar
->edcf
[1], 7, 15, 1023, 0); /* BACKGROUND */
1245 AR9170_FILL_QUEUE(ar
->edcf
[2], 2, 7, 15, 94); /* VIDEO */
1246 AR9170_FILL_QUEUE(ar
->edcf
[3], 2, 3, 7, 47); /* VOICE */
1247 AR9170_FILL_QUEUE(ar
->edcf
[4], 2, 3, 7, 0); /* SPECIAL */
1249 /* set sane AMPDU defaults */
1250 ar
->global_ampdu_density
= 6;
1251 ar
->global_ampdu_factor
= 3;
1253 ar
->bad_hw_nagger
= jiffies
;
1259 err
= ar9170_init_mac(ar
);
1263 err
= ar9170_set_qos(ar
);
1267 err
= ar9170_init_phy(ar
, IEEE80211_BAND_2GHZ
);
1271 err
= ar9170_init_rf(ar
);
1276 err
= ar9170_write_reg(ar
, 0x1c3d30, 0x100);
1280 ar
->state
= AR9170_STARTED
;
1283 mutex_unlock(&ar
->mutex
);
1287 static void ar9170_op_stop(struct ieee80211_hw
*hw
)
1289 struct ar9170
*ar
= hw
->priv
;
1293 ar
->state
= AR9170_IDLE
;
1295 cancel_delayed_work_sync(&ar
->tx_janitor
);
1296 #ifdef CONFIG_AR9170_LEDS
1297 cancel_delayed_work_sync(&ar
->led_work
);
1299 cancel_work_sync(&ar
->filter_config_work
);
1300 cancel_work_sync(&ar
->beacon_work
);
1302 mutex_lock(&ar
->mutex
);
1304 if (IS_ACCEPTING_CMD(ar
)) {
1305 ar9170_set_leds_state(ar
, 0);
1308 ar9170_write_reg(ar
, 0x1c3d30, 0);
1312 for (i
= 0; i
< __AR9170_NUM_TXQ
; i
++) {
1313 skb_queue_purge(&ar
->tx_pending
[i
]);
1314 skb_queue_purge(&ar
->tx_status
[i
]);
1316 skb_queue_purge(&ar
->tx_status_ampdu
);
1318 mutex_unlock(&ar
->mutex
);
1321 static void ar9170_tx_indicate_immba(struct ar9170
*ar
, struct sk_buff
*skb
)
1323 struct ar9170_tx_control
*txc
= (void *) skb
->data
;
1325 txc
->mac_control
|= cpu_to_le16(AR9170_TX_MAC_IMM_AMPDU
);
1328 static void ar9170_tx_copy_phy(struct ar9170
*ar
, struct sk_buff
*dst
,
1329 struct sk_buff
*src
)
1331 struct ar9170_tx_control
*dst_txc
, *src_txc
;
1332 struct ieee80211_tx_info
*dst_info
, *src_info
;
1333 struct ar9170_tx_info
*dst_arinfo
, *src_arinfo
;
1335 src_txc
= (void *) src
->data
;
1336 src_info
= IEEE80211_SKB_CB(src
);
1337 src_arinfo
= (void *) src_info
->rate_driver_data
;
1339 dst_txc
= (void *) dst
->data
;
1340 dst_info
= IEEE80211_SKB_CB(dst
);
1341 dst_arinfo
= (void *) dst_info
->rate_driver_data
;
1343 dst_txc
->phy_control
= src_txc
->phy_control
;
1345 /* same MCS for the whole aggregate */
1346 memcpy(dst_info
->driver_rates
, src_info
->driver_rates
,
1347 sizeof(dst_info
->driver_rates
));
1350 static int ar9170_tx_prepare(struct ar9170
*ar
, struct sk_buff
*skb
)
1352 struct ieee80211_hdr
*hdr
;
1353 struct ar9170_tx_control
*txc
;
1354 struct ieee80211_tx_info
*info
;
1355 struct ieee80211_tx_rate
*txrate
;
1356 struct ar9170_tx_info
*arinfo
;
1357 unsigned int queue
= skb_get_queue_mapping(skb
);
1361 BUILD_BUG_ON(sizeof(*arinfo
) > sizeof(info
->rate_driver_data
));
1363 hdr
= (void *)skb
->data
;
1364 info
= IEEE80211_SKB_CB(skb
);
1367 txc
= (void *)skb_push(skb
, sizeof(*txc
));
1369 if (info
->control
.hw_key
) {
1370 icv
= info
->control
.hw_key
->icv_len
;
1372 switch (info
->control
.hw_key
->alg
) {
1374 keytype
= AR9170_TX_MAC_ENCR_RC4
;
1377 keytype
= AR9170_TX_MAC_ENCR_RC4
;
1380 keytype
= AR9170_TX_MAC_ENCR_AES
;
1389 txc
->length
= cpu_to_le16(len
+ icv
+ 4);
1391 txc
->mac_control
= cpu_to_le16(AR9170_TX_MAC_HW_DURATION
|
1392 AR9170_TX_MAC_BACKOFF
);
1393 txc
->mac_control
|= cpu_to_le16(ar9170_qos_hwmap
[queue
] <<
1394 AR9170_TX_MAC_QOS_SHIFT
);
1395 txc
->mac_control
|= cpu_to_le16(keytype
);
1396 txc
->phy_control
= cpu_to_le32(0);
1398 if (info
->flags
& IEEE80211_TX_CTL_NO_ACK
)
1399 txc
->mac_control
|= cpu_to_le16(AR9170_TX_MAC_NO_ACK
);
1401 txrate
= &info
->control
.rates
[0];
1402 if (txrate
->flags
& IEEE80211_TX_RC_USE_CTS_PROTECT
)
1403 txc
->mac_control
|= cpu_to_le16(AR9170_TX_MAC_PROT_CTS
);
1404 else if (txrate
->flags
& IEEE80211_TX_RC_USE_RTS_CTS
)
1405 txc
->mac_control
|= cpu_to_le16(AR9170_TX_MAC_PROT_RTS
);
1407 arinfo
= (void *)info
->rate_driver_data
;
1408 arinfo
->timeout
= jiffies
+ msecs_to_jiffies(AR9170_QUEUE_TIMEOUT
);
1410 if (!(info
->flags
& IEEE80211_TX_CTL_NO_ACK
) &&
1411 (is_valid_ether_addr(ieee80211_get_DA(hdr
)))) {
1412 if (info
->flags
& IEEE80211_TX_CTL_AMPDU
) {
1413 if (unlikely(!info
->control
.sta
))
1416 txc
->mac_control
|= cpu_to_le16(AR9170_TX_MAC_AGGR
);
1417 arinfo
->flags
= AR9170_TX_FLAG_BLOCK_ACK
;
1422 txc
->mac_control
|= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE
);
1425 * Putting the QoS queue bits into an unexplored territory is
1426 * certainly not elegant.
1428 * In my defense: This idea provides a reasonable way to
1429 * smuggle valuable information to the tx_status callback.
1430 * Also, the idea behind this bit-abuse came straight from
1431 * the original driver code.
1435 cpu_to_le32(queue
<< AR9170_TX_PHY_QOS_SHIFT
);
1436 arinfo
->flags
= AR9170_TX_FLAG_WAIT_FOR_ACK
;
1438 arinfo
->flags
= AR9170_TX_FLAG_NO_ACK
;
1445 skb_pull(skb
, sizeof(*txc
));
1449 static void ar9170_tx_prepare_phy(struct ar9170
*ar
, struct sk_buff
*skb
)
1451 struct ar9170_tx_control
*txc
;
1452 struct ieee80211_tx_info
*info
;
1453 struct ieee80211_rate
*rate
= NULL
;
1454 struct ieee80211_tx_rate
*txrate
;
1457 txc
= (void *) skb
->data
;
1458 info
= IEEE80211_SKB_CB(skb
);
1459 txrate
= &info
->control
.rates
[0];
1461 if (txrate
->flags
& IEEE80211_TX_RC_GREEN_FIELD
)
1462 txc
->phy_control
|= cpu_to_le32(AR9170_TX_PHY_GREENFIELD
);
1464 if (txrate
->flags
& IEEE80211_TX_RC_USE_SHORT_PREAMBLE
)
1465 txc
->phy_control
|= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE
);
1467 if (txrate
->flags
& IEEE80211_TX_RC_40_MHZ_WIDTH
)
1468 txc
->phy_control
|= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ
);
1469 /* this works because 40 MHz is 2 and dup is 3 */
1470 if (txrate
->flags
& IEEE80211_TX_RC_DUP_DATA
)
1471 txc
->phy_control
|= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP
);
1473 if (txrate
->flags
& IEEE80211_TX_RC_SHORT_GI
)
1474 txc
->phy_control
|= cpu_to_le32(AR9170_TX_PHY_SHORT_GI
);
1476 if (txrate
->flags
& IEEE80211_TX_RC_MCS
) {
1477 u32 r
= txrate
->idx
;
1480 /* heavy clip control */
1481 txc
->phy_control
|= cpu_to_le32((r
& 0x7) << 7);
1483 r
<<= AR9170_TX_PHY_MCS_SHIFT
;
1484 BUG_ON(r
& ~AR9170_TX_PHY_MCS_MASK
);
1486 txc
->phy_control
|= cpu_to_le32(r
& AR9170_TX_PHY_MCS_MASK
);
1487 txc
->phy_control
|= cpu_to_le32(AR9170_TX_PHY_MOD_HT
);
1489 if (txrate
->flags
& IEEE80211_TX_RC_40_MHZ_WIDTH
) {
1490 if (info
->band
== IEEE80211_BAND_5GHZ
)
1491 txpower
= ar
->power_5G_ht40
;
1493 txpower
= ar
->power_2G_ht40
;
1495 if (info
->band
== IEEE80211_BAND_5GHZ
)
1496 txpower
= ar
->power_5G_ht20
;
1498 txpower
= ar
->power_2G_ht20
;
1501 power
= txpower
[(txrate
->idx
) & 7];
1506 u8 idx
= txrate
->idx
;
1508 if (info
->band
!= IEEE80211_BAND_2GHZ
) {
1510 txpower
= ar
->power_5G_leg
;
1511 mod
= AR9170_TX_PHY_MOD_OFDM
;
1514 txpower
= ar
->power_2G_cck
;
1515 mod
= AR9170_TX_PHY_MOD_CCK
;
1517 mod
= AR9170_TX_PHY_MOD_OFDM
;
1518 txpower
= ar
->power_2G_ofdm
;
1522 rate
= &__ar9170_ratetable
[idx
];
1524 phyrate
= rate
->hw_value
& 0xF;
1525 power
= txpower
[(rate
->hw_value
& 0x30) >> 4];
1526 phyrate
<<= AR9170_TX_PHY_MCS_SHIFT
;
1528 txc
->phy_control
|= cpu_to_le32(mod
);
1529 txc
->phy_control
|= cpu_to_le32(phyrate
);
1532 power
<<= AR9170_TX_PHY_TX_PWR_SHIFT
;
1533 power
&= AR9170_TX_PHY_TX_PWR_MASK
;
1534 txc
->phy_control
|= cpu_to_le32(power
);
1537 if (ar
->eeprom
.tx_mask
== 1) {
1538 chains
= AR9170_TX_PHY_TXCHAIN_1
;
1540 chains
= AR9170_TX_PHY_TXCHAIN_2
;
1542 /* >= 36M legacy OFDM - use only one chain */
1543 if (rate
&& rate
->bitrate
>= 360)
1544 chains
= AR9170_TX_PHY_TXCHAIN_1
;
1546 txc
->phy_control
|= cpu_to_le32(chains
<< AR9170_TX_PHY_TXCHAIN_SHIFT
);
1549 static bool ar9170_tx_ampdu(struct ar9170
*ar
)
1551 struct sk_buff_head agg
;
1552 struct ar9170_sta_tid
*tid_info
= NULL
, *tmp
;
1553 struct sk_buff
*skb
, *first
= NULL
;
1554 unsigned long flags
, f2
;
1556 u16 seq
, queue
, tmpssn
;
1559 skb_queue_head_init(&agg
);
1561 spin_lock_irqsave(&ar
->tx_ampdu_list_lock
, flags
);
1562 if (list_empty(&ar
->tx_ampdu_list
)) {
1563 #ifdef AR9170_TXAGG_DEBUG
1564 printk(KERN_DEBUG
"%s: aggregation list is empty.\n",
1565 wiphy_name(ar
->hw
->wiphy
));
1566 #endif /* AR9170_TXAGG_DEBUG */
1570 list_for_each_entry_safe(tid_info
, tmp
, &ar
->tx_ampdu_list
, list
) {
1571 if (tid_info
->state
!= AR9170_TID_STATE_COMPLETE
) {
1572 #ifdef AR9170_TXAGG_DEBUG
1573 printk(KERN_DEBUG
"%s: dangling aggregation entry!\n",
1574 wiphy_name(ar
->hw
->wiphy
));
1575 #endif /* AR9170_TXAGG_DEBUG */
1580 #ifdef AR9170_TXAGG_DEBUG
1581 printk(KERN_DEBUG
"%s: enough frames aggregated.\n",
1582 wiphy_name(ar
->hw
->wiphy
));
1583 #endif /* AR9170_TXAGG_DEBUG */
1587 queue
= TID_TO_WME_AC(tid_info
->tid
);
1589 if (skb_queue_len(&ar
->tx_pending
[queue
]) >=
1590 AR9170_NUM_TX_AGG_MAX
) {
1591 #ifdef AR9170_TXAGG_DEBUG
1592 printk(KERN_DEBUG
"%s: queue %d full.\n",
1593 wiphy_name(ar
->hw
->wiphy
), queue
);
1594 #endif /* AR9170_TXAGG_DEBUG */
1598 list_del_init(&tid_info
->list
);
1600 spin_lock_irqsave(&tid_info
->queue
.lock
, f2
);
1601 tmpssn
= seq
= tid_info
->ssn
;
1602 first
= skb_peek(&tid_info
->queue
);
1605 tmpssn
= ar9170_get_seq(first
);
1607 if (unlikely(tmpssn
!= seq
)) {
1608 #ifdef AR9170_TXAGG_DEBUG
1609 printk(KERN_DEBUG
"%s: ssn mismatch [%d != %d]\n.",
1610 wiphy_name(ar
->hw
->wiphy
), seq
, tmpssn
);
1611 #endif /* AR9170_TXAGG_DEBUG */
1612 tid_info
->ssn
= tmpssn
;
1615 #ifdef AR9170_TXAGG_DEBUG
1616 printk(KERN_DEBUG
"%s: generate A-MPDU for tid:%d ssn:%d with "
1617 "%d queued frames.\n", wiphy_name(ar
->hw
->wiphy
),
1618 tid_info
->tid
, tid_info
->ssn
,
1619 skb_queue_len(&tid_info
->queue
));
1620 __ar9170_dump_txqueue(ar
, &tid_info
->queue
);
1621 #endif /* AR9170_TXAGG_DEBUG */
1623 while ((skb
= skb_peek(&tid_info
->queue
))) {
1624 if (unlikely(ar9170_get_seq(skb
) != seq
))
1627 __skb_unlink(skb
, &tid_info
->queue
);
1628 tid_info
->ssn
= seq
= GET_NEXT_SEQ(seq
);
1630 if (unlikely(skb_get_queue_mapping(skb
) != queue
)) {
1631 #ifdef AR9170_TXAGG_DEBUG
1632 printk(KERN_DEBUG
"%s: tid:%d(q:%d) queue:%d "
1633 "!match.\n", wiphy_name(ar
->hw
->wiphy
),
1635 TID_TO_WME_AC(tid_info
->tid
),
1636 skb_get_queue_mapping(skb
));
1637 #endif /* AR9170_TXAGG_DEBUG */
1638 dev_kfree_skb_any(skb
);
1642 if (unlikely(first
== skb
)) {
1643 ar9170_tx_prepare_phy(ar
, skb
);
1644 __skb_queue_tail(&agg
, skb
);
1647 ar9170_tx_copy_phy(ar
, skb
, first
);
1648 __skb_queue_tail(&agg
, skb
);
1651 if (unlikely(skb_queue_len(&agg
) ==
1652 AR9170_NUM_TX_AGG_MAX
))
1656 if (skb_queue_empty(&tid_info
->queue
))
1657 tid_info
->active
= false;
1659 list_add_tail(&tid_info
->list
,
1660 &ar
->tx_ampdu_list
);
1662 spin_unlock_irqrestore(&tid_info
->queue
.lock
, f2
);
1664 if (unlikely(skb_queue_empty(&agg
))) {
1665 #ifdef AR9170_TXAGG_DEBUG
1666 printk(KERN_DEBUG
"%s: queued empty list!\n",
1667 wiphy_name(ar
->hw
->wiphy
));
1668 #endif /* AR9170_TXAGG_DEBUG */
1673 * tell the FW/HW that this is the last frame,
1674 * that way it will wait for the immediate block ack.
1676 if (likely(skb_peek_tail(&agg
)))
1677 ar9170_tx_indicate_immba(ar
, skb_peek_tail(&agg
));
1679 #ifdef AR9170_TXAGG_DEBUG
1680 printk(KERN_DEBUG
"%s: generated A-MPDU looks like this:\n",
1681 wiphy_name(ar
->hw
->wiphy
));
1682 __ar9170_dump_txqueue(ar
, &agg
);
1683 #endif /* AR9170_TXAGG_DEBUG */
1685 spin_unlock_irqrestore(&ar
->tx_ampdu_list_lock
, flags
);
1687 spin_lock_irqsave(&ar
->tx_pending
[queue
].lock
, flags
);
1688 skb_queue_splice_tail_init(&agg
, &ar
->tx_pending
[queue
]);
1689 spin_unlock_irqrestore(&ar
->tx_pending
[queue
].lock
, flags
);
1692 spin_lock_irqsave(&ar
->tx_ampdu_list_lock
, flags
);
1696 spin_unlock_irqrestore(&ar
->tx_ampdu_list_lock
, flags
);
1697 __skb_queue_purge(&agg
);
1702 static void ar9170_tx(struct ar9170
*ar
)
1704 struct sk_buff
*skb
;
1705 unsigned long flags
;
1706 struct ieee80211_tx_info
*info
;
1707 struct ar9170_tx_info
*arinfo
;
1708 unsigned int i
, frames
, frames_failed
, remaining_space
;
1710 bool schedule_garbagecollector
= false;
1712 BUILD_BUG_ON(sizeof(*arinfo
) > sizeof(info
->rate_driver_data
));
1714 if (unlikely(!IS_STARTED(ar
)))
1717 remaining_space
= AR9170_TX_MAX_PENDING
;
1719 for (i
= 0; i
< __AR9170_NUM_TXQ
; i
++) {
1720 spin_lock_irqsave(&ar
->tx_stats_lock
, flags
);
1721 if (ar
->tx_stats
[i
].len
>= ar
->tx_stats
[i
].limit
) {
1722 #ifdef AR9170_QUEUE_DEBUG
1723 printk(KERN_DEBUG
"%s: queue %d full\n",
1724 wiphy_name(ar
->hw
->wiphy
), i
);
1726 printk(KERN_DEBUG
"%s: stuck frames: ===> \n",
1727 wiphy_name(ar
->hw
->wiphy
));
1728 ar9170_dump_txqueue(ar
, &ar
->tx_pending
[i
]);
1729 ar9170_dump_txqueue(ar
, &ar
->tx_status
[i
]);
1730 #endif /* AR9170_QUEUE_DEBUG */
1732 #ifdef AR9170_QUEUE_STOP_DEBUG
1733 printk(KERN_DEBUG
"%s: stop queue %d\n",
1734 wiphy_name(ar
->hw
->wiphy
), i
);
1735 __ar9170_dump_txstats(ar
);
1736 #endif /* AR9170_QUEUE_STOP_DEBUG */
1737 ieee80211_stop_queue(ar
->hw
, i
);
1738 spin_unlock_irqrestore(&ar
->tx_stats_lock
, flags
);
1742 frames
= min(ar
->tx_stats
[i
].limit
- ar
->tx_stats
[i
].len
,
1743 skb_queue_len(&ar
->tx_pending
[i
]));
1745 if (remaining_space
< frames
) {
1746 #ifdef AR9170_QUEUE_DEBUG
1747 printk(KERN_DEBUG
"%s: tx quota reached queue:%d, "
1748 "remaining slots:%d, needed:%d\n",
1749 wiphy_name(ar
->hw
->wiphy
), i
, remaining_space
,
1751 #endif /* AR9170_QUEUE_DEBUG */
1752 frames
= remaining_space
;
1755 ar
->tx_stats
[i
].len
+= frames
;
1756 ar
->tx_stats
[i
].count
+= frames
;
1757 spin_unlock_irqrestore(&ar
->tx_stats_lock
, flags
);
1764 skb
= skb_dequeue(&ar
->tx_pending
[i
]);
1765 if (unlikely(!skb
)) {
1766 frames_failed
+= frames
;
1771 info
= IEEE80211_SKB_CB(skb
);
1772 arinfo
= (void *) info
->rate_driver_data
;
1774 /* TODO: cancel stuck frames */
1775 arinfo
->timeout
= jiffies
+
1776 msecs_to_jiffies(AR9170_TX_TIMEOUT
);
1778 if (arinfo
->flags
== AR9170_TX_FLAG_BLOCK_ACK
)
1779 ar
->tx_ampdu_pending
++;
1781 #ifdef AR9170_QUEUE_DEBUG
1782 printk(KERN_DEBUG
"%s: send frame q:%d =>\n",
1783 wiphy_name(ar
->hw
->wiphy
), i
);
1784 ar9170_print_txheader(ar
, skb
);
1785 #endif /* AR9170_QUEUE_DEBUG */
1787 err
= ar
->tx(ar
, skb
);
1788 if (unlikely(err
)) {
1789 if (arinfo
->flags
== AR9170_TX_FLAG_BLOCK_ACK
)
1790 ar
->tx_ampdu_pending
--;
1793 dev_kfree_skb_any(skb
);
1796 schedule_garbagecollector
= true;
1802 #ifdef AR9170_QUEUE_DEBUG
1803 printk(KERN_DEBUG
"%s: ar9170_tx report for queue %d\n",
1804 wiphy_name(ar
->hw
->wiphy
), i
);
1806 printk(KERN_DEBUG
"%s: unprocessed pending frames left:\n",
1807 wiphy_name(ar
->hw
->wiphy
));
1808 ar9170_dump_txqueue(ar
, &ar
->tx_pending
[i
]);
1809 #endif /* AR9170_QUEUE_DEBUG */
1811 if (unlikely(frames_failed
)) {
1812 #ifdef AR9170_QUEUE_DEBUG
1813 printk(KERN_DEBUG
"%s: frames failed %d =>\n",
1814 wiphy_name(ar
->hw
->wiphy
), frames_failed
);
1815 #endif /* AR9170_QUEUE_DEBUG */
1817 spin_lock_irqsave(&ar
->tx_stats_lock
, flags
);
1818 ar
->tx_stats
[i
].len
-= frames_failed
;
1819 ar
->tx_stats
[i
].count
-= frames_failed
;
1820 #ifdef AR9170_QUEUE_STOP_DEBUG
1821 printk(KERN_DEBUG
"%s: wake queue %d\n",
1822 wiphy_name(ar
->hw
->wiphy
), i
);
1823 __ar9170_dump_txstats(ar
);
1824 #endif /* AR9170_QUEUE_STOP_DEBUG */
1825 ieee80211_wake_queue(ar
->hw
, i
);
1826 spin_unlock_irqrestore(&ar
->tx_stats_lock
, flags
);
1830 if (!schedule_garbagecollector
)
1833 ieee80211_queue_delayed_work(ar
->hw
,
1835 msecs_to_jiffies(AR9170_JANITOR_DELAY
));
1838 static bool ar9170_tx_ampdu_queue(struct ar9170
*ar
, struct sk_buff
*skb
)
1840 struct ieee80211_tx_info
*txinfo
;
1841 struct ar9170_sta_info
*sta_info
;
1842 struct ar9170_sta_tid
*agg
;
1843 struct sk_buff
*iter
;
1844 unsigned long flags
, f2
;
1847 bool run
= false, queue
= false;
1849 tid
= ar9170_get_tid(skb
);
1850 seq
= ar9170_get_seq(skb
);
1851 txinfo
= IEEE80211_SKB_CB(skb
);
1852 sta_info
= (void *) txinfo
->control
.sta
->drv_priv
;
1853 agg
= &sta_info
->agg
[tid
];
1854 max
= sta_info
->ampdu_max_len
;
1856 spin_lock_irqsave(&ar
->tx_ampdu_list_lock
, flags
);
1858 if (unlikely(agg
->state
!= AR9170_TID_STATE_COMPLETE
)) {
1859 #ifdef AR9170_TXAGG_DEBUG
1860 printk(KERN_DEBUG
"%s: BlockACK session not fully initialized "
1861 "for ESS:%pM tid:%d state:%d.\n",
1862 wiphy_name(ar
->hw
->wiphy
), agg
->addr
, agg
->tid
,
1864 #endif /* AR9170_TXAGG_DEBUG */
1874 /* check if seq is within the BA window */
1875 if (unlikely(!BAW_WITHIN(agg
->ssn
, max
, seq
))) {
1876 #ifdef AR9170_TXAGG_DEBUG
1877 printk(KERN_DEBUG
"%s: frame with tid:%d seq:%d does not "
1878 "fit into BA window (%d - %d)\n",
1879 wiphy_name(ar
->hw
->wiphy
), tid
, seq
, agg
->ssn
,
1880 (agg
->ssn
+ max
) & 0xfff);
1881 #endif /* AR9170_TXAGG_DEBUG */
1885 spin_lock_irqsave(&agg
->queue
.lock
, f2
);
1887 skb_queue_reverse_walk(&agg
->queue
, iter
) {
1888 qseq
= ar9170_get_seq(iter
);
1890 if (GET_NEXT_SEQ(qseq
) == seq
) {
1891 __skb_queue_after(&agg
->queue
, iter
, skb
);
1896 __skb_queue_head(&agg
->queue
, skb
);
1899 spin_unlock_irqrestore(&agg
->queue
.lock
, f2
);
1901 #ifdef AR9170_TXAGG_DEBUG
1902 printk(KERN_DEBUG
"%s: new aggregate %p queued.\n",
1903 wiphy_name(ar
->hw
->wiphy
), skb
);
1904 __ar9170_dump_txqueue(ar
, &agg
->queue
);
1905 #endif /* AR9170_TXAGG_DEBUG */
1907 if (skb_queue_len(&agg
->queue
) >= AR9170_NUM_TX_AGG_MAX
)
1911 list_add_tail(&agg
->list
, &ar
->tx_ampdu_list
);
1913 spin_unlock_irqrestore(&ar
->tx_ampdu_list_lock
, flags
);
1917 spin_unlock_irqrestore(&ar
->tx_ampdu_list_lock
, flags
);
1918 dev_kfree_skb_irq(skb
);
1922 int ar9170_op_tx(struct ieee80211_hw
*hw
, struct sk_buff
*skb
)
1924 struct ar9170
*ar
= hw
->priv
;
1925 struct ieee80211_tx_info
*info
;
1927 if (unlikely(!IS_STARTED(ar
)))
1930 if (unlikely(ar9170_tx_prepare(ar
, skb
)))
1933 info
= IEEE80211_SKB_CB(skb
);
1934 if (info
->flags
& IEEE80211_TX_CTL_AMPDU
) {
1935 bool run
= ar9170_tx_ampdu_queue(ar
, skb
);
1937 if (run
|| !ar
->tx_ampdu_pending
)
1938 ar9170_tx_ampdu(ar
);
1940 unsigned int queue
= skb_get_queue_mapping(skb
);
1942 ar9170_tx_prepare_phy(ar
, skb
);
1943 skb_queue_tail(&ar
->tx_pending
[queue
], skb
);
1947 return NETDEV_TX_OK
;
1950 dev_kfree_skb_any(skb
);
1951 return NETDEV_TX_OK
;
1954 static int ar9170_op_add_interface(struct ieee80211_hw
*hw
,
1955 struct ieee80211_if_init_conf
*conf
)
1957 struct ar9170
*ar
= hw
->priv
;
1960 mutex_lock(&ar
->mutex
);
1967 ar
->vif
= conf
->vif
;
1968 memcpy(ar
->mac_addr
, conf
->mac_addr
, ETH_ALEN
);
1970 if (modparam_nohwcrypt
|| (ar
->vif
->type
!= NL80211_IFTYPE_STATION
)) {
1971 ar
->rx_software_decryption
= true;
1972 ar
->disable_offload
= true;
1976 ar
->want_filter
= AR9170_MAC_REG_FTF_DEFAULTS
;
1977 err
= ar9170_update_frame_filter(ar
);
1981 err
= ar9170_set_operating_mode(ar
);
1984 mutex_unlock(&ar
->mutex
);
1988 static void ar9170_op_remove_interface(struct ieee80211_hw
*hw
,
1989 struct ieee80211_if_init_conf
*conf
)
1991 struct ar9170
*ar
= hw
->priv
;
1993 mutex_lock(&ar
->mutex
);
1995 ar
->want_filter
= 0;
1996 ar9170_update_frame_filter(ar
);
1997 ar9170_set_beacon_timers(ar
);
1998 dev_kfree_skb(ar
->beacon
);
2000 ar
->sniffer_enabled
= false;
2001 ar
->rx_software_decryption
= false;
2002 ar9170_set_operating_mode(ar
);
2003 mutex_unlock(&ar
->mutex
);
2006 static int ar9170_op_config(struct ieee80211_hw
*hw
, u32 changed
)
2008 struct ar9170
*ar
= hw
->priv
;
2011 mutex_lock(&ar
->mutex
);
2013 if (changed
& IEEE80211_CONF_CHANGE_LISTEN_INTERVAL
) {
2018 if (changed
& IEEE80211_CONF_CHANGE_PS
) {
2023 if (changed
& IEEE80211_CONF_CHANGE_POWER
) {
2028 if (changed
& IEEE80211_CONF_CHANGE_RETRY_LIMITS
) {
2030 * is it long_frame_max_tx_count or short_frame_max_tx_count?
2033 err
= ar9170_set_hwretry_limit(ar
,
2034 ar
->hw
->conf
.long_frame_max_tx_count
);
2039 if (changed
& BSS_CHANGED_BEACON_INT
) {
2040 err
= ar9170_set_beacon_timers(ar
);
2045 if (changed
& IEEE80211_CONF_CHANGE_CHANNEL
) {
2047 /* adjust slot time for 5 GHz */
2048 err
= ar9170_set_slot_time(ar
);
2052 err
= ar9170_set_dyn_sifs_ack(ar
);
2056 err
= ar9170_set_channel(ar
, hw
->conf
.channel
,
2058 nl80211_to_ar9170(hw
->conf
.channel_type
));
2064 mutex_unlock(&ar
->mutex
);
2068 static void ar9170_set_filters(struct work_struct
*work
)
2070 struct ar9170
*ar
= container_of(work
, struct ar9170
,
2071 filter_config_work
);
2074 if (unlikely(!IS_STARTED(ar
)))
2077 mutex_lock(&ar
->mutex
);
2078 if (test_and_clear_bit(AR9170_FILTER_CHANGED_MODE
,
2079 &ar
->filter_changed
)) {
2080 err
= ar9170_set_operating_mode(ar
);
2085 if (test_and_clear_bit(AR9170_FILTER_CHANGED_MULTICAST
,
2086 &ar
->filter_changed
)) {
2087 err
= ar9170_update_multicast(ar
);
2092 if (test_and_clear_bit(AR9170_FILTER_CHANGED_FRAMEFILTER
,
2093 &ar
->filter_changed
)) {
2094 err
= ar9170_update_frame_filter(ar
);
2100 mutex_unlock(&ar
->mutex
);
2103 static u64
ar9170_op_prepare_multicast(struct ieee80211_hw
*hw
, int mc_count
,
2104 struct dev_addr_list
*mclist
)
2109 /* always get broadcast frames */
2110 mchash
= 1ULL << (0xff >> 2);
2112 for (i
= 0; i
< mc_count
; i
++) {
2113 if (WARN_ON(!mclist
))
2115 mchash
|= 1ULL << (mclist
->dmi_addr
[5] >> 2);
2116 mclist
= mclist
->next
;
2122 static void ar9170_op_configure_filter(struct ieee80211_hw
*hw
,
2123 unsigned int changed_flags
,
2124 unsigned int *new_flags
,
2127 struct ar9170
*ar
= hw
->priv
;
2129 /* mask supported flags */
2130 *new_flags
&= FIF_ALLMULTI
| FIF_CONTROL
| FIF_BCN_PRBRESP_PROMISC
|
2131 FIF_PROMISC_IN_BSS
| FIF_FCSFAIL
| FIF_PLCPFAIL
;
2132 ar
->filter_state
= *new_flags
;
2134 * We can support more by setting the sniffer bit and
2135 * then checking the error flags, later.
2138 if (changed_flags
& FIF_ALLMULTI
&& *new_flags
& FIF_ALLMULTI
)
2141 if (multicast
!= ar
->want_mc_hash
) {
2142 ar
->want_mc_hash
= multicast
;
2143 set_bit(AR9170_FILTER_CHANGED_MULTICAST
, &ar
->filter_changed
);
2146 if (changed_flags
& FIF_CONTROL
) {
2147 u32 filter
= AR9170_MAC_REG_FTF_PSPOLL
|
2148 AR9170_MAC_REG_FTF_RTS
|
2149 AR9170_MAC_REG_FTF_CTS
|
2150 AR9170_MAC_REG_FTF_ACK
|
2151 AR9170_MAC_REG_FTF_CFE
|
2152 AR9170_MAC_REG_FTF_CFE_ACK
;
2154 if (*new_flags
& FIF_CONTROL
)
2155 ar
->want_filter
= ar
->cur_filter
| filter
;
2157 ar
->want_filter
= ar
->cur_filter
& ~filter
;
2159 set_bit(AR9170_FILTER_CHANGED_FRAMEFILTER
,
2160 &ar
->filter_changed
);
2163 if (changed_flags
& FIF_PROMISC_IN_BSS
) {
2164 ar
->sniffer_enabled
= ((*new_flags
) & FIF_PROMISC_IN_BSS
) != 0;
2165 set_bit(AR9170_FILTER_CHANGED_MODE
,
2166 &ar
->filter_changed
);
2169 if (likely(IS_STARTED(ar
)))
2170 ieee80211_queue_work(ar
->hw
, &ar
->filter_config_work
);
2173 static void ar9170_op_bss_info_changed(struct ieee80211_hw
*hw
,
2174 struct ieee80211_vif
*vif
,
2175 struct ieee80211_bss_conf
*bss_conf
,
2178 struct ar9170
*ar
= hw
->priv
;
2181 mutex_lock(&ar
->mutex
);
2183 if (changed
& BSS_CHANGED_BSSID
) {
2184 memcpy(ar
->bssid
, bss_conf
->bssid
, ETH_ALEN
);
2185 err
= ar9170_set_operating_mode(ar
);
2190 if (changed
& (BSS_CHANGED_BEACON
| BSS_CHANGED_BEACON_ENABLED
)) {
2191 err
= ar9170_update_beacon(ar
);
2195 err
= ar9170_set_beacon_timers(ar
);
2200 if (changed
& BSS_CHANGED_ASSOC
) {
2201 #ifndef CONFIG_AR9170_LEDS
2202 /* enable assoc LED. */
2203 err
= ar9170_set_leds_state(ar
, bss_conf
->assoc
? 2 : 0);
2204 #endif /* CONFIG_AR9170_LEDS */
2207 if (changed
& BSS_CHANGED_BEACON_INT
) {
2208 err
= ar9170_set_beacon_timers(ar
);
2213 if (changed
& BSS_CHANGED_HT
) {
2218 if (changed
& BSS_CHANGED_ERP_SLOT
) {
2219 err
= ar9170_set_slot_time(ar
);
2224 if (changed
& BSS_CHANGED_BASIC_RATES
) {
2225 err
= ar9170_set_basic_rates(ar
);
2231 mutex_unlock(&ar
->mutex
);
2234 static u64
ar9170_op_get_tsf(struct ieee80211_hw
*hw
)
2236 struct ar9170
*ar
= hw
->priv
;
2242 mutex_lock(&ar
->mutex
);
2243 err
= ar9170_read_reg(ar
, AR9170_MAC_REG_TSF_L
, &tsf_low
);
2245 err
= ar9170_read_reg(ar
, AR9170_MAC_REG_TSF_H
, &tsf_high
);
2246 mutex_unlock(&ar
->mutex
);
2252 tsf
= (tsf
<< 32) | tsf_low
;
2256 static int ar9170_set_key(struct ieee80211_hw
*hw
, enum set_key_cmd cmd
,
2257 struct ieee80211_vif
*vif
, struct ieee80211_sta
*sta
,
2258 struct ieee80211_key_conf
*key
)
2260 struct ar9170
*ar
= hw
->priv
;
2264 if ((!ar
->vif
) || (ar
->disable_offload
))
2269 if (key
->keylen
== WLAN_KEY_LEN_WEP40
)
2270 ktype
= AR9170_ENC_ALG_WEP64
;
2272 ktype
= AR9170_ENC_ALG_WEP128
;
2275 ktype
= AR9170_ENC_ALG_TKIP
;
2278 ktype
= AR9170_ENC_ALG_AESCCMP
;
2284 mutex_lock(&ar
->mutex
);
2285 if (cmd
== SET_KEY
) {
2286 if (unlikely(!IS_STARTED(ar
))) {
2291 /* group keys need all-zeroes address */
2292 if (!(key
->flags
& IEEE80211_KEY_FLAG_PAIRWISE
))
2295 if (key
->flags
& IEEE80211_KEY_FLAG_PAIRWISE
) {
2296 for (i
= 0; i
< 64; i
++)
2297 if (!(ar
->usedkeys
& BIT(i
)))
2300 ar
->rx_software_decryption
= true;
2301 ar9170_set_operating_mode(ar
);
2306 i
= 64 + key
->keyidx
;
2309 key
->hw_key_idx
= i
;
2311 err
= ar9170_upload_key(ar
, i
, sta
? sta
->addr
: NULL
, ktype
, 0,
2312 key
->key
, min_t(u8
, 16, key
->keylen
));
2316 if (key
->alg
== ALG_TKIP
) {
2317 err
= ar9170_upload_key(ar
, i
, sta
? sta
->addr
: NULL
,
2318 ktype
, 1, key
->key
+ 16, 16);
2323 * hardware is not capable generating the MMIC
2324 * for fragmented frames!
2326 key
->flags
|= IEEE80211_KEY_FLAG_GENERATE_MMIC
;
2330 ar
->usedkeys
|= BIT(i
);
2332 key
->flags
|= IEEE80211_KEY_FLAG_GENERATE_IV
;
2334 if (unlikely(!IS_STARTED(ar
))) {
2335 /* The device is gone... together with the key ;-) */
2340 err
= ar9170_disable_key(ar
, key
->hw_key_idx
);
2344 if (key
->hw_key_idx
< 64) {
2345 ar
->usedkeys
&= ~BIT(key
->hw_key_idx
);
2347 err
= ar9170_upload_key(ar
, key
->hw_key_idx
, NULL
,
2348 AR9170_ENC_ALG_NONE
, 0,
2353 if (key
->alg
== ALG_TKIP
) {
2354 err
= ar9170_upload_key(ar
, key
->hw_key_idx
,
2356 AR9170_ENC_ALG_NONE
, 1,
2365 ar9170_regwrite_begin(ar
);
2366 ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_L
, ar
->usedkeys
);
2367 ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_H
, ar
->usedkeys
>> 32);
2368 ar9170_regwrite_finish();
2369 err
= ar9170_regwrite_result();
2372 mutex_unlock(&ar
->mutex
);
2377 static void ar9170_sta_notify(struct ieee80211_hw
*hw
,
2378 struct ieee80211_vif
*vif
,
2379 enum sta_notify_cmd cmd
,
2380 struct ieee80211_sta
*sta
)
2382 struct ar9170
*ar
= hw
->priv
;
2383 struct ar9170_sta_info
*sta_info
= (void *) sta
->drv_priv
;
2387 case STA_NOTIFY_ADD
:
2388 memset(sta_info
, 0, sizeof(*sta_info
));
2390 if (!sta
->ht_cap
.ht_supported
)
2393 if (sta
->ht_cap
.ampdu_density
> ar
->global_ampdu_density
)
2394 ar
->global_ampdu_density
= sta
->ht_cap
.ampdu_density
;
2396 if (sta
->ht_cap
.ampdu_factor
< ar
->global_ampdu_factor
)
2397 ar
->global_ampdu_factor
= sta
->ht_cap
.ampdu_factor
;
2399 for (i
= 0; i
< AR9170_NUM_TID
; i
++) {
2400 sta_info
->agg
[i
].state
= AR9170_TID_STATE_SHUTDOWN
;
2401 sta_info
->agg
[i
].active
= false;
2402 sta_info
->agg
[i
].ssn
= 0;
2403 sta_info
->agg
[i
].retry
= 0;
2404 sta_info
->agg
[i
].tid
= i
;
2405 INIT_LIST_HEAD(&sta_info
->agg
[i
].list
);
2406 skb_queue_head_init(&sta_info
->agg
[i
].queue
);
2409 sta_info
->ampdu_max_len
= 1 << (3 + sta
->ht_cap
.ampdu_factor
);
2412 case STA_NOTIFY_REMOVE
:
2413 if (!sta
->ht_cap
.ht_supported
)
2416 for (i
= 0; i
< AR9170_NUM_TID
; i
++) {
2417 sta_info
->agg
[i
].state
= AR9170_TID_STATE_INVALID
;
2418 skb_queue_purge(&sta_info
->agg
[i
].queue
);
2427 if (IS_STARTED(ar
) && ar
->filter_changed
)
2428 ieee80211_queue_work(ar
->hw
, &ar
->filter_config_work
);
2431 static int ar9170_get_stats(struct ieee80211_hw
*hw
,
2432 struct ieee80211_low_level_stats
*stats
)
2434 struct ar9170
*ar
= hw
->priv
;
2438 mutex_lock(&ar
->mutex
);
2439 err
= ar9170_read_reg(ar
, AR9170_MAC_REG_TX_RETRY
, &val
);
2440 ar
->stats
.dot11ACKFailureCount
+= val
;
2442 memcpy(stats
, &ar
->stats
, sizeof(*stats
));
2443 mutex_unlock(&ar
->mutex
);
2448 static int ar9170_get_tx_stats(struct ieee80211_hw
*hw
,
2449 struct ieee80211_tx_queue_stats
*tx_stats
)
2451 struct ar9170
*ar
= hw
->priv
;
2453 spin_lock_bh(&ar
->tx_stats_lock
);
2454 memcpy(tx_stats
, ar
->tx_stats
, sizeof(tx_stats
[0]) * hw
->queues
);
2455 spin_unlock_bh(&ar
->tx_stats_lock
);
2460 static int ar9170_conf_tx(struct ieee80211_hw
*hw
, u16 queue
,
2461 const struct ieee80211_tx_queue_params
*param
)
2463 struct ar9170
*ar
= hw
->priv
;
2466 mutex_lock(&ar
->mutex
);
2467 if (queue
< __AR9170_NUM_TXQ
) {
2468 memcpy(&ar
->edcf
[ar9170_qos_hwmap
[queue
]],
2469 param
, sizeof(*param
));
2471 ret
= ar9170_set_qos(ar
);
2476 mutex_unlock(&ar
->mutex
);
2480 static int ar9170_ampdu_action(struct ieee80211_hw
*hw
,
2481 enum ieee80211_ampdu_mlme_action action
,
2482 struct ieee80211_sta
*sta
, u16 tid
, u16
*ssn
)
2484 struct ar9170
*ar
= hw
->priv
;
2485 struct ar9170_sta_info
*sta_info
= (void *) sta
->drv_priv
;
2486 struct ar9170_sta_tid
*tid_info
= &sta_info
->agg
[tid
];
2487 unsigned long flags
;
2493 case IEEE80211_AMPDU_TX_START
:
2494 spin_lock_irqsave(&ar
->tx_ampdu_list_lock
, flags
);
2495 if (tid_info
->state
!= AR9170_TID_STATE_SHUTDOWN
||
2496 !list_empty(&tid_info
->list
)) {
2497 spin_unlock_irqrestore(&ar
->tx_ampdu_list_lock
, flags
);
2498 #ifdef AR9170_TXAGG_DEBUG
2499 printk(KERN_INFO
"%s: A-MPDU [ESS:[%pM] tid:[%d]] "
2500 "is in a very bad state!\n",
2501 wiphy_name(hw
->wiphy
), sta
->addr
, tid
);
2502 #endif /* AR9170_TXAGG_DEBUG */
2506 *ssn
= tid_info
->ssn
;
2507 tid_info
->state
= AR9170_TID_STATE_PROGRESS
;
2508 tid_info
->active
= false;
2509 spin_unlock_irqrestore(&ar
->tx_ampdu_list_lock
, flags
);
2510 ieee80211_start_tx_ba_cb_irqsafe(hw
, sta
->addr
, tid
);
2513 case IEEE80211_AMPDU_TX_STOP
:
2514 spin_lock_irqsave(&ar
->tx_ampdu_list_lock
, flags
);
2515 tid_info
->state
= AR9170_TID_STATE_SHUTDOWN
;
2516 list_del_init(&tid_info
->list
);
2517 tid_info
->active
= false;
2518 skb_queue_purge(&tid_info
->queue
);
2519 spin_unlock_irqrestore(&ar
->tx_ampdu_list_lock
, flags
);
2520 ieee80211_stop_tx_ba_cb_irqsafe(hw
, sta
->addr
, tid
);
2523 case IEEE80211_AMPDU_TX_OPERATIONAL
:
2524 #ifdef AR9170_TXAGG_DEBUG
2525 printk(KERN_INFO
"%s: A-MPDU for %pM [tid:%d] Operational.\n",
2526 wiphy_name(hw
->wiphy
), sta
->addr
, tid
);
2527 #endif /* AR9170_TXAGG_DEBUG */
2528 spin_lock_irqsave(&ar
->tx_ampdu_list_lock
, flags
);
2529 sta_info
->agg
[tid
].state
= AR9170_TID_STATE_COMPLETE
;
2530 spin_unlock_irqrestore(&ar
->tx_ampdu_list_lock
, flags
);
2533 case IEEE80211_AMPDU_RX_START
:
2534 case IEEE80211_AMPDU_RX_STOP
:
2535 /* Handled by firmware */
2545 static const struct ieee80211_ops ar9170_ops
= {
2546 .start
= ar9170_op_start
,
2547 .stop
= ar9170_op_stop
,
2549 .add_interface
= ar9170_op_add_interface
,
2550 .remove_interface
= ar9170_op_remove_interface
,
2551 .config
= ar9170_op_config
,
2552 .prepare_multicast
= ar9170_op_prepare_multicast
,
2553 .configure_filter
= ar9170_op_configure_filter
,
2554 .conf_tx
= ar9170_conf_tx
,
2555 .bss_info_changed
= ar9170_op_bss_info_changed
,
2556 .get_tsf
= ar9170_op_get_tsf
,
2557 .set_key
= ar9170_set_key
,
2558 .sta_notify
= ar9170_sta_notify
,
2559 .get_stats
= ar9170_get_stats
,
2560 .get_tx_stats
= ar9170_get_tx_stats
,
2561 .ampdu_action
= ar9170_ampdu_action
,
2564 void *ar9170_alloc(size_t priv_size
)
2566 struct ieee80211_hw
*hw
;
2568 struct sk_buff
*skb
;
2572 * this buffer is used for rx stream reconstruction.
2573 * Under heavy load this device (or the transport layer?)
2574 * tends to split the streams into seperate rx descriptors.
2577 skb
= __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE
, GFP_KERNEL
);
2581 hw
= ieee80211_alloc_hw(priv_size
, &ar9170_ops
);
2587 ar
->rx_failover
= skb
;
2589 mutex_init(&ar
->mutex
);
2590 spin_lock_init(&ar
->cmdlock
);
2591 spin_lock_init(&ar
->tx_stats_lock
);
2592 spin_lock_init(&ar
->tx_ampdu_list_lock
);
2593 skb_queue_head_init(&ar
->tx_status_ampdu
);
2594 for (i
= 0; i
< __AR9170_NUM_TXQ
; i
++) {
2595 skb_queue_head_init(&ar
->tx_status
[i
]);
2596 skb_queue_head_init(&ar
->tx_pending
[i
]);
2598 ar9170_rx_reset_rx_mpdu(ar
);
2599 INIT_WORK(&ar
->filter_config_work
, ar9170_set_filters
);
2600 INIT_WORK(&ar
->beacon_work
, ar9170_new_beacon
);
2601 INIT_DELAYED_WORK(&ar
->tx_janitor
, ar9170_tx_janitor
);
2602 INIT_LIST_HEAD(&ar
->tx_ampdu_list
);
2604 /* all hw supports 2.4 GHz, so set channel to 1 by default */
2605 ar
->channel
= &ar9170_2ghz_chantable
[0];
2607 /* first part of wiphy init */
2608 ar
->hw
->wiphy
->interface_modes
= BIT(NL80211_IFTYPE_STATION
) |
2609 BIT(NL80211_IFTYPE_WDS
) |
2610 BIT(NL80211_IFTYPE_ADHOC
);
2611 ar
->hw
->flags
|= IEEE80211_HW_RX_INCLUDES_FCS
|
2612 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING
|
2613 IEEE80211_HW_SIGNAL_DBM
|
2614 IEEE80211_HW_NOISE_DBM
;
2617 ar
->hw
->flags
|= IEEE80211_HW_AMPDU_AGGREGATION
;
2619 ar9170_band_2GHz
.ht_cap
.ht_supported
= false;
2620 ar9170_band_5GHz
.ht_cap
.ht_supported
= false;
2623 ar
->hw
->queues
= __AR9170_NUM_TXQ
;
2624 ar
->hw
->extra_tx_headroom
= 8;
2625 ar
->hw
->sta_data_size
= sizeof(struct ar9170_sta_info
);
2627 ar
->hw
->max_rates
= 1;
2628 ar
->hw
->max_rate_tries
= 3;
2630 for (i
= 0; i
< ARRAY_SIZE(ar
->noise
); i
++)
2631 ar
->noise
[i
] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
2637 return ERR_PTR(-ENOMEM
);
2640 static int ar9170_read_eeprom(struct ar9170
*ar
)
2642 #define RW 8 /* number of words to read at once */
2643 #define RB (sizeof(u32) * RW)
2644 struct ath_regulatory
*regulatory
= &ar
->common
.regulatory
;
2645 u8
*eeprom
= (void *)&ar
->eeprom
;
2646 u8
*addr
= ar
->eeprom
.mac_address
;
2648 unsigned int rx_streams
, tx_streams
, tx_params
= 0;
2649 int i
, j
, err
, bands
= 0;
2651 BUILD_BUG_ON(sizeof(ar
->eeprom
) & 3);
2653 BUILD_BUG_ON(RB
> AR9170_MAX_CMD_LEN
- 4);
2655 /* don't want to handle trailing remains */
2656 BUILD_BUG_ON(sizeof(ar
->eeprom
) % RB
);
2659 for (i
= 0; i
< sizeof(ar
->eeprom
)/RB
; i
++) {
2660 for (j
= 0; j
< RW
; j
++)
2661 offsets
[j
] = cpu_to_le32(AR9170_EEPROM_START
+
2664 err
= ar
->exec_cmd(ar
, AR9170_CMD_RREG
,
2665 RB
, (u8
*) &offsets
,
2666 RB
, eeprom
+ RB
* i
);
2674 if (ar
->eeprom
.length
== cpu_to_le16(0xFFFF))
2677 if (ar
->eeprom
.operating_flags
& AR9170_OPFLAG_2GHZ
) {
2678 ar
->hw
->wiphy
->bands
[IEEE80211_BAND_2GHZ
] = &ar9170_band_2GHz
;
2681 if (ar
->eeprom
.operating_flags
& AR9170_OPFLAG_5GHZ
) {
2682 ar
->hw
->wiphy
->bands
[IEEE80211_BAND_5GHZ
] = &ar9170_band_5GHz
;
2686 rx_streams
= hweight8(ar
->eeprom
.rx_mask
);
2687 tx_streams
= hweight8(ar
->eeprom
.tx_mask
);
2689 if (rx_streams
!= tx_streams
)
2690 tx_params
= IEEE80211_HT_MCS_TX_RX_DIFF
;
2692 if (tx_streams
>= 1 && tx_streams
<= IEEE80211_HT_MCS_TX_MAX_STREAMS
)
2693 tx_params
= (tx_streams
- 1) <<
2694 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT
;
2696 ar9170_band_2GHz
.ht_cap
.mcs
.tx_params
|= tx_params
;
2697 ar9170_band_5GHz
.ht_cap
.mcs
.tx_params
|= tx_params
;
2700 * I measured this, a bandswitch takes roughly
2701 * 135 ms and a frequency switch about 80.
2703 * FIXME: measure these values again once EEPROM settings
2704 * are used, that will influence them!
2707 ar
->hw
->channel_change_time
= 135 * 1000;
2709 ar
->hw
->channel_change_time
= 80 * 1000;
2711 regulatory
->current_rd
= le16_to_cpu(ar
->eeprom
.reg_domain
[0]);
2712 regulatory
->current_rd_ext
= le16_to_cpu(ar
->eeprom
.reg_domain
[1]);
2714 /* second part of wiphy init */
2715 SET_IEEE80211_PERM_ADDR(ar
->hw
, addr
);
2717 return bands
? 0 : -EINVAL
;
2720 static int ar9170_reg_notifier(struct wiphy
*wiphy
,
2721 struct regulatory_request
*request
)
2723 struct ieee80211_hw
*hw
= wiphy_to_ieee80211_hw(wiphy
);
2724 struct ar9170
*ar
= hw
->priv
;
2726 return ath_reg_notifier_apply(wiphy
, request
, &ar
->common
.regulatory
);
2729 int ar9170_register(struct ar9170
*ar
, struct device
*pdev
)
2731 struct ath_regulatory
*regulatory
= &ar
->common
.regulatory
;
2734 /* try to read EEPROM, init MAC addr */
2735 err
= ar9170_read_eeprom(ar
);
2739 err
= ath_regd_init(regulatory
, ar
->hw
->wiphy
,
2740 ar9170_reg_notifier
);
2744 err
= ieee80211_register_hw(ar
->hw
);
2748 if (!ath_is_world_regd(regulatory
))
2749 regulatory_hint(ar
->hw
->wiphy
, regulatory
->alpha2
);
2751 err
= ar9170_init_leds(ar
);
2755 #ifdef CONFIG_AR9170_LEDS
2756 err
= ar9170_register_leds(ar
);
2759 #endif /* CONFIG_AR9170_LEDS */
2761 dev_info(pdev
, "Atheros AR9170 is registered as '%s'\n",
2762 wiphy_name(ar
->hw
->wiphy
));
2767 ieee80211_unregister_hw(ar
->hw
);
2773 void ar9170_unregister(struct ar9170
*ar
)
2775 #ifdef CONFIG_AR9170_LEDS
2776 ar9170_unregister_leds(ar
);
2777 #endif /* CONFIG_AR9170_LEDS */
2779 kfree_skb(ar
->rx_failover
);
2780 ieee80211_unregister_hw(ar
->hw
);
2781 mutex_destroy(&ar
->mutex
);