3 * This file is part of wl1271
5 * Copyright (C) 2008-2010 Nokia Corporation
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
40 #include "wl12xx_80211.h"
54 #define WL1271_BOOT_RETRIES 3
56 #define WL1271_BOOT_RETRIES 3
58 static char *fwlog_param
;
59 static bool bug_on_recovery
;
60 static bool no_recovery
;
62 static void __wl1271_op_remove_interface(struct wl1271
*wl
,
63 struct ieee80211_vif
*vif
,
64 bool reset_tx_queues
);
65 static void wl1271_op_stop(struct ieee80211_hw
*hw
);
66 static void wl1271_free_ap_keys(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
);
68 static int wl12xx_set_authorized(struct wl1271
*wl
,
69 struct wl12xx_vif
*wlvif
)
73 if (WARN_ON(wlvif
->bss_type
!= BSS_TYPE_STA_BSS
))
76 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
79 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT
, &wlvif
->flags
))
82 ret
= wl12xx_cmd_set_peer_state(wl
, wlvif
->sta
.hlid
);
86 wl12xx_croc(wl
, wlvif
->role_id
);
88 wl1271_info("Association completed.");
92 static int wl1271_reg_notify(struct wiphy
*wiphy
,
93 struct regulatory_request
*request
)
95 struct ieee80211_supported_band
*band
;
96 struct ieee80211_channel
*ch
;
99 band
= wiphy
->bands
[IEEE80211_BAND_5GHZ
];
100 for (i
= 0; i
< band
->n_channels
; i
++) {
101 ch
= &band
->channels
[i
];
102 if (ch
->flags
& IEEE80211_CHAN_DISABLED
)
105 if (ch
->flags
& IEEE80211_CHAN_RADAR
)
106 ch
->flags
|= IEEE80211_CHAN_NO_IBSS
|
107 IEEE80211_CHAN_PASSIVE_SCAN
;
114 static int wl1271_set_rx_streaming(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
119 /* we should hold wl->mutex */
120 ret
= wl1271_acx_ps_rx_streaming(wl
, wlvif
, enable
);
125 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
);
127 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
);
133 * this function is being called when the rx_streaming interval
134 * has beed changed or rx_streaming should be disabled
136 int wl1271_recalc_rx_streaming(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
139 int period
= wl
->conf
.rx_streaming
.interval
;
141 /* don't reconfigure if rx_streaming is disabled */
142 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
))
145 /* reconfigure/disable according to new streaming_period */
147 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) &&
148 (wl
->conf
.rx_streaming
.always
||
149 test_bit(WL1271_FLAG_SOFT_GEMINI
, &wl
->flags
)))
150 ret
= wl1271_set_rx_streaming(wl
, wlvif
, true);
152 ret
= wl1271_set_rx_streaming(wl
, wlvif
, false);
153 /* don't cancel_work_sync since we might deadlock */
154 del_timer_sync(&wlvif
->rx_streaming_timer
);
160 static void wl1271_rx_streaming_enable_work(struct work_struct
*work
)
163 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
164 rx_streaming_enable_work
);
165 struct wl1271
*wl
= wlvif
->wl
;
167 mutex_lock(&wl
->mutex
);
169 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
) ||
170 !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) ||
171 (!wl
->conf
.rx_streaming
.always
&&
172 !test_bit(WL1271_FLAG_SOFT_GEMINI
, &wl
->flags
)))
175 if (!wl
->conf
.rx_streaming
.interval
)
178 ret
= wl1271_ps_elp_wakeup(wl
);
182 ret
= wl1271_set_rx_streaming(wl
, wlvif
, true);
186 /* stop it after some time of inactivity */
187 mod_timer(&wlvif
->rx_streaming_timer
,
188 jiffies
+ msecs_to_jiffies(wl
->conf
.rx_streaming
.duration
));
191 wl1271_ps_elp_sleep(wl
);
193 mutex_unlock(&wl
->mutex
);
196 static void wl1271_rx_streaming_disable_work(struct work_struct
*work
)
199 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
200 rx_streaming_disable_work
);
201 struct wl1271
*wl
= wlvif
->wl
;
203 mutex_lock(&wl
->mutex
);
205 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
))
208 ret
= wl1271_ps_elp_wakeup(wl
);
212 ret
= wl1271_set_rx_streaming(wl
, wlvif
, false);
217 wl1271_ps_elp_sleep(wl
);
219 mutex_unlock(&wl
->mutex
);
222 static void wl1271_rx_streaming_timer(unsigned long data
)
224 struct wl12xx_vif
*wlvif
= (struct wl12xx_vif
*)data
;
225 struct wl1271
*wl
= wlvif
->wl
;
226 ieee80211_queue_work(wl
->hw
, &wlvif
->rx_streaming_disable_work
);
229 /* wl->mutex must be taken */
230 void wl12xx_rearm_tx_watchdog_locked(struct wl1271
*wl
)
232 /* if the watchdog is not armed, don't do anything */
233 if (wl
->tx_allocated_blocks
== 0)
236 cancel_delayed_work(&wl
->tx_watchdog_work
);
237 ieee80211_queue_delayed_work(wl
->hw
, &wl
->tx_watchdog_work
,
238 msecs_to_jiffies(wl
->conf
.tx
.tx_watchdog_timeout
));
241 static void wl12xx_tx_watchdog_work(struct work_struct
*work
)
243 struct delayed_work
*dwork
;
246 dwork
= container_of(work
, struct delayed_work
, work
);
247 wl
= container_of(dwork
, struct wl1271
, tx_watchdog_work
);
249 mutex_lock(&wl
->mutex
);
251 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
254 /* Tx went out in the meantime - everything is ok */
255 if (unlikely(wl
->tx_allocated_blocks
== 0))
259 * if a ROC is in progress, we might not have any Tx for a long
260 * time (e.g. pending Tx on the non-ROC channels)
262 if (find_first_bit(wl
->roc_map
, WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
) {
263 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms due to ROC",
264 wl
->conf
.tx
.tx_watchdog_timeout
);
265 wl12xx_rearm_tx_watchdog_locked(wl
);
270 * if a scan is in progress, we might not have any Tx for a long
273 if (wl
->scan
.state
!= WL1271_SCAN_STATE_IDLE
) {
274 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms due to scan",
275 wl
->conf
.tx
.tx_watchdog_timeout
);
276 wl12xx_rearm_tx_watchdog_locked(wl
);
281 * AP might cache a frame for a long time for a sleeping station,
282 * so rearm the timer if there's an AP interface with stations. If
283 * Tx is genuinely stuck we will most hopefully discover it when all
284 * stations are removed due to inactivity.
286 if (wl
->active_sta_count
) {
287 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms. AP has "
289 wl
->conf
.tx
.tx_watchdog_timeout
,
290 wl
->active_sta_count
);
291 wl12xx_rearm_tx_watchdog_locked(wl
);
295 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
296 wl
->conf
.tx
.tx_watchdog_timeout
);
297 wl12xx_queue_recovery_work(wl
);
300 mutex_unlock(&wl
->mutex
);
303 static void wlcore_adjust_conf(struct wl1271
*wl
)
305 /* Adjust settings according to optional module parameters */
307 if (!strcmp(fwlog_param
, "continuous")) {
308 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_CONTINUOUS
;
309 } else if (!strcmp(fwlog_param
, "ondemand")) {
310 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_ON_DEMAND
;
311 } else if (!strcmp(fwlog_param
, "dbgpins")) {
312 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_CONTINUOUS
;
313 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_DBG_PINS
;
314 } else if (!strcmp(fwlog_param
, "disable")) {
315 wl
->conf
.fwlog
.mem_blocks
= 0;
316 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_NONE
;
318 wl1271_error("Unknown fwlog parameter %s", fwlog_param
);
323 static void wl12xx_irq_ps_regulate_link(struct wl1271
*wl
,
324 struct wl12xx_vif
*wlvif
,
327 bool fw_ps
, single_sta
;
329 fw_ps
= test_bit(hlid
, (unsigned long *)&wl
->ap_fw_ps_map
);
330 single_sta
= (wl
->active_sta_count
== 1);
333 * Wake up from high level PS if the STA is asleep with too little
334 * packets in FW or if the STA is awake.
336 if (!fw_ps
|| tx_pkts
< WL1271_PS_STA_MAX_PACKETS
)
337 wl12xx_ps_link_end(wl
, wlvif
, hlid
);
340 * Start high-level PS if the STA is asleep with enough blocks in FW.
341 * Make an exception if this is the only connected station. In this
342 * case FW-memory congestion is not a problem.
344 else if (!single_sta
&& fw_ps
&& tx_pkts
>= WL1271_PS_STA_MAX_PACKETS
)
345 wl12xx_ps_link_start(wl
, wlvif
, hlid
, true);
348 static void wl12xx_irq_update_links_status(struct wl1271
*wl
,
349 struct wl12xx_vif
*wlvif
,
350 struct wl_fw_status_2
*status
)
352 struct wl1271_link
*lnk
;
356 /* TODO: also use link_fast_bitmap here */
358 cur_fw_ps_map
= le32_to_cpu(status
->link_ps_bitmap
);
359 if (wl
->ap_fw_ps_map
!= cur_fw_ps_map
) {
360 wl1271_debug(DEBUG_PSM
,
361 "link ps prev 0x%x cur 0x%x changed 0x%x",
362 wl
->ap_fw_ps_map
, cur_fw_ps_map
,
363 wl
->ap_fw_ps_map
^ cur_fw_ps_map
);
365 wl
->ap_fw_ps_map
= cur_fw_ps_map
;
368 for_each_set_bit(hlid
, wlvif
->ap
.sta_hlid_map
, WL12XX_MAX_LINKS
) {
369 lnk
= &wl
->links
[hlid
];
370 cnt
= status
->counters
.tx_lnk_free_pkts
[hlid
] -
371 lnk
->prev_freed_pkts
;
373 lnk
->prev_freed_pkts
= status
->counters
.tx_lnk_free_pkts
[hlid
];
374 lnk
->allocated_pkts
-= cnt
;
376 wl12xx_irq_ps_regulate_link(wl
, wlvif
, hlid
,
377 lnk
->allocated_pkts
);
381 static int wlcore_fw_status(struct wl1271
*wl
,
382 struct wl_fw_status_1
*status_1
,
383 struct wl_fw_status_2
*status_2
)
385 struct wl12xx_vif
*wlvif
;
387 u32 old_tx_blk_count
= wl
->tx_blocks_available
;
388 int avail
, freed_blocks
;
393 status_len
= WLCORE_FW_STATUS_1_LEN(wl
->num_rx_desc
) +
394 sizeof(*status_2
) + wl
->fw_status_priv_len
;
396 ret
= wlcore_raw_read_data(wl
, REG_RAW_FW_STATUS_ADDR
, status_1
,
401 wl1271_debug(DEBUG_IRQ
, "intr: 0x%x (fw_rx_counter = %d, "
402 "drv_rx_counter = %d, tx_results_counter = %d)",
404 status_1
->fw_rx_counter
,
405 status_1
->drv_rx_counter
,
406 status_1
->tx_results_counter
);
408 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
409 /* prevent wrap-around in freed-packets counter */
410 wl
->tx_allocated_pkts
[i
] -=
411 (status_2
->counters
.tx_released_pkts
[i
] -
412 wl
->tx_pkts_freed
[i
]) & 0xff;
414 wl
->tx_pkts_freed
[i
] = status_2
->counters
.tx_released_pkts
[i
];
417 /* prevent wrap-around in total blocks counter */
418 if (likely(wl
->tx_blocks_freed
<=
419 le32_to_cpu(status_2
->total_released_blks
)))
420 freed_blocks
= le32_to_cpu(status_2
->total_released_blks
) -
423 freed_blocks
= 0x100000000LL
- wl
->tx_blocks_freed
+
424 le32_to_cpu(status_2
->total_released_blks
);
426 wl
->tx_blocks_freed
= le32_to_cpu(status_2
->total_released_blks
);
428 wl
->tx_allocated_blocks
-= freed_blocks
;
431 * If the FW freed some blocks:
432 * If we still have allocated blocks - re-arm the timer, Tx is
433 * not stuck. Otherwise, cancel the timer (no Tx currently).
436 if (wl
->tx_allocated_blocks
)
437 wl12xx_rearm_tx_watchdog_locked(wl
);
439 cancel_delayed_work(&wl
->tx_watchdog_work
);
442 avail
= le32_to_cpu(status_2
->tx_total
) - wl
->tx_allocated_blocks
;
445 * The FW might change the total number of TX memblocks before
446 * we get a notification about blocks being released. Thus, the
447 * available blocks calculation might yield a temporary result
448 * which is lower than the actual available blocks. Keeping in
449 * mind that only blocks that were allocated can be moved from
450 * TX to RX, tx_blocks_available should never decrease here.
452 wl
->tx_blocks_available
= max((int)wl
->tx_blocks_available
,
455 /* if more blocks are available now, tx work can be scheduled */
456 if (wl
->tx_blocks_available
> old_tx_blk_count
)
457 clear_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
);
459 /* for AP update num of allocated TX blocks per link and ps status */
460 wl12xx_for_each_wlvif_ap(wl
, wlvif
) {
461 wl12xx_irq_update_links_status(wl
, wlvif
, status_2
);
464 /* update the host-chipset time offset */
466 wl
->time_offset
= (timespec_to_ns(&ts
) >> 10) -
467 (s64
)le32_to_cpu(status_2
->fw_localtime
);
472 static void wl1271_flush_deferred_work(struct wl1271
*wl
)
476 /* Pass all received frames to the network stack */
477 while ((skb
= skb_dequeue(&wl
->deferred_rx_queue
)))
478 ieee80211_rx_ni(wl
->hw
, skb
);
480 /* Return sent skbs to the network stack */
481 while ((skb
= skb_dequeue(&wl
->deferred_tx_queue
)))
482 ieee80211_tx_status_ni(wl
->hw
, skb
);
485 static void wl1271_netstack_work(struct work_struct
*work
)
488 container_of(work
, struct wl1271
, netstack_work
);
491 wl1271_flush_deferred_work(wl
);
492 } while (skb_queue_len(&wl
->deferred_rx_queue
));
495 #define WL1271_IRQ_MAX_LOOPS 256
497 static irqreturn_t
wl1271_irq(int irq
, void *cookie
)
501 int loopcount
= WL1271_IRQ_MAX_LOOPS
;
502 struct wl1271
*wl
= (struct wl1271
*)cookie
;
504 unsigned int defer_count
;
507 /* TX might be handled here, avoid redundant work */
508 set_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
);
509 cancel_work_sync(&wl
->tx_work
);
512 * In case edge triggered interrupt must be used, we cannot iterate
513 * more than once without introducing race conditions with the hardirq.
515 if (wl
->platform_quirks
& WL12XX_PLATFORM_QUIRK_EDGE_IRQ
)
518 mutex_lock(&wl
->mutex
);
520 wl1271_debug(DEBUG_IRQ
, "IRQ work");
522 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
525 ret
= wl1271_ps_elp_wakeup(wl
);
529 while (!done
&& loopcount
--) {
531 * In order to avoid a race with the hardirq, clear the flag
532 * before acknowledging the chip. Since the mutex is held,
533 * wl1271_ps_elp_wakeup cannot be called concurrently.
535 clear_bit(WL1271_FLAG_IRQ_RUNNING
, &wl
->flags
);
536 smp_mb__after_clear_bit();
538 ret
= wlcore_fw_status(wl
, wl
->fw_status_1
, wl
->fw_status_2
);
540 wl12xx_queue_recovery_work(wl
);
544 wlcore_hw_tx_immediate_compl(wl
);
546 intr
= le32_to_cpu(wl
->fw_status_1
->intr
);
547 intr
&= WLCORE_ALL_INTR_MASK
;
553 if (unlikely(intr
& WL1271_ACX_INTR_WATCHDOG
)) {
554 wl1271_error("HW watchdog interrupt received! starting recovery.");
555 wl
->watchdog_recovery
= true;
556 wl12xx_queue_recovery_work(wl
);
558 /* restarting the chip. ignore any other interrupt. */
562 if (unlikely(intr
& WL1271_ACX_SW_INTR_WATCHDOG
)) {
563 wl1271_error("SW watchdog interrupt received! "
564 "starting recovery.");
565 wl
->watchdog_recovery
= true;
566 wl12xx_queue_recovery_work(wl
);
568 /* restarting the chip. ignore any other interrupt. */
572 if (likely(intr
& WL1271_ACX_INTR_DATA
)) {
573 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_DATA");
575 ret
= wlcore_rx(wl
, wl
->fw_status_1
);
577 wl12xx_queue_recovery_work(wl
);
581 /* Check if any tx blocks were freed */
582 spin_lock_irqsave(&wl
->wl_lock
, flags
);
583 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
584 wl1271_tx_total_queue_count(wl
) > 0) {
585 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
587 * In order to avoid starvation of the TX path,
588 * call the work function directly.
590 ret
= wlcore_tx_work_locked(wl
);
592 wl12xx_queue_recovery_work(wl
);
596 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
599 /* check for tx results */
600 ret
= wlcore_hw_tx_delayed_compl(wl
);
602 wl12xx_queue_recovery_work(wl
);
606 /* Make sure the deferred queues don't get too long */
607 defer_count
= skb_queue_len(&wl
->deferred_tx_queue
) +
608 skb_queue_len(&wl
->deferred_rx_queue
);
609 if (defer_count
> WL1271_DEFERRED_QUEUE_LIMIT
)
610 wl1271_flush_deferred_work(wl
);
613 if (intr
& WL1271_ACX_INTR_EVENT_A
) {
614 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_EVENT_A");
615 ret
= wl1271_event_handle(wl
, 0);
617 wl12xx_queue_recovery_work(wl
);
622 if (intr
& WL1271_ACX_INTR_EVENT_B
) {
623 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_EVENT_B");
624 ret
= wl1271_event_handle(wl
, 1);
626 wl12xx_queue_recovery_work(wl
);
631 if (intr
& WL1271_ACX_INTR_INIT_COMPLETE
)
632 wl1271_debug(DEBUG_IRQ
,
633 "WL1271_ACX_INTR_INIT_COMPLETE");
635 if (intr
& WL1271_ACX_INTR_HW_AVAILABLE
)
636 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_HW_AVAILABLE");
639 wl1271_ps_elp_sleep(wl
);
642 spin_lock_irqsave(&wl
->wl_lock
, flags
);
643 /* In case TX was not handled here, queue TX work */
644 clear_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
);
645 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
646 wl1271_tx_total_queue_count(wl
) > 0)
647 ieee80211_queue_work(wl
->hw
, &wl
->tx_work
);
648 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
650 mutex_unlock(&wl
->mutex
);
655 struct vif_counter_data
{
658 struct ieee80211_vif
*cur_vif
;
659 bool cur_vif_running
;
662 static void wl12xx_vif_count_iter(void *data
, u8
*mac
,
663 struct ieee80211_vif
*vif
)
665 struct vif_counter_data
*counter
= data
;
668 if (counter
->cur_vif
== vif
)
669 counter
->cur_vif_running
= true;
672 /* caller must not hold wl->mutex, as it might deadlock */
673 static void wl12xx_get_vif_count(struct ieee80211_hw
*hw
,
674 struct ieee80211_vif
*cur_vif
,
675 struct vif_counter_data
*data
)
677 memset(data
, 0, sizeof(*data
));
678 data
->cur_vif
= cur_vif
;
680 ieee80211_iterate_active_interfaces(hw
,
681 wl12xx_vif_count_iter
, data
);
684 static int wl12xx_fetch_firmware(struct wl1271
*wl
, bool plt
)
686 const struct firmware
*fw
;
688 enum wl12xx_fw_type fw_type
;
692 fw_type
= WL12XX_FW_TYPE_PLT
;
693 fw_name
= wl
->plt_fw_name
;
696 * we can't call wl12xx_get_vif_count() here because
697 * wl->mutex is taken, so use the cached last_vif_count value
699 if (wl
->last_vif_count
> 1) {
700 fw_type
= WL12XX_FW_TYPE_MULTI
;
701 fw_name
= wl
->mr_fw_name
;
703 fw_type
= WL12XX_FW_TYPE_NORMAL
;
704 fw_name
= wl
->sr_fw_name
;
708 if (wl
->fw_type
== fw_type
)
711 wl1271_debug(DEBUG_BOOT
, "booting firmware %s", fw_name
);
713 ret
= request_firmware(&fw
, fw_name
, wl
->dev
);
716 wl1271_error("could not get firmware %s: %d", fw_name
, ret
);
721 wl1271_error("firmware size is not multiple of 32 bits: %zu",
728 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
729 wl
->fw_len
= fw
->size
;
730 wl
->fw
= vmalloc(wl
->fw_len
);
733 wl1271_error("could not allocate memory for the firmware");
738 memcpy(wl
->fw
, fw
->data
, wl
->fw_len
);
740 wl
->fw_type
= fw_type
;
742 release_firmware(fw
);
747 static void wl1271_fetch_nvs(struct wl1271
*wl
)
749 const struct firmware
*fw
;
752 ret
= request_firmware(&fw
, WL12XX_NVS_NAME
, wl
->dev
);
755 wl1271_debug(DEBUG_BOOT
, "could not get nvs file %s: %d",
756 WL12XX_NVS_NAME
, ret
);
760 wl
->nvs
= kmemdup(fw
->data
, fw
->size
, GFP_KERNEL
);
763 wl1271_error("could not allocate memory for the nvs file");
767 wl
->nvs_len
= fw
->size
;
770 release_firmware(fw
);
773 void wl12xx_queue_recovery_work(struct wl1271
*wl
)
775 /* Avoid a recursive recovery */
776 if (!test_and_set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
)) {
777 wlcore_disable_interrupts_nosync(wl
);
778 ieee80211_queue_work(wl
->hw
, &wl
->recovery_work
);
782 size_t wl12xx_copy_fwlog(struct wl1271
*wl
, u8
*memblock
, size_t maxlen
)
786 /* The FW log is a length-value list, find where the log end */
787 while (len
< maxlen
) {
788 if (memblock
[len
] == 0)
790 if (len
+ memblock
[len
] + 1 > maxlen
)
792 len
+= memblock
[len
] + 1;
795 /* Make sure we have enough room */
796 len
= min(len
, (size_t)(PAGE_SIZE
- wl
->fwlog_size
));
798 /* Fill the FW log file, consumed by the sysfs fwlog entry */
799 memcpy(wl
->fwlog
+ wl
->fwlog_size
, memblock
, len
);
800 wl
->fwlog_size
+= len
;
805 #define WLCORE_FW_LOG_END 0x2000000
807 static void wl12xx_read_fwlog_panic(struct wl1271
*wl
)
815 if ((wl
->quirks
& WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED
) ||
816 (wl
->conf
.fwlog
.mem_blocks
== 0))
819 wl1271_info("Reading FW panic log");
821 block
= kmalloc(WL12XX_HW_BLOCK_SIZE
, GFP_KERNEL
);
826 * Make sure the chip is awake and the logger isn't active.
827 * Do not send a stop fwlog command if the fw is hanged.
829 if (wl1271_ps_elp_wakeup(wl
))
831 if (!wl
->watchdog_recovery
)
832 wl12xx_cmd_stop_fwlog(wl
);
834 /* Read the first memory block address */
835 ret
= wlcore_fw_status(wl
, wl
->fw_status_1
, wl
->fw_status_2
);
839 addr
= le32_to_cpu(wl
->fw_status_2
->log_start_addr
);
843 if (wl
->conf
.fwlog
.mode
== WL12XX_FWLOG_CONTINUOUS
) {
844 offset
= sizeof(addr
) + sizeof(struct wl1271_rx_descriptor
);
845 end_of_log
= WLCORE_FW_LOG_END
;
847 offset
= sizeof(addr
);
851 /* Traverse the memory blocks linked list */
853 memset(block
, 0, WL12XX_HW_BLOCK_SIZE
);
854 ret
= wlcore_read_hwaddr(wl
, addr
, block
, WL12XX_HW_BLOCK_SIZE
,
860 * Memory blocks are linked to one another. The first 4 bytes
861 * of each memory block hold the hardware address of the next
862 * one. The last memory block points to the first one in
863 * on demand mode and is equal to 0x2000000 in continuous mode.
865 addr
= le32_to_cpup((__le32
*)block
);
866 if (!wl12xx_copy_fwlog(wl
, block
+ offset
,
867 WL12XX_HW_BLOCK_SIZE
- offset
))
869 } while (addr
&& (addr
!= end_of_log
));
871 wake_up_interruptible(&wl
->fwlog_waitq
);
877 static void wlcore_print_recovery(struct wl1271
*wl
)
883 wl1271_info("Hardware recovery in progress. FW ver: %s",
884 wl
->chip
.fw_ver_str
);
886 /* change partitions momentarily so we can read the FW pc */
887 ret
= wlcore_set_partition(wl
, &wl
->ptable
[PART_BOOT
]);
891 ret
= wlcore_read_reg(wl
, REG_PC_ON_RECOVERY
, &pc
);
895 ret
= wlcore_read_reg(wl
, REG_INTERRUPT_NO_CLEAR
, &hint_sts
);
899 wl1271_info("pc: 0x%x, hint_sts: 0x%08x", pc
, hint_sts
);
901 wlcore_set_partition(wl
, &wl
->ptable
[PART_WORK
]);
905 static void wl1271_recovery_work(struct work_struct
*work
)
908 container_of(work
, struct wl1271
, recovery_work
);
909 struct wl12xx_vif
*wlvif
;
910 struct ieee80211_vif
*vif
;
912 mutex_lock(&wl
->mutex
);
914 if (wl
->state
!= WL1271_STATE_ON
|| wl
->plt
)
917 wl12xx_read_fwlog_panic(wl
);
919 wlcore_print_recovery(wl
);
921 BUG_ON(bug_on_recovery
&&
922 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
));
925 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
926 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
);
931 * Advance security sequence number to overcome potential progress
932 * in the firmware during recovery. This doens't hurt if the network is
935 wl12xx_for_each_wlvif(wl
, wlvif
) {
936 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) ||
937 test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
))
938 wlvif
->tx_security_seq
+=
939 WL1271_TX_SQN_POST_RECOVERY_PADDING
;
942 /* Prevent spurious TX during FW restart */
943 wlcore_stop_queues(wl
, WLCORE_QUEUE_STOP_REASON_FW_RESTART
);
945 if (wl
->sched_scanning
) {
946 ieee80211_sched_scan_stopped(wl
->hw
);
947 wl
->sched_scanning
= false;
950 /* reboot the chipset */
951 while (!list_empty(&wl
->wlvif_list
)) {
952 wlvif
= list_first_entry(&wl
->wlvif_list
,
953 struct wl12xx_vif
, list
);
954 vif
= wl12xx_wlvif_to_vif(wlvif
);
955 __wl1271_op_remove_interface(wl
, vif
, false);
957 wl
->watchdog_recovery
= false;
958 mutex_unlock(&wl
->mutex
);
959 wl1271_op_stop(wl
->hw
);
961 ieee80211_restart_hw(wl
->hw
);
964 * Its safe to enable TX now - the queues are stopped after a request
967 wlcore_wake_queues(wl
, WLCORE_QUEUE_STOP_REASON_FW_RESTART
);
970 wl
->watchdog_recovery
= false;
971 mutex_unlock(&wl
->mutex
);
974 static int wlcore_fw_wakeup(struct wl1271
*wl
)
976 return wlcore_raw_write32(wl
, HW_ACCESS_ELP_CTRL_REG
, ELPCTRL_WAKE_UP
);
979 static int wl1271_setup(struct wl1271
*wl
)
981 wl
->fw_status_1
= kmalloc(WLCORE_FW_STATUS_1_LEN(wl
->num_rx_desc
) +
982 sizeof(*wl
->fw_status_2
) +
983 wl
->fw_status_priv_len
, GFP_KERNEL
);
984 if (!wl
->fw_status_1
)
987 wl
->fw_status_2
= (struct wl_fw_status_2
*)
988 (((u8
*) wl
->fw_status_1
) +
989 WLCORE_FW_STATUS_1_LEN(wl
->num_rx_desc
));
991 wl
->tx_res_if
= kmalloc(sizeof(*wl
->tx_res_if
), GFP_KERNEL
);
992 if (!wl
->tx_res_if
) {
993 kfree(wl
->fw_status_1
);
1000 static int wl12xx_set_power_on(struct wl1271
*wl
)
1004 msleep(WL1271_PRE_POWER_ON_SLEEP
);
1005 ret
= wl1271_power_on(wl
);
1008 msleep(WL1271_POWER_ON_SLEEP
);
1009 wl1271_io_reset(wl
);
1012 ret
= wlcore_set_partition(wl
, &wl
->ptable
[PART_BOOT
]);
1016 /* ELP module wake up */
1017 ret
= wlcore_fw_wakeup(wl
);
1025 wl1271_power_off(wl
);
1029 static int wl12xx_chip_wakeup(struct wl1271
*wl
, bool plt
)
1033 ret
= wl12xx_set_power_on(wl
);
1038 * For wl127x based devices we could use the default block
1039 * size (512 bytes), but due to a bug in the sdio driver, we
1040 * need to set it explicitly after the chip is powered on. To
1041 * simplify the code and since the performance impact is
1042 * negligible, we use the same block size for all different
1045 * Check if the bus supports blocksize alignment and, if it
1046 * doesn't, make sure we don't have the quirk.
1048 if (!wl1271_set_block_size(wl
))
1049 wl
->quirks
&= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN
;
1051 /* TODO: make sure the lower driver has set things up correctly */
1053 ret
= wl1271_setup(wl
);
1057 ret
= wl12xx_fetch_firmware(wl
, plt
);
1065 int wl1271_plt_start(struct wl1271
*wl
)
1067 int retries
= WL1271_BOOT_RETRIES
;
1068 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
1071 mutex_lock(&wl
->mutex
);
1073 wl1271_notice("power up");
1075 if (wl
->state
!= WL1271_STATE_OFF
) {
1076 wl1271_error("cannot go into PLT state because not "
1077 "in off state: %d", wl
->state
);
1084 ret
= wl12xx_chip_wakeup(wl
, true);
1088 ret
= wl
->ops
->plt_init(wl
);
1093 wl
->state
= WL1271_STATE_ON
;
1094 wl1271_notice("firmware booted in PLT mode (%s)",
1095 wl
->chip
.fw_ver_str
);
1097 /* update hw/fw version info in wiphy struct */
1098 wiphy
->hw_version
= wl
->chip
.id
;
1099 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
1100 sizeof(wiphy
->fw_version
));
1105 wl1271_power_off(wl
);
1108 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1109 WL1271_BOOT_RETRIES
);
1111 mutex_unlock(&wl
->mutex
);
1116 int wl1271_plt_stop(struct wl1271
*wl
)
1120 wl1271_notice("power down");
1123 * Interrupts must be disabled before setting the state to OFF.
1124 * Otherwise, the interrupt handler might be called and exit without
1125 * reading the interrupt status.
1127 wlcore_disable_interrupts(wl
);
1128 mutex_lock(&wl
->mutex
);
1130 mutex_unlock(&wl
->mutex
);
1133 * This will not necessarily enable interrupts as interrupts
1134 * may have been disabled when op_stop was called. It will,
1135 * however, balance the above call to disable_interrupts().
1137 wlcore_enable_interrupts(wl
);
1139 wl1271_error("cannot power down because not in PLT "
1140 "state: %d", wl
->state
);
1145 mutex_unlock(&wl
->mutex
);
1147 wl1271_flush_deferred_work(wl
);
1148 cancel_work_sync(&wl
->netstack_work
);
1149 cancel_work_sync(&wl
->recovery_work
);
1150 cancel_delayed_work_sync(&wl
->elp_work
);
1151 cancel_delayed_work_sync(&wl
->tx_watchdog_work
);
1152 cancel_delayed_work_sync(&wl
->connection_loss_work
);
1154 mutex_lock(&wl
->mutex
);
1155 wl1271_power_off(wl
);
1157 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
1158 wl
->state
= WL1271_STATE_OFF
;
1161 mutex_unlock(&wl
->mutex
);
1167 static void wl1271_op_tx(struct ieee80211_hw
*hw
, struct sk_buff
*skb
)
1169 struct wl1271
*wl
= hw
->priv
;
1170 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
1171 struct ieee80211_vif
*vif
= info
->control
.vif
;
1172 struct wl12xx_vif
*wlvif
= NULL
;
1173 unsigned long flags
;
1178 wlvif
= wl12xx_vif_to_data(vif
);
1180 mapping
= skb_get_queue_mapping(skb
);
1181 q
= wl1271_tx_get_queue(mapping
);
1183 hlid
= wl12xx_tx_get_hlid(wl
, wlvif
, skb
);
1185 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1188 * drop the packet if the link is invalid or the queue is stopped
1189 * for any reason but watermark. Watermark is a "soft"-stop so we
1190 * allow these packets through.
1192 if (hlid
== WL12XX_INVALID_LINK_ID
||
1193 (wlvif
&& !test_bit(hlid
, wlvif
->links_map
)) ||
1194 (wlcore_is_queue_stopped(wl
, q
) &&
1195 !wlcore_is_queue_stopped_by_reason(wl
, q
,
1196 WLCORE_QUEUE_STOP_REASON_WATERMARK
))) {
1197 wl1271_debug(DEBUG_TX
, "DROP skb hlid %d q %d", hlid
, q
);
1198 ieee80211_free_txskb(hw
, skb
);
1202 wl1271_debug(DEBUG_TX
, "queue skb hlid %d q %d len %d",
1204 skb_queue_tail(&wl
->links
[hlid
].tx_queue
[q
], skb
);
1206 wl
->tx_queue_count
[q
]++;
1209 * The workqueue is slow to process the tx_queue and we need stop
1210 * the queue here, otherwise the queue will get too long.
1212 if (wl
->tx_queue_count
[q
] >= WL1271_TX_QUEUE_HIGH_WATERMARK
) {
1213 wl1271_debug(DEBUG_TX
, "op_tx: stopping queues for q %d", q
);
1214 wlcore_stop_queue_locked(wl
, q
,
1215 WLCORE_QUEUE_STOP_REASON_WATERMARK
);
1219 * The chip specific setup must run before the first TX packet -
1220 * before that, the tx_work will not be initialized!
1223 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
1224 !test_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
))
1225 ieee80211_queue_work(wl
->hw
, &wl
->tx_work
);
1228 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1231 int wl1271_tx_dummy_packet(struct wl1271
*wl
)
1233 unsigned long flags
;
1236 /* no need to queue a new dummy packet if one is already pending */
1237 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING
, &wl
->flags
))
1240 q
= wl1271_tx_get_queue(skb_get_queue_mapping(wl
->dummy_packet
));
1242 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1243 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING
, &wl
->flags
);
1244 wl
->tx_queue_count
[q
]++;
1245 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1247 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1248 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
))
1249 return wlcore_tx_work_locked(wl
);
1252 * If the FW TX is busy, TX work will be scheduled by the threaded
1253 * interrupt handler function
1259 * The size of the dummy packet should be at least 1400 bytes. However, in
1260 * order to minimize the number of bus transactions, aligning it to 512 bytes
1261 * boundaries could be beneficial, performance wise
1263 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1265 static struct sk_buff
*wl12xx_alloc_dummy_packet(struct wl1271
*wl
)
1267 struct sk_buff
*skb
;
1268 struct ieee80211_hdr_3addr
*hdr
;
1269 unsigned int dummy_packet_size
;
1271 dummy_packet_size
= TOTAL_TX_DUMMY_PACKET_SIZE
-
1272 sizeof(struct wl1271_tx_hw_descr
) - sizeof(*hdr
);
1274 skb
= dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE
);
1276 wl1271_warning("Failed to allocate a dummy packet skb");
1280 skb_reserve(skb
, sizeof(struct wl1271_tx_hw_descr
));
1282 hdr
= (struct ieee80211_hdr_3addr
*) skb_put(skb
, sizeof(*hdr
));
1283 memset(hdr
, 0, sizeof(*hdr
));
1284 hdr
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_DATA
|
1285 IEEE80211_STYPE_NULLFUNC
|
1286 IEEE80211_FCTL_TODS
);
1288 memset(skb_put(skb
, dummy_packet_size
), 0, dummy_packet_size
);
1290 /* Dummy packets require the TID to be management */
1291 skb
->priority
= WL1271_TID_MGMT
;
1293 /* Initialize all fields that might be used */
1294 skb_set_queue_mapping(skb
, 0);
1295 memset(IEEE80211_SKB_CB(skb
), 0, sizeof(struct ieee80211_tx_info
));
1303 wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern
*p
)
1305 int num_fields
= 0, in_field
= 0, fields_size
= 0;
1306 int i
, pattern_len
= 0;
1309 wl1271_warning("No mask in WoWLAN pattern");
1314 * The pattern is broken up into segments of bytes at different offsets
1315 * that need to be checked by the FW filter. Each segment is called
1316 * a field in the FW API. We verify that the total number of fields
1317 * required for this pattern won't exceed FW limits (8)
1318 * as well as the total fields buffer won't exceed the FW limit.
1319 * Note that if there's a pattern which crosses Ethernet/IP header
1320 * boundary a new field is required.
1322 for (i
= 0; i
< p
->pattern_len
; i
++) {
1323 if (test_bit(i
, (unsigned long *)p
->mask
)) {
1328 if (i
== WL1271_RX_FILTER_ETH_HEADER_SIZE
) {
1330 fields_size
+= pattern_len
+
1331 RX_FILTER_FIELD_OVERHEAD
;
1339 fields_size
+= pattern_len
+
1340 RX_FILTER_FIELD_OVERHEAD
;
1347 fields_size
+= pattern_len
+ RX_FILTER_FIELD_OVERHEAD
;
1351 if (num_fields
> WL1271_RX_FILTER_MAX_FIELDS
) {
1352 wl1271_warning("RX Filter too complex. Too many segments");
1356 if (fields_size
> WL1271_RX_FILTER_MAX_FIELDS_SIZE
) {
1357 wl1271_warning("RX filter pattern is too big");
1364 struct wl12xx_rx_filter
*wl1271_rx_filter_alloc(void)
1366 return kzalloc(sizeof(struct wl12xx_rx_filter
), GFP_KERNEL
);
1369 void wl1271_rx_filter_free(struct wl12xx_rx_filter
*filter
)
1376 for (i
= 0; i
< filter
->num_fields
; i
++)
1377 kfree(filter
->fields
[i
].pattern
);
1382 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter
*filter
,
1383 u16 offset
, u8 flags
,
1384 u8
*pattern
, u8 len
)
1386 struct wl12xx_rx_filter_field
*field
;
1388 if (filter
->num_fields
== WL1271_RX_FILTER_MAX_FIELDS
) {
1389 wl1271_warning("Max fields per RX filter. can't alloc another");
1393 field
= &filter
->fields
[filter
->num_fields
];
1395 field
->pattern
= kzalloc(len
, GFP_KERNEL
);
1396 if (!field
->pattern
) {
1397 wl1271_warning("Failed to allocate RX filter pattern");
1401 filter
->num_fields
++;
1403 field
->offset
= cpu_to_le16(offset
);
1404 field
->flags
= flags
;
1406 memcpy(field
->pattern
, pattern
, len
);
1411 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter
*filter
)
1413 int i
, fields_size
= 0;
1415 for (i
= 0; i
< filter
->num_fields
; i
++)
1416 fields_size
+= filter
->fields
[i
].len
+
1417 sizeof(struct wl12xx_rx_filter_field
) -
1423 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter
*filter
,
1427 struct wl12xx_rx_filter_field
*field
;
1429 for (i
= 0; i
< filter
->num_fields
; i
++) {
1430 field
= (struct wl12xx_rx_filter_field
*)buf
;
1432 field
->offset
= filter
->fields
[i
].offset
;
1433 field
->flags
= filter
->fields
[i
].flags
;
1434 field
->len
= filter
->fields
[i
].len
;
1436 memcpy(&field
->pattern
, filter
->fields
[i
].pattern
, field
->len
);
1437 buf
+= sizeof(struct wl12xx_rx_filter_field
) -
1438 sizeof(u8
*) + field
->len
;
1443 * Allocates an RX filter returned through f
1444 * which needs to be freed using rx_filter_free()
1446 static int wl1271_convert_wowlan_pattern_to_rx_filter(
1447 struct cfg80211_wowlan_trig_pkt_pattern
*p
,
1448 struct wl12xx_rx_filter
**f
)
1451 struct wl12xx_rx_filter
*filter
;
1455 filter
= wl1271_rx_filter_alloc();
1457 wl1271_warning("Failed to alloc rx filter");
1463 while (i
< p
->pattern_len
) {
1464 if (!test_bit(i
, (unsigned long *)p
->mask
)) {
1469 for (j
= i
; j
< p
->pattern_len
; j
++) {
1470 if (!test_bit(j
, (unsigned long *)p
->mask
))
1473 if (i
< WL1271_RX_FILTER_ETH_HEADER_SIZE
&&
1474 j
>= WL1271_RX_FILTER_ETH_HEADER_SIZE
)
1478 if (i
< WL1271_RX_FILTER_ETH_HEADER_SIZE
) {
1480 flags
= WL1271_RX_FILTER_FLAG_ETHERNET_HEADER
;
1482 offset
= i
- WL1271_RX_FILTER_ETH_HEADER_SIZE
;
1483 flags
= WL1271_RX_FILTER_FLAG_IP_HEADER
;
1488 ret
= wl1271_rx_filter_alloc_field(filter
,
1491 &p
->pattern
[i
], len
);
1498 filter
->action
= FILTER_SIGNAL
;
1504 wl1271_rx_filter_free(filter
);
1510 static int wl1271_configure_wowlan(struct wl1271
*wl
,
1511 struct cfg80211_wowlan
*wow
)
1515 if (!wow
|| wow
->any
|| !wow
->n_patterns
) {
1516 ret
= wl1271_acx_default_rx_filter_enable(wl
, 0,
1521 ret
= wl1271_rx_filter_clear_all(wl
);
1528 if (WARN_ON(wow
->n_patterns
> WL1271_MAX_RX_FILTERS
))
1531 /* Validate all incoming patterns before clearing current FW state */
1532 for (i
= 0; i
< wow
->n_patterns
; i
++) {
1533 ret
= wl1271_validate_wowlan_pattern(&wow
->patterns
[i
]);
1535 wl1271_warning("Bad wowlan pattern %d", i
);
1540 ret
= wl1271_acx_default_rx_filter_enable(wl
, 0, FILTER_SIGNAL
);
1544 ret
= wl1271_rx_filter_clear_all(wl
);
1548 /* Translate WoWLAN patterns into filters */
1549 for (i
= 0; i
< wow
->n_patterns
; i
++) {
1550 struct cfg80211_wowlan_trig_pkt_pattern
*p
;
1551 struct wl12xx_rx_filter
*filter
= NULL
;
1553 p
= &wow
->patterns
[i
];
1555 ret
= wl1271_convert_wowlan_pattern_to_rx_filter(p
, &filter
);
1557 wl1271_warning("Failed to create an RX filter from "
1558 "wowlan pattern %d", i
);
1562 ret
= wl1271_rx_filter_enable(wl
, i
, 1, filter
);
1564 wl1271_rx_filter_free(filter
);
1569 ret
= wl1271_acx_default_rx_filter_enable(wl
, 1, FILTER_DROP
);
1575 static int wl1271_configure_suspend_sta(struct wl1271
*wl
,
1576 struct wl12xx_vif
*wlvif
,
1577 struct cfg80211_wowlan
*wow
)
1581 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
1584 ret
= wl1271_ps_elp_wakeup(wl
);
1588 ret
= wl1271_configure_wowlan(wl
, wow
);
1592 ret
= wl1271_acx_wake_up_conditions(wl
, wlvif
,
1593 wl
->conf
.conn
.suspend_wake_up_event
,
1594 wl
->conf
.conn
.suspend_listen_interval
);
1597 wl1271_error("suspend: set wake up conditions failed: %d", ret
);
1600 wl1271_ps_elp_sleep(wl
);
1606 static int wl1271_configure_suspend_ap(struct wl1271
*wl
,
1607 struct wl12xx_vif
*wlvif
)
1611 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
))
1614 ret
= wl1271_ps_elp_wakeup(wl
);
1618 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, true);
1620 wl1271_ps_elp_sleep(wl
);
1626 static int wl1271_configure_suspend(struct wl1271
*wl
,
1627 struct wl12xx_vif
*wlvif
,
1628 struct cfg80211_wowlan
*wow
)
1630 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
)
1631 return wl1271_configure_suspend_sta(wl
, wlvif
, wow
);
1632 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
1633 return wl1271_configure_suspend_ap(wl
, wlvif
);
1637 static void wl1271_configure_resume(struct wl1271
*wl
,
1638 struct wl12xx_vif
*wlvif
)
1641 bool is_ap
= wlvif
->bss_type
== BSS_TYPE_AP_BSS
;
1642 bool is_sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
1644 if ((!is_ap
) && (!is_sta
))
1647 ret
= wl1271_ps_elp_wakeup(wl
);
1652 wl1271_configure_wowlan(wl
, NULL
);
1654 ret
= wl1271_acx_wake_up_conditions(wl
, wlvif
,
1655 wl
->conf
.conn
.wake_up_event
,
1656 wl
->conf
.conn
.listen_interval
);
1659 wl1271_error("resume: wake up conditions failed: %d",
1663 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, false);
1666 wl1271_ps_elp_sleep(wl
);
1669 static int wl1271_op_suspend(struct ieee80211_hw
*hw
,
1670 struct cfg80211_wowlan
*wow
)
1672 struct wl1271
*wl
= hw
->priv
;
1673 struct wl12xx_vif
*wlvif
;
1676 wl1271_debug(DEBUG_MAC80211
, "mac80211 suspend wow=%d", !!wow
);
1679 /* we want to perform the recovery before suspending */
1680 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
)) {
1681 wl1271_warning("postponing suspend to perform recovery");
1685 wl1271_tx_flush(wl
);
1687 mutex_lock(&wl
->mutex
);
1688 wl
->wow_enabled
= true;
1689 wl12xx_for_each_wlvif(wl
, wlvif
) {
1690 ret
= wl1271_configure_suspend(wl
, wlvif
, wow
);
1692 mutex_unlock(&wl
->mutex
);
1693 wl1271_warning("couldn't prepare device to suspend");
1697 mutex_unlock(&wl
->mutex
);
1698 /* flush any remaining work */
1699 wl1271_debug(DEBUG_MAC80211
, "flushing remaining works");
1702 * disable and re-enable interrupts in order to flush
1705 wlcore_disable_interrupts(wl
);
1708 * set suspended flag to avoid triggering a new threaded_irq
1709 * work. no need for spinlock as interrupts are disabled.
1711 set_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
);
1713 wlcore_enable_interrupts(wl
);
1714 flush_work(&wl
->tx_work
);
1715 flush_delayed_work(&wl
->elp_work
);
1720 static int wl1271_op_resume(struct ieee80211_hw
*hw
)
1722 struct wl1271
*wl
= hw
->priv
;
1723 struct wl12xx_vif
*wlvif
;
1724 unsigned long flags
;
1725 bool run_irq_work
= false, pending_recovery
;
1727 wl1271_debug(DEBUG_MAC80211
, "mac80211 resume wow=%d",
1729 WARN_ON(!wl
->wow_enabled
);
1732 * re-enable irq_work enqueuing, and call irq_work directly if
1733 * there is a pending work.
1735 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1736 clear_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
);
1737 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK
, &wl
->flags
))
1738 run_irq_work
= true;
1739 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1741 /* test the recovery flag before calling any SDIO functions */
1742 pending_recovery
= test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
,
1746 wl1271_debug(DEBUG_MAC80211
,
1747 "run postponed irq_work directly");
1749 /* don't talk to the HW if recovery is pending */
1750 if (!pending_recovery
)
1753 wlcore_enable_interrupts(wl
);
1756 mutex_lock(&wl
->mutex
);
1757 if (pending_recovery
) {
1758 wl1271_warning("queuing forgotten recovery on resume");
1759 ieee80211_queue_work(wl
->hw
, &wl
->recovery_work
);
1763 wl12xx_for_each_wlvif(wl
, wlvif
) {
1764 wl1271_configure_resume(wl
, wlvif
);
1768 wl
->wow_enabled
= false;
1769 mutex_unlock(&wl
->mutex
);
1775 static int wl1271_op_start(struct ieee80211_hw
*hw
)
1777 wl1271_debug(DEBUG_MAC80211
, "mac80211 start");
1780 * We have to delay the booting of the hardware because
1781 * we need to know the local MAC address before downloading and
1782 * initializing the firmware. The MAC address cannot be changed
1783 * after boot, and without the proper MAC address, the firmware
1784 * will not function properly.
1786 * The MAC address is first known when the corresponding interface
1787 * is added. That is where we will initialize the hardware.
1793 static void wl1271_op_stop(struct ieee80211_hw
*hw
)
1795 struct wl1271
*wl
= hw
->priv
;
1798 wl1271_debug(DEBUG_MAC80211
, "mac80211 stop");
1801 * Interrupts must be disabled before setting the state to OFF.
1802 * Otherwise, the interrupt handler might be called and exit without
1803 * reading the interrupt status.
1805 wlcore_disable_interrupts(wl
);
1806 mutex_lock(&wl
->mutex
);
1807 if (wl
->state
== WL1271_STATE_OFF
) {
1808 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
,
1810 wlcore_enable_interrupts(wl
);
1812 mutex_unlock(&wl
->mutex
);
1815 * This will not necessarily enable interrupts as interrupts
1816 * may have been disabled when op_stop was called. It will,
1817 * however, balance the above call to disable_interrupts().
1819 wlcore_enable_interrupts(wl
);
1824 * this must be before the cancel_work calls below, so that the work
1825 * functions don't perform further work.
1827 wl
->state
= WL1271_STATE_OFF
;
1828 mutex_unlock(&wl
->mutex
);
1830 wl1271_flush_deferred_work(wl
);
1831 cancel_delayed_work_sync(&wl
->scan_complete_work
);
1832 cancel_work_sync(&wl
->netstack_work
);
1833 cancel_work_sync(&wl
->tx_work
);
1834 cancel_delayed_work_sync(&wl
->elp_work
);
1835 cancel_delayed_work_sync(&wl
->tx_watchdog_work
);
1836 cancel_delayed_work_sync(&wl
->connection_loss_work
);
1838 /* let's notify MAC80211 about the remaining pending TX frames */
1839 wl12xx_tx_reset(wl
);
1840 mutex_lock(&wl
->mutex
);
1842 wl1271_power_off(wl
);
1844 * In case a recovery was scheduled, interrupts were disabled to avoid
1845 * an interrupt storm. Now that the power is down, it is safe to
1846 * re-enable interrupts to balance the disable depth
1848 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
))
1849 wlcore_enable_interrupts(wl
);
1851 wl
->band
= IEEE80211_BAND_2GHZ
;
1854 wl
->power_level
= WL1271_DEFAULT_POWER_LEVEL
;
1855 wl
->channel_type
= NL80211_CHAN_NO_HT
;
1856 wl
->tx_blocks_available
= 0;
1857 wl
->tx_allocated_blocks
= 0;
1858 wl
->tx_results_count
= 0;
1859 wl
->tx_packets_count
= 0;
1860 wl
->time_offset
= 0;
1861 wl
->ap_fw_ps_map
= 0;
1863 wl
->sched_scanning
= false;
1864 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
1865 memset(wl
->roles_map
, 0, sizeof(wl
->roles_map
));
1866 memset(wl
->links_map
, 0, sizeof(wl
->links_map
));
1867 memset(wl
->roc_map
, 0, sizeof(wl
->roc_map
));
1868 wl
->active_sta_count
= 0;
1870 /* The system link is always allocated */
1871 __set_bit(WL12XX_SYSTEM_HLID
, wl
->links_map
);
1874 * this is performed after the cancel_work calls and the associated
1875 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1876 * get executed before all these vars have been reset.
1880 wl
->tx_blocks_freed
= 0;
1882 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
1883 wl
->tx_pkts_freed
[i
] = 0;
1884 wl
->tx_allocated_pkts
[i
] = 0;
1887 wl1271_debugfs_reset(wl
);
1889 kfree(wl
->fw_status_1
);
1890 wl
->fw_status_1
= NULL
;
1891 wl
->fw_status_2
= NULL
;
1892 kfree(wl
->tx_res_if
);
1893 wl
->tx_res_if
= NULL
;
1894 kfree(wl
->target_mem_map
);
1895 wl
->target_mem_map
= NULL
;
1897 mutex_unlock(&wl
->mutex
);
1900 static int wl12xx_allocate_rate_policy(struct wl1271
*wl
, u8
*idx
)
1902 u8 policy
= find_first_zero_bit(wl
->rate_policies_map
,
1903 WL12XX_MAX_RATE_POLICIES
);
1904 if (policy
>= WL12XX_MAX_RATE_POLICIES
)
1907 __set_bit(policy
, wl
->rate_policies_map
);
1912 static void wl12xx_free_rate_policy(struct wl1271
*wl
, u8
*idx
)
1914 if (WARN_ON(*idx
>= WL12XX_MAX_RATE_POLICIES
))
1917 __clear_bit(*idx
, wl
->rate_policies_map
);
1918 *idx
= WL12XX_MAX_RATE_POLICIES
;
1921 static u8
wl12xx_get_role_type(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
1923 switch (wlvif
->bss_type
) {
1924 case BSS_TYPE_AP_BSS
:
1926 return WL1271_ROLE_P2P_GO
;
1928 return WL1271_ROLE_AP
;
1930 case BSS_TYPE_STA_BSS
:
1932 return WL1271_ROLE_P2P_CL
;
1934 return WL1271_ROLE_STA
;
1937 return WL1271_ROLE_IBSS
;
1940 wl1271_error("invalid bss_type: %d", wlvif
->bss_type
);
1942 return WL12XX_INVALID_ROLE_TYPE
;
1945 static int wl12xx_init_vif_data(struct wl1271
*wl
, struct ieee80211_vif
*vif
)
1947 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
1950 /* clear everything but the persistent data */
1951 memset(wlvif
, 0, offsetof(struct wl12xx_vif
, persistent
));
1953 switch (ieee80211_vif_type_p2p(vif
)) {
1954 case NL80211_IFTYPE_P2P_CLIENT
:
1957 case NL80211_IFTYPE_STATION
:
1958 wlvif
->bss_type
= BSS_TYPE_STA_BSS
;
1960 case NL80211_IFTYPE_ADHOC
:
1961 wlvif
->bss_type
= BSS_TYPE_IBSS
;
1963 case NL80211_IFTYPE_P2P_GO
:
1966 case NL80211_IFTYPE_AP
:
1967 wlvif
->bss_type
= BSS_TYPE_AP_BSS
;
1970 wlvif
->bss_type
= MAX_BSS_TYPE
;
1974 wlvif
->role_id
= WL12XX_INVALID_ROLE_ID
;
1975 wlvif
->dev_role_id
= WL12XX_INVALID_ROLE_ID
;
1976 wlvif
->dev_hlid
= WL12XX_INVALID_LINK_ID
;
1978 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
1979 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
1980 /* init sta/ibss data */
1981 wlvif
->sta
.hlid
= WL12XX_INVALID_LINK_ID
;
1982 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.basic_rate_idx
);
1983 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.ap_rate_idx
);
1984 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.p2p_rate_idx
);
1985 wlvif
->basic_rate_set
= CONF_TX_RATE_MASK_BASIC
;
1986 wlvif
->basic_rate
= CONF_TX_RATE_MASK_BASIC
;
1987 wlvif
->rate_set
= CONF_TX_RATE_MASK_BASIC
;
1990 wlvif
->ap
.bcast_hlid
= WL12XX_INVALID_LINK_ID
;
1991 wlvif
->ap
.global_hlid
= WL12XX_INVALID_LINK_ID
;
1992 wl12xx_allocate_rate_policy(wl
, &wlvif
->ap
.mgmt_rate_idx
);
1993 wl12xx_allocate_rate_policy(wl
, &wlvif
->ap
.bcast_rate_idx
);
1994 for (i
= 0; i
< CONF_TX_MAX_AC_COUNT
; i
++)
1995 wl12xx_allocate_rate_policy(wl
,
1996 &wlvif
->ap
.ucast_rate_idx
[i
]);
1997 wlvif
->basic_rate_set
= CONF_TX_AP_ENABLED_RATES
;
1999 * TODO: check if basic_rate shouldn't be
2000 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2001 * instead (the same thing for STA above).
2003 wlvif
->basic_rate
= CONF_TX_AP_ENABLED_RATES
;
2004 /* TODO: this seems to be used only for STA, check it */
2005 wlvif
->rate_set
= CONF_TX_AP_ENABLED_RATES
;
2008 wlvif
->bitrate_masks
[IEEE80211_BAND_2GHZ
] = wl
->conf
.tx
.basic_rate
;
2009 wlvif
->bitrate_masks
[IEEE80211_BAND_5GHZ
] = wl
->conf
.tx
.basic_rate_5
;
2010 wlvif
->beacon_int
= WL1271_DEFAULT_BEACON_INT
;
2013 * mac80211 configures some values globally, while we treat them
2014 * per-interface. thus, on init, we have to copy them from wl
2016 wlvif
->band
= wl
->band
;
2017 wlvif
->channel
= wl
->channel
;
2018 wlvif
->power_level
= wl
->power_level
;
2019 wlvif
->channel_type
= wl
->channel_type
;
2021 INIT_WORK(&wlvif
->rx_streaming_enable_work
,
2022 wl1271_rx_streaming_enable_work
);
2023 INIT_WORK(&wlvif
->rx_streaming_disable_work
,
2024 wl1271_rx_streaming_disable_work
);
2025 INIT_LIST_HEAD(&wlvif
->list
);
2027 setup_timer(&wlvif
->rx_streaming_timer
, wl1271_rx_streaming_timer
,
2028 (unsigned long) wlvif
);
2032 static bool wl12xx_init_fw(struct wl1271
*wl
)
2034 int retries
= WL1271_BOOT_RETRIES
;
2035 bool booted
= false;
2036 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
2041 ret
= wl12xx_chip_wakeup(wl
, false);
2045 ret
= wl
->ops
->boot(wl
);
2049 ret
= wl1271_hw_init(wl
);
2057 mutex_unlock(&wl
->mutex
);
2058 /* Unlocking the mutex in the middle of handling is
2059 inherently unsafe. In this case we deem it safe to do,
2060 because we need to let any possibly pending IRQ out of
2061 the system (and while we are WL1271_STATE_OFF the IRQ
2062 work function will not do anything.) Also, any other
2063 possible concurrent operations will fail due to the
2064 current state, hence the wl1271 struct should be safe. */
2065 wlcore_disable_interrupts(wl
);
2066 wl1271_flush_deferred_work(wl
);
2067 cancel_work_sync(&wl
->netstack_work
);
2068 mutex_lock(&wl
->mutex
);
2070 wl1271_power_off(wl
);
2074 wl1271_error("firmware boot failed despite %d retries",
2075 WL1271_BOOT_RETRIES
);
2079 wl1271_info("firmware booted (%s)", wl
->chip
.fw_ver_str
);
2081 /* update hw/fw version info in wiphy struct */
2082 wiphy
->hw_version
= wl
->chip
.id
;
2083 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
2084 sizeof(wiphy
->fw_version
));
2087 * Now we know if 11a is supported (info from the NVS), so disable
2088 * 11a channels if not supported
2090 if (!wl
->enable_11a
)
2091 wiphy
->bands
[IEEE80211_BAND_5GHZ
]->n_channels
= 0;
2093 wl1271_debug(DEBUG_MAC80211
, "11a is %ssupported",
2094 wl
->enable_11a
? "" : "not ");
2096 wl
->state
= WL1271_STATE_ON
;
2101 static bool wl12xx_dev_role_started(struct wl12xx_vif
*wlvif
)
2103 return wlvif
->dev_hlid
!= WL12XX_INVALID_LINK_ID
;
2107 * Check whether a fw switch (i.e. moving from one loaded
2108 * fw to another) is needed. This function is also responsible
2109 * for updating wl->last_vif_count, so it must be called before
2110 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2113 static bool wl12xx_need_fw_change(struct wl1271
*wl
,
2114 struct vif_counter_data vif_counter_data
,
2117 enum wl12xx_fw_type current_fw
= wl
->fw_type
;
2118 u8 vif_count
= vif_counter_data
.counter
;
2120 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
))
2123 /* increase the vif count if this is a new vif */
2124 if (add
&& !vif_counter_data
.cur_vif_running
)
2127 wl
->last_vif_count
= vif_count
;
2129 /* no need for fw change if the device is OFF */
2130 if (wl
->state
== WL1271_STATE_OFF
)
2133 if (vif_count
> 1 && current_fw
== WL12XX_FW_TYPE_NORMAL
)
2135 if (vif_count
<= 1 && current_fw
== WL12XX_FW_TYPE_MULTI
)
2142 * Enter "forced psm". Make sure the sta is in psm against the ap,
2143 * to make the fw switch a bit more disconnection-persistent.
2145 static void wl12xx_force_active_psm(struct wl1271
*wl
)
2147 struct wl12xx_vif
*wlvif
;
2149 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
2150 wl1271_ps_set_mode(wl
, wlvif
, STATION_POWER_SAVE_MODE
);
2154 static int wl1271_op_add_interface(struct ieee80211_hw
*hw
,
2155 struct ieee80211_vif
*vif
)
2157 struct wl1271
*wl
= hw
->priv
;
2158 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2159 struct vif_counter_data vif_count
;
2162 bool booted
= false;
2164 vif
->driver_flags
|= IEEE80211_VIF_BEACON_FILTER
|
2165 IEEE80211_VIF_SUPPORTS_CQM_RSSI
;
2167 wl1271_debug(DEBUG_MAC80211
, "mac80211 add interface type %d mac %pM",
2168 ieee80211_vif_type_p2p(vif
), vif
->addr
);
2170 wl12xx_get_vif_count(hw
, vif
, &vif_count
);
2172 mutex_lock(&wl
->mutex
);
2173 ret
= wl1271_ps_elp_wakeup(wl
);
2178 * in some very corner case HW recovery scenarios its possible to
2179 * get here before __wl1271_op_remove_interface is complete, so
2180 * opt out if that is the case.
2182 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
) ||
2183 test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)) {
2189 ret
= wl12xx_init_vif_data(wl
, vif
);
2194 role_type
= wl12xx_get_role_type(wl
, wlvif
);
2195 if (role_type
== WL12XX_INVALID_ROLE_TYPE
) {
2200 if (wl12xx_need_fw_change(wl
, vif_count
, true)) {
2201 wl12xx_force_active_psm(wl
);
2202 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
);
2203 mutex_unlock(&wl
->mutex
);
2204 wl1271_recovery_work(&wl
->recovery_work
);
2209 * TODO: after the nvs issue will be solved, move this block
2210 * to start(), and make sure here the driver is ON.
2212 if (wl
->state
== WL1271_STATE_OFF
) {
2214 * we still need this in order to configure the fw
2215 * while uploading the nvs
2217 memcpy(wl
->addresses
[0].addr
, vif
->addr
, ETH_ALEN
);
2219 booted
= wl12xx_init_fw(wl
);
2226 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2227 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2229 * The device role is a special role used for
2230 * rx and tx frames prior to association (as
2231 * the STA role can get packets only from
2232 * its associated bssid)
2234 ret
= wl12xx_cmd_role_enable(wl
, vif
->addr
,
2236 &wlvif
->dev_role_id
);
2241 ret
= wl12xx_cmd_role_enable(wl
, vif
->addr
,
2242 role_type
, &wlvif
->role_id
);
2246 ret
= wl1271_init_vif_specific(wl
, vif
);
2250 list_add(&wlvif
->list
, &wl
->wlvif_list
);
2251 set_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
);
2253 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
2258 wl1271_ps_elp_sleep(wl
);
2260 mutex_unlock(&wl
->mutex
);
2265 static void __wl1271_op_remove_interface(struct wl1271
*wl
,
2266 struct ieee80211_vif
*vif
,
2267 bool reset_tx_queues
)
2269 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2271 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
2273 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove interface");
2275 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
2278 /* because of hardware recovery, we may get here twice */
2279 if (wl
->state
!= WL1271_STATE_ON
)
2282 wl1271_info("down");
2284 if (wl
->scan
.state
!= WL1271_SCAN_STATE_IDLE
&&
2285 wl
->scan_vif
== vif
) {
2287 * Rearm the tx watchdog just before idling scan. This
2288 * prevents just-finished scans from triggering the watchdog
2290 wl12xx_rearm_tx_watchdog_locked(wl
);
2292 wl
->scan
.state
= WL1271_SCAN_STATE_IDLE
;
2293 memset(wl
->scan
.scanned_ch
, 0, sizeof(wl
->scan
.scanned_ch
));
2294 wl
->scan_vif
= NULL
;
2295 wl
->scan
.req
= NULL
;
2296 ieee80211_scan_completed(wl
->hw
, true);
2299 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
)) {
2300 /* disable active roles */
2301 ret
= wl1271_ps_elp_wakeup(wl
);
2305 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2306 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2307 if (wl12xx_dev_role_started(wlvif
))
2308 wl12xx_stop_dev(wl
, wlvif
);
2310 ret
= wl12xx_cmd_role_disable(wl
, &wlvif
->dev_role_id
);
2315 ret
= wl12xx_cmd_role_disable(wl
, &wlvif
->role_id
);
2319 wl1271_ps_elp_sleep(wl
);
2322 /* clear all hlids (except system_hlid) */
2323 wlvif
->dev_hlid
= WL12XX_INVALID_LINK_ID
;
2325 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2326 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2327 wlvif
->sta
.hlid
= WL12XX_INVALID_LINK_ID
;
2328 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.basic_rate_idx
);
2329 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.ap_rate_idx
);
2330 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.p2p_rate_idx
);
2332 wlvif
->ap
.bcast_hlid
= WL12XX_INVALID_LINK_ID
;
2333 wlvif
->ap
.global_hlid
= WL12XX_INVALID_LINK_ID
;
2334 wl12xx_free_rate_policy(wl
, &wlvif
->ap
.mgmt_rate_idx
);
2335 wl12xx_free_rate_policy(wl
, &wlvif
->ap
.bcast_rate_idx
);
2336 for (i
= 0; i
< CONF_TX_MAX_AC_COUNT
; i
++)
2337 wl12xx_free_rate_policy(wl
,
2338 &wlvif
->ap
.ucast_rate_idx
[i
]);
2339 wl1271_free_ap_keys(wl
, wlvif
);
2342 dev_kfree_skb(wlvif
->probereq
);
2343 wlvif
->probereq
= NULL
;
2344 wl12xx_tx_reset_wlvif(wl
, wlvif
);
2345 if (wl
->last_wlvif
== wlvif
)
2346 wl
->last_wlvif
= NULL
;
2347 list_del(&wlvif
->list
);
2348 memset(wlvif
->ap
.sta_hlid_map
, 0, sizeof(wlvif
->ap
.sta_hlid_map
));
2349 wlvif
->role_id
= WL12XX_INVALID_ROLE_ID
;
2350 wlvif
->dev_role_id
= WL12XX_INVALID_ROLE_ID
;
2357 /* Last AP, have more stations. Configure according to STA. */
2358 if (wl
->ap_count
== 0 && is_ap
&& wl
->sta_count
) {
2359 u8 sta_auth
= wl
->conf
.conn
.sta_sleep_auth
;
2360 /* Configure for power according to debugfs */
2361 if (sta_auth
!= WL1271_PSM_ILLEGAL
)
2362 wl1271_acx_sleep_auth(wl
, sta_auth
);
2363 /* Configure for power always on */
2364 else if (wl
->quirks
& WLCORE_QUIRK_NO_ELP
)
2365 wl1271_acx_sleep_auth(wl
, WL1271_PSM_CAM
);
2366 /* Configure for ELP power saving */
2368 wl1271_acx_sleep_auth(wl
, WL1271_PSM_ELP
);
2371 mutex_unlock(&wl
->mutex
);
2373 del_timer_sync(&wlvif
->rx_streaming_timer
);
2374 cancel_work_sync(&wlvif
->rx_streaming_enable_work
);
2375 cancel_work_sync(&wlvif
->rx_streaming_disable_work
);
2377 mutex_lock(&wl
->mutex
);
2380 static void wl1271_op_remove_interface(struct ieee80211_hw
*hw
,
2381 struct ieee80211_vif
*vif
)
2383 struct wl1271
*wl
= hw
->priv
;
2384 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2385 struct wl12xx_vif
*iter
;
2386 struct vif_counter_data vif_count
;
2387 bool cancel_recovery
= true;
2389 wl12xx_get_vif_count(hw
, vif
, &vif_count
);
2390 mutex_lock(&wl
->mutex
);
2392 if (wl
->state
== WL1271_STATE_OFF
||
2393 !test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
2397 * wl->vif can be null here if someone shuts down the interface
2398 * just when hardware recovery has been started.
2400 wl12xx_for_each_wlvif(wl
, iter
) {
2404 __wl1271_op_remove_interface(wl
, vif
, true);
2407 WARN_ON(iter
!= wlvif
);
2408 if (wl12xx_need_fw_change(wl
, vif_count
, false)) {
2409 wl12xx_force_active_psm(wl
);
2410 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
);
2411 wl12xx_queue_recovery_work(wl
);
2412 cancel_recovery
= false;
2415 mutex_unlock(&wl
->mutex
);
2416 if (cancel_recovery
)
2417 cancel_work_sync(&wl
->recovery_work
);
2420 static int wl12xx_op_change_interface(struct ieee80211_hw
*hw
,
2421 struct ieee80211_vif
*vif
,
2422 enum nl80211_iftype new_type
, bool p2p
)
2424 struct wl1271
*wl
= hw
->priv
;
2427 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
);
2428 wl1271_op_remove_interface(hw
, vif
);
2430 vif
->type
= new_type
;
2432 ret
= wl1271_op_add_interface(hw
, vif
);
2434 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
);
2438 static int wl1271_join(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2442 bool is_ibss
= (wlvif
->bss_type
== BSS_TYPE_IBSS
);
2445 * One of the side effects of the JOIN command is that is clears
2446 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2447 * to a WPA/WPA2 access point will therefore kill the data-path.
2448 * Currently the only valid scenario for JOIN during association
2449 * is on roaming, in which case we will also be given new keys.
2450 * Keep the below message for now, unless it starts bothering
2451 * users who really like to roam a lot :)
2453 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2454 wl1271_info("JOIN while associated.");
2456 /* clear encryption type */
2457 wlvif
->encryption_type
= KEY_NONE
;
2460 set_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
);
2463 ret
= wl12xx_cmd_role_start_ibss(wl
, wlvif
);
2465 ret
= wl12xx_cmd_role_start_sta(wl
, wlvif
);
2469 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2473 * The join command disable the keep-alive mode, shut down its process,
2474 * and also clear the template config, so we need to reset it all after
2475 * the join. The acx_aid starts the keep-alive process, and the order
2476 * of the commands below is relevant.
2478 ret
= wl1271_acx_keep_alive_mode(wl
, wlvif
, true);
2482 ret
= wl1271_acx_aid(wl
, wlvif
, wlvif
->aid
);
2486 ret
= wl12xx_cmd_build_klv_null_data(wl
, wlvif
);
2490 ret
= wl1271_acx_keep_alive_config(wl
, wlvif
,
2491 CMD_TEMPL_KLV_IDX_NULL_DATA
,
2492 ACX_KEEP_ALIVE_TPL_VALID
);
2500 static int wl1271_unjoin(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2504 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
)) {
2505 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
2507 wl12xx_cmd_stop_channel_switch(wl
);
2508 ieee80211_chswitch_done(vif
, false);
2511 /* to stop listening to a channel, we disconnect */
2512 ret
= wl12xx_cmd_role_stop_sta(wl
, wlvif
);
2516 /* reset TX security counters on a clean disconnect */
2517 wlvif
->tx_security_last_seq_lsb
= 0;
2518 wlvif
->tx_security_seq
= 0;
2524 static void wl1271_set_band_rate(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2526 wlvif
->basic_rate_set
= wlvif
->bitrate_masks
[wlvif
->band
];
2527 wlvif
->rate_set
= wlvif
->basic_rate_set
;
2530 static int wl1271_sta_handle_idle(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2534 bool cur_idle
= !test_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
2536 if (idle
== cur_idle
)
2540 /* no need to croc if we weren't busy (e.g. during boot) */
2541 if (wl12xx_dev_role_started(wlvif
)) {
2542 ret
= wl12xx_stop_dev(wl
, wlvif
);
2547 wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
2548 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
2551 ret
= wl1271_acx_keep_alive_config(
2552 wl
, wlvif
, CMD_TEMPL_KLV_IDX_NULL_DATA
,
2553 ACX_KEEP_ALIVE_TPL_INVALID
);
2556 clear_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
2558 /* The current firmware only supports sched_scan in idle */
2559 if (wl
->sched_scanning
) {
2560 wl1271_scan_sched_scan_stop(wl
, wlvif
);
2561 ieee80211_sched_scan_stopped(wl
->hw
);
2564 ret
= wl12xx_start_dev(wl
, wlvif
);
2567 set_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
2574 static int wl12xx_config_vif(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2575 struct ieee80211_conf
*conf
, u32 changed
)
2577 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
2580 channel
= ieee80211_frequency_to_channel(conf
->channel
->center_freq
);
2582 /* if the channel changes while joined, join again */
2583 if (changed
& IEEE80211_CONF_CHANGE_CHANNEL
&&
2584 ((wlvif
->band
!= conf
->channel
->band
) ||
2585 (wlvif
->channel
!= channel
) ||
2586 (wlvif
->channel_type
!= conf
->channel_type
))) {
2587 /* send all pending packets */
2588 ret
= wlcore_tx_work_locked(wl
);
2592 wlvif
->band
= conf
->channel
->band
;
2593 wlvif
->channel
= channel
;
2594 wlvif
->channel_type
= conf
->channel_type
;
2597 wl1271_set_band_rate(wl
, wlvif
);
2598 ret
= wl1271_init_ap_rates(wl
, wlvif
);
2600 wl1271_error("AP rate policy change failed %d",
2604 * FIXME: the mac80211 should really provide a fixed
2605 * rate to use here. for now, just use the smallest
2606 * possible rate for the band as a fixed rate for
2607 * association frames and other control messages.
2609 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2610 wl1271_set_band_rate(wl
, wlvif
);
2613 wl1271_tx_min_rate_get(wl
,
2614 wlvif
->basic_rate_set
);
2615 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
2617 wl1271_warning("rate policy for channel "
2621 * change the ROC channel. do it only if we are
2622 * not idle. otherwise, CROC will be called
2625 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
,
2627 wl12xx_dev_role_started(wlvif
) &&
2628 !(conf
->flags
& IEEE80211_CONF_IDLE
)) {
2629 ret
= wl12xx_stop_dev(wl
, wlvif
);
2633 ret
= wl12xx_start_dev(wl
, wlvif
);
2640 if ((changed
& IEEE80211_CONF_CHANGE_PS
) && !is_ap
) {
2642 if ((conf
->flags
& IEEE80211_CONF_PS
) &&
2643 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) &&
2644 !test_bit(WLVIF_FLAG_IN_PS
, &wlvif
->flags
)) {
2649 if (wl
->conf
.conn
.forced_ps
) {
2650 ps_mode
= STATION_POWER_SAVE_MODE
;
2651 ps_mode_str
= "forced";
2653 ps_mode
= STATION_AUTO_PS_MODE
;
2654 ps_mode_str
= "auto";
2657 wl1271_debug(DEBUG_PSM
, "%s ps enabled", ps_mode_str
);
2659 ret
= wl1271_ps_set_mode(wl
, wlvif
, ps_mode
);
2662 wl1271_warning("enter %s ps failed %d",
2665 } else if (!(conf
->flags
& IEEE80211_CONF_PS
) &&
2666 test_bit(WLVIF_FLAG_IN_PS
, &wlvif
->flags
)) {
2668 wl1271_debug(DEBUG_PSM
, "auto ps disabled");
2670 ret
= wl1271_ps_set_mode(wl
, wlvif
,
2671 STATION_ACTIVE_MODE
);
2673 wl1271_warning("exit auto ps failed %d", ret
);
2677 if (conf
->power_level
!= wlvif
->power_level
) {
2678 ret
= wl1271_acx_tx_power(wl
, wlvif
, conf
->power_level
);
2682 wlvif
->power_level
= conf
->power_level
;
2688 static int wl1271_op_config(struct ieee80211_hw
*hw
, u32 changed
)
2690 struct wl1271
*wl
= hw
->priv
;
2691 struct wl12xx_vif
*wlvif
;
2692 struct ieee80211_conf
*conf
= &hw
->conf
;
2693 int channel
, ret
= 0;
2695 channel
= ieee80211_frequency_to_channel(conf
->channel
->center_freq
);
2697 wl1271_debug(DEBUG_MAC80211
, "mac80211 config ch %d psm %s power %d %s"
2700 conf
->flags
& IEEE80211_CONF_PS
? "on" : "off",
2702 conf
->flags
& IEEE80211_CONF_IDLE
? "idle" : "in use",
2706 * mac80211 will go to idle nearly immediately after transmitting some
2707 * frames, such as the deauth. To make sure those frames reach the air,
2708 * wait here until the TX queue is fully flushed.
2710 if ((changed
& IEEE80211_CONF_CHANGE_CHANNEL
) ||
2711 ((changed
& IEEE80211_CONF_CHANGE_IDLE
) &&
2712 (conf
->flags
& IEEE80211_CONF_IDLE
)))
2713 wl1271_tx_flush(wl
);
2715 mutex_lock(&wl
->mutex
);
2717 /* we support configuring the channel and band even while off */
2718 if (changed
& IEEE80211_CONF_CHANGE_CHANNEL
) {
2719 wl
->band
= conf
->channel
->band
;
2720 wl
->channel
= channel
;
2721 wl
->channel_type
= conf
->channel_type
;
2724 if (changed
& IEEE80211_CONF_CHANGE_POWER
)
2725 wl
->power_level
= conf
->power_level
;
2727 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
2730 ret
= wl1271_ps_elp_wakeup(wl
);
2734 /* configure each interface */
2735 wl12xx_for_each_wlvif(wl
, wlvif
) {
2736 ret
= wl12xx_config_vif(wl
, wlvif
, conf
, changed
);
2742 wl1271_ps_elp_sleep(wl
);
2745 mutex_unlock(&wl
->mutex
);
2750 struct wl1271_filter_params
{
2753 u8 mc_list
[ACX_MC_ADDRESS_GROUP_MAX
][ETH_ALEN
];
2756 static u64
wl1271_op_prepare_multicast(struct ieee80211_hw
*hw
,
2757 struct netdev_hw_addr_list
*mc_list
)
2759 struct wl1271_filter_params
*fp
;
2760 struct netdev_hw_addr
*ha
;
2761 struct wl1271
*wl
= hw
->priv
;
2763 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
2766 fp
= kzalloc(sizeof(*fp
), GFP_ATOMIC
);
2768 wl1271_error("Out of memory setting filters.");
2772 /* update multicast filtering parameters */
2773 fp
->mc_list_length
= 0;
2774 if (netdev_hw_addr_list_count(mc_list
) > ACX_MC_ADDRESS_GROUP_MAX
) {
2775 fp
->enabled
= false;
2778 netdev_hw_addr_list_for_each(ha
, mc_list
) {
2779 memcpy(fp
->mc_list
[fp
->mc_list_length
],
2780 ha
->addr
, ETH_ALEN
);
2781 fp
->mc_list_length
++;
2785 return (u64
)(unsigned long)fp
;
/*
 * Filter flags the driver accepts from mac80211.
 * NOTE(review): continuation lines were dropped by extraction; only
 * FIF_PROMISC_IN_BSS and FIF_BCN_PRBRESP_PROMISC are visible in SOURCE —
 * the remaining flags are reconstructed from upstream; confirm.
 */
#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
				  FIF_ALLMULTI | \
				  FIF_FCSFAIL | \
				  FIF_BCN_PRBRESP_PROMISC | \
				  FIF_CONTROL | \
				  FIF_OTHER_BSS)
2795 static void wl1271_op_configure_filter(struct ieee80211_hw
*hw
,
2796 unsigned int changed
,
2797 unsigned int *total
, u64 multicast
)
2799 struct wl1271_filter_params
*fp
= (void *)(unsigned long)multicast
;
2800 struct wl1271
*wl
= hw
->priv
;
2801 struct wl12xx_vif
*wlvif
;
2805 wl1271_debug(DEBUG_MAC80211
, "mac80211 configure filter changed %x"
2806 " total %x", changed
, *total
);
2808 mutex_lock(&wl
->mutex
);
2810 *total
&= WL1271_SUPPORTED_FILTERS
;
2811 changed
&= WL1271_SUPPORTED_FILTERS
;
2813 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
2816 ret
= wl1271_ps_elp_wakeup(wl
);
2820 wl12xx_for_each_wlvif(wl
, wlvif
) {
2821 if (wlvif
->bss_type
!= BSS_TYPE_AP_BSS
) {
2822 if (*total
& FIF_ALLMULTI
)
2823 ret
= wl1271_acx_group_address_tbl(wl
, wlvif
,
2827 ret
= wl1271_acx_group_address_tbl(wl
, wlvif
,
2830 fp
->mc_list_length
);
2837 * the fw doesn't provide an api to configure the filters. instead,
2838 * the filters configuration is based on the active roles / ROC
2843 wl1271_ps_elp_sleep(wl
);
2846 mutex_unlock(&wl
->mutex
);
2850 static int wl1271_record_ap_key(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2851 u8 id
, u8 key_type
, u8 key_size
,
2852 const u8
*key
, u8 hlid
, u32 tx_seq_32
,
2855 struct wl1271_ap_key
*ap_key
;
2858 wl1271_debug(DEBUG_CRYPT
, "record ap key id %d", (int)id
);
2860 if (key_size
> MAX_KEY_SIZE
)
2864 * Find next free entry in ap_keys. Also check we are not replacing
2867 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
2868 if (wlvif
->ap
.recorded_keys
[i
] == NULL
)
2871 if (wlvif
->ap
.recorded_keys
[i
]->id
== id
) {
2872 wl1271_warning("trying to record key replacement");
2877 if (i
== MAX_NUM_KEYS
)
2880 ap_key
= kzalloc(sizeof(*ap_key
), GFP_KERNEL
);
2885 ap_key
->key_type
= key_type
;
2886 ap_key
->key_size
= key_size
;
2887 memcpy(ap_key
->key
, key
, key_size
);
2888 ap_key
->hlid
= hlid
;
2889 ap_key
->tx_seq_32
= tx_seq_32
;
2890 ap_key
->tx_seq_16
= tx_seq_16
;
2892 wlvif
->ap
.recorded_keys
[i
] = ap_key
;
2896 static void wl1271_free_ap_keys(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2900 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
2901 kfree(wlvif
->ap
.recorded_keys
[i
]);
2902 wlvif
->ap
.recorded_keys
[i
] = NULL
;
2906 static int wl1271_ap_init_hwenc(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2909 struct wl1271_ap_key
*key
;
2910 bool wep_key_added
= false;
2912 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
2914 if (wlvif
->ap
.recorded_keys
[i
] == NULL
)
2917 key
= wlvif
->ap
.recorded_keys
[i
];
2919 if (hlid
== WL12XX_INVALID_LINK_ID
)
2920 hlid
= wlvif
->ap
.bcast_hlid
;
2922 ret
= wl1271_cmd_set_ap_key(wl
, wlvif
, KEY_ADD_OR_REPLACE
,
2923 key
->id
, key
->key_type
,
2924 key
->key_size
, key
->key
,
2925 hlid
, key
->tx_seq_32
,
2930 if (key
->key_type
== KEY_WEP
)
2931 wep_key_added
= true;
2934 if (wep_key_added
) {
2935 ret
= wl12xx_cmd_set_default_wep_key(wl
, wlvif
->default_key
,
2936 wlvif
->ap
.bcast_hlid
);
2942 wl1271_free_ap_keys(wl
, wlvif
);
2946 static int wl1271_set_key(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2947 u16 action
, u8 id
, u8 key_type
,
2948 u8 key_size
, const u8
*key
, u32 tx_seq_32
,
2949 u16 tx_seq_16
, struct ieee80211_sta
*sta
)
2952 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
2955 struct wl1271_station
*wl_sta
;
2959 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
2960 hlid
= wl_sta
->hlid
;
2962 hlid
= wlvif
->ap
.bcast_hlid
;
2965 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
2967 * We do not support removing keys after AP shutdown.
2968 * Pretend we do to make mac80211 happy.
2970 if (action
!= KEY_ADD_OR_REPLACE
)
2973 ret
= wl1271_record_ap_key(wl
, wlvif
, id
,
2975 key
, hlid
, tx_seq_32
,
2978 ret
= wl1271_cmd_set_ap_key(wl
, wlvif
, action
,
2979 id
, key_type
, key_size
,
2980 key
, hlid
, tx_seq_32
,
2988 static const u8 bcast_addr
[ETH_ALEN
] = {
2989 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2992 addr
= sta
? sta
->addr
: bcast_addr
;
2994 if (is_zero_ether_addr(addr
)) {
2995 /* We dont support TX only encryption */
2999 /* The wl1271 does not allow to remove unicast keys - they
3000 will be cleared automatically on next CMD_JOIN. Ignore the
3001 request silently, as we dont want the mac80211 to emit
3002 an error message. */
3003 if (action
== KEY_REMOVE
&& !is_broadcast_ether_addr(addr
))
3006 /* don't remove key if hlid was already deleted */
3007 if (action
== KEY_REMOVE
&&
3008 wlvif
->sta
.hlid
== WL12XX_INVALID_LINK_ID
)
3011 ret
= wl1271_cmd_set_sta_key(wl
, wlvif
, action
,
3012 id
, key_type
, key_size
,
3013 key
, addr
, tx_seq_32
,
3018 /* the default WEP key needs to be configured at least once */
3019 if (key_type
== KEY_WEP
) {
3020 ret
= wl12xx_cmd_set_default_wep_key(wl
,
3031 static int wlcore_op_set_key(struct ieee80211_hw
*hw
, enum set_key_cmd cmd
,
3032 struct ieee80211_vif
*vif
,
3033 struct ieee80211_sta
*sta
,
3034 struct ieee80211_key_conf
*key_conf
)
3036 struct wl1271
*wl
= hw
->priv
;
3038 return wlcore_hw_set_key(wl
, cmd
, vif
, sta
, key_conf
);
3041 int wlcore_set_key(struct wl1271
*wl
, enum set_key_cmd cmd
,
3042 struct ieee80211_vif
*vif
,
3043 struct ieee80211_sta
*sta
,
3044 struct ieee80211_key_conf
*key_conf
)
3046 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3052 wl1271_debug(DEBUG_MAC80211
, "mac80211 set key");
3054 wl1271_debug(DEBUG_CRYPT
, "CMD: 0x%x sta: %p", cmd
, sta
);
3055 wl1271_debug(DEBUG_CRYPT
, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3056 key_conf
->cipher
, key_conf
->keyidx
,
3057 key_conf
->keylen
, key_conf
->flags
);
3058 wl1271_dump(DEBUG_CRYPT
, "KEY: ", key_conf
->key
, key_conf
->keylen
);
3060 mutex_lock(&wl
->mutex
);
3062 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
3067 ret
= wl1271_ps_elp_wakeup(wl
);
3071 switch (key_conf
->cipher
) {
3072 case WLAN_CIPHER_SUITE_WEP40
:
3073 case WLAN_CIPHER_SUITE_WEP104
:
3076 key_conf
->hw_key_idx
= key_conf
->keyidx
;
3078 case WLAN_CIPHER_SUITE_TKIP
:
3079 key_type
= KEY_TKIP
;
3081 key_conf
->hw_key_idx
= key_conf
->keyidx
;
3082 tx_seq_32
= WL1271_TX_SECURITY_HI32(wlvif
->tx_security_seq
);
3083 tx_seq_16
= WL1271_TX_SECURITY_LO16(wlvif
->tx_security_seq
);
3085 case WLAN_CIPHER_SUITE_CCMP
:
3088 key_conf
->flags
|= IEEE80211_KEY_FLAG_PUT_IV_SPACE
;
3089 tx_seq_32
= WL1271_TX_SECURITY_HI32(wlvif
->tx_security_seq
);
3090 tx_seq_16
= WL1271_TX_SECURITY_LO16(wlvif
->tx_security_seq
);
3092 case WL1271_CIPHER_SUITE_GEM
:
3094 tx_seq_32
= WL1271_TX_SECURITY_HI32(wlvif
->tx_security_seq
);
3095 tx_seq_16
= WL1271_TX_SECURITY_LO16(wlvif
->tx_security_seq
);
3098 wl1271_error("Unknown key algo 0x%x", key_conf
->cipher
);
3106 ret
= wl1271_set_key(wl
, wlvif
, KEY_ADD_OR_REPLACE
,
3107 key_conf
->keyidx
, key_type
,
3108 key_conf
->keylen
, key_conf
->key
,
3109 tx_seq_32
, tx_seq_16
, sta
);
3111 wl1271_error("Could not add or replace key");
3116 * reconfiguring arp response if the unicast (or common)
3117 * encryption key type was changed
3119 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
3120 (sta
|| key_type
== KEY_WEP
) &&
3121 wlvif
->encryption_type
!= key_type
) {
3122 wlvif
->encryption_type
= key_type
;
3123 ret
= wl1271_cmd_build_arp_rsp(wl
, wlvif
);
3125 wl1271_warning("build arp rsp failed: %d", ret
);
3132 ret
= wl1271_set_key(wl
, wlvif
, KEY_REMOVE
,
3133 key_conf
->keyidx
, key_type
,
3134 key_conf
->keylen
, key_conf
->key
,
3137 wl1271_error("Could not remove key");
3143 wl1271_error("Unsupported key cmd 0x%x", cmd
);
3149 wl1271_ps_elp_sleep(wl
);
3152 mutex_unlock(&wl
->mutex
);
3156 EXPORT_SYMBOL_GPL(wlcore_set_key
);
3158 static int wl1271_op_hw_scan(struct ieee80211_hw
*hw
,
3159 struct ieee80211_vif
*vif
,
3160 struct cfg80211_scan_request
*req
)
3162 struct wl1271
*wl
= hw
->priv
;
3167 wl1271_debug(DEBUG_MAC80211
, "mac80211 hw scan");
3170 ssid
= req
->ssids
[0].ssid
;
3171 len
= req
->ssids
[0].ssid_len
;
3174 mutex_lock(&wl
->mutex
);
3176 if (wl
->state
== WL1271_STATE_OFF
) {
3178 * We cannot return -EBUSY here because cfg80211 will expect
3179 * a call to ieee80211_scan_completed if we do - in this case
3180 * there won't be any call.
3186 ret
= wl1271_ps_elp_wakeup(wl
);
3190 /* fail if there is any role in ROC */
3191 if (find_first_bit(wl
->roc_map
, WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
) {
3192 /* don't allow scanning right now */
3197 ret
= wl1271_scan(hw
->priv
, vif
, ssid
, len
, req
);
3199 wl1271_ps_elp_sleep(wl
);
3201 mutex_unlock(&wl
->mutex
);
3206 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw
*hw
,
3207 struct ieee80211_vif
*vif
)
3209 struct wl1271
*wl
= hw
->priv
;
3212 wl1271_debug(DEBUG_MAC80211
, "mac80211 cancel hw scan");
3214 mutex_lock(&wl
->mutex
);
3216 if (wl
->state
== WL1271_STATE_OFF
)
3219 if (wl
->scan
.state
== WL1271_SCAN_STATE_IDLE
)
3222 ret
= wl1271_ps_elp_wakeup(wl
);
3226 if (wl
->scan
.state
!= WL1271_SCAN_STATE_DONE
) {
3227 ret
= wl1271_scan_stop(wl
);
3233 * Rearm the tx watchdog just before idling scan. This
3234 * prevents just-finished scans from triggering the watchdog
3236 wl12xx_rearm_tx_watchdog_locked(wl
);
3238 wl
->scan
.state
= WL1271_SCAN_STATE_IDLE
;
3239 memset(wl
->scan
.scanned_ch
, 0, sizeof(wl
->scan
.scanned_ch
));
3240 wl
->scan_vif
= NULL
;
3241 wl
->scan
.req
= NULL
;
3242 ieee80211_scan_completed(wl
->hw
, true);
3245 wl1271_ps_elp_sleep(wl
);
3247 mutex_unlock(&wl
->mutex
);
3249 cancel_delayed_work_sync(&wl
->scan_complete_work
);
3252 static int wl1271_op_sched_scan_start(struct ieee80211_hw
*hw
,
3253 struct ieee80211_vif
*vif
,
3254 struct cfg80211_sched_scan_request
*req
,
3255 struct ieee80211_sched_scan_ies
*ies
)
3257 struct wl1271
*wl
= hw
->priv
;
3258 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3261 wl1271_debug(DEBUG_MAC80211
, "wl1271_op_sched_scan_start");
3263 mutex_lock(&wl
->mutex
);
3265 if (wl
->state
== WL1271_STATE_OFF
) {
3270 ret
= wl1271_ps_elp_wakeup(wl
);
3274 ret
= wl1271_scan_sched_scan_config(wl
, wlvif
, req
, ies
);
3278 ret
= wl1271_scan_sched_scan_start(wl
, wlvif
);
3282 wl
->sched_scanning
= true;
3285 wl1271_ps_elp_sleep(wl
);
3287 mutex_unlock(&wl
->mutex
);
3291 static void wl1271_op_sched_scan_stop(struct ieee80211_hw
*hw
,
3292 struct ieee80211_vif
*vif
)
3294 struct wl1271
*wl
= hw
->priv
;
3295 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3298 wl1271_debug(DEBUG_MAC80211
, "wl1271_op_sched_scan_stop");
3300 mutex_lock(&wl
->mutex
);
3302 if (wl
->state
== WL1271_STATE_OFF
)
3305 ret
= wl1271_ps_elp_wakeup(wl
);
3309 wl1271_scan_sched_scan_stop(wl
, wlvif
);
3311 wl1271_ps_elp_sleep(wl
);
3313 mutex_unlock(&wl
->mutex
);
3316 static int wl1271_op_set_frag_threshold(struct ieee80211_hw
*hw
, u32 value
)
3318 struct wl1271
*wl
= hw
->priv
;
3321 mutex_lock(&wl
->mutex
);
3323 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
3328 ret
= wl1271_ps_elp_wakeup(wl
);
3332 ret
= wl1271_acx_frag_threshold(wl
, value
);
3334 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret
);
3336 wl1271_ps_elp_sleep(wl
);
3339 mutex_unlock(&wl
->mutex
);
3344 static int wl1271_op_set_rts_threshold(struct ieee80211_hw
*hw
, u32 value
)
3346 struct wl1271
*wl
= hw
->priv
;
3347 struct wl12xx_vif
*wlvif
;
3350 mutex_lock(&wl
->mutex
);
3352 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
3357 ret
= wl1271_ps_elp_wakeup(wl
);
3361 wl12xx_for_each_wlvif(wl
, wlvif
) {
3362 ret
= wl1271_acx_rts_threshold(wl
, wlvif
, value
);
3364 wl1271_warning("set rts threshold failed: %d", ret
);
3366 wl1271_ps_elp_sleep(wl
);
3369 mutex_unlock(&wl
->mutex
);
3374 static int wl1271_ssid_set(struct ieee80211_vif
*vif
, struct sk_buff
*skb
,
3377 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3379 const u8
*ptr
= cfg80211_find_ie(WLAN_EID_SSID
, skb
->data
+ offset
,
3383 wl1271_error("No SSID in IEs!");
3388 if (ssid_len
> IEEE80211_MAX_SSID_LEN
) {
3389 wl1271_error("SSID is too long!");
3393 wlvif
->ssid_len
= ssid_len
;
3394 memcpy(wlvif
->ssid
, ptr
+2, ssid_len
);
3398 static void wl12xx_remove_ie(struct sk_buff
*skb
, u8 eid
, int ieoffset
)
3401 const u8
*next
, *end
= skb
->data
+ skb
->len
;
3402 u8
*ie
= (u8
*)cfg80211_find_ie(eid
, skb
->data
+ ieoffset
,
3403 skb
->len
- ieoffset
);
3408 memmove(ie
, next
, end
- next
);
3409 skb_trim(skb
, skb
->len
- len
);
3412 static void wl12xx_remove_vendor_ie(struct sk_buff
*skb
,
3413 unsigned int oui
, u8 oui_type
,
3417 const u8
*next
, *end
= skb
->data
+ skb
->len
;
3418 u8
*ie
= (u8
*)cfg80211_find_vendor_ie(oui
, oui_type
,
3419 skb
->data
+ ieoffset
,
3420 skb
->len
- ieoffset
);
3425 memmove(ie
, next
, end
- next
);
3426 skb_trim(skb
, skb
->len
- len
);
3429 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271
*wl
, u32 rates
,
3430 struct ieee80211_vif
*vif
)
3432 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3433 struct sk_buff
*skb
;
3436 skb
= ieee80211_proberesp_get(wl
->hw
, vif
);
3440 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3441 CMD_TEMPL_AP_PROBE_RESPONSE
,
3450 wl1271_debug(DEBUG_AP
, "probe response updated");
3451 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
, &wlvif
->flags
);
3457 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271
*wl
,
3458 struct ieee80211_vif
*vif
,
3460 size_t probe_rsp_len
,
3463 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3464 struct ieee80211_bss_conf
*bss_conf
= &vif
->bss_conf
;
3465 u8 probe_rsp_templ
[WL1271_CMD_TEMPL_MAX_SIZE
];
3466 int ssid_ie_offset
, ie_offset
, templ_len
;
3469 /* no need to change probe response if the SSID is set correctly */
3470 if (wlvif
->ssid_len
> 0)
3471 return wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3472 CMD_TEMPL_AP_PROBE_RESPONSE
,
3477 if (probe_rsp_len
+ bss_conf
->ssid_len
> WL1271_CMD_TEMPL_MAX_SIZE
) {
3478 wl1271_error("probe_rsp template too big");
3482 /* start searching from IE offset */
3483 ie_offset
= offsetof(struct ieee80211_mgmt
, u
.probe_resp
.variable
);
3485 ptr
= cfg80211_find_ie(WLAN_EID_SSID
, probe_rsp_data
+ ie_offset
,
3486 probe_rsp_len
- ie_offset
);
3488 wl1271_error("No SSID in beacon!");
3492 ssid_ie_offset
= ptr
- probe_rsp_data
;
3493 ptr
+= (ptr
[1] + 2);
3495 memcpy(probe_rsp_templ
, probe_rsp_data
, ssid_ie_offset
);
3497 /* insert SSID from bss_conf */
3498 probe_rsp_templ
[ssid_ie_offset
] = WLAN_EID_SSID
;
3499 probe_rsp_templ
[ssid_ie_offset
+ 1] = bss_conf
->ssid_len
;
3500 memcpy(probe_rsp_templ
+ ssid_ie_offset
+ 2,
3501 bss_conf
->ssid
, bss_conf
->ssid_len
);
3502 templ_len
= ssid_ie_offset
+ 2 + bss_conf
->ssid_len
;
3504 memcpy(probe_rsp_templ
+ ssid_ie_offset
+ 2 + bss_conf
->ssid_len
,
3505 ptr
, probe_rsp_len
- (ptr
- probe_rsp_data
));
3506 templ_len
+= probe_rsp_len
- (ptr
- probe_rsp_data
);
3508 return wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3509 CMD_TEMPL_AP_PROBE_RESPONSE
,
3515 static int wl1271_bss_erp_info_changed(struct wl1271
*wl
,
3516 struct ieee80211_vif
*vif
,
3517 struct ieee80211_bss_conf
*bss_conf
,
3520 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3523 if (changed
& BSS_CHANGED_ERP_SLOT
) {
3524 if (bss_conf
->use_short_slot
)
3525 ret
= wl1271_acx_slot(wl
, wlvif
, SLOT_TIME_SHORT
);
3527 ret
= wl1271_acx_slot(wl
, wlvif
, SLOT_TIME_LONG
);
3529 wl1271_warning("Set slot time failed %d", ret
);
3534 if (changed
& BSS_CHANGED_ERP_PREAMBLE
) {
3535 if (bss_conf
->use_short_preamble
)
3536 wl1271_acx_set_preamble(wl
, wlvif
, ACX_PREAMBLE_SHORT
);
3538 wl1271_acx_set_preamble(wl
, wlvif
, ACX_PREAMBLE_LONG
);
3541 if (changed
& BSS_CHANGED_ERP_CTS_PROT
) {
3542 if (bss_conf
->use_cts_prot
)
3543 ret
= wl1271_acx_cts_protect(wl
, wlvif
,
3546 ret
= wl1271_acx_cts_protect(wl
, wlvif
,
3547 CTSPROTECT_DISABLE
);
3549 wl1271_warning("Set ctsprotect failed %d", ret
);
3558 static int wlcore_set_beacon_template(struct wl1271
*wl
,
3559 struct ieee80211_vif
*vif
,
3562 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3563 struct ieee80211_hdr
*hdr
;
3566 int ieoffset
= offsetof(struct ieee80211_mgmt
,
3568 struct sk_buff
*beacon
= ieee80211_beacon_get(wl
->hw
, vif
);
3576 wl1271_debug(DEBUG_MASTER
, "beacon updated");
3578 ret
= wl1271_ssid_set(vif
, beacon
, ieoffset
);
3580 dev_kfree_skb(beacon
);
3583 min_rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
3584 tmpl_id
= is_ap
? CMD_TEMPL_AP_BEACON
:
3586 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
, tmpl_id
,
3591 dev_kfree_skb(beacon
);
3596 * In case we already have a probe-resp beacon set explicitly
3597 * by usermode, don't use the beacon data.
3599 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
, &wlvif
->flags
))
3602 /* remove TIM ie from probe response */
3603 wl12xx_remove_ie(beacon
, WLAN_EID_TIM
, ieoffset
);
3606 * remove p2p ie from probe response.
3607 * the fw reponds to probe requests that don't include
3608 * the p2p ie. probe requests with p2p ie will be passed,
3609 * and will be responded by the supplicant (the spec
3610 * forbids including the p2p ie when responding to probe
3611 * requests that didn't include it).
3613 wl12xx_remove_vendor_ie(beacon
, WLAN_OUI_WFA
,
3614 WLAN_OUI_TYPE_WFA_P2P
, ieoffset
);
3616 hdr
= (struct ieee80211_hdr
*) beacon
->data
;
3617 hdr
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_MGMT
|
3618 IEEE80211_STYPE_PROBE_RESP
);
3620 ret
= wl1271_ap_set_probe_resp_tmpl_legacy(wl
, vif
,
3625 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3626 CMD_TEMPL_PROBE_RESPONSE
,
3631 dev_kfree_skb(beacon
);
3639 static int wl1271_bss_beacon_info_changed(struct wl1271
*wl
,
3640 struct ieee80211_vif
*vif
,
3641 struct ieee80211_bss_conf
*bss_conf
,
3644 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3645 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
3648 if ((changed
& BSS_CHANGED_BEACON_INT
)) {
3649 wl1271_debug(DEBUG_MASTER
, "beacon interval updated: %d",
3650 bss_conf
->beacon_int
);
3652 wlvif
->beacon_int
= bss_conf
->beacon_int
;
3655 if ((changed
& BSS_CHANGED_AP_PROBE_RESP
) && is_ap
) {
3656 u32 rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
3658 wl1271_ap_set_probe_resp_tmpl(wl
, rate
, vif
);
3661 if ((changed
& BSS_CHANGED_BEACON
)) {
3662 ret
= wlcore_set_beacon_template(wl
, vif
, is_ap
);
3669 wl1271_error("beacon info change failed: %d", ret
);
3673 /* AP mode changes */
3674 static void wl1271_bss_info_changed_ap(struct wl1271
*wl
,
3675 struct ieee80211_vif
*vif
,
3676 struct ieee80211_bss_conf
*bss_conf
,
3679 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3682 if ((changed
& BSS_CHANGED_BASIC_RATES
)) {
3683 u32 rates
= bss_conf
->basic_rates
;
3685 wlvif
->basic_rate_set
= wl1271_tx_enabled_rates_get(wl
, rates
,
3687 wlvif
->basic_rate
= wl1271_tx_min_rate_get(wl
,
3688 wlvif
->basic_rate_set
);
3690 ret
= wl1271_init_ap_rates(wl
, wlvif
);
3692 wl1271_error("AP rate policy change failed %d", ret
);
3696 ret
= wl1271_ap_init_templates(wl
, vif
);
3700 ret
= wl1271_ap_set_probe_resp_tmpl(wl
, wlvif
->basic_rate
, vif
);
3704 ret
= wlcore_set_beacon_template(wl
, vif
, true);
3709 ret
= wl1271_bss_beacon_info_changed(wl
, vif
, bss_conf
, changed
);
3713 if ((changed
& BSS_CHANGED_BEACON_ENABLED
)) {
3714 if (bss_conf
->enable_beacon
) {
3715 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
3716 ret
= wl12xx_cmd_role_start_ap(wl
, wlvif
);
3720 ret
= wl1271_ap_init_hwenc(wl
, wlvif
);
3724 set_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
);
3725 wl1271_debug(DEBUG_AP
, "started AP");
3728 if (test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
3729 ret
= wl12xx_cmd_role_stop_ap(wl
, wlvif
);
3733 clear_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
);
3734 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
,
3736 wl1271_debug(DEBUG_AP
, "stopped AP");
3741 ret
= wl1271_bss_erp_info_changed(wl
, vif
, bss_conf
, changed
);
3745 /* Handle HT information change */
3746 if ((changed
& BSS_CHANGED_HT
) &&
3747 (bss_conf
->channel_type
!= NL80211_CHAN_NO_HT
)) {
3748 ret
= wl1271_acx_set_ht_information(wl
, wlvif
,
3749 bss_conf
->ht_operation_mode
);
3751 wl1271_warning("Set ht information failed %d", ret
);
3760 /* STA/IBSS mode changes */
3761 static void wl1271_bss_info_changed_sta(struct wl1271
*wl
,
3762 struct ieee80211_vif
*vif
,
3763 struct ieee80211_bss_conf
*bss_conf
,
3766 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3767 bool do_join
= false, set_assoc
= false;
3768 bool is_ibss
= (wlvif
->bss_type
== BSS_TYPE_IBSS
);
3769 bool ibss_joined
= false;
3770 u32 sta_rate_set
= 0;
3772 struct ieee80211_sta
*sta
;
3773 bool sta_exists
= false;
3774 struct ieee80211_sta_ht_cap sta_ht_cap
;
3777 ret
= wl1271_bss_beacon_info_changed(wl
, vif
, bss_conf
,
3783 if (changed
& BSS_CHANGED_IBSS
) {
3784 if (bss_conf
->ibss_joined
) {
3785 set_bit(WLVIF_FLAG_IBSS_JOINED
, &wlvif
->flags
);
3788 if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED
,
3790 wl1271_unjoin(wl
, wlvif
);
3794 if ((changed
& BSS_CHANGED_BEACON_INT
) && ibss_joined
)
3797 /* Need to update the SSID (for filtering etc) */
3798 if ((changed
& BSS_CHANGED_BEACON
) && ibss_joined
)
3801 if ((changed
& BSS_CHANGED_BEACON_ENABLED
) && ibss_joined
) {
3802 wl1271_debug(DEBUG_ADHOC
, "ad-hoc beaconing: %s",
3803 bss_conf
->enable_beacon
? "enabled" : "disabled");
3808 if (changed
& BSS_CHANGED_IDLE
&& !is_ibss
) {
3809 ret
= wl1271_sta_handle_idle(wl
, wlvif
, bss_conf
->idle
);
3811 wl1271_warning("idle mode change failed %d", ret
);
3814 if ((changed
& BSS_CHANGED_CQM
)) {
3815 bool enable
= false;
3816 if (bss_conf
->cqm_rssi_thold
)
3818 ret
= wl1271_acx_rssi_snr_trigger(wl
, wlvif
, enable
,
3819 bss_conf
->cqm_rssi_thold
,
3820 bss_conf
->cqm_rssi_hyst
);
3823 wlvif
->rssi_thold
= bss_conf
->cqm_rssi_thold
;
3826 if (changed
& BSS_CHANGED_BSSID
)
3827 if (!is_zero_ether_addr(bss_conf
->bssid
)) {
3828 ret
= wl12xx_cmd_build_null_data(wl
, wlvif
);
3832 ret
= wl1271_build_qos_null_data(wl
, vif
);
3837 if (changed
& (BSS_CHANGED_ASSOC
| BSS_CHANGED_HT
)) {
3839 sta
= ieee80211_find_sta(vif
, bss_conf
->bssid
);
3843 /* save the supp_rates of the ap */
3844 sta_rate_set
= sta
->supp_rates
[wl
->hw
->conf
.channel
->band
];
3845 if (sta
->ht_cap
.ht_supported
)
3847 (sta
->ht_cap
.mcs
.rx_mask
[0] << HW_HT_RATES_OFFSET
) |
3848 (sta
->ht_cap
.mcs
.rx_mask
[1] << HW_MIMO_RATES_OFFSET
);
3849 sta_ht_cap
= sta
->ht_cap
;
3856 if ((changed
& BSS_CHANGED_ASSOC
)) {
3857 if (bss_conf
->assoc
) {
3860 wlvif
->aid
= bss_conf
->aid
;
3861 wlvif
->channel_type
= bss_conf
->channel_type
;
3862 wlvif
->beacon_int
= bss_conf
->beacon_int
;
3867 * use basic rates from AP, and determine lowest rate
3868 * to use with control frames.
3870 rates
= bss_conf
->basic_rates
;
3871 wlvif
->basic_rate_set
=
3872 wl1271_tx_enabled_rates_get(wl
, rates
,
3875 wl1271_tx_min_rate_get(wl
,
3876 wlvif
->basic_rate_set
);
3879 wl1271_tx_enabled_rates_get(wl
,
3882 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
3887 * with wl1271, we don't need to update the
3888 * beacon_int and dtim_period, because the firmware
3889 * updates it by itself when the first beacon is
3890 * received after a join.
3892 ret
= wl1271_cmd_build_ps_poll(wl
, wlvif
, wlvif
->aid
);
3897 * Get a template for hardware connection maintenance
3899 dev_kfree_skb(wlvif
->probereq
);
3900 wlvif
->probereq
= wl1271_cmd_build_ap_probe_req(wl
,
3903 ieoffset
= offsetof(struct ieee80211_mgmt
,
3904 u
.probe_req
.variable
);
3905 wl1271_ssid_set(vif
, wlvif
->probereq
, ieoffset
);
3907 /* enable the connection monitoring feature */
3908 ret
= wl1271_acx_conn_monit_params(wl
, wlvif
, true);
3912 /* use defaults when not associated */
3914 !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED
,
3917 !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT
,
3921 /* free probe-request template */
3922 dev_kfree_skb(wlvif
->probereq
);
3923 wlvif
->probereq
= NULL
;
3925 /* revert back to minimum rates for the current band */
3926 wl1271_set_band_rate(wl
, wlvif
);
3928 wl1271_tx_min_rate_get(wl
,
3929 wlvif
->basic_rate_set
);
3930 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
3934 /* disable connection monitor features */
3935 ret
= wl1271_acx_conn_monit_params(wl
, wlvif
, false);
3937 /* Disable the keep-alive feature */
3938 ret
= wl1271_acx_keep_alive_mode(wl
, wlvif
, false);
3942 /* restore the bssid filter and go to dummy bssid */
3945 * we might have to disable roc, if there was
3946 * no IF_OPER_UP notification.
3949 ret
= wl12xx_croc(wl
, wlvif
->role_id
);
3954 * (we also need to disable roc in case of
3955 * roaming on the same channel. until we will
3956 * have a better flow...)
3958 if (test_bit(wlvif
->dev_role_id
, wl
->roc_map
)) {
3959 ret
= wl12xx_croc(wl
,
3960 wlvif
->dev_role_id
);
3965 wl1271_unjoin(wl
, wlvif
);
3966 if (!bss_conf
->idle
)
3967 wl12xx_start_dev(wl
, wlvif
);
3972 if (changed
& BSS_CHANGED_IBSS
) {
3973 wl1271_debug(DEBUG_ADHOC
, "ibss_joined: %d",
3974 bss_conf
->ibss_joined
);
3976 if (bss_conf
->ibss_joined
) {
3977 u32 rates
= bss_conf
->basic_rates
;
3978 wlvif
->basic_rate_set
=
3979 wl1271_tx_enabled_rates_get(wl
, rates
,
3982 wl1271_tx_min_rate_get(wl
,
3983 wlvif
->basic_rate_set
);
3985 /* by default, use 11b + OFDM rates */
3986 wlvif
->rate_set
= CONF_TX_IBSS_DEFAULT_RATES
;
3987 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
3993 ret
= wl1271_bss_erp_info_changed(wl
, vif
, bss_conf
, changed
);
3998 ret
= wl1271_join(wl
, wlvif
, set_assoc
);
4000 wl1271_warning("cmd join failed %d", ret
);
4004 /* ROC until connected (after EAPOL exchange) */
4006 ret
= wl12xx_roc(wl
, wlvif
, wlvif
->role_id
);
4010 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
))
4011 wl12xx_set_authorized(wl
, wlvif
);
4014 * stop device role if started (we might already be in
4017 if (wl12xx_dev_role_started(wlvif
)) {
4018 ret
= wl12xx_stop_dev(wl
, wlvif
);
4024 /* Handle new association with HT. Do this after join. */
4026 if ((changed
& BSS_CHANGED_HT
) &&
4027 (bss_conf
->channel_type
!= NL80211_CHAN_NO_HT
)) {
4028 ret
= wl1271_acx_set_ht_capabilities(wl
,
4033 wl1271_warning("Set ht cap true failed %d",
4038 /* handle new association without HT and disassociation */
4039 else if (changed
& BSS_CHANGED_ASSOC
) {
4040 ret
= wl1271_acx_set_ht_capabilities(wl
,
4045 wl1271_warning("Set ht cap false failed %d",
4052 /* Handle HT information change. Done after join. */
4053 if ((changed
& BSS_CHANGED_HT
) &&
4054 (bss_conf
->channel_type
!= NL80211_CHAN_NO_HT
)) {
4055 ret
= wl1271_acx_set_ht_information(wl
, wlvif
,
4056 bss_conf
->ht_operation_mode
);
4058 wl1271_warning("Set ht information failed %d", ret
);
4063 /* Handle arp filtering. Done after join. */
4064 if ((changed
& BSS_CHANGED_ARP_FILTER
) ||
4065 (!is_ibss
&& (changed
& BSS_CHANGED_QOS
))) {
4066 __be32 addr
= bss_conf
->arp_addr_list
[0];
4067 wlvif
->sta
.qos
= bss_conf
->qos
;
4068 WARN_ON(wlvif
->bss_type
!= BSS_TYPE_STA_BSS
);
4070 if (bss_conf
->arp_addr_cnt
== 1 &&
4071 bss_conf
->arp_filter_enabled
) {
4072 wlvif
->ip_addr
= addr
;
4074 * The template should have been configured only upon
4075 * association. however, it seems that the correct ip
4076 * isn't being set (when sending), so we have to
4077 * reconfigure the template upon every ip change.
4079 ret
= wl1271_cmd_build_arp_rsp(wl
, wlvif
);
4081 wl1271_warning("build arp rsp failed: %d", ret
);
4085 ret
= wl1271_acx_arp_ip_filter(wl
, wlvif
,
4086 (ACX_ARP_FILTER_ARP_FILTERING
|
4087 ACX_ARP_FILTER_AUTO_ARP
),
4091 ret
= wl1271_acx_arp_ip_filter(wl
, wlvif
, 0, addr
);
4102 static void wl1271_op_bss_info_changed(struct ieee80211_hw
*hw
,
4103 struct ieee80211_vif
*vif
,
4104 struct ieee80211_bss_conf
*bss_conf
,
4107 struct wl1271
*wl
= hw
->priv
;
4108 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4109 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
4112 wl1271_debug(DEBUG_MAC80211
, "mac80211 bss info changed 0x%x",
4116 * make sure to cancel pending disconnections if our association
4119 if (!is_ap
&& (changed
& BSS_CHANGED_ASSOC
))
4120 cancel_delayed_work_sync(&wl
->connection_loss_work
);
4122 if (is_ap
&& (changed
& BSS_CHANGED_BEACON_ENABLED
) &&
4123 !bss_conf
->enable_beacon
)
4124 wl1271_tx_flush(wl
);
4126 mutex_lock(&wl
->mutex
);
4128 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
4131 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)))
4134 ret
= wl1271_ps_elp_wakeup(wl
);
4139 wl1271_bss_info_changed_ap(wl
, vif
, bss_conf
, changed
);
4141 wl1271_bss_info_changed_sta(wl
, vif
, bss_conf
, changed
);
4143 wl1271_ps_elp_sleep(wl
);
4146 mutex_unlock(&wl
->mutex
);
4149 static int wl1271_op_conf_tx(struct ieee80211_hw
*hw
,
4150 struct ieee80211_vif
*vif
, u16 queue
,
4151 const struct ieee80211_tx_queue_params
*params
)
4153 struct wl1271
*wl
= hw
->priv
;
4154 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4158 mutex_lock(&wl
->mutex
);
4160 wl1271_debug(DEBUG_MAC80211
, "mac80211 conf tx %d", queue
);
4163 ps_scheme
= CONF_PS_SCHEME_UPSD_TRIGGER
;
4165 ps_scheme
= CONF_PS_SCHEME_LEGACY
;
4167 if (!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
4170 ret
= wl1271_ps_elp_wakeup(wl
);
4175 * the txop is confed in units of 32us by the mac80211,
4178 ret
= wl1271_acx_ac_cfg(wl
, wlvif
, wl1271_tx_get_queue(queue
),
4179 params
->cw_min
, params
->cw_max
,
4180 params
->aifs
, params
->txop
<< 5);
4184 ret
= wl1271_acx_tid_cfg(wl
, wlvif
, wl1271_tx_get_queue(queue
),
4185 CONF_CHANNEL_TYPE_EDCF
,
4186 wl1271_tx_get_queue(queue
),
4187 ps_scheme
, CONF_ACK_POLICY_LEGACY
,
4191 wl1271_ps_elp_sleep(wl
);
4194 mutex_unlock(&wl
->mutex
);
4199 static u64
wl1271_op_get_tsf(struct ieee80211_hw
*hw
,
4200 struct ieee80211_vif
*vif
)
4203 struct wl1271
*wl
= hw
->priv
;
4204 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4205 u64 mactime
= ULLONG_MAX
;
4208 wl1271_debug(DEBUG_MAC80211
, "mac80211 get tsf");
4210 mutex_lock(&wl
->mutex
);
4212 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
4215 ret
= wl1271_ps_elp_wakeup(wl
);
4219 ret
= wl12xx_acx_tsf_info(wl
, wlvif
, &mactime
);
4224 wl1271_ps_elp_sleep(wl
);
4227 mutex_unlock(&wl
->mutex
);
4231 static int wl1271_op_get_survey(struct ieee80211_hw
*hw
, int idx
,
4232 struct survey_info
*survey
)
4234 struct ieee80211_conf
*conf
= &hw
->conf
;
4239 survey
->channel
= conf
->channel
;
4244 static int wl1271_allocate_sta(struct wl1271
*wl
,
4245 struct wl12xx_vif
*wlvif
,
4246 struct ieee80211_sta
*sta
)
4248 struct wl1271_station
*wl_sta
;
4252 if (wl
->active_sta_count
>= AP_MAX_STATIONS
) {
4253 wl1271_warning("could not allocate HLID - too much stations");
4257 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4258 ret
= wl12xx_allocate_link(wl
, wlvif
, &wl_sta
->hlid
);
4260 wl1271_warning("could not allocate HLID - too many links");
4264 set_bit(wl_sta
->hlid
, wlvif
->ap
.sta_hlid_map
);
4265 memcpy(wl
->links
[wl_sta
->hlid
].addr
, sta
->addr
, ETH_ALEN
);
4266 wl
->active_sta_count
++;
4270 void wl1271_free_sta(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
, u8 hlid
)
4272 if (!test_bit(hlid
, wlvif
->ap
.sta_hlid_map
))
4275 clear_bit(hlid
, wlvif
->ap
.sta_hlid_map
);
4276 memset(wl
->links
[hlid
].addr
, 0, ETH_ALEN
);
4277 wl
->links
[hlid
].ba_bitmap
= 0;
4278 __clear_bit(hlid
, &wl
->ap_ps_map
);
4279 __clear_bit(hlid
, (unsigned long *)&wl
->ap_fw_ps_map
);
4280 wl12xx_free_link(wl
, wlvif
, &hlid
);
4281 wl
->active_sta_count
--;
4284 * rearm the tx watchdog when the last STA is freed - give the FW a
4285 * chance to return STA-buffered packets before complaining.
4287 if (wl
->active_sta_count
== 0)
4288 wl12xx_rearm_tx_watchdog_locked(wl
);
4291 static int wl12xx_sta_add(struct wl1271
*wl
,
4292 struct wl12xx_vif
*wlvif
,
4293 struct ieee80211_sta
*sta
)
4295 struct wl1271_station
*wl_sta
;
4299 wl1271_debug(DEBUG_MAC80211
, "mac80211 add sta %d", (int)sta
->aid
);
4301 ret
= wl1271_allocate_sta(wl
, wlvif
, sta
);
4305 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4306 hlid
= wl_sta
->hlid
;
4308 ret
= wl12xx_cmd_add_peer(wl
, wlvif
, sta
, hlid
);
4310 wl1271_free_sta(wl
, wlvif
, hlid
);
4315 static int wl12xx_sta_remove(struct wl1271
*wl
,
4316 struct wl12xx_vif
*wlvif
,
4317 struct ieee80211_sta
*sta
)
4319 struct wl1271_station
*wl_sta
;
4322 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove sta %d", (int)sta
->aid
);
4324 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4326 if (WARN_ON(!test_bit(id
, wlvif
->ap
.sta_hlid_map
)))
4329 ret
= wl12xx_cmd_remove_peer(wl
, wl_sta
->hlid
);
4333 wl1271_free_sta(wl
, wlvif
, wl_sta
->hlid
);
4337 static int wl12xx_update_sta_state(struct wl1271
*wl
,
4338 struct wl12xx_vif
*wlvif
,
4339 struct ieee80211_sta
*sta
,
4340 enum ieee80211_sta_state old_state
,
4341 enum ieee80211_sta_state new_state
)
4343 struct wl1271_station
*wl_sta
;
4345 bool is_ap
= wlvif
->bss_type
== BSS_TYPE_AP_BSS
;
4346 bool is_sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
4349 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4350 hlid
= wl_sta
->hlid
;
4352 /* Add station (AP mode) */
4354 old_state
== IEEE80211_STA_NOTEXIST
&&
4355 new_state
== IEEE80211_STA_NONE
)
4356 return wl12xx_sta_add(wl
, wlvif
, sta
);
4358 /* Remove station (AP mode) */
4360 old_state
== IEEE80211_STA_NONE
&&
4361 new_state
== IEEE80211_STA_NOTEXIST
) {
4363 wl12xx_sta_remove(wl
, wlvif
, sta
);
4367 /* Authorize station (AP mode) */
4369 new_state
== IEEE80211_STA_AUTHORIZED
) {
4370 ret
= wl12xx_cmd_set_peer_state(wl
, hlid
);
4374 ret
= wl1271_acx_set_ht_capabilities(wl
, &sta
->ht_cap
, true,
4379 /* Authorize station */
4381 new_state
== IEEE80211_STA_AUTHORIZED
) {
4382 set_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
);
4383 return wl12xx_set_authorized(wl
, wlvif
);
4387 old_state
== IEEE80211_STA_AUTHORIZED
&&
4388 new_state
== IEEE80211_STA_ASSOC
) {
4389 clear_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
);
4396 static int wl12xx_op_sta_state(struct ieee80211_hw
*hw
,
4397 struct ieee80211_vif
*vif
,
4398 struct ieee80211_sta
*sta
,
4399 enum ieee80211_sta_state old_state
,
4400 enum ieee80211_sta_state new_state
)
4402 struct wl1271
*wl
= hw
->priv
;
4403 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4406 wl1271_debug(DEBUG_MAC80211
, "mac80211 sta %d state=%d->%d",
4407 sta
->aid
, old_state
, new_state
);
4409 mutex_lock(&wl
->mutex
);
4411 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
4416 ret
= wl1271_ps_elp_wakeup(wl
);
4420 ret
= wl12xx_update_sta_state(wl
, wlvif
, sta
, old_state
, new_state
);
4422 wl1271_ps_elp_sleep(wl
);
4424 mutex_unlock(&wl
->mutex
);
4425 if (new_state
< old_state
)
4430 static int wl1271_op_ampdu_action(struct ieee80211_hw
*hw
,
4431 struct ieee80211_vif
*vif
,
4432 enum ieee80211_ampdu_mlme_action action
,
4433 struct ieee80211_sta
*sta
, u16 tid
, u16
*ssn
,
4436 struct wl1271
*wl
= hw
->priv
;
4437 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4439 u8 hlid
, *ba_bitmap
;
4441 wl1271_debug(DEBUG_MAC80211
, "mac80211 ampdu action %d tid %d", action
,
4444 /* sanity check - the fields in FW are only 8bits wide */
4445 if (WARN_ON(tid
> 0xFF))
4448 mutex_lock(&wl
->mutex
);
4450 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
4455 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
) {
4456 hlid
= wlvif
->sta
.hlid
;
4457 ba_bitmap
= &wlvif
->sta
.ba_rx_bitmap
;
4458 } else if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
) {
4459 struct wl1271_station
*wl_sta
;
4461 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4462 hlid
= wl_sta
->hlid
;
4463 ba_bitmap
= &wl
->links
[hlid
].ba_bitmap
;
4469 ret
= wl1271_ps_elp_wakeup(wl
);
4473 wl1271_debug(DEBUG_MAC80211
, "mac80211 ampdu: Rx tid %d action %d",
4477 case IEEE80211_AMPDU_RX_START
:
4478 if (!wlvif
->ba_support
|| !wlvif
->ba_allowed
) {
4483 if (wl
->ba_rx_session_count
>= RX_BA_MAX_SESSIONS
) {
4485 wl1271_error("exceeded max RX BA sessions");
4489 if (*ba_bitmap
& BIT(tid
)) {
4491 wl1271_error("cannot enable RX BA session on active "
4496 ret
= wl12xx_acx_set_ba_receiver_session(wl
, tid
, *ssn
, true,
4499 *ba_bitmap
|= BIT(tid
);
4500 wl
->ba_rx_session_count
++;
4504 case IEEE80211_AMPDU_RX_STOP
:
4505 if (!(*ba_bitmap
& BIT(tid
))) {
4507 * this happens on reconfig - so only output a debug
4508 * message for now, and don't fail the function.
4510 wl1271_debug(DEBUG_MAC80211
,
4511 "no active RX BA session on tid: %d",
4517 ret
= wl12xx_acx_set_ba_receiver_session(wl
, tid
, 0, false,
4520 *ba_bitmap
&= ~BIT(tid
);
4521 wl
->ba_rx_session_count
--;
4526 * The BA initiator session management in FW independently.
4527 * Falling break here on purpose for all TX APDU commands.
4529 case IEEE80211_AMPDU_TX_START
:
4530 case IEEE80211_AMPDU_TX_STOP
:
4531 case IEEE80211_AMPDU_TX_OPERATIONAL
:
4536 wl1271_error("Incorrect ampdu action id=%x\n", action
);
4540 wl1271_ps_elp_sleep(wl
);
4543 mutex_unlock(&wl
->mutex
);
4548 static int wl12xx_set_bitrate_mask(struct ieee80211_hw
*hw
,
4549 struct ieee80211_vif
*vif
,
4550 const struct cfg80211_bitrate_mask
*mask
)
4552 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4553 struct wl1271
*wl
= hw
->priv
;
4556 wl1271_debug(DEBUG_MAC80211
, "mac80211 set_bitrate_mask 0x%x 0x%x",
4557 mask
->control
[NL80211_BAND_2GHZ
].legacy
,
4558 mask
->control
[NL80211_BAND_5GHZ
].legacy
);
4560 mutex_lock(&wl
->mutex
);
4562 for (i
= 0; i
< IEEE80211_NUM_BANDS
; i
++)
4563 wlvif
->bitrate_masks
[i
] =
4564 wl1271_tx_enabled_rates_get(wl
,
4565 mask
->control
[i
].legacy
,
4568 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
4571 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
4572 !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
)) {
4574 ret
= wl1271_ps_elp_wakeup(wl
);
4578 wl1271_set_band_rate(wl
, wlvif
);
4580 wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
4581 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4583 wl1271_ps_elp_sleep(wl
);
4586 mutex_unlock(&wl
->mutex
);
4591 static void wl12xx_op_channel_switch(struct ieee80211_hw
*hw
,
4592 struct ieee80211_channel_switch
*ch_switch
)
4594 struct wl1271
*wl
= hw
->priv
;
4595 struct wl12xx_vif
*wlvif
;
4598 wl1271_debug(DEBUG_MAC80211
, "mac80211 channel switch");
4600 wl1271_tx_flush(wl
);
4602 mutex_lock(&wl
->mutex
);
4604 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
4605 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
4606 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
4607 ieee80211_chswitch_done(vif
, false);
4612 ret
= wl1271_ps_elp_wakeup(wl
);
4616 /* TODO: change mac80211 to pass vif as param */
4617 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
4618 ret
= wl12xx_cmd_channel_switch(wl
, wlvif
, ch_switch
);
4621 set_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
);
4624 wl1271_ps_elp_sleep(wl
);
4627 mutex_unlock(&wl
->mutex
);
4630 static bool wl1271_tx_frames_pending(struct ieee80211_hw
*hw
)
4632 struct wl1271
*wl
= hw
->priv
;
4635 mutex_lock(&wl
->mutex
);
4637 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
4640 /* packets are considered pending if in the TX queue or the FW */
4641 ret
= (wl1271_tx_total_queue_count(wl
) > 0) || (wl
->tx_frames_cnt
> 0);
4643 mutex_unlock(&wl
->mutex
);
4648 /* can't be const, mac80211 writes to this */
4649 static struct ieee80211_rate wl1271_rates
[] = {
4651 .hw_value
= CONF_HW_BIT_RATE_1MBPS
,
4652 .hw_value_short
= CONF_HW_BIT_RATE_1MBPS
, },
4654 .hw_value
= CONF_HW_BIT_RATE_2MBPS
,
4655 .hw_value_short
= CONF_HW_BIT_RATE_2MBPS
,
4656 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
4658 .hw_value
= CONF_HW_BIT_RATE_5_5MBPS
,
4659 .hw_value_short
= CONF_HW_BIT_RATE_5_5MBPS
,
4660 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
4662 .hw_value
= CONF_HW_BIT_RATE_11MBPS
,
4663 .hw_value_short
= CONF_HW_BIT_RATE_11MBPS
,
4664 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
4666 .hw_value
= CONF_HW_BIT_RATE_6MBPS
,
4667 .hw_value_short
= CONF_HW_BIT_RATE_6MBPS
, },
4669 .hw_value
= CONF_HW_BIT_RATE_9MBPS
,
4670 .hw_value_short
= CONF_HW_BIT_RATE_9MBPS
, },
4672 .hw_value
= CONF_HW_BIT_RATE_12MBPS
,
4673 .hw_value_short
= CONF_HW_BIT_RATE_12MBPS
, },
4675 .hw_value
= CONF_HW_BIT_RATE_18MBPS
,
4676 .hw_value_short
= CONF_HW_BIT_RATE_18MBPS
, },
4678 .hw_value
= CONF_HW_BIT_RATE_24MBPS
,
4679 .hw_value_short
= CONF_HW_BIT_RATE_24MBPS
, },
4681 .hw_value
= CONF_HW_BIT_RATE_36MBPS
,
4682 .hw_value_short
= CONF_HW_BIT_RATE_36MBPS
, },
4684 .hw_value
= CONF_HW_BIT_RATE_48MBPS
,
4685 .hw_value_short
= CONF_HW_BIT_RATE_48MBPS
, },
4687 .hw_value
= CONF_HW_BIT_RATE_54MBPS
,
4688 .hw_value_short
= CONF_HW_BIT_RATE_54MBPS
, },
4691 /* can't be const, mac80211 writes to this */
4692 static struct ieee80211_channel wl1271_channels
[] = {
4693 { .hw_value
= 1, .center_freq
= 2412, .max_power
= 25 },
4694 { .hw_value
= 2, .center_freq
= 2417, .max_power
= 25 },
4695 { .hw_value
= 3, .center_freq
= 2422, .max_power
= 25 },
4696 { .hw_value
= 4, .center_freq
= 2427, .max_power
= 25 },
4697 { .hw_value
= 5, .center_freq
= 2432, .max_power
= 25 },
4698 { .hw_value
= 6, .center_freq
= 2437, .max_power
= 25 },
4699 { .hw_value
= 7, .center_freq
= 2442, .max_power
= 25 },
4700 { .hw_value
= 8, .center_freq
= 2447, .max_power
= 25 },
4701 { .hw_value
= 9, .center_freq
= 2452, .max_power
= 25 },
4702 { .hw_value
= 10, .center_freq
= 2457, .max_power
= 25 },
4703 { .hw_value
= 11, .center_freq
= 2462, .max_power
= 25 },
4704 { .hw_value
= 12, .center_freq
= 2467, .max_power
= 25 },
4705 { .hw_value
= 13, .center_freq
= 2472, .max_power
= 25 },
4706 { .hw_value
= 14, .center_freq
= 2484, .max_power
= 25 },
4709 /* can't be const, mac80211 writes to this */
4710 static struct ieee80211_supported_band wl1271_band_2ghz
= {
4711 .channels
= wl1271_channels
,
4712 .n_channels
= ARRAY_SIZE(wl1271_channels
),
4713 .bitrates
= wl1271_rates
,
4714 .n_bitrates
= ARRAY_SIZE(wl1271_rates
),
4717 /* 5 GHz data rates for WL1273 */
4718 static struct ieee80211_rate wl1271_rates_5ghz
[] = {
4720 .hw_value
= CONF_HW_BIT_RATE_6MBPS
,
4721 .hw_value_short
= CONF_HW_BIT_RATE_6MBPS
, },
4723 .hw_value
= CONF_HW_BIT_RATE_9MBPS
,
4724 .hw_value_short
= CONF_HW_BIT_RATE_9MBPS
, },
4726 .hw_value
= CONF_HW_BIT_RATE_12MBPS
,
4727 .hw_value_short
= CONF_HW_BIT_RATE_12MBPS
, },
4729 .hw_value
= CONF_HW_BIT_RATE_18MBPS
,
4730 .hw_value_short
= CONF_HW_BIT_RATE_18MBPS
, },
4732 .hw_value
= CONF_HW_BIT_RATE_24MBPS
,
4733 .hw_value_short
= CONF_HW_BIT_RATE_24MBPS
, },
4735 .hw_value
= CONF_HW_BIT_RATE_36MBPS
,
4736 .hw_value_short
= CONF_HW_BIT_RATE_36MBPS
, },
4738 .hw_value
= CONF_HW_BIT_RATE_48MBPS
,
4739 .hw_value_short
= CONF_HW_BIT_RATE_48MBPS
, },
4741 .hw_value
= CONF_HW_BIT_RATE_54MBPS
,
4742 .hw_value_short
= CONF_HW_BIT_RATE_54MBPS
, },
4745 /* 5 GHz band channels for WL1273 */
4746 static struct ieee80211_channel wl1271_channels_5ghz
[] = {
4747 { .hw_value
= 7, .center_freq
= 5035, .max_power
= 25 },
4748 { .hw_value
= 8, .center_freq
= 5040, .max_power
= 25 },
4749 { .hw_value
= 9, .center_freq
= 5045, .max_power
= 25 },
4750 { .hw_value
= 11, .center_freq
= 5055, .max_power
= 25 },
4751 { .hw_value
= 12, .center_freq
= 5060, .max_power
= 25 },
4752 { .hw_value
= 16, .center_freq
= 5080, .max_power
= 25 },
4753 { .hw_value
= 34, .center_freq
= 5170, .max_power
= 25 },
4754 { .hw_value
= 36, .center_freq
= 5180, .max_power
= 25 },
4755 { .hw_value
= 38, .center_freq
= 5190, .max_power
= 25 },
4756 { .hw_value
= 40, .center_freq
= 5200, .max_power
= 25 },
4757 { .hw_value
= 42, .center_freq
= 5210, .max_power
= 25 },
4758 { .hw_value
= 44, .center_freq
= 5220, .max_power
= 25 },
4759 { .hw_value
= 46, .center_freq
= 5230, .max_power
= 25 },
4760 { .hw_value
= 48, .center_freq
= 5240, .max_power
= 25 },
4761 { .hw_value
= 52, .center_freq
= 5260, .max_power
= 25 },
4762 { .hw_value
= 56, .center_freq
= 5280, .max_power
= 25 },
4763 { .hw_value
= 60, .center_freq
= 5300, .max_power
= 25 },
4764 { .hw_value
= 64, .center_freq
= 5320, .max_power
= 25 },
4765 { .hw_value
= 100, .center_freq
= 5500, .max_power
= 25 },
4766 { .hw_value
= 104, .center_freq
= 5520, .max_power
= 25 },
4767 { .hw_value
= 108, .center_freq
= 5540, .max_power
= 25 },
4768 { .hw_value
= 112, .center_freq
= 5560, .max_power
= 25 },
4769 { .hw_value
= 116, .center_freq
= 5580, .max_power
= 25 },
4770 { .hw_value
= 120, .center_freq
= 5600, .max_power
= 25 },
4771 { .hw_value
= 124, .center_freq
= 5620, .max_power
= 25 },
4772 { .hw_value
= 128, .center_freq
= 5640, .max_power
= 25 },
4773 { .hw_value
= 132, .center_freq
= 5660, .max_power
= 25 },
4774 { .hw_value
= 136, .center_freq
= 5680, .max_power
= 25 },
4775 { .hw_value
= 140, .center_freq
= 5700, .max_power
= 25 },
4776 { .hw_value
= 149, .center_freq
= 5745, .max_power
= 25 },
4777 { .hw_value
= 153, .center_freq
= 5765, .max_power
= 25 },
4778 { .hw_value
= 157, .center_freq
= 5785, .max_power
= 25 },
4779 { .hw_value
= 161, .center_freq
= 5805, .max_power
= 25 },
4780 { .hw_value
= 165, .center_freq
= 5825, .max_power
= 25 },
4783 static struct ieee80211_supported_band wl1271_band_5ghz
= {
4784 .channels
= wl1271_channels_5ghz
,
4785 .n_channels
= ARRAY_SIZE(wl1271_channels_5ghz
),
4786 .bitrates
= wl1271_rates_5ghz
,
4787 .n_bitrates
= ARRAY_SIZE(wl1271_rates_5ghz
),
4790 static const struct ieee80211_ops wl1271_ops
= {
4791 .start
= wl1271_op_start
,
4792 .stop
= wl1271_op_stop
,
4793 .add_interface
= wl1271_op_add_interface
,
4794 .remove_interface
= wl1271_op_remove_interface
,
4795 .change_interface
= wl12xx_op_change_interface
,
4797 .suspend
= wl1271_op_suspend
,
4798 .resume
= wl1271_op_resume
,
4800 .config
= wl1271_op_config
,
4801 .prepare_multicast
= wl1271_op_prepare_multicast
,
4802 .configure_filter
= wl1271_op_configure_filter
,
4804 .set_key
= wlcore_op_set_key
,
4805 .hw_scan
= wl1271_op_hw_scan
,
4806 .cancel_hw_scan
= wl1271_op_cancel_hw_scan
,
4807 .sched_scan_start
= wl1271_op_sched_scan_start
,
4808 .sched_scan_stop
= wl1271_op_sched_scan_stop
,
4809 .bss_info_changed
= wl1271_op_bss_info_changed
,
4810 .set_frag_threshold
= wl1271_op_set_frag_threshold
,
4811 .set_rts_threshold
= wl1271_op_set_rts_threshold
,
4812 .conf_tx
= wl1271_op_conf_tx
,
4813 .get_tsf
= wl1271_op_get_tsf
,
4814 .get_survey
= wl1271_op_get_survey
,
4815 .sta_state
= wl12xx_op_sta_state
,
4816 .ampdu_action
= wl1271_op_ampdu_action
,
4817 .tx_frames_pending
= wl1271_tx_frames_pending
,
4818 .set_bitrate_mask
= wl12xx_set_bitrate_mask
,
4819 .channel_switch
= wl12xx_op_channel_switch
,
4820 CFG80211_TESTMODE_CMD(wl1271_tm_cmd
)
4824 u8
wlcore_rate_to_idx(struct wl1271
*wl
, u8 rate
, enum ieee80211_band band
)
4830 if (unlikely(rate
>= wl
->hw_tx_rate_tbl_size
)) {
4831 wl1271_error("Illegal RX rate from HW: %d", rate
);
4835 idx
= wl
->band_rate_to_idx
[band
][rate
];
4836 if (unlikely(idx
== CONF_HW_RXTX_RATE_UNSUPPORTED
)) {
4837 wl1271_error("Unsupported RX rate from HW: %d", rate
);
4844 static ssize_t
wl1271_sysfs_show_bt_coex_state(struct device
*dev
,
4845 struct device_attribute
*attr
,
4848 struct wl1271
*wl
= dev_get_drvdata(dev
);
4853 mutex_lock(&wl
->mutex
);
4854 len
= snprintf(buf
, len
, "%d\n\n0 - off\n1 - on\n",
4856 mutex_unlock(&wl
->mutex
);
4862 static ssize_t
wl1271_sysfs_store_bt_coex_state(struct device
*dev
,
4863 struct device_attribute
*attr
,
4864 const char *buf
, size_t count
)
4866 struct wl1271
*wl
= dev_get_drvdata(dev
);
4870 ret
= kstrtoul(buf
, 10, &res
);
4872 wl1271_warning("incorrect value written to bt_coex_mode");
4876 mutex_lock(&wl
->mutex
);
4880 if (res
== wl
->sg_enabled
)
4883 wl
->sg_enabled
= res
;
4885 if (wl
->state
== WL1271_STATE_OFF
)
4888 ret
= wl1271_ps_elp_wakeup(wl
);
4892 wl1271_acx_sg_enable(wl
, wl
->sg_enabled
);
4893 wl1271_ps_elp_sleep(wl
);
4896 mutex_unlock(&wl
->mutex
);
4900 static DEVICE_ATTR(bt_coex_state
, S_IRUGO
| S_IWUSR
,
4901 wl1271_sysfs_show_bt_coex_state
,
4902 wl1271_sysfs_store_bt_coex_state
);
4904 static ssize_t
wl1271_sysfs_show_hw_pg_ver(struct device
*dev
,
4905 struct device_attribute
*attr
,
4908 struct wl1271
*wl
= dev_get_drvdata(dev
);
4913 mutex_lock(&wl
->mutex
);
4914 if (wl
->hw_pg_ver
>= 0)
4915 len
= snprintf(buf
, len
, "%d\n", wl
->hw_pg_ver
);
4917 len
= snprintf(buf
, len
, "n/a\n");
4918 mutex_unlock(&wl
->mutex
);
4923 static DEVICE_ATTR(hw_pg_ver
, S_IRUGO
,
4924 wl1271_sysfs_show_hw_pg_ver
, NULL
);
4926 static ssize_t
wl1271_sysfs_read_fwlog(struct file
*filp
, struct kobject
*kobj
,
4927 struct bin_attribute
*bin_attr
,
4928 char *buffer
, loff_t pos
, size_t count
)
4930 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
4931 struct wl1271
*wl
= dev_get_drvdata(dev
);
4935 ret
= mutex_lock_interruptible(&wl
->mutex
);
4937 return -ERESTARTSYS
;
4939 /* Let only one thread read the log at a time, blocking others */
4940 while (wl
->fwlog_size
== 0) {
4943 prepare_to_wait_exclusive(&wl
->fwlog_waitq
,
4945 TASK_INTERRUPTIBLE
);
4947 if (wl
->fwlog_size
!= 0) {
4948 finish_wait(&wl
->fwlog_waitq
, &wait
);
4952 mutex_unlock(&wl
->mutex
);
4955 finish_wait(&wl
->fwlog_waitq
, &wait
);
4957 if (signal_pending(current
))
4958 return -ERESTARTSYS
;
4960 ret
= mutex_lock_interruptible(&wl
->mutex
);
4962 return -ERESTARTSYS
;
4965 /* Check if the fwlog is still valid */
4966 if (wl
->fwlog_size
< 0) {
4967 mutex_unlock(&wl
->mutex
);
4971 /* Seeking is not supported - old logs are not kept. Disregard pos. */
4972 len
= min(count
, (size_t)wl
->fwlog_size
);
4973 wl
->fwlog_size
-= len
;
4974 memcpy(buffer
, wl
->fwlog
, len
);
4976 /* Make room for new messages */
4977 memmove(wl
->fwlog
, wl
->fwlog
+ len
, wl
->fwlog_size
);
4979 mutex_unlock(&wl
->mutex
);
4984 static struct bin_attribute fwlog_attr
= {
4985 .attr
= {.name
= "fwlog", .mode
= S_IRUSR
},
4986 .read
= wl1271_sysfs_read_fwlog
,
4989 static void wl1271_connection_loss_work(struct work_struct
*work
)
4991 struct delayed_work
*dwork
;
4993 struct ieee80211_vif
*vif
;
4994 struct wl12xx_vif
*wlvif
;
4996 dwork
= container_of(work
, struct delayed_work
, work
);
4997 wl
= container_of(dwork
, struct wl1271
, connection_loss_work
);
4999 wl1271_info("Connection loss work.");
5001 mutex_lock(&wl
->mutex
);
5003 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
5006 /* Call mac80211 connection loss */
5007 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
5008 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
5010 vif
= wl12xx_wlvif_to_vif(wlvif
);
5011 ieee80211_connection_loss(vif
);
5014 mutex_unlock(&wl
->mutex
);
5017 static void wl12xx_derive_mac_addresses(struct wl1271
*wl
,
5018 u32 oui
, u32 nic
, int n
)
5022 wl1271_debug(DEBUG_PROBE
, "base address: oui %06x nic %06x, n %d",
5025 if (nic
+ n
- 1 > 0xffffff)
5026 wl1271_warning("NIC part of the MAC address wraps around!");
5028 for (i
= 0; i
< n
; i
++) {
5029 wl
->addresses
[i
].addr
[0] = (u8
)(oui
>> 16);
5030 wl
->addresses
[i
].addr
[1] = (u8
)(oui
>> 8);
5031 wl
->addresses
[i
].addr
[2] = (u8
) oui
;
5032 wl
->addresses
[i
].addr
[3] = (u8
)(nic
>> 16);
5033 wl
->addresses
[i
].addr
[4] = (u8
)(nic
>> 8);
5034 wl
->addresses
[i
].addr
[5] = (u8
) nic
;
5038 wl
->hw
->wiphy
->n_addresses
= n
;
5039 wl
->hw
->wiphy
->addresses
= wl
->addresses
;
5042 static int wl12xx_get_hw_info(struct wl1271
*wl
)
5046 ret
= wl12xx_set_power_on(wl
);
5050 ret
= wlcore_read_reg(wl
, REG_CHIP_ID_B
, &wl
->chip
.id
);
5054 wl
->fuse_oui_addr
= 0;
5055 wl
->fuse_nic_addr
= 0;
5057 ret
= wl
->ops
->get_pg_ver(wl
, &wl
->hw_pg_ver
);
5061 if (wl
->ops
->get_mac
)
5062 ret
= wl
->ops
->get_mac(wl
);
5065 wl1271_power_off(wl
);
5069 static int wl1271_register_hw(struct wl1271
*wl
)
5072 u32 oui_addr
= 0, nic_addr
= 0;
5074 if (wl
->mac80211_registered
)
5077 wl1271_fetch_nvs(wl
);
5078 if (wl
->nvs
!= NULL
) {
5079 /* NOTE: The wl->nvs->nvs element must be first, in
5080 * order to simplify the casting, we assume it is at
5081 * the beginning of the wl->nvs structure.
5083 u8
*nvs_ptr
= (u8
*)wl
->nvs
;
5086 (nvs_ptr
[11] << 16) + (nvs_ptr
[10] << 8) + nvs_ptr
[6];
5088 (nvs_ptr
[5] << 16) + (nvs_ptr
[4] << 8) + nvs_ptr
[3];
5091 /* if the MAC address is zeroed in the NVS derive from fuse */
5092 if (oui_addr
== 0 && nic_addr
== 0) {
5093 oui_addr
= wl
->fuse_oui_addr
;
5094 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5095 nic_addr
= wl
->fuse_nic_addr
+ 1;
5098 wl12xx_derive_mac_addresses(wl
, oui_addr
, nic_addr
, 2);
5100 ret
= ieee80211_register_hw(wl
->hw
);
5102 wl1271_error("unable to register mac80211 hw: %d", ret
);
5106 wl
->mac80211_registered
= true;
5108 wl1271_debugfs_init(wl
);
5110 wl1271_notice("loaded");
5116 static void wl1271_unregister_hw(struct wl1271
*wl
)
5119 wl1271_plt_stop(wl
);
5121 ieee80211_unregister_hw(wl
->hw
);
5122 wl
->mac80211_registered
= false;
5126 static const struct ieee80211_iface_limit wlcore_iface_limits
[] = {
5129 .types
= BIT(NL80211_IFTYPE_STATION
),
5133 .types
= BIT(NL80211_IFTYPE_AP
) |
5134 BIT(NL80211_IFTYPE_P2P_GO
) |
5135 BIT(NL80211_IFTYPE_P2P_CLIENT
),
5139 static const struct ieee80211_iface_combination
5140 wlcore_iface_combinations
[] = {
5142 .num_different_channels
= 1,
5143 .max_interfaces
= 2,
5144 .limits
= wlcore_iface_limits
,
5145 .n_limits
= ARRAY_SIZE(wlcore_iface_limits
),
5149 static int wl1271_init_ieee80211(struct wl1271
*wl
)
5151 static const u32 cipher_suites
[] = {
5152 WLAN_CIPHER_SUITE_WEP40
,
5153 WLAN_CIPHER_SUITE_WEP104
,
5154 WLAN_CIPHER_SUITE_TKIP
,
5155 WLAN_CIPHER_SUITE_CCMP
,
5156 WL1271_CIPHER_SUITE_GEM
,
5159 /* The tx descriptor buffer */
5160 wl
->hw
->extra_tx_headroom
= sizeof(struct wl1271_tx_hw_descr
);
5162 if (wl
->quirks
& WLCORE_QUIRK_TKIP_HEADER_SPACE
)
5163 wl
->hw
->extra_tx_headroom
+= WL1271_EXTRA_SPACE_TKIP
;
5166 /* FIXME: find a proper value */
5167 wl
->hw
->channel_change_time
= 10000;
5168 wl
->hw
->max_listen_interval
= wl
->conf
.conn
.max_listen_interval
;
5170 wl
->hw
->flags
= IEEE80211_HW_SIGNAL_DBM
|
5171 IEEE80211_HW_SUPPORTS_PS
|
5172 IEEE80211_HW_SUPPORTS_DYNAMIC_PS
|
5173 IEEE80211_HW_SUPPORTS_UAPSD
|
5174 IEEE80211_HW_HAS_RATE_CONTROL
|
5175 IEEE80211_HW_CONNECTION_MONITOR
|
5176 IEEE80211_HW_REPORTS_TX_ACK_STATUS
|
5177 IEEE80211_HW_SPECTRUM_MGMT
|
5178 IEEE80211_HW_AP_LINK_PS
|
5179 IEEE80211_HW_AMPDU_AGGREGATION
|
5180 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW
|
5181 IEEE80211_HW_SCAN_WHILE_IDLE
;
5183 wl
->hw
->wiphy
->cipher_suites
= cipher_suites
;
5184 wl
->hw
->wiphy
->n_cipher_suites
= ARRAY_SIZE(cipher_suites
);
5186 wl
->hw
->wiphy
->interface_modes
= BIT(NL80211_IFTYPE_STATION
) |
5187 BIT(NL80211_IFTYPE_ADHOC
) | BIT(NL80211_IFTYPE_AP
) |
5188 BIT(NL80211_IFTYPE_P2P_CLIENT
) | BIT(NL80211_IFTYPE_P2P_GO
);
5189 wl
->hw
->wiphy
->max_scan_ssids
= 1;
5190 wl
->hw
->wiphy
->max_sched_scan_ssids
= 16;
5191 wl
->hw
->wiphy
->max_match_sets
= 16;
5193 * Maximum length of elements in scanning probe request templates
5194 * should be the maximum length possible for a template, without
5195 * the IEEE80211 header of the template
5197 wl
->hw
->wiphy
->max_scan_ie_len
= WL1271_CMD_TEMPL_MAX_SIZE
-
5198 sizeof(struct ieee80211_header
);
5200 wl
->hw
->wiphy
->max_sched_scan_ie_len
= WL1271_CMD_TEMPL_MAX_SIZE
-
5201 sizeof(struct ieee80211_header
);
5203 wl
->hw
->wiphy
->flags
|= WIPHY_FLAG_AP_UAPSD
|
5204 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL
;
5206 /* make sure all our channels fit in the scanned_ch bitmask */
5207 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels
) +
5208 ARRAY_SIZE(wl1271_channels_5ghz
) >
5209 WL1271_MAX_CHANNELS
);
5211 * We keep local copies of the band structs because we need to
5212 * modify them on a per-device basis.
5214 memcpy(&wl
->bands
[IEEE80211_BAND_2GHZ
], &wl1271_band_2ghz
,
5215 sizeof(wl1271_band_2ghz
));
5216 memcpy(&wl
->bands
[IEEE80211_BAND_2GHZ
].ht_cap
,
5217 &wl
->ht_cap
[IEEE80211_BAND_2GHZ
],
5218 sizeof(*wl
->ht_cap
));
5219 memcpy(&wl
->bands
[IEEE80211_BAND_5GHZ
], &wl1271_band_5ghz
,
5220 sizeof(wl1271_band_5ghz
));
5221 memcpy(&wl
->bands
[IEEE80211_BAND_5GHZ
].ht_cap
,
5222 &wl
->ht_cap
[IEEE80211_BAND_5GHZ
],
5223 sizeof(*wl
->ht_cap
));
5225 wl
->hw
->wiphy
->bands
[IEEE80211_BAND_2GHZ
] =
5226 &wl
->bands
[IEEE80211_BAND_2GHZ
];
5227 wl
->hw
->wiphy
->bands
[IEEE80211_BAND_5GHZ
] =
5228 &wl
->bands
[IEEE80211_BAND_5GHZ
];
5231 wl
->hw
->max_rates
= 1;
5233 wl
->hw
->wiphy
->reg_notifier
= wl1271_reg_notify
;
5235 /* the FW answers probe-requests in AP-mode */
5236 wl
->hw
->wiphy
->flags
|= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD
;
5237 wl
->hw
->wiphy
->probe_resp_offload
=
5238 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS
|
5239 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2
|
5240 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P
;
5242 /* allowed interface combinations */
5243 wl
->hw
->wiphy
->iface_combinations
= wlcore_iface_combinations
;
5244 wl
->hw
->wiphy
->n_iface_combinations
=
5245 ARRAY_SIZE(wlcore_iface_combinations
);
5247 SET_IEEE80211_DEV(wl
->hw
, wl
->dev
);
5249 wl
->hw
->sta_data_size
= sizeof(struct wl1271_station
);
5250 wl
->hw
->vif_data_size
= sizeof(struct wl12xx_vif
);
5252 wl
->hw
->max_rx_aggregation_subframes
= wl
->conf
.ht
.rx_ba_win_size
;
5257 #define WL1271_DEFAULT_CHANNEL 0
5259 struct ieee80211_hw
*wlcore_alloc_hw(size_t priv_size
)
5261 struct ieee80211_hw
*hw
;
5266 BUILD_BUG_ON(AP_MAX_STATIONS
> WL12XX_MAX_LINKS
);
5268 hw
= ieee80211_alloc_hw(sizeof(*wl
), &wl1271_ops
);
5270 wl1271_error("could not alloc ieee80211_hw");
5276 memset(wl
, 0, sizeof(*wl
));
5278 wl
->priv
= kzalloc(priv_size
, GFP_KERNEL
);
5280 wl1271_error("could not alloc wl priv");
5282 goto err_priv_alloc
;
5285 INIT_LIST_HEAD(&wl
->wlvif_list
);
5289 for (i
= 0; i
< NUM_TX_QUEUES
; i
++)
5290 for (j
= 0; j
< WL12XX_MAX_LINKS
; j
++)
5291 skb_queue_head_init(&wl
->links
[j
].tx_queue
[i
]);
5293 skb_queue_head_init(&wl
->deferred_rx_queue
);
5294 skb_queue_head_init(&wl
->deferred_tx_queue
);
5296 INIT_DELAYED_WORK(&wl
->elp_work
, wl1271_elp_work
);
5297 INIT_WORK(&wl
->netstack_work
, wl1271_netstack_work
);
5298 INIT_WORK(&wl
->tx_work
, wl1271_tx_work
);
5299 INIT_WORK(&wl
->recovery_work
, wl1271_recovery_work
);
5300 INIT_DELAYED_WORK(&wl
->scan_complete_work
, wl1271_scan_complete_work
);
5301 INIT_DELAYED_WORK(&wl
->tx_watchdog_work
, wl12xx_tx_watchdog_work
);
5302 INIT_DELAYED_WORK(&wl
->connection_loss_work
,
5303 wl1271_connection_loss_work
);
5305 wl
->freezable_wq
= create_freezable_workqueue("wl12xx_wq");
5306 if (!wl
->freezable_wq
) {
5311 wl
->channel
= WL1271_DEFAULT_CHANNEL
;
5313 wl
->power_level
= WL1271_DEFAULT_POWER_LEVEL
;
5314 wl
->band
= IEEE80211_BAND_2GHZ
;
5315 wl
->channel_type
= NL80211_CHAN_NO_HT
;
5317 wl
->sg_enabled
= true;
5318 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
5321 wl
->ap_fw_ps_map
= 0;
5323 wl
->platform_quirks
= 0;
5324 wl
->sched_scanning
= false;
5325 wl
->system_hlid
= WL12XX_SYSTEM_HLID
;
5326 wl
->active_sta_count
= 0;
5328 init_waitqueue_head(&wl
->fwlog_waitq
);
5330 /* The system link is always allocated */
5331 __set_bit(WL12XX_SYSTEM_HLID
, wl
->links_map
);
5333 memset(wl
->tx_frames_map
, 0, sizeof(wl
->tx_frames_map
));
5334 for (i
= 0; i
< wl
->num_tx_desc
; i
++)
5335 wl
->tx_frames
[i
] = NULL
;
5337 spin_lock_init(&wl
->wl_lock
);
5339 wl
->state
= WL1271_STATE_OFF
;
5340 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
5341 mutex_init(&wl
->mutex
);
5342 mutex_init(&wl
->flush_mutex
);
5344 order
= get_order(WL1271_AGGR_BUFFER_SIZE
);
5345 wl
->aggr_buf
= (u8
*)__get_free_pages(GFP_KERNEL
, order
);
5346 if (!wl
->aggr_buf
) {
5351 wl
->dummy_packet
= wl12xx_alloc_dummy_packet(wl
);
5352 if (!wl
->dummy_packet
) {
5357 /* Allocate one page for the FW log */
5358 wl
->fwlog
= (u8
*)get_zeroed_page(GFP_KERNEL
);
5361 goto err_dummy_packet
;
5364 wl
->mbox
= kmalloc(sizeof(*wl
->mbox
), GFP_KERNEL
| GFP_DMA
);
5373 free_page((unsigned long)wl
->fwlog
);
5376 dev_kfree_skb(wl
->dummy_packet
);
5379 free_pages((unsigned long)wl
->aggr_buf
, order
);
5382 destroy_workqueue(wl
->freezable_wq
);
5385 wl1271_debugfs_exit(wl
);
5389 ieee80211_free_hw(hw
);
5393 return ERR_PTR(ret
);
5395 EXPORT_SYMBOL_GPL(wlcore_alloc_hw
);
5397 int wlcore_free_hw(struct wl1271
*wl
)
5399 /* Unblock any fwlog readers */
5400 mutex_lock(&wl
->mutex
);
5401 wl
->fwlog_size
= -1;
5402 wake_up_interruptible_all(&wl
->fwlog_waitq
);
5403 mutex_unlock(&wl
->mutex
);
5405 device_remove_bin_file(wl
->dev
, &fwlog_attr
);
5407 device_remove_file(wl
->dev
, &dev_attr_hw_pg_ver
);
5409 device_remove_file(wl
->dev
, &dev_attr_bt_coex_state
);
5410 free_page((unsigned long)wl
->fwlog
);
5411 dev_kfree_skb(wl
->dummy_packet
);
5412 free_pages((unsigned long)wl
->aggr_buf
,
5413 get_order(WL1271_AGGR_BUFFER_SIZE
));
5415 wl1271_debugfs_exit(wl
);
5419 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
5423 kfree(wl
->fw_status_1
);
5424 kfree(wl
->tx_res_if
);
5425 destroy_workqueue(wl
->freezable_wq
);
5428 ieee80211_free_hw(wl
->hw
);
5432 EXPORT_SYMBOL_GPL(wlcore_free_hw
);
5434 static irqreturn_t
wl12xx_hardirq(int irq
, void *cookie
)
5436 struct wl1271
*wl
= cookie
;
5437 unsigned long flags
;
5439 wl1271_debug(DEBUG_IRQ
, "IRQ");
5441 /* complete the ELP completion */
5442 spin_lock_irqsave(&wl
->wl_lock
, flags
);
5443 set_bit(WL1271_FLAG_IRQ_RUNNING
, &wl
->flags
);
5444 if (wl
->elp_compl
) {
5445 complete(wl
->elp_compl
);
5446 wl
->elp_compl
= NULL
;
5449 if (test_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
)) {
5450 /* don't enqueue a work right now. mark it as pending */
5451 set_bit(WL1271_FLAG_PENDING_WORK
, &wl
->flags
);
5452 wl1271_debug(DEBUG_IRQ
, "should not enqueue work");
5453 disable_irq_nosync(wl
->irq
);
5454 pm_wakeup_event(wl
->dev
, 0);
5455 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
5458 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
5460 return IRQ_WAKE_THREAD
;
5463 int __devinit
wlcore_probe(struct wl1271
*wl
, struct platform_device
*pdev
)
5465 struct wl12xx_platform_data
*pdata
= pdev
->dev
.platform_data
;
5466 unsigned long irqflags
;
5469 if (!wl
->ops
|| !wl
->ptable
) {
5474 BUG_ON(wl
->num_tx_desc
> WLCORE_MAX_TX_DESCRIPTORS
);
5476 /* adjust some runtime configuration parameters */
5477 wlcore_adjust_conf(wl
);
5479 wl
->irq
= platform_get_irq(pdev
, 0);
5480 wl
->platform_quirks
= pdata
->platform_quirks
;
5481 wl
->set_power
= pdata
->set_power
;
5482 wl
->dev
= &pdev
->dev
;
5483 wl
->if_ops
= pdata
->ops
;
5485 platform_set_drvdata(pdev
, wl
);
5487 if (wl
->platform_quirks
& WL12XX_PLATFORM_QUIRK_EDGE_IRQ
)
5488 irqflags
= IRQF_TRIGGER_RISING
;
5490 irqflags
= IRQF_TRIGGER_HIGH
| IRQF_ONESHOT
;
5492 ret
= request_threaded_irq(wl
->irq
, wl12xx_hardirq
, wl1271_irq
,
5496 wl1271_error("request_irq() failed: %d", ret
);
5500 ret
= enable_irq_wake(wl
->irq
);
5502 wl
->irq_wake_enabled
= true;
5503 device_init_wakeup(wl
->dev
, 1);
5504 if (pdata
->pwr_in_suspend
) {
5505 wl
->hw
->wiphy
->wowlan
.flags
= WIPHY_WOWLAN_ANY
;
5506 wl
->hw
->wiphy
->wowlan
.n_patterns
=
5507 WL1271_MAX_RX_FILTERS
;
5508 wl
->hw
->wiphy
->wowlan
.pattern_min_len
= 1;
5509 wl
->hw
->wiphy
->wowlan
.pattern_max_len
=
5510 WL1271_RX_FILTER_MAX_PATTERN_SIZE
;
5513 disable_irq(wl
->irq
);
5515 ret
= wl12xx_get_hw_info(wl
);
5517 wl1271_error("couldn't get hw info");
5521 ret
= wl
->ops
->identify_chip(wl
);
5525 ret
= wl1271_init_ieee80211(wl
);
5529 ret
= wl1271_register_hw(wl
);
5533 /* Create sysfs file to control bt coex state */
5534 ret
= device_create_file(wl
->dev
, &dev_attr_bt_coex_state
);
5536 wl1271_error("failed to create sysfs file bt_coex_state");
5540 /* Create sysfs file to get HW PG version */
5541 ret
= device_create_file(wl
->dev
, &dev_attr_hw_pg_ver
);
5543 wl1271_error("failed to create sysfs file hw_pg_ver");
5544 goto out_bt_coex_state
;
5547 /* Create sysfs file for the FW log */
5548 ret
= device_create_bin_file(wl
->dev
, &fwlog_attr
);
5550 wl1271_error("failed to create sysfs file fwlog");
5557 device_remove_file(wl
->dev
, &dev_attr_hw_pg_ver
);
5560 device_remove_file(wl
->dev
, &dev_attr_bt_coex_state
);
5563 free_irq(wl
->irq
, wl
);
5571 EXPORT_SYMBOL_GPL(wlcore_probe
);
5573 int __devexit
wlcore_remove(struct platform_device
*pdev
)
5575 struct wl1271
*wl
= platform_get_drvdata(pdev
);
5577 if (wl
->irq_wake_enabled
) {
5578 device_init_wakeup(wl
->dev
, 0);
5579 disable_irq_wake(wl
->irq
);
5581 wl1271_unregister_hw(wl
);
5582 free_irq(wl
->irq
, wl
);
5587 EXPORT_SYMBOL_GPL(wlcore_remove
);
5589 u32 wl12xx_debug_level
= DEBUG_NONE
;
5590 EXPORT_SYMBOL_GPL(wl12xx_debug_level
);
5591 module_param_named(debug_level
, wl12xx_debug_level
, uint
, S_IRUSR
| S_IWUSR
);
5592 MODULE_PARM_DESC(debug_level
, "wl12xx debugging level");
5594 module_param_named(fwlog
, fwlog_param
, charp
, 0);
5595 MODULE_PARM_DESC(fwlog
,
5596 "FW logger options: continuous, ondemand, dbgpins or disable");
5598 module_param(bug_on_recovery
, bool, S_IRUSR
| S_IWUSR
);
5599 MODULE_PARM_DESC(bug_on_recovery
, "BUG() on fw recovery");
5601 module_param(no_recovery
, bool, S_IRUSR
| S_IWUSR
);
5602 MODULE_PARM_DESC(no_recovery
, "Prevent HW recovery. FW will remain stuck.");
5604 MODULE_LICENSE("GPL");
5605 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
5606 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");