3 * This file is part of wl1271
5 * Copyright (C) 2008-2010 Nokia Corporation
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
40 #include "wl12xx_80211.h"
/* Number of times to retry booting the firmware before giving up. */
#define WL1271_BOOT_RETRIES 3
/*
 * Optional module parameters. All default to zero/NULL when the user
 * does not set them on the module command line.
 */
static char *fwlog_param;        /* fwlog mode: continuous/ondemand/dbgpins/disable */
static bool bug_on_recovery;     /* BUG() instead of recovering from a FW hang */
static bool no_recovery;         /* leave the FW stuck instead of recovering */
/* Forward declarations for helpers defined later in this file. */
static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 struct ieee80211_vif *vif,
					 bool reset_tx_queues);
static void wl1271_op_stop(struct ieee80211_hw *hw);
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
68 static int wl12xx_set_authorized(struct wl1271
*wl
,
69 struct wl12xx_vif
*wlvif
)
73 if (WARN_ON(wlvif
->bss_type
!= BSS_TYPE_STA_BSS
))
76 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
79 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT
, &wlvif
->flags
))
82 ret
= wl12xx_cmd_set_peer_state(wl
, wlvif
->sta
.hlid
);
86 wl12xx_croc(wl
, wlvif
->role_id
);
88 wl1271_info("Association completed.");
92 static int wl1271_reg_notify(struct wiphy
*wiphy
,
93 struct regulatory_request
*request
)
95 struct ieee80211_supported_band
*band
;
96 struct ieee80211_channel
*ch
;
99 band
= wiphy
->bands
[IEEE80211_BAND_5GHZ
];
100 for (i
= 0; i
< band
->n_channels
; i
++) {
101 ch
= &band
->channels
[i
];
102 if (ch
->flags
& IEEE80211_CHAN_DISABLED
)
105 if (ch
->flags
& IEEE80211_CHAN_RADAR
)
106 ch
->flags
|= IEEE80211_CHAN_NO_IBSS
|
107 IEEE80211_CHAN_PASSIVE_SCAN
;
114 static int wl1271_set_rx_streaming(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
119 /* we should hold wl->mutex */
120 ret
= wl1271_acx_ps_rx_streaming(wl
, wlvif
, enable
);
125 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
);
127 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
);
133 * this function is being called when the rx_streaming interval
134 * has beed changed or rx_streaming should be disabled
136 int wl1271_recalc_rx_streaming(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
139 int period
= wl
->conf
.rx_streaming
.interval
;
141 /* don't reconfigure if rx_streaming is disabled */
142 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
))
145 /* reconfigure/disable according to new streaming_period */
147 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) &&
148 (wl
->conf
.rx_streaming
.always
||
149 test_bit(WL1271_FLAG_SOFT_GEMINI
, &wl
->flags
)))
150 ret
= wl1271_set_rx_streaming(wl
, wlvif
, true);
152 ret
= wl1271_set_rx_streaming(wl
, wlvif
, false);
153 /* don't cancel_work_sync since we might deadlock */
154 del_timer_sync(&wlvif
->rx_streaming_timer
);
160 static void wl1271_rx_streaming_enable_work(struct work_struct
*work
)
163 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
164 rx_streaming_enable_work
);
165 struct wl1271
*wl
= wlvif
->wl
;
167 mutex_lock(&wl
->mutex
);
169 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
) ||
170 !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) ||
171 (!wl
->conf
.rx_streaming
.always
&&
172 !test_bit(WL1271_FLAG_SOFT_GEMINI
, &wl
->flags
)))
175 if (!wl
->conf
.rx_streaming
.interval
)
178 ret
= wl1271_ps_elp_wakeup(wl
);
182 ret
= wl1271_set_rx_streaming(wl
, wlvif
, true);
186 /* stop it after some time of inactivity */
187 mod_timer(&wlvif
->rx_streaming_timer
,
188 jiffies
+ msecs_to_jiffies(wl
->conf
.rx_streaming
.duration
));
191 wl1271_ps_elp_sleep(wl
);
193 mutex_unlock(&wl
->mutex
);
196 static void wl1271_rx_streaming_disable_work(struct work_struct
*work
)
199 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
200 rx_streaming_disable_work
);
201 struct wl1271
*wl
= wlvif
->wl
;
203 mutex_lock(&wl
->mutex
);
205 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
))
208 ret
= wl1271_ps_elp_wakeup(wl
);
212 ret
= wl1271_set_rx_streaming(wl
, wlvif
, false);
217 wl1271_ps_elp_sleep(wl
);
219 mutex_unlock(&wl
->mutex
);
222 static void wl1271_rx_streaming_timer(unsigned long data
)
224 struct wl12xx_vif
*wlvif
= (struct wl12xx_vif
*)data
;
225 struct wl1271
*wl
= wlvif
->wl
;
226 ieee80211_queue_work(wl
->hw
, &wlvif
->rx_streaming_disable_work
);
229 /* wl->mutex must be taken */
230 void wl12xx_rearm_tx_watchdog_locked(struct wl1271
*wl
)
232 /* if the watchdog is not armed, don't do anything */
233 if (wl
->tx_allocated_blocks
== 0)
236 cancel_delayed_work(&wl
->tx_watchdog_work
);
237 ieee80211_queue_delayed_work(wl
->hw
, &wl
->tx_watchdog_work
,
238 msecs_to_jiffies(wl
->conf
.tx
.tx_watchdog_timeout
));
241 static void wl12xx_tx_watchdog_work(struct work_struct
*work
)
243 struct delayed_work
*dwork
;
246 dwork
= container_of(work
, struct delayed_work
, work
);
247 wl
= container_of(dwork
, struct wl1271
, tx_watchdog_work
);
249 mutex_lock(&wl
->mutex
);
251 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
254 /* Tx went out in the meantime - everything is ok */
255 if (unlikely(wl
->tx_allocated_blocks
== 0))
259 * if a ROC is in progress, we might not have any Tx for a long
260 * time (e.g. pending Tx on the non-ROC channels)
262 if (find_first_bit(wl
->roc_map
, WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
) {
263 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms due to ROC",
264 wl
->conf
.tx
.tx_watchdog_timeout
);
265 wl12xx_rearm_tx_watchdog_locked(wl
);
270 * if a scan is in progress, we might not have any Tx for a long
273 if (wl
->scan
.state
!= WL1271_SCAN_STATE_IDLE
) {
274 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms due to scan",
275 wl
->conf
.tx
.tx_watchdog_timeout
);
276 wl12xx_rearm_tx_watchdog_locked(wl
);
281 * AP might cache a frame for a long time for a sleeping station,
282 * so rearm the timer if there's an AP interface with stations. If
283 * Tx is genuinely stuck we will most hopefully discover it when all
284 * stations are removed due to inactivity.
286 if (wl
->active_sta_count
) {
287 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms. AP has "
289 wl
->conf
.tx
.tx_watchdog_timeout
,
290 wl
->active_sta_count
);
291 wl12xx_rearm_tx_watchdog_locked(wl
);
295 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
296 wl
->conf
.tx
.tx_watchdog_timeout
);
297 wl12xx_queue_recovery_work(wl
);
300 mutex_unlock(&wl
->mutex
);
303 static void wlcore_adjust_conf(struct wl1271
*wl
)
305 /* Adjust settings according to optional module parameters */
307 if (!strcmp(fwlog_param
, "continuous")) {
308 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_CONTINUOUS
;
309 } else if (!strcmp(fwlog_param
, "ondemand")) {
310 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_ON_DEMAND
;
311 } else if (!strcmp(fwlog_param
, "dbgpins")) {
312 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_CONTINUOUS
;
313 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_DBG_PINS
;
314 } else if (!strcmp(fwlog_param
, "disable")) {
315 wl
->conf
.fwlog
.mem_blocks
= 0;
316 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_NONE
;
318 wl1271_error("Unknown fwlog parameter %s", fwlog_param
);
323 static void wl12xx_irq_ps_regulate_link(struct wl1271
*wl
,
324 struct wl12xx_vif
*wlvif
,
327 bool fw_ps
, single_sta
;
329 fw_ps
= test_bit(hlid
, (unsigned long *)&wl
->ap_fw_ps_map
);
330 single_sta
= (wl
->active_sta_count
== 1);
333 * Wake up from high level PS if the STA is asleep with too little
334 * packets in FW or if the STA is awake.
336 if (!fw_ps
|| tx_pkts
< WL1271_PS_STA_MAX_PACKETS
)
337 wl12xx_ps_link_end(wl
, wlvif
, hlid
);
340 * Start high-level PS if the STA is asleep with enough blocks in FW.
341 * Make an exception if this is the only connected station. In this
342 * case FW-memory congestion is not a problem.
344 else if (!single_sta
&& fw_ps
&& tx_pkts
>= WL1271_PS_STA_MAX_PACKETS
)
345 wl12xx_ps_link_start(wl
, wlvif
, hlid
, true);
348 static void wl12xx_irq_update_links_status(struct wl1271
*wl
,
349 struct wl12xx_vif
*wlvif
,
350 struct wl_fw_status_2
*status
)
352 struct wl1271_link
*lnk
;
356 /* TODO: also use link_fast_bitmap here */
358 cur_fw_ps_map
= le32_to_cpu(status
->link_ps_bitmap
);
359 if (wl
->ap_fw_ps_map
!= cur_fw_ps_map
) {
360 wl1271_debug(DEBUG_PSM
,
361 "link ps prev 0x%x cur 0x%x changed 0x%x",
362 wl
->ap_fw_ps_map
, cur_fw_ps_map
,
363 wl
->ap_fw_ps_map
^ cur_fw_ps_map
);
365 wl
->ap_fw_ps_map
= cur_fw_ps_map
;
368 for_each_set_bit(hlid
, wlvif
->ap
.sta_hlid_map
, WL12XX_MAX_LINKS
) {
369 lnk
= &wl
->links
[hlid
];
370 cnt
= status
->counters
.tx_lnk_free_pkts
[hlid
] -
371 lnk
->prev_freed_pkts
;
373 lnk
->prev_freed_pkts
= status
->counters
.tx_lnk_free_pkts
[hlid
];
374 lnk
->allocated_pkts
-= cnt
;
376 wl12xx_irq_ps_regulate_link(wl
, wlvif
, hlid
,
377 lnk
->allocated_pkts
);
381 static void wl12xx_fw_status(struct wl1271
*wl
,
382 struct wl_fw_status_1
*status_1
,
383 struct wl_fw_status_2
*status_2
)
385 struct wl12xx_vif
*wlvif
;
387 u32 old_tx_blk_count
= wl
->tx_blocks_available
;
388 int avail
, freed_blocks
;
392 status_len
= WLCORE_FW_STATUS_1_LEN(wl
->num_rx_desc
) +
393 sizeof(*status_2
) + wl
->fw_status_priv_len
;
395 wlcore_raw_read_data(wl
, REG_RAW_FW_STATUS_ADDR
, status_1
,
398 wl1271_debug(DEBUG_IRQ
, "intr: 0x%x (fw_rx_counter = %d, "
399 "drv_rx_counter = %d, tx_results_counter = %d)",
401 status_1
->fw_rx_counter
,
402 status_1
->drv_rx_counter
,
403 status_1
->tx_results_counter
);
405 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
406 /* prevent wrap-around in freed-packets counter */
407 wl
->tx_allocated_pkts
[i
] -=
408 (status_2
->counters
.tx_released_pkts
[i
] -
409 wl
->tx_pkts_freed
[i
]) & 0xff;
411 wl
->tx_pkts_freed
[i
] = status_2
->counters
.tx_released_pkts
[i
];
414 /* prevent wrap-around in total blocks counter */
415 if (likely(wl
->tx_blocks_freed
<=
416 le32_to_cpu(status_2
->total_released_blks
)))
417 freed_blocks
= le32_to_cpu(status_2
->total_released_blks
) -
420 freed_blocks
= 0x100000000LL
- wl
->tx_blocks_freed
+
421 le32_to_cpu(status_2
->total_released_blks
);
423 wl
->tx_blocks_freed
= le32_to_cpu(status_2
->total_released_blks
);
425 wl
->tx_allocated_blocks
-= freed_blocks
;
428 * If the FW freed some blocks:
429 * If we still have allocated blocks - re-arm the timer, Tx is
430 * not stuck. Otherwise, cancel the timer (no Tx currently).
433 if (wl
->tx_allocated_blocks
)
434 wl12xx_rearm_tx_watchdog_locked(wl
);
436 cancel_delayed_work(&wl
->tx_watchdog_work
);
439 avail
= le32_to_cpu(status_2
->tx_total
) - wl
->tx_allocated_blocks
;
442 * The FW might change the total number of TX memblocks before
443 * we get a notification about blocks being released. Thus, the
444 * available blocks calculation might yield a temporary result
445 * which is lower than the actual available blocks. Keeping in
446 * mind that only blocks that were allocated can be moved from
447 * TX to RX, tx_blocks_available should never decrease here.
449 wl
->tx_blocks_available
= max((int)wl
->tx_blocks_available
,
452 /* if more blocks are available now, tx work can be scheduled */
453 if (wl
->tx_blocks_available
> old_tx_blk_count
)
454 clear_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
);
456 /* for AP update num of allocated TX blocks per link and ps status */
457 wl12xx_for_each_wlvif_ap(wl
, wlvif
) {
458 wl12xx_irq_update_links_status(wl
, wlvif
, status_2
);
461 /* update the host-chipset time offset */
463 wl
->time_offset
= (timespec_to_ns(&ts
) >> 10) -
464 (s64
)le32_to_cpu(status_2
->fw_localtime
);
467 static void wl1271_flush_deferred_work(struct wl1271
*wl
)
471 /* Pass all received frames to the network stack */
472 while ((skb
= skb_dequeue(&wl
->deferred_rx_queue
)))
473 ieee80211_rx_ni(wl
->hw
, skb
);
475 /* Return sent skbs to the network stack */
476 while ((skb
= skb_dequeue(&wl
->deferred_tx_queue
)))
477 ieee80211_tx_status_ni(wl
->hw
, skb
);
480 static void wl1271_netstack_work(struct work_struct
*work
)
483 container_of(work
, struct wl1271
, netstack_work
);
486 wl1271_flush_deferred_work(wl
);
487 } while (skb_queue_len(&wl
->deferred_rx_queue
));
490 #define WL1271_IRQ_MAX_LOOPS 256
492 static irqreturn_t
wl1271_irq(int irq
, void *cookie
)
496 int loopcount
= WL1271_IRQ_MAX_LOOPS
;
497 struct wl1271
*wl
= (struct wl1271
*)cookie
;
499 unsigned int defer_count
;
502 /* TX might be handled here, avoid redundant work */
503 set_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
);
504 cancel_work_sync(&wl
->tx_work
);
507 * In case edge triggered interrupt must be used, we cannot iterate
508 * more than once without introducing race conditions with the hardirq.
510 if (wl
->platform_quirks
& WL12XX_PLATFORM_QUIRK_EDGE_IRQ
)
513 mutex_lock(&wl
->mutex
);
515 wl1271_debug(DEBUG_IRQ
, "IRQ work");
517 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
520 ret
= wl1271_ps_elp_wakeup(wl
);
524 while (!done
&& loopcount
--) {
526 * In order to avoid a race with the hardirq, clear the flag
527 * before acknowledging the chip. Since the mutex is held,
528 * wl1271_ps_elp_wakeup cannot be called concurrently.
530 clear_bit(WL1271_FLAG_IRQ_RUNNING
, &wl
->flags
);
531 smp_mb__after_clear_bit();
533 wl12xx_fw_status(wl
, wl
->fw_status_1
, wl
->fw_status_2
);
535 wlcore_hw_tx_immediate_compl(wl
);
537 intr
= le32_to_cpu(wl
->fw_status_1
->intr
);
538 intr
&= WL1271_INTR_MASK
;
544 if (unlikely(intr
& WL1271_ACX_INTR_WATCHDOG
)) {
545 wl1271_error("watchdog interrupt received! "
546 "starting recovery.");
547 wl12xx_queue_recovery_work(wl
);
549 /* restarting the chip. ignore any other interrupt. */
553 if (likely(intr
& WL1271_ACX_INTR_DATA
)) {
554 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_DATA");
556 wl12xx_rx(wl
, wl
->fw_status_1
);
558 /* Check if any tx blocks were freed */
559 spin_lock_irqsave(&wl
->wl_lock
, flags
);
560 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
561 wl1271_tx_total_queue_count(wl
) > 0) {
562 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
564 * In order to avoid starvation of the TX path,
565 * call the work function directly.
567 wl1271_tx_work_locked(wl
);
569 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
572 /* check for tx results */
573 wlcore_hw_tx_delayed_compl(wl
);
575 /* Make sure the deferred queues don't get too long */
576 defer_count
= skb_queue_len(&wl
->deferred_tx_queue
) +
577 skb_queue_len(&wl
->deferred_rx_queue
);
578 if (defer_count
> WL1271_DEFERRED_QUEUE_LIMIT
)
579 wl1271_flush_deferred_work(wl
);
582 if (intr
& WL1271_ACX_INTR_EVENT_A
) {
583 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_EVENT_A");
584 wl1271_event_handle(wl
, 0);
587 if (intr
& WL1271_ACX_INTR_EVENT_B
) {
588 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_EVENT_B");
589 wl1271_event_handle(wl
, 1);
592 if (intr
& WL1271_ACX_INTR_INIT_COMPLETE
)
593 wl1271_debug(DEBUG_IRQ
,
594 "WL1271_ACX_INTR_INIT_COMPLETE");
596 if (intr
& WL1271_ACX_INTR_HW_AVAILABLE
)
597 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_HW_AVAILABLE");
600 wl1271_ps_elp_sleep(wl
);
603 spin_lock_irqsave(&wl
->wl_lock
, flags
);
604 /* In case TX was not handled here, queue TX work */
605 clear_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
);
606 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
607 wl1271_tx_total_queue_count(wl
) > 0)
608 ieee80211_queue_work(wl
->hw
, &wl
->tx_work
);
609 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
611 mutex_unlock(&wl
->mutex
);
616 struct vif_counter_data
{
619 struct ieee80211_vif
*cur_vif
;
620 bool cur_vif_running
;
623 static void wl12xx_vif_count_iter(void *data
, u8
*mac
,
624 struct ieee80211_vif
*vif
)
626 struct vif_counter_data
*counter
= data
;
629 if (counter
->cur_vif
== vif
)
630 counter
->cur_vif_running
= true;
633 /* caller must not hold wl->mutex, as it might deadlock */
634 static void wl12xx_get_vif_count(struct ieee80211_hw
*hw
,
635 struct ieee80211_vif
*cur_vif
,
636 struct vif_counter_data
*data
)
638 memset(data
, 0, sizeof(*data
));
639 data
->cur_vif
= cur_vif
;
641 ieee80211_iterate_active_interfaces(hw
,
642 wl12xx_vif_count_iter
, data
);
645 static int wl12xx_fetch_firmware(struct wl1271
*wl
, bool plt
)
647 const struct firmware
*fw
;
649 enum wl12xx_fw_type fw_type
;
653 fw_type
= WL12XX_FW_TYPE_PLT
;
654 fw_name
= wl
->plt_fw_name
;
657 * we can't call wl12xx_get_vif_count() here because
658 * wl->mutex is taken, so use the cached last_vif_count value
660 if (wl
->last_vif_count
> 1) {
661 fw_type
= WL12XX_FW_TYPE_MULTI
;
662 fw_name
= wl
->mr_fw_name
;
664 fw_type
= WL12XX_FW_TYPE_NORMAL
;
665 fw_name
= wl
->sr_fw_name
;
669 if (wl
->fw_type
== fw_type
)
672 wl1271_debug(DEBUG_BOOT
, "booting firmware %s", fw_name
);
674 ret
= request_firmware(&fw
, fw_name
, wl
->dev
);
677 wl1271_error("could not get firmware %s: %d", fw_name
, ret
);
682 wl1271_error("firmware size is not multiple of 32 bits: %zu",
689 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
690 wl
->fw_len
= fw
->size
;
691 wl
->fw
= vmalloc(wl
->fw_len
);
694 wl1271_error("could not allocate memory for the firmware");
699 memcpy(wl
->fw
, fw
->data
, wl
->fw_len
);
701 wl
->fw_type
= fw_type
;
703 release_firmware(fw
);
708 static int wl1271_fetch_nvs(struct wl1271
*wl
)
710 const struct firmware
*fw
;
713 ret
= request_firmware(&fw
, WL12XX_NVS_NAME
, wl
->dev
);
716 wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME
,
721 wl
->nvs
= kmemdup(fw
->data
, fw
->size
, GFP_KERNEL
);
724 wl1271_error("could not allocate memory for the nvs file");
729 wl
->nvs_len
= fw
->size
;
732 release_firmware(fw
);
737 void wl12xx_queue_recovery_work(struct wl1271
*wl
)
739 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
))
740 ieee80211_queue_work(wl
->hw
, &wl
->recovery_work
);
743 size_t wl12xx_copy_fwlog(struct wl1271
*wl
, u8
*memblock
, size_t maxlen
)
747 /* The FW log is a length-value list, find where the log end */
748 while (len
< maxlen
) {
749 if (memblock
[len
] == 0)
751 if (len
+ memblock
[len
] + 1 > maxlen
)
753 len
+= memblock
[len
] + 1;
756 /* Make sure we have enough room */
757 len
= min(len
, (size_t)(PAGE_SIZE
- wl
->fwlog_size
));
759 /* Fill the FW log file, consumed by the sysfs fwlog entry */
760 memcpy(wl
->fwlog
+ wl
->fwlog_size
, memblock
, len
);
761 wl
->fwlog_size
+= len
;
766 static void wl12xx_read_fwlog_panic(struct wl1271
*wl
)
772 if ((wl
->quirks
& WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED
) ||
773 (wl
->conf
.fwlog
.mode
!= WL12XX_FWLOG_ON_DEMAND
) ||
774 (wl
->conf
.fwlog
.mem_blocks
== 0))
777 wl1271_info("Reading FW panic log");
779 block
= kmalloc(WL12XX_HW_BLOCK_SIZE
, GFP_KERNEL
);
784 * Make sure the chip is awake and the logger isn't active.
785 * This might fail if the firmware hanged.
787 if (!wl1271_ps_elp_wakeup(wl
))
788 wl12xx_cmd_stop_fwlog(wl
);
790 /* Read the first memory block address */
791 wl12xx_fw_status(wl
, wl
->fw_status_1
, wl
->fw_status_2
);
792 first_addr
= le32_to_cpu(wl
->fw_status_2
->log_start_addr
);
796 /* Traverse the memory blocks linked list */
799 memset(block
, 0, WL12XX_HW_BLOCK_SIZE
);
800 wl1271_read_hwaddr(wl
, addr
, block
, WL12XX_HW_BLOCK_SIZE
,
804 * Memory blocks are linked to one another. The first 4 bytes
805 * of each memory block hold the hardware address of the next
806 * one. The last memory block points to the first one.
808 addr
= le32_to_cpup((__le32
*)block
);
809 if (!wl12xx_copy_fwlog(wl
, block
+ sizeof(addr
),
810 WL12XX_HW_BLOCK_SIZE
- sizeof(addr
)))
812 } while (addr
&& (addr
!= first_addr
));
814 wake_up_interruptible(&wl
->fwlog_waitq
);
820 static void wl1271_recovery_work(struct work_struct
*work
)
823 container_of(work
, struct wl1271
, recovery_work
);
824 struct wl12xx_vif
*wlvif
;
825 struct ieee80211_vif
*vif
;
827 mutex_lock(&wl
->mutex
);
829 if (wl
->state
!= WL1271_STATE_ON
|| wl
->plt
)
832 /* Avoid a recursive recovery */
833 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
);
835 wl12xx_read_fwlog_panic(wl
);
837 /* change partitions momentarily so we can read the FW pc */
838 wlcore_set_partition(wl
, &wl
->ptable
[PART_BOOT
]);
839 wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x "
842 wlcore_read_reg(wl
, REG_PC_ON_RECOVERY
),
843 wlcore_read_reg(wl
, REG_INTERRUPT_NO_CLEAR
));
844 wlcore_set_partition(wl
, &wl
->ptable
[PART_WORK
]);
846 BUG_ON(bug_on_recovery
&&
847 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
));
850 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
851 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
);
856 * Advance security sequence number to overcome potential progress
857 * in the firmware during recovery. This doens't hurt if the network is
860 wl12xx_for_each_wlvif(wl
, wlvif
) {
861 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) ||
862 test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
))
863 wlvif
->tx_security_seq
+=
864 WL1271_TX_SQN_POST_RECOVERY_PADDING
;
867 /* Prevent spurious TX during FW restart */
868 wlcore_stop_queues(wl
, WLCORE_QUEUE_STOP_REASON_FW_RESTART
);
870 if (wl
->sched_scanning
) {
871 ieee80211_sched_scan_stopped(wl
->hw
);
872 wl
->sched_scanning
= false;
875 /* reboot the chipset */
876 while (!list_empty(&wl
->wlvif_list
)) {
877 wlvif
= list_first_entry(&wl
->wlvif_list
,
878 struct wl12xx_vif
, list
);
879 vif
= wl12xx_wlvif_to_vif(wlvif
);
880 __wl1271_op_remove_interface(wl
, vif
, false);
882 mutex_unlock(&wl
->mutex
);
883 wl1271_op_stop(wl
->hw
);
885 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
);
887 ieee80211_restart_hw(wl
->hw
);
890 * Its safe to enable TX now - the queues are stopped after a request
893 wlcore_wake_queues(wl
, WLCORE_QUEUE_STOP_REASON_FW_RESTART
);
896 mutex_unlock(&wl
->mutex
);
899 static void wl1271_fw_wakeup(struct wl1271
*wl
)
901 wl1271_raw_write32(wl
, HW_ACCESS_ELP_CTRL_REG
, ELPCTRL_WAKE_UP
);
904 static int wl1271_setup(struct wl1271
*wl
)
906 wl
->fw_status_1
= kmalloc(WLCORE_FW_STATUS_1_LEN(wl
->num_rx_desc
) +
907 sizeof(*wl
->fw_status_2
) +
908 wl
->fw_status_priv_len
, GFP_KERNEL
);
909 if (!wl
->fw_status_1
)
912 wl
->fw_status_2
= (struct wl_fw_status_2
*)
913 (((u8
*) wl
->fw_status_1
) +
914 WLCORE_FW_STATUS_1_LEN(wl
->num_rx_desc
));
916 wl
->tx_res_if
= kmalloc(sizeof(*wl
->tx_res_if
), GFP_KERNEL
);
917 if (!wl
->tx_res_if
) {
918 kfree(wl
->fw_status_1
);
925 static int wl12xx_set_power_on(struct wl1271
*wl
)
929 msleep(WL1271_PRE_POWER_ON_SLEEP
);
930 ret
= wl1271_power_on(wl
);
933 msleep(WL1271_POWER_ON_SLEEP
);
937 wlcore_set_partition(wl
, &wl
->ptable
[PART_BOOT
]);
939 /* ELP module wake up */
940 wl1271_fw_wakeup(wl
);
946 static int wl12xx_chip_wakeup(struct wl1271
*wl
, bool plt
)
950 ret
= wl12xx_set_power_on(wl
);
955 * For wl127x based devices we could use the default block
956 * size (512 bytes), but due to a bug in the sdio driver, we
957 * need to set it explicitly after the chip is powered on. To
958 * simplify the code and since the performance impact is
959 * negligible, we use the same block size for all different
962 * Check if the bus supports blocksize alignment and, if it
963 * doesn't, make sure we don't have the quirk.
965 if (!wl1271_set_block_size(wl
))
966 wl
->quirks
&= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN
;
968 /* TODO: make sure the lower driver has set things up correctly */
970 ret
= wl1271_setup(wl
);
974 ret
= wl12xx_fetch_firmware(wl
, plt
);
978 /* No NVS from netlink, try to get it from the filesystem */
979 if (wl
->nvs
== NULL
) {
980 ret
= wl1271_fetch_nvs(wl
);
989 int wl1271_plt_start(struct wl1271
*wl
)
991 int retries
= WL1271_BOOT_RETRIES
;
992 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
995 mutex_lock(&wl
->mutex
);
997 wl1271_notice("power up");
999 if (wl
->state
!= WL1271_STATE_OFF
) {
1000 wl1271_error("cannot go into PLT state because not "
1001 "in off state: %d", wl
->state
);
1008 ret
= wl12xx_chip_wakeup(wl
, true);
1012 ret
= wl
->ops
->plt_init(wl
);
1017 wl
->state
= WL1271_STATE_ON
;
1018 wl1271_notice("firmware booted in PLT mode (%s)",
1019 wl
->chip
.fw_ver_str
);
1021 /* update hw/fw version info in wiphy struct */
1022 wiphy
->hw_version
= wl
->chip
.id
;
1023 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
1024 sizeof(wiphy
->fw_version
));
1029 wl1271_power_off(wl
);
1032 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1033 WL1271_BOOT_RETRIES
);
1035 mutex_unlock(&wl
->mutex
);
1040 int wl1271_plt_stop(struct wl1271
*wl
)
1044 wl1271_notice("power down");
1047 * Interrupts must be disabled before setting the state to OFF.
1048 * Otherwise, the interrupt handler might be called and exit without
1049 * reading the interrupt status.
1051 wlcore_disable_interrupts(wl
);
1052 mutex_lock(&wl
->mutex
);
1054 mutex_unlock(&wl
->mutex
);
1057 * This will not necessarily enable interrupts as interrupts
1058 * may have been disabled when op_stop was called. It will,
1059 * however, balance the above call to disable_interrupts().
1061 wlcore_enable_interrupts(wl
);
1063 wl1271_error("cannot power down because not in PLT "
1064 "state: %d", wl
->state
);
1069 mutex_unlock(&wl
->mutex
);
1071 wl1271_flush_deferred_work(wl
);
1072 cancel_work_sync(&wl
->netstack_work
);
1073 cancel_work_sync(&wl
->recovery_work
);
1074 cancel_delayed_work_sync(&wl
->elp_work
);
1075 cancel_delayed_work_sync(&wl
->tx_watchdog_work
);
1076 cancel_delayed_work_sync(&wl
->connection_loss_work
);
1078 mutex_lock(&wl
->mutex
);
1079 wl1271_power_off(wl
);
1081 wl
->state
= WL1271_STATE_OFF
;
1084 mutex_unlock(&wl
->mutex
);
1090 static void wl1271_op_tx(struct ieee80211_hw
*hw
, struct sk_buff
*skb
)
1092 struct wl1271
*wl
= hw
->priv
;
1093 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
1094 struct ieee80211_vif
*vif
= info
->control
.vif
;
1095 struct wl12xx_vif
*wlvif
= NULL
;
1096 unsigned long flags
;
1101 wlvif
= wl12xx_vif_to_data(vif
);
1103 mapping
= skb_get_queue_mapping(skb
);
1104 q
= wl1271_tx_get_queue(mapping
);
1106 hlid
= wl12xx_tx_get_hlid(wl
, wlvif
, skb
);
1108 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1111 * drop the packet if the link is invalid or the queue is stopped
1112 * for any reason but watermark. Watermark is a "soft"-stop so we
1113 * allow these packets through.
1115 if (hlid
== WL12XX_INVALID_LINK_ID
||
1116 (wlvif
&& !test_bit(hlid
, wlvif
->links_map
)) ||
1117 (wlcore_is_queue_stopped(wl
, q
) &&
1118 !wlcore_is_queue_stopped_by_reason(wl
, q
,
1119 WLCORE_QUEUE_STOP_REASON_WATERMARK
))) {
1120 wl1271_debug(DEBUG_TX
, "DROP skb hlid %d q %d", hlid
, q
);
1121 ieee80211_free_txskb(hw
, skb
);
1125 wl1271_debug(DEBUG_TX
, "queue skb hlid %d q %d len %d",
1127 skb_queue_tail(&wl
->links
[hlid
].tx_queue
[q
], skb
);
1129 wl
->tx_queue_count
[q
]++;
1132 * The workqueue is slow to process the tx_queue and we need stop
1133 * the queue here, otherwise the queue will get too long.
1135 if (wl
->tx_queue_count
[q
] >= WL1271_TX_QUEUE_HIGH_WATERMARK
) {
1136 wl1271_debug(DEBUG_TX
, "op_tx: stopping queues for q %d", q
);
1137 wlcore_stop_queue_locked(wl
, q
,
1138 WLCORE_QUEUE_STOP_REASON_WATERMARK
);
1142 * The chip specific setup must run before the first TX packet -
1143 * before that, the tx_work will not be initialized!
1146 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
1147 !test_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
))
1148 ieee80211_queue_work(wl
->hw
, &wl
->tx_work
);
1151 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1154 int wl1271_tx_dummy_packet(struct wl1271
*wl
)
1156 unsigned long flags
;
1159 /* no need to queue a new dummy packet if one is already pending */
1160 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING
, &wl
->flags
))
1163 q
= wl1271_tx_get_queue(skb_get_queue_mapping(wl
->dummy_packet
));
1165 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1166 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING
, &wl
->flags
);
1167 wl
->tx_queue_count
[q
]++;
1168 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1170 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1171 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
))
1172 wl1271_tx_work_locked(wl
);
1175 * If the FW TX is busy, TX work will be scheduled by the threaded
1176 * interrupt handler function
1182 * The size of the dummy packet should be at least 1400 bytes. However, in
1183 * order to minimize the number of bus transactions, aligning it to 512 bytes
1184 * boundaries could be beneficial, performance wise
1186 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1188 static struct sk_buff
*wl12xx_alloc_dummy_packet(struct wl1271
*wl
)
1190 struct sk_buff
*skb
;
1191 struct ieee80211_hdr_3addr
*hdr
;
1192 unsigned int dummy_packet_size
;
1194 dummy_packet_size
= TOTAL_TX_DUMMY_PACKET_SIZE
-
1195 sizeof(struct wl1271_tx_hw_descr
) - sizeof(*hdr
);
1197 skb
= dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE
);
1199 wl1271_warning("Failed to allocate a dummy packet skb");
1203 skb_reserve(skb
, sizeof(struct wl1271_tx_hw_descr
));
1205 hdr
= (struct ieee80211_hdr_3addr
*) skb_put(skb
, sizeof(*hdr
));
1206 memset(hdr
, 0, sizeof(*hdr
));
1207 hdr
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_DATA
|
1208 IEEE80211_STYPE_NULLFUNC
|
1209 IEEE80211_FCTL_TODS
);
1211 memset(skb_put(skb
, dummy_packet_size
), 0, dummy_packet_size
);
1213 /* Dummy packets require the TID to be management */
1214 skb
->priority
= WL1271_TID_MGMT
;
1216 /* Initialize all fields that might be used */
1217 skb_set_queue_mapping(skb
, 0);
1218 memset(IEEE80211_SKB_CB(skb
), 0, sizeof(struct ieee80211_tx_info
));
1226 wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern
*p
)
1228 int num_fields
= 0, in_field
= 0, fields_size
= 0;
1229 int i
, pattern_len
= 0;
1232 wl1271_warning("No mask in WoWLAN pattern");
1237 * The pattern is broken up into segments of bytes at different offsets
1238 * that need to be checked by the FW filter. Each segment is called
1239 * a field in the FW API. We verify that the total number of fields
1240 * required for this pattern won't exceed FW limits (8)
1241 * as well as the total fields buffer won't exceed the FW limit.
1242 * Note that if there's a pattern which crosses Ethernet/IP header
1243 * boundary a new field is required.
1245 for (i
= 0; i
< p
->pattern_len
; i
++) {
1246 if (test_bit(i
, (unsigned long *)p
->mask
)) {
1251 if (i
== WL1271_RX_FILTER_ETH_HEADER_SIZE
) {
1253 fields_size
+= pattern_len
+
1254 RX_FILTER_FIELD_OVERHEAD
;
1262 fields_size
+= pattern_len
+
1263 RX_FILTER_FIELD_OVERHEAD
;
1270 fields_size
+= pattern_len
+ RX_FILTER_FIELD_OVERHEAD
;
1274 if (num_fields
> WL1271_RX_FILTER_MAX_FIELDS
) {
1275 wl1271_warning("RX Filter too complex. Too many segments");
1279 if (fields_size
> WL1271_RX_FILTER_MAX_FIELDS_SIZE
) {
1280 wl1271_warning("RX filter pattern is too big");
1287 struct wl12xx_rx_filter
*wl1271_rx_filter_alloc(void)
1289 return kzalloc(sizeof(struct wl12xx_rx_filter
), GFP_KERNEL
);
1292 void wl1271_rx_filter_free(struct wl12xx_rx_filter
*filter
)
1299 for (i
= 0; i
< filter
->num_fields
; i
++)
1300 kfree(filter
->fields
[i
].pattern
);
1305 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter
*filter
,
1306 u16 offset
, u8 flags
,
1307 u8
*pattern
, u8 len
)
1309 struct wl12xx_rx_filter_field
*field
;
1311 if (filter
->num_fields
== WL1271_RX_FILTER_MAX_FIELDS
) {
1312 wl1271_warning("Max fields per RX filter. can't alloc another");
1316 field
= &filter
->fields
[filter
->num_fields
];
1318 field
->pattern
= kzalloc(len
, GFP_KERNEL
);
1319 if (!field
->pattern
) {
1320 wl1271_warning("Failed to allocate RX filter pattern");
1324 filter
->num_fields
++;
1326 field
->offset
= cpu_to_le16(offset
);
1327 field
->flags
= flags
;
1329 memcpy(field
->pattern
, pattern
, len
);
1334 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter
*filter
)
1336 int i
, fields_size
= 0;
1338 for (i
= 0; i
< filter
->num_fields
; i
++)
1339 fields_size
+= filter
->fields
[i
].len
+
1340 sizeof(struct wl12xx_rx_filter_field
) -
1346 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter
*filter
,
1350 struct wl12xx_rx_filter_field
*field
;
1352 for (i
= 0; i
< filter
->num_fields
; i
++) {
1353 field
= (struct wl12xx_rx_filter_field
*)buf
;
1355 field
->offset
= filter
->fields
[i
].offset
;
1356 field
->flags
= filter
->fields
[i
].flags
;
1357 field
->len
= filter
->fields
[i
].len
;
1359 memcpy(&field
->pattern
, filter
->fields
[i
].pattern
, field
->len
);
1360 buf
+= sizeof(struct wl12xx_rx_filter_field
) -
1361 sizeof(u8
*) + field
->len
;
1366 * Allocates an RX filter returned through f
1367 * which needs to be freed using rx_filter_free()
1369 static int wl1271_convert_wowlan_pattern_to_rx_filter(
1370 struct cfg80211_wowlan_trig_pkt_pattern
*p
,
1371 struct wl12xx_rx_filter
**f
)
1374 struct wl12xx_rx_filter
*filter
;
1378 filter
= wl1271_rx_filter_alloc();
1380 wl1271_warning("Failed to alloc rx filter");
1386 while (i
< p
->pattern_len
) {
1387 if (!test_bit(i
, (unsigned long *)p
->mask
)) {
1392 for (j
= i
; j
< p
->pattern_len
; j
++) {
1393 if (!test_bit(j
, (unsigned long *)p
->mask
))
1396 if (i
< WL1271_RX_FILTER_ETH_HEADER_SIZE
&&
1397 j
>= WL1271_RX_FILTER_ETH_HEADER_SIZE
)
1401 if (i
< WL1271_RX_FILTER_ETH_HEADER_SIZE
) {
1403 flags
= WL1271_RX_FILTER_FLAG_ETHERNET_HEADER
;
1405 offset
= i
- WL1271_RX_FILTER_ETH_HEADER_SIZE
;
1406 flags
= WL1271_RX_FILTER_FLAG_IP_HEADER
;
1411 ret
= wl1271_rx_filter_alloc_field(filter
,
1414 &p
->pattern
[i
], len
);
1421 filter
->action
= FILTER_SIGNAL
;
1427 wl1271_rx_filter_free(filter
);
1433 static int wl1271_configure_wowlan(struct wl1271
*wl
,
1434 struct cfg80211_wowlan
*wow
)
1438 if (!wow
|| wow
->any
|| !wow
->n_patterns
) {
1439 wl1271_acx_default_rx_filter_enable(wl
, 0, FILTER_SIGNAL
);
1440 wl1271_rx_filter_clear_all(wl
);
1444 if (WARN_ON(wow
->n_patterns
> WL1271_MAX_RX_FILTERS
))
1447 /* Validate all incoming patterns before clearing current FW state */
1448 for (i
= 0; i
< wow
->n_patterns
; i
++) {
1449 ret
= wl1271_validate_wowlan_pattern(&wow
->patterns
[i
]);
1451 wl1271_warning("Bad wowlan pattern %d", i
);
1456 wl1271_acx_default_rx_filter_enable(wl
, 0, FILTER_SIGNAL
);
1457 wl1271_rx_filter_clear_all(wl
);
1459 /* Translate WoWLAN patterns into filters */
1460 for (i
= 0; i
< wow
->n_patterns
; i
++) {
1461 struct cfg80211_wowlan_trig_pkt_pattern
*p
;
1462 struct wl12xx_rx_filter
*filter
= NULL
;
1464 p
= &wow
->patterns
[i
];
1466 ret
= wl1271_convert_wowlan_pattern_to_rx_filter(p
, &filter
);
1468 wl1271_warning("Failed to create an RX filter from "
1469 "wowlan pattern %d", i
);
1473 ret
= wl1271_rx_filter_enable(wl
, i
, 1, filter
);
1475 wl1271_rx_filter_free(filter
);
1480 ret
= wl1271_acx_default_rx_filter_enable(wl
, 1, FILTER_DROP
);
1486 static int wl1271_configure_suspend_sta(struct wl1271
*wl
,
1487 struct wl12xx_vif
*wlvif
,
1488 struct cfg80211_wowlan
*wow
)
1492 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
1495 ret
= wl1271_ps_elp_wakeup(wl
);
1499 wl1271_configure_wowlan(wl
, wow
);
1500 ret
= wl1271_acx_wake_up_conditions(wl
, wlvif
,
1501 wl
->conf
.conn
.suspend_wake_up_event
,
1502 wl
->conf
.conn
.suspend_listen_interval
);
1505 wl1271_error("suspend: set wake up conditions failed: %d", ret
);
1507 wl1271_ps_elp_sleep(wl
);
1514 static int wl1271_configure_suspend_ap(struct wl1271
*wl
,
1515 struct wl12xx_vif
*wlvif
)
1519 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
))
1522 ret
= wl1271_ps_elp_wakeup(wl
);
1526 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, true);
1528 wl1271_ps_elp_sleep(wl
);
1534 static int wl1271_configure_suspend(struct wl1271
*wl
,
1535 struct wl12xx_vif
*wlvif
,
1536 struct cfg80211_wowlan
*wow
)
1538 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
)
1539 return wl1271_configure_suspend_sta(wl
, wlvif
, wow
);
1540 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
1541 return wl1271_configure_suspend_ap(wl
, wlvif
);
1545 static void wl1271_configure_resume(struct wl1271
*wl
,
1546 struct wl12xx_vif
*wlvif
)
1549 bool is_ap
= wlvif
->bss_type
== BSS_TYPE_AP_BSS
;
1550 bool is_sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
1552 if ((!is_ap
) && (!is_sta
))
1555 ret
= wl1271_ps_elp_wakeup(wl
);
1560 wl1271_configure_wowlan(wl
, NULL
);
1562 ret
= wl1271_acx_wake_up_conditions(wl
, wlvif
,
1563 wl
->conf
.conn
.wake_up_event
,
1564 wl
->conf
.conn
.listen_interval
);
1567 wl1271_error("resume: wake up conditions failed: %d",
1571 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, false);
1574 wl1271_ps_elp_sleep(wl
);
1577 static int wl1271_op_suspend(struct ieee80211_hw
*hw
,
1578 struct cfg80211_wowlan
*wow
)
1580 struct wl1271
*wl
= hw
->priv
;
1581 struct wl12xx_vif
*wlvif
;
1584 wl1271_debug(DEBUG_MAC80211
, "mac80211 suspend wow=%d", !!wow
);
1587 wl1271_tx_flush(wl
);
1589 mutex_lock(&wl
->mutex
);
1590 wl
->wow_enabled
= true;
1591 wl12xx_for_each_wlvif(wl
, wlvif
) {
1592 ret
= wl1271_configure_suspend(wl
, wlvif
, wow
);
1594 mutex_unlock(&wl
->mutex
);
1595 wl1271_warning("couldn't prepare device to suspend");
1599 mutex_unlock(&wl
->mutex
);
1600 /* flush any remaining work */
1601 wl1271_debug(DEBUG_MAC80211
, "flushing remaining works");
1604 * disable and re-enable interrupts in order to flush
1607 wlcore_disable_interrupts(wl
);
1610 * set suspended flag to avoid triggering a new threaded_irq
1611 * work. no need for spinlock as interrupts are disabled.
1613 set_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
);
1615 wlcore_enable_interrupts(wl
);
1616 flush_work(&wl
->tx_work
);
1617 flush_delayed_work(&wl
->elp_work
);
1622 static int wl1271_op_resume(struct ieee80211_hw
*hw
)
1624 struct wl1271
*wl
= hw
->priv
;
1625 struct wl12xx_vif
*wlvif
;
1626 unsigned long flags
;
1627 bool run_irq_work
= false;
1629 wl1271_debug(DEBUG_MAC80211
, "mac80211 resume wow=%d",
1631 WARN_ON(!wl
->wow_enabled
);
1634 * re-enable irq_work enqueuing, and call irq_work directly if
1635 * there is a pending work.
1637 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1638 clear_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
);
1639 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK
, &wl
->flags
))
1640 run_irq_work
= true;
1641 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1644 wl1271_debug(DEBUG_MAC80211
,
1645 "run postponed irq_work directly");
1647 wlcore_enable_interrupts(wl
);
1650 mutex_lock(&wl
->mutex
);
1651 wl12xx_for_each_wlvif(wl
, wlvif
) {
1652 wl1271_configure_resume(wl
, wlvif
);
1654 wl
->wow_enabled
= false;
1655 mutex_unlock(&wl
->mutex
);
1661 static int wl1271_op_start(struct ieee80211_hw
*hw
)
1663 wl1271_debug(DEBUG_MAC80211
, "mac80211 start");
1666 * We have to delay the booting of the hardware because
1667 * we need to know the local MAC address before downloading and
1668 * initializing the firmware. The MAC address cannot be changed
1669 * after boot, and without the proper MAC address, the firmware
1670 * will not function properly.
1672 * The MAC address is first known when the corresponding interface
1673 * is added. That is where we will initialize the hardware.
1679 static void wl1271_op_stop(struct ieee80211_hw
*hw
)
1681 struct wl1271
*wl
= hw
->priv
;
1684 wl1271_debug(DEBUG_MAC80211
, "mac80211 stop");
1687 * Interrupts must be disabled before setting the state to OFF.
1688 * Otherwise, the interrupt handler might be called and exit without
1689 * reading the interrupt status.
1691 wlcore_disable_interrupts(wl
);
1692 mutex_lock(&wl
->mutex
);
1693 if (wl
->state
== WL1271_STATE_OFF
) {
1694 mutex_unlock(&wl
->mutex
);
1697 * This will not necessarily enable interrupts as interrupts
1698 * may have been disabled when op_stop was called. It will,
1699 * however, balance the above call to disable_interrupts().
1701 wlcore_enable_interrupts(wl
);
1706 * this must be before the cancel_work calls below, so that the work
1707 * functions don't perform further work.
1709 wl
->state
= WL1271_STATE_OFF
;
1710 mutex_unlock(&wl
->mutex
);
1712 wl1271_flush_deferred_work(wl
);
1713 cancel_delayed_work_sync(&wl
->scan_complete_work
);
1714 cancel_work_sync(&wl
->netstack_work
);
1715 cancel_work_sync(&wl
->tx_work
);
1716 cancel_delayed_work_sync(&wl
->elp_work
);
1717 cancel_delayed_work_sync(&wl
->tx_watchdog_work
);
1718 cancel_delayed_work_sync(&wl
->connection_loss_work
);
1720 /* let's notify MAC80211 about the remaining pending TX frames */
1721 wl12xx_tx_reset(wl
);
1722 mutex_lock(&wl
->mutex
);
1724 wl1271_power_off(wl
);
1726 wl
->band
= IEEE80211_BAND_2GHZ
;
1729 wl
->power_level
= WL1271_DEFAULT_POWER_LEVEL
;
1730 wl
->channel_type
= NL80211_CHAN_NO_HT
;
1731 wl
->tx_blocks_available
= 0;
1732 wl
->tx_allocated_blocks
= 0;
1733 wl
->tx_results_count
= 0;
1734 wl
->tx_packets_count
= 0;
1735 wl
->time_offset
= 0;
1736 wl
->ap_fw_ps_map
= 0;
1738 wl
->sched_scanning
= false;
1739 memset(wl
->roles_map
, 0, sizeof(wl
->roles_map
));
1740 memset(wl
->links_map
, 0, sizeof(wl
->links_map
));
1741 memset(wl
->roc_map
, 0, sizeof(wl
->roc_map
));
1742 wl
->active_sta_count
= 0;
1744 /* The system link is always allocated */
1745 __set_bit(WL12XX_SYSTEM_HLID
, wl
->links_map
);
1748 * this is performed after the cancel_work calls and the associated
1749 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1750 * get executed before all these vars have been reset.
1754 wl
->tx_blocks_freed
= 0;
1756 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
1757 wl
->tx_pkts_freed
[i
] = 0;
1758 wl
->tx_allocated_pkts
[i
] = 0;
1761 wl1271_debugfs_reset(wl
);
1763 kfree(wl
->fw_status_1
);
1764 wl
->fw_status_1
= NULL
;
1765 wl
->fw_status_2
= NULL
;
1766 kfree(wl
->tx_res_if
);
1767 wl
->tx_res_if
= NULL
;
1768 kfree(wl
->target_mem_map
);
1769 wl
->target_mem_map
= NULL
;
1771 mutex_unlock(&wl
->mutex
);
1774 static int wl12xx_allocate_rate_policy(struct wl1271
*wl
, u8
*idx
)
1776 u8 policy
= find_first_zero_bit(wl
->rate_policies_map
,
1777 WL12XX_MAX_RATE_POLICIES
);
1778 if (policy
>= WL12XX_MAX_RATE_POLICIES
)
1781 __set_bit(policy
, wl
->rate_policies_map
);
1786 static void wl12xx_free_rate_policy(struct wl1271
*wl
, u8
*idx
)
1788 if (WARN_ON(*idx
>= WL12XX_MAX_RATE_POLICIES
))
1791 __clear_bit(*idx
, wl
->rate_policies_map
);
1792 *idx
= WL12XX_MAX_RATE_POLICIES
;
1795 static u8
wl12xx_get_role_type(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
1797 switch (wlvif
->bss_type
) {
1798 case BSS_TYPE_AP_BSS
:
1800 return WL1271_ROLE_P2P_GO
;
1802 return WL1271_ROLE_AP
;
1804 case BSS_TYPE_STA_BSS
:
1806 return WL1271_ROLE_P2P_CL
;
1808 return WL1271_ROLE_STA
;
1811 return WL1271_ROLE_IBSS
;
1814 wl1271_error("invalid bss_type: %d", wlvif
->bss_type
);
1816 return WL12XX_INVALID_ROLE_TYPE
;
1819 static int wl12xx_init_vif_data(struct wl1271
*wl
, struct ieee80211_vif
*vif
)
1821 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
1824 /* clear everything but the persistent data */
1825 memset(wlvif
, 0, offsetof(struct wl12xx_vif
, persistent
));
1827 switch (ieee80211_vif_type_p2p(vif
)) {
1828 case NL80211_IFTYPE_P2P_CLIENT
:
1831 case NL80211_IFTYPE_STATION
:
1832 wlvif
->bss_type
= BSS_TYPE_STA_BSS
;
1834 case NL80211_IFTYPE_ADHOC
:
1835 wlvif
->bss_type
= BSS_TYPE_IBSS
;
1837 case NL80211_IFTYPE_P2P_GO
:
1840 case NL80211_IFTYPE_AP
:
1841 wlvif
->bss_type
= BSS_TYPE_AP_BSS
;
1844 wlvif
->bss_type
= MAX_BSS_TYPE
;
1848 wlvif
->role_id
= WL12XX_INVALID_ROLE_ID
;
1849 wlvif
->dev_role_id
= WL12XX_INVALID_ROLE_ID
;
1850 wlvif
->dev_hlid
= WL12XX_INVALID_LINK_ID
;
1852 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
1853 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
1854 /* init sta/ibss data */
1855 wlvif
->sta
.hlid
= WL12XX_INVALID_LINK_ID
;
1856 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.basic_rate_idx
);
1857 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.ap_rate_idx
);
1858 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.p2p_rate_idx
);
1859 wlvif
->basic_rate_set
= CONF_TX_RATE_MASK_BASIC
;
1860 wlvif
->basic_rate
= CONF_TX_RATE_MASK_BASIC
;
1861 wlvif
->rate_set
= CONF_TX_RATE_MASK_BASIC
;
1864 wlvif
->ap
.bcast_hlid
= WL12XX_INVALID_LINK_ID
;
1865 wlvif
->ap
.global_hlid
= WL12XX_INVALID_LINK_ID
;
1866 wl12xx_allocate_rate_policy(wl
, &wlvif
->ap
.mgmt_rate_idx
);
1867 wl12xx_allocate_rate_policy(wl
, &wlvif
->ap
.bcast_rate_idx
);
1868 for (i
= 0; i
< CONF_TX_MAX_AC_COUNT
; i
++)
1869 wl12xx_allocate_rate_policy(wl
,
1870 &wlvif
->ap
.ucast_rate_idx
[i
]);
1871 wlvif
->basic_rate_set
= CONF_TX_AP_ENABLED_RATES
;
1873 * TODO: check if basic_rate shouldn't be
1874 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
1875 * instead (the same thing for STA above).
1877 wlvif
->basic_rate
= CONF_TX_AP_ENABLED_RATES
;
1878 /* TODO: this seems to be used only for STA, check it */
1879 wlvif
->rate_set
= CONF_TX_AP_ENABLED_RATES
;
1882 wlvif
->bitrate_masks
[IEEE80211_BAND_2GHZ
] = wl
->conf
.tx
.basic_rate
;
1883 wlvif
->bitrate_masks
[IEEE80211_BAND_5GHZ
] = wl
->conf
.tx
.basic_rate_5
;
1884 wlvif
->beacon_int
= WL1271_DEFAULT_BEACON_INT
;
1887 * mac80211 configures some values globally, while we treat them
1888 * per-interface. thus, on init, we have to copy them from wl
1890 wlvif
->band
= wl
->band
;
1891 wlvif
->channel
= wl
->channel
;
1892 wlvif
->power_level
= wl
->power_level
;
1893 wlvif
->channel_type
= wl
->channel_type
;
1895 INIT_WORK(&wlvif
->rx_streaming_enable_work
,
1896 wl1271_rx_streaming_enable_work
);
1897 INIT_WORK(&wlvif
->rx_streaming_disable_work
,
1898 wl1271_rx_streaming_disable_work
);
1899 INIT_LIST_HEAD(&wlvif
->list
);
1901 setup_timer(&wlvif
->rx_streaming_timer
, wl1271_rx_streaming_timer
,
1902 (unsigned long) wlvif
);
1906 static bool wl12xx_init_fw(struct wl1271
*wl
)
1908 int retries
= WL1271_BOOT_RETRIES
;
1909 bool booted
= false;
1910 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
1915 ret
= wl12xx_chip_wakeup(wl
, false);
1919 ret
= wl
->ops
->boot(wl
);
1923 ret
= wl1271_hw_init(wl
);
1931 mutex_unlock(&wl
->mutex
);
1932 /* Unlocking the mutex in the middle of handling is
1933 inherently unsafe. In this case we deem it safe to do,
1934 because we need to let any possibly pending IRQ out of
1935 the system (and while we are WL1271_STATE_OFF the IRQ
1936 work function will not do anything.) Also, any other
1937 possible concurrent operations will fail due to the
1938 current state, hence the wl1271 struct should be safe. */
1939 wlcore_disable_interrupts(wl
);
1940 wl1271_flush_deferred_work(wl
);
1941 cancel_work_sync(&wl
->netstack_work
);
1942 mutex_lock(&wl
->mutex
);
1944 wl1271_power_off(wl
);
1948 wl1271_error("firmware boot failed despite %d retries",
1949 WL1271_BOOT_RETRIES
);
1953 wl1271_info("firmware booted (%s)", wl
->chip
.fw_ver_str
);
1955 /* update hw/fw version info in wiphy struct */
1956 wiphy
->hw_version
= wl
->chip
.id
;
1957 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
1958 sizeof(wiphy
->fw_version
));
1961 * Now we know if 11a is supported (info from the NVS), so disable
1962 * 11a channels if not supported
1964 if (!wl
->enable_11a
)
1965 wiphy
->bands
[IEEE80211_BAND_5GHZ
]->n_channels
= 0;
1967 wl1271_debug(DEBUG_MAC80211
, "11a is %ssupported",
1968 wl
->enable_11a
? "" : "not ");
1970 wl
->state
= WL1271_STATE_ON
;
1975 static bool wl12xx_dev_role_started(struct wl12xx_vif
*wlvif
)
1977 return wlvif
->dev_hlid
!= WL12XX_INVALID_LINK_ID
;
1981 * Check whether a fw switch (i.e. moving from one loaded
1982 * fw to another) is needed. This function is also responsible
1983 * for updating wl->last_vif_count, so it must be called before
1984 * loading a non-plt fw (so the correct fw (single-role/multi-role)
1987 static bool wl12xx_need_fw_change(struct wl1271
*wl
,
1988 struct vif_counter_data vif_counter_data
,
1991 enum wl12xx_fw_type current_fw
= wl
->fw_type
;
1992 u8 vif_count
= vif_counter_data
.counter
;
1994 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
))
1997 /* increase the vif count if this is a new vif */
1998 if (add
&& !vif_counter_data
.cur_vif_running
)
2001 wl
->last_vif_count
= vif_count
;
2003 /* no need for fw change if the device is OFF */
2004 if (wl
->state
== WL1271_STATE_OFF
)
2007 if (vif_count
> 1 && current_fw
== WL12XX_FW_TYPE_NORMAL
)
2009 if (vif_count
<= 1 && current_fw
== WL12XX_FW_TYPE_MULTI
)
2016 * Enter "forced psm". Make sure the sta is in psm against the ap,
2017 * to make the fw switch a bit more disconnection-persistent.
2019 static void wl12xx_force_active_psm(struct wl1271
*wl
)
2021 struct wl12xx_vif
*wlvif
;
2023 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
2024 wl1271_ps_set_mode(wl
, wlvif
, STATION_POWER_SAVE_MODE
);
2028 static int wl1271_op_add_interface(struct ieee80211_hw
*hw
,
2029 struct ieee80211_vif
*vif
)
2031 struct wl1271
*wl
= hw
->priv
;
2032 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2033 struct vif_counter_data vif_count
;
2036 bool booted
= false;
2038 vif
->driver_flags
|= IEEE80211_VIF_BEACON_FILTER
|
2039 IEEE80211_VIF_SUPPORTS_CQM_RSSI
;
2041 wl1271_debug(DEBUG_MAC80211
, "mac80211 add interface type %d mac %pM",
2042 ieee80211_vif_type_p2p(vif
), vif
->addr
);
2044 wl12xx_get_vif_count(hw
, vif
, &vif_count
);
2046 mutex_lock(&wl
->mutex
);
2047 ret
= wl1271_ps_elp_wakeup(wl
);
2052 * in some very corner case HW recovery scenarios its possible to
2053 * get here before __wl1271_op_remove_interface is complete, so
2054 * opt out if that is the case.
2056 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
) ||
2057 test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)) {
2063 ret
= wl12xx_init_vif_data(wl
, vif
);
2068 role_type
= wl12xx_get_role_type(wl
, wlvif
);
2069 if (role_type
== WL12XX_INVALID_ROLE_TYPE
) {
2074 if (wl12xx_need_fw_change(wl
, vif_count
, true)) {
2075 wl12xx_force_active_psm(wl
);
2076 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
);
2077 mutex_unlock(&wl
->mutex
);
2078 wl1271_recovery_work(&wl
->recovery_work
);
2083 * TODO: after the nvs issue will be solved, move this block
2084 * to start(), and make sure here the driver is ON.
2086 if (wl
->state
== WL1271_STATE_OFF
) {
2088 * we still need this in order to configure the fw
2089 * while uploading the nvs
2091 memcpy(wl
->addresses
[0].addr
, vif
->addr
, ETH_ALEN
);
2093 booted
= wl12xx_init_fw(wl
);
2100 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2101 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2103 * The device role is a special role used for
2104 * rx and tx frames prior to association (as
2105 * the STA role can get packets only from
2106 * its associated bssid)
2108 ret
= wl12xx_cmd_role_enable(wl
, vif
->addr
,
2110 &wlvif
->dev_role_id
);
2115 ret
= wl12xx_cmd_role_enable(wl
, vif
->addr
,
2116 role_type
, &wlvif
->role_id
);
2120 ret
= wl1271_init_vif_specific(wl
, vif
);
2124 list_add(&wlvif
->list
, &wl
->wlvif_list
);
2125 set_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
);
2127 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
2132 wl1271_ps_elp_sleep(wl
);
2134 mutex_unlock(&wl
->mutex
);
2139 static void __wl1271_op_remove_interface(struct wl1271
*wl
,
2140 struct ieee80211_vif
*vif
,
2141 bool reset_tx_queues
)
2143 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2146 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove interface");
2148 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
2151 /* because of hardware recovery, we may get here twice */
2152 if (wl
->state
!= WL1271_STATE_ON
)
2155 wl1271_info("down");
2157 if (wl
->scan
.state
!= WL1271_SCAN_STATE_IDLE
&&
2158 wl
->scan_vif
== vif
) {
2160 * Rearm the tx watchdog just before idling scan. This
2161 * prevents just-finished scans from triggering the watchdog
2163 wl12xx_rearm_tx_watchdog_locked(wl
);
2165 wl
->scan
.state
= WL1271_SCAN_STATE_IDLE
;
2166 memset(wl
->scan
.scanned_ch
, 0, sizeof(wl
->scan
.scanned_ch
));
2167 wl
->scan_vif
= NULL
;
2168 wl
->scan
.req
= NULL
;
2169 ieee80211_scan_completed(wl
->hw
, true);
2172 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
)) {
2173 /* disable active roles */
2174 ret
= wl1271_ps_elp_wakeup(wl
);
2178 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2179 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2180 if (wl12xx_dev_role_started(wlvif
))
2181 wl12xx_stop_dev(wl
, wlvif
);
2183 ret
= wl12xx_cmd_role_disable(wl
, &wlvif
->dev_role_id
);
2188 ret
= wl12xx_cmd_role_disable(wl
, &wlvif
->role_id
);
2192 wl1271_ps_elp_sleep(wl
);
2195 /* clear all hlids (except system_hlid) */
2196 wlvif
->dev_hlid
= WL12XX_INVALID_LINK_ID
;
2198 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2199 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2200 wlvif
->sta
.hlid
= WL12XX_INVALID_LINK_ID
;
2201 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.basic_rate_idx
);
2202 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.ap_rate_idx
);
2203 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.p2p_rate_idx
);
2205 wlvif
->ap
.bcast_hlid
= WL12XX_INVALID_LINK_ID
;
2206 wlvif
->ap
.global_hlid
= WL12XX_INVALID_LINK_ID
;
2207 wl12xx_free_rate_policy(wl
, &wlvif
->ap
.mgmt_rate_idx
);
2208 wl12xx_free_rate_policy(wl
, &wlvif
->ap
.bcast_rate_idx
);
2209 for (i
= 0; i
< CONF_TX_MAX_AC_COUNT
; i
++)
2210 wl12xx_free_rate_policy(wl
,
2211 &wlvif
->ap
.ucast_rate_idx
[i
]);
2212 wl1271_free_ap_keys(wl
, wlvif
);
2215 dev_kfree_skb(wlvif
->probereq
);
2216 wlvif
->probereq
= NULL
;
2217 wl12xx_tx_reset_wlvif(wl
, wlvif
);
2218 if (wl
->last_wlvif
== wlvif
)
2219 wl
->last_wlvif
= NULL
;
2220 list_del(&wlvif
->list
);
2221 memset(wlvif
->ap
.sta_hlid_map
, 0, sizeof(wlvif
->ap
.sta_hlid_map
));
2222 wlvif
->role_id
= WL12XX_INVALID_ROLE_ID
;
2223 wlvif
->dev_role_id
= WL12XX_INVALID_ROLE_ID
;
2225 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
2230 mutex_unlock(&wl
->mutex
);
2232 del_timer_sync(&wlvif
->rx_streaming_timer
);
2233 cancel_work_sync(&wlvif
->rx_streaming_enable_work
);
2234 cancel_work_sync(&wlvif
->rx_streaming_disable_work
);
2236 mutex_lock(&wl
->mutex
);
2239 static void wl1271_op_remove_interface(struct ieee80211_hw
*hw
,
2240 struct ieee80211_vif
*vif
)
2242 struct wl1271
*wl
= hw
->priv
;
2243 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2244 struct wl12xx_vif
*iter
;
2245 struct vif_counter_data vif_count
;
2246 bool cancel_recovery
= true;
2248 wl12xx_get_vif_count(hw
, vif
, &vif_count
);
2249 mutex_lock(&wl
->mutex
);
2251 if (wl
->state
== WL1271_STATE_OFF
||
2252 !test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
2256 * wl->vif can be null here if someone shuts down the interface
2257 * just when hardware recovery has been started.
2259 wl12xx_for_each_wlvif(wl
, iter
) {
2263 __wl1271_op_remove_interface(wl
, vif
, true);
2266 WARN_ON(iter
!= wlvif
);
2267 if (wl12xx_need_fw_change(wl
, vif_count
, false)) {
2268 wl12xx_force_active_psm(wl
);
2269 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
);
2270 wl12xx_queue_recovery_work(wl
);
2271 cancel_recovery
= false;
2274 mutex_unlock(&wl
->mutex
);
2275 if (cancel_recovery
)
2276 cancel_work_sync(&wl
->recovery_work
);
2279 static int wl12xx_op_change_interface(struct ieee80211_hw
*hw
,
2280 struct ieee80211_vif
*vif
,
2281 enum nl80211_iftype new_type
, bool p2p
)
2283 struct wl1271
*wl
= hw
->priv
;
2286 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
);
2287 wl1271_op_remove_interface(hw
, vif
);
2289 vif
->type
= new_type
;
2291 ret
= wl1271_op_add_interface(hw
, vif
);
2293 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
);
2297 static int wl1271_join(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2301 bool is_ibss
= (wlvif
->bss_type
== BSS_TYPE_IBSS
);
2304 * One of the side effects of the JOIN command is that is clears
2305 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2306 * to a WPA/WPA2 access point will therefore kill the data-path.
2307 * Currently the only valid scenario for JOIN during association
2308 * is on roaming, in which case we will also be given new keys.
2309 * Keep the below message for now, unless it starts bothering
2310 * users who really like to roam a lot :)
2312 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2313 wl1271_info("JOIN while associated.");
2315 /* clear encryption type */
2316 wlvif
->encryption_type
= KEY_NONE
;
2319 set_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
);
2322 ret
= wl12xx_cmd_role_start_ibss(wl
, wlvif
);
2324 ret
= wl12xx_cmd_role_start_sta(wl
, wlvif
);
2328 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2332 * The join command disable the keep-alive mode, shut down its process,
2333 * and also clear the template config, so we need to reset it all after
2334 * the join. The acx_aid starts the keep-alive process, and the order
2335 * of the commands below is relevant.
2337 ret
= wl1271_acx_keep_alive_mode(wl
, wlvif
, true);
2341 ret
= wl1271_acx_aid(wl
, wlvif
, wlvif
->aid
);
2345 ret
= wl12xx_cmd_build_klv_null_data(wl
, wlvif
);
2349 ret
= wl1271_acx_keep_alive_config(wl
, wlvif
,
2350 CMD_TEMPL_KLV_IDX_NULL_DATA
,
2351 ACX_KEEP_ALIVE_TPL_VALID
);
2359 static int wl1271_unjoin(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2363 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
)) {
2364 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
2366 wl12xx_cmd_stop_channel_switch(wl
);
2367 ieee80211_chswitch_done(vif
, false);
2370 /* to stop listening to a channel, we disconnect */
2371 ret
= wl12xx_cmd_role_stop_sta(wl
, wlvif
);
2375 /* reset TX security counters on a clean disconnect */
2376 wlvif
->tx_security_last_seq_lsb
= 0;
2377 wlvif
->tx_security_seq
= 0;
2383 static void wl1271_set_band_rate(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2385 wlvif
->basic_rate_set
= wlvif
->bitrate_masks
[wlvif
->band
];
2386 wlvif
->rate_set
= wlvif
->basic_rate_set
;
2389 static int wl1271_sta_handle_idle(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2393 bool cur_idle
= !test_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
2395 if (idle
== cur_idle
)
2399 /* no need to croc if we weren't busy (e.g. during boot) */
2400 if (wl12xx_dev_role_started(wlvif
)) {
2401 ret
= wl12xx_stop_dev(wl
, wlvif
);
2406 wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
2407 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
2410 ret
= wl1271_acx_keep_alive_config(
2411 wl
, wlvif
, CMD_TEMPL_KLV_IDX_NULL_DATA
,
2412 ACX_KEEP_ALIVE_TPL_INVALID
);
2415 clear_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
2417 /* The current firmware only supports sched_scan in idle */
2418 if (wl
->sched_scanning
) {
2419 wl1271_scan_sched_scan_stop(wl
);
2420 ieee80211_sched_scan_stopped(wl
->hw
);
2423 ret
= wl12xx_start_dev(wl
, wlvif
);
2426 set_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
2433 static int wl12xx_config_vif(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2434 struct ieee80211_conf
*conf
, u32 changed
)
2436 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
2439 channel
= ieee80211_frequency_to_channel(conf
->channel
->center_freq
);
2441 /* if the channel changes while joined, join again */
2442 if (changed
& IEEE80211_CONF_CHANGE_CHANNEL
&&
2443 ((wlvif
->band
!= conf
->channel
->band
) ||
2444 (wlvif
->channel
!= channel
) ||
2445 (wlvif
->channel_type
!= conf
->channel_type
))) {
2446 /* send all pending packets */
2447 wl1271_tx_work_locked(wl
);
2448 wlvif
->band
= conf
->channel
->band
;
2449 wlvif
->channel
= channel
;
2450 wlvif
->channel_type
= conf
->channel_type
;
2453 ret
= wl1271_init_ap_rates(wl
, wlvif
);
2455 wl1271_error("AP rate policy change failed %d",
2459 * FIXME: the mac80211 should really provide a fixed
2460 * rate to use here. for now, just use the smallest
2461 * possible rate for the band as a fixed rate for
2462 * association frames and other control messages.
2464 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2465 wl1271_set_band_rate(wl
, wlvif
);
2468 wl1271_tx_min_rate_get(wl
,
2469 wlvif
->basic_rate_set
);
2470 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
2472 wl1271_warning("rate policy for channel "
2476 * change the ROC channel. do it only if we are
2477 * not idle. otherwise, CROC will be called
2480 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
,
2482 wl12xx_dev_role_started(wlvif
) &&
2483 !(conf
->flags
& IEEE80211_CONF_IDLE
)) {
2484 ret
= wl12xx_stop_dev(wl
, wlvif
);
2488 ret
= wl12xx_start_dev(wl
, wlvif
);
2495 if ((changed
& IEEE80211_CONF_CHANGE_PS
) && !is_ap
) {
2497 if ((conf
->flags
& IEEE80211_CONF_PS
) &&
2498 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) &&
2499 !test_bit(WLVIF_FLAG_IN_PS
, &wlvif
->flags
)) {
2504 if (wl
->conf
.conn
.forced_ps
) {
2505 ps_mode
= STATION_POWER_SAVE_MODE
;
2506 ps_mode_str
= "forced";
2508 ps_mode
= STATION_AUTO_PS_MODE
;
2509 ps_mode_str
= "auto";
2512 wl1271_debug(DEBUG_PSM
, "%s ps enabled", ps_mode_str
);
2514 ret
= wl1271_ps_set_mode(wl
, wlvif
, ps_mode
);
2517 wl1271_warning("enter %s ps failed %d",
2520 } else if (!(conf
->flags
& IEEE80211_CONF_PS
) &&
2521 test_bit(WLVIF_FLAG_IN_PS
, &wlvif
->flags
)) {
2523 wl1271_debug(DEBUG_PSM
, "auto ps disabled");
2525 ret
= wl1271_ps_set_mode(wl
, wlvif
,
2526 STATION_ACTIVE_MODE
);
2528 wl1271_warning("exit auto ps failed %d", ret
);
2532 if (conf
->power_level
!= wlvif
->power_level
) {
2533 ret
= wl1271_acx_tx_power(wl
, wlvif
, conf
->power_level
);
2537 wlvif
->power_level
= conf
->power_level
;
2543 static int wl1271_op_config(struct ieee80211_hw
*hw
, u32 changed
)
2545 struct wl1271
*wl
= hw
->priv
;
2546 struct wl12xx_vif
*wlvif
;
2547 struct ieee80211_conf
*conf
= &hw
->conf
;
2548 int channel
, ret
= 0;
2550 channel
= ieee80211_frequency_to_channel(conf
->channel
->center_freq
);
2552 wl1271_debug(DEBUG_MAC80211
, "mac80211 config ch %d psm %s power %d %s"
2555 conf
->flags
& IEEE80211_CONF_PS
? "on" : "off",
2557 conf
->flags
& IEEE80211_CONF_IDLE
? "idle" : "in use",
2561 * mac80211 will go to idle nearly immediately after transmitting some
2562 * frames, such as the deauth. To make sure those frames reach the air,
2563 * wait here until the TX queue is fully flushed.
2565 if ((changed
& IEEE80211_CONF_CHANGE_CHANNEL
) ||
2566 ((changed
& IEEE80211_CONF_CHANGE_IDLE
) &&
2567 (conf
->flags
& IEEE80211_CONF_IDLE
)))
2568 wl1271_tx_flush(wl
);
2570 mutex_lock(&wl
->mutex
);
2572 /* we support configuring the channel and band even while off */
2573 if (changed
& IEEE80211_CONF_CHANGE_CHANNEL
) {
2574 wl
->band
= conf
->channel
->band
;
2575 wl
->channel
= channel
;
2576 wl
->channel_type
= conf
->channel_type
;
2579 if (changed
& IEEE80211_CONF_CHANGE_POWER
)
2580 wl
->power_level
= conf
->power_level
;
2582 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
2585 ret
= wl1271_ps_elp_wakeup(wl
);
2589 /* configure each interface */
2590 wl12xx_for_each_wlvif(wl
, wlvif
) {
2591 ret
= wl12xx_config_vif(wl
, wlvif
, conf
, changed
);
2597 wl1271_ps_elp_sleep(wl
);
2600 mutex_unlock(&wl
->mutex
);
2605 struct wl1271_filter_params
{
2608 u8 mc_list
[ACX_MC_ADDRESS_GROUP_MAX
][ETH_ALEN
];
2611 static u64
wl1271_op_prepare_multicast(struct ieee80211_hw
*hw
,
2612 struct netdev_hw_addr_list
*mc_list
)
2614 struct wl1271_filter_params
*fp
;
2615 struct netdev_hw_addr
*ha
;
2616 struct wl1271
*wl
= hw
->priv
;
2618 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
2621 fp
= kzalloc(sizeof(*fp
), GFP_ATOMIC
);
2623 wl1271_error("Out of memory setting filters.");
2627 /* update multicast filtering parameters */
2628 fp
->mc_list_length
= 0;
2629 if (netdev_hw_addr_list_count(mc_list
) > ACX_MC_ADDRESS_GROUP_MAX
) {
2630 fp
->enabled
= false;
2633 netdev_hw_addr_list_for_each(ha
, mc_list
) {
2634 memcpy(fp
->mc_list
[fp
->mc_list_length
],
2635 ha
->addr
, ETH_ALEN
);
2636 fp
->mc_list_length
++;
2640 return (u64
)(unsigned long)fp
;
2643 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
2646 FIF_BCN_PRBRESP_PROMISC | \
2650 static void wl1271_op_configure_filter(struct ieee80211_hw
*hw
,
2651 unsigned int changed
,
2652 unsigned int *total
, u64 multicast
)
2654 struct wl1271_filter_params
*fp
= (void *)(unsigned long)multicast
;
2655 struct wl1271
*wl
= hw
->priv
;
2656 struct wl12xx_vif
*wlvif
;
2660 wl1271_debug(DEBUG_MAC80211
, "mac80211 configure filter changed %x"
2661 " total %x", changed
, *total
);
2663 mutex_lock(&wl
->mutex
);
2665 *total
&= WL1271_SUPPORTED_FILTERS
;
2666 changed
&= WL1271_SUPPORTED_FILTERS
;
2668 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
2671 ret
= wl1271_ps_elp_wakeup(wl
);
2675 wl12xx_for_each_wlvif(wl
, wlvif
) {
2676 if (wlvif
->bss_type
!= BSS_TYPE_AP_BSS
) {
2677 if (*total
& FIF_ALLMULTI
)
2678 ret
= wl1271_acx_group_address_tbl(wl
, wlvif
,
2682 ret
= wl1271_acx_group_address_tbl(wl
, wlvif
,
2685 fp
->mc_list_length
);
2692 * the fw doesn't provide an api to configure the filters. instead,
2693 * the filters configuration is based on the active roles / ROC
2698 wl1271_ps_elp_sleep(wl
);
2701 mutex_unlock(&wl
->mutex
);
2705 static int wl1271_record_ap_key(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2706 u8 id
, u8 key_type
, u8 key_size
,
2707 const u8
*key
, u8 hlid
, u32 tx_seq_32
,
2710 struct wl1271_ap_key
*ap_key
;
2713 wl1271_debug(DEBUG_CRYPT
, "record ap key id %d", (int)id
);
2715 if (key_size
> MAX_KEY_SIZE
)
2719 * Find next free entry in ap_keys. Also check we are not replacing
2722 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
2723 if (wlvif
->ap
.recorded_keys
[i
] == NULL
)
2726 if (wlvif
->ap
.recorded_keys
[i
]->id
== id
) {
2727 wl1271_warning("trying to record key replacement");
2732 if (i
== MAX_NUM_KEYS
)
2735 ap_key
= kzalloc(sizeof(*ap_key
), GFP_KERNEL
);
2740 ap_key
->key_type
= key_type
;
2741 ap_key
->key_size
= key_size
;
2742 memcpy(ap_key
->key
, key
, key_size
);
2743 ap_key
->hlid
= hlid
;
2744 ap_key
->tx_seq_32
= tx_seq_32
;
2745 ap_key
->tx_seq_16
= tx_seq_16
;
2747 wlvif
->ap
.recorded_keys
[i
] = ap_key
;
2751 static void wl1271_free_ap_keys(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2755 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
2756 kfree(wlvif
->ap
.recorded_keys
[i
]);
2757 wlvif
->ap
.recorded_keys
[i
] = NULL
;
2761 static int wl1271_ap_init_hwenc(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2764 struct wl1271_ap_key
*key
;
2765 bool wep_key_added
= false;
2767 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
2769 if (wlvif
->ap
.recorded_keys
[i
] == NULL
)
2772 key
= wlvif
->ap
.recorded_keys
[i
];
2774 if (hlid
== WL12XX_INVALID_LINK_ID
)
2775 hlid
= wlvif
->ap
.bcast_hlid
;
2777 ret
= wl1271_cmd_set_ap_key(wl
, wlvif
, KEY_ADD_OR_REPLACE
,
2778 key
->id
, key
->key_type
,
2779 key
->key_size
, key
->key
,
2780 hlid
, key
->tx_seq_32
,
2785 if (key
->key_type
== KEY_WEP
)
2786 wep_key_added
= true;
2789 if (wep_key_added
) {
2790 ret
= wl12xx_cmd_set_default_wep_key(wl
, wlvif
->default_key
,
2791 wlvif
->ap
.bcast_hlid
);
2797 wl1271_free_ap_keys(wl
, wlvif
);
2801 static int wl1271_set_key(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2802 u16 action
, u8 id
, u8 key_type
,
2803 u8 key_size
, const u8
*key
, u32 tx_seq_32
,
2804 u16 tx_seq_16
, struct ieee80211_sta
*sta
)
2807 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
2810 struct wl1271_station
*wl_sta
;
2814 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
2815 hlid
= wl_sta
->hlid
;
2817 hlid
= wlvif
->ap
.bcast_hlid
;
2820 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
2822 * We do not support removing keys after AP shutdown.
2823 * Pretend we do to make mac80211 happy.
2825 if (action
!= KEY_ADD_OR_REPLACE
)
2828 ret
= wl1271_record_ap_key(wl
, wlvif
, id
,
2830 key
, hlid
, tx_seq_32
,
2833 ret
= wl1271_cmd_set_ap_key(wl
, wlvif
, action
,
2834 id
, key_type
, key_size
,
2835 key
, hlid
, tx_seq_32
,
2843 static const u8 bcast_addr
[ETH_ALEN
] = {
2844 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2847 addr
= sta
? sta
->addr
: bcast_addr
;
2849 if (is_zero_ether_addr(addr
)) {
2850 /* We dont support TX only encryption */
2854 /* The wl1271 does not allow to remove unicast keys - they
2855 will be cleared automatically on next CMD_JOIN. Ignore the
2856 request silently, as we dont want the mac80211 to emit
2857 an error message. */
2858 if (action
== KEY_REMOVE
&& !is_broadcast_ether_addr(addr
))
2861 /* don't remove key if hlid was already deleted */
2862 if (action
== KEY_REMOVE
&&
2863 wlvif
->sta
.hlid
== WL12XX_INVALID_LINK_ID
)
2866 ret
= wl1271_cmd_set_sta_key(wl
, wlvif
, action
,
2867 id
, key_type
, key_size
,
2868 key
, addr
, tx_seq_32
,
2873 /* the default WEP key needs to be configured at least once */
2874 if (key_type
== KEY_WEP
) {
2875 ret
= wl12xx_cmd_set_default_wep_key(wl
,
2886 static int wlcore_op_set_key(struct ieee80211_hw
*hw
, enum set_key_cmd cmd
,
2887 struct ieee80211_vif
*vif
,
2888 struct ieee80211_sta
*sta
,
2889 struct ieee80211_key_conf
*key_conf
)
2891 struct wl1271
*wl
= hw
->priv
;
2893 return wlcore_hw_set_key(wl
, cmd
, vif
, sta
, key_conf
);
2896 int wlcore_set_key(struct wl1271
*wl
, enum set_key_cmd cmd
,
2897 struct ieee80211_vif
*vif
,
2898 struct ieee80211_sta
*sta
,
2899 struct ieee80211_key_conf
*key_conf
)
2901 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2907 wl1271_debug(DEBUG_MAC80211
, "mac80211 set key");
2909 wl1271_debug(DEBUG_CRYPT
, "CMD: 0x%x sta: %p", cmd
, sta
);
2910 wl1271_debug(DEBUG_CRYPT
, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
2911 key_conf
->cipher
, key_conf
->keyidx
,
2912 key_conf
->keylen
, key_conf
->flags
);
2913 wl1271_dump(DEBUG_CRYPT
, "KEY: ", key_conf
->key
, key_conf
->keylen
);
2915 mutex_lock(&wl
->mutex
);
2917 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
2922 ret
= wl1271_ps_elp_wakeup(wl
);
2926 switch (key_conf
->cipher
) {
2927 case WLAN_CIPHER_SUITE_WEP40
:
2928 case WLAN_CIPHER_SUITE_WEP104
:
2931 key_conf
->hw_key_idx
= key_conf
->keyidx
;
2933 case WLAN_CIPHER_SUITE_TKIP
:
2934 key_type
= KEY_TKIP
;
2936 key_conf
->hw_key_idx
= key_conf
->keyidx
;
2937 tx_seq_32
= WL1271_TX_SECURITY_HI32(wlvif
->tx_security_seq
);
2938 tx_seq_16
= WL1271_TX_SECURITY_LO16(wlvif
->tx_security_seq
);
2940 case WLAN_CIPHER_SUITE_CCMP
:
2943 key_conf
->flags
|= IEEE80211_KEY_FLAG_PUT_IV_SPACE
;
2944 tx_seq_32
= WL1271_TX_SECURITY_HI32(wlvif
->tx_security_seq
);
2945 tx_seq_16
= WL1271_TX_SECURITY_LO16(wlvif
->tx_security_seq
);
2947 case WL1271_CIPHER_SUITE_GEM
:
2949 tx_seq_32
= WL1271_TX_SECURITY_HI32(wlvif
->tx_security_seq
);
2950 tx_seq_16
= WL1271_TX_SECURITY_LO16(wlvif
->tx_security_seq
);
2953 wl1271_error("Unknown key algo 0x%x", key_conf
->cipher
);
2961 ret
= wl1271_set_key(wl
, wlvif
, KEY_ADD_OR_REPLACE
,
2962 key_conf
->keyidx
, key_type
,
2963 key_conf
->keylen
, key_conf
->key
,
2964 tx_seq_32
, tx_seq_16
, sta
);
2966 wl1271_error("Could not add or replace key");
2971 * reconfiguring arp response if the unicast (or common)
2972 * encryption key type was changed
2974 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
2975 (sta
|| key_type
== KEY_WEP
) &&
2976 wlvif
->encryption_type
!= key_type
) {
2977 wlvif
->encryption_type
= key_type
;
2978 ret
= wl1271_cmd_build_arp_rsp(wl
, wlvif
);
2980 wl1271_warning("build arp rsp failed: %d", ret
);
2987 ret
= wl1271_set_key(wl
, wlvif
, KEY_REMOVE
,
2988 key_conf
->keyidx
, key_type
,
2989 key_conf
->keylen
, key_conf
->key
,
2992 wl1271_error("Could not remove key");
2998 wl1271_error("Unsupported key cmd 0x%x", cmd
);
3004 wl1271_ps_elp_sleep(wl
);
3007 mutex_unlock(&wl
->mutex
);
3011 EXPORT_SYMBOL_GPL(wlcore_set_key
);
3013 static int wl1271_op_hw_scan(struct ieee80211_hw
*hw
,
3014 struct ieee80211_vif
*vif
,
3015 struct cfg80211_scan_request
*req
)
3017 struct wl1271
*wl
= hw
->priv
;
3022 wl1271_debug(DEBUG_MAC80211
, "mac80211 hw scan");
3025 ssid
= req
->ssids
[0].ssid
;
3026 len
= req
->ssids
[0].ssid_len
;
3029 mutex_lock(&wl
->mutex
);
3031 if (wl
->state
== WL1271_STATE_OFF
) {
3033 * We cannot return -EBUSY here because cfg80211 will expect
3034 * a call to ieee80211_scan_completed if we do - in this case
3035 * there won't be any call.
3041 ret
= wl1271_ps_elp_wakeup(wl
);
3045 /* fail if there is any role in ROC */
3046 if (find_first_bit(wl
->roc_map
, WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
) {
3047 /* don't allow scanning right now */
3052 ret
= wl1271_scan(hw
->priv
, vif
, ssid
, len
, req
);
3054 wl1271_ps_elp_sleep(wl
);
3056 mutex_unlock(&wl
->mutex
);
3061 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw
*hw
,
3062 struct ieee80211_vif
*vif
)
3064 struct wl1271
*wl
= hw
->priv
;
3067 wl1271_debug(DEBUG_MAC80211
, "mac80211 cancel hw scan");
3069 mutex_lock(&wl
->mutex
);
3071 if (wl
->state
== WL1271_STATE_OFF
)
3074 if (wl
->scan
.state
== WL1271_SCAN_STATE_IDLE
)
3077 ret
= wl1271_ps_elp_wakeup(wl
);
3081 if (wl
->scan
.state
!= WL1271_SCAN_STATE_DONE
) {
3082 ret
= wl1271_scan_stop(wl
);
3088 * Rearm the tx watchdog just before idling scan. This
3089 * prevents just-finished scans from triggering the watchdog
3091 wl12xx_rearm_tx_watchdog_locked(wl
);
3093 wl
->scan
.state
= WL1271_SCAN_STATE_IDLE
;
3094 memset(wl
->scan
.scanned_ch
, 0, sizeof(wl
->scan
.scanned_ch
));
3095 wl
->scan_vif
= NULL
;
3096 wl
->scan
.req
= NULL
;
3097 ieee80211_scan_completed(wl
->hw
, true);
3100 wl1271_ps_elp_sleep(wl
);
3102 mutex_unlock(&wl
->mutex
);
3104 cancel_delayed_work_sync(&wl
->scan_complete_work
);
3107 static int wl1271_op_sched_scan_start(struct ieee80211_hw
*hw
,
3108 struct ieee80211_vif
*vif
,
3109 struct cfg80211_sched_scan_request
*req
,
3110 struct ieee80211_sched_scan_ies
*ies
)
3112 struct wl1271
*wl
= hw
->priv
;
3113 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3116 wl1271_debug(DEBUG_MAC80211
, "wl1271_op_sched_scan_start");
3118 mutex_lock(&wl
->mutex
);
3120 if (wl
->state
== WL1271_STATE_OFF
) {
3125 ret
= wl1271_ps_elp_wakeup(wl
);
3129 ret
= wl1271_scan_sched_scan_config(wl
, wlvif
, req
, ies
);
3133 ret
= wl1271_scan_sched_scan_start(wl
, wlvif
);
3137 wl
->sched_scanning
= true;
3140 wl1271_ps_elp_sleep(wl
);
3142 mutex_unlock(&wl
->mutex
);
3146 static void wl1271_op_sched_scan_stop(struct ieee80211_hw
*hw
,
3147 struct ieee80211_vif
*vif
)
3149 struct wl1271
*wl
= hw
->priv
;
3152 wl1271_debug(DEBUG_MAC80211
, "wl1271_op_sched_scan_stop");
3154 mutex_lock(&wl
->mutex
);
3156 if (wl
->state
== WL1271_STATE_OFF
)
3159 ret
= wl1271_ps_elp_wakeup(wl
);
3163 wl1271_scan_sched_scan_stop(wl
);
3165 wl1271_ps_elp_sleep(wl
);
3167 mutex_unlock(&wl
->mutex
);
3170 static int wl1271_op_set_frag_threshold(struct ieee80211_hw
*hw
, u32 value
)
3172 struct wl1271
*wl
= hw
->priv
;
3175 mutex_lock(&wl
->mutex
);
3177 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
3182 ret
= wl1271_ps_elp_wakeup(wl
);
3186 ret
= wl1271_acx_frag_threshold(wl
, value
);
3188 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret
);
3190 wl1271_ps_elp_sleep(wl
);
3193 mutex_unlock(&wl
->mutex
);
3198 static int wl1271_op_set_rts_threshold(struct ieee80211_hw
*hw
, u32 value
)
3200 struct wl1271
*wl
= hw
->priv
;
3201 struct wl12xx_vif
*wlvif
;
3204 mutex_lock(&wl
->mutex
);
3206 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
3211 ret
= wl1271_ps_elp_wakeup(wl
);
3215 wl12xx_for_each_wlvif(wl
, wlvif
) {
3216 ret
= wl1271_acx_rts_threshold(wl
, wlvif
, value
);
3218 wl1271_warning("set rts threshold failed: %d", ret
);
3220 wl1271_ps_elp_sleep(wl
);
3223 mutex_unlock(&wl
->mutex
);
3228 static int wl1271_ssid_set(struct ieee80211_vif
*vif
, struct sk_buff
*skb
,
3231 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3233 const u8
*ptr
= cfg80211_find_ie(WLAN_EID_SSID
, skb
->data
+ offset
,
3237 wl1271_error("No SSID in IEs!");
3242 if (ssid_len
> IEEE80211_MAX_SSID_LEN
) {
3243 wl1271_error("SSID is too long!");
3247 wlvif
->ssid_len
= ssid_len
;
3248 memcpy(wlvif
->ssid
, ptr
+2, ssid_len
);
3252 static void wl12xx_remove_ie(struct sk_buff
*skb
, u8 eid
, int ieoffset
)
3255 const u8
*next
, *end
= skb
->data
+ skb
->len
;
3256 u8
*ie
= (u8
*)cfg80211_find_ie(eid
, skb
->data
+ ieoffset
,
3257 skb
->len
- ieoffset
);
3262 memmove(ie
, next
, end
- next
);
3263 skb_trim(skb
, skb
->len
- len
);
3266 static void wl12xx_remove_vendor_ie(struct sk_buff
*skb
,
3267 unsigned int oui
, u8 oui_type
,
3271 const u8
*next
, *end
= skb
->data
+ skb
->len
;
3272 u8
*ie
= (u8
*)cfg80211_find_vendor_ie(oui
, oui_type
,
3273 skb
->data
+ ieoffset
,
3274 skb
->len
- ieoffset
);
3279 memmove(ie
, next
, end
- next
);
3280 skb_trim(skb
, skb
->len
- len
);
3283 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271
*wl
, u32 rates
,
3284 struct ieee80211_vif
*vif
)
3286 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3287 struct sk_buff
*skb
;
3290 skb
= ieee80211_proberesp_get(wl
->hw
, vif
);
3294 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3295 CMD_TEMPL_AP_PROBE_RESPONSE
,
3304 wl1271_debug(DEBUG_AP
, "probe response updated");
3305 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
, &wlvif
->flags
);
3311 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271
*wl
,
3312 struct ieee80211_vif
*vif
,
3314 size_t probe_rsp_len
,
3317 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3318 struct ieee80211_bss_conf
*bss_conf
= &vif
->bss_conf
;
3319 u8 probe_rsp_templ
[WL1271_CMD_TEMPL_MAX_SIZE
];
3320 int ssid_ie_offset
, ie_offset
, templ_len
;
3323 /* no need to change probe response if the SSID is set correctly */
3324 if (wlvif
->ssid_len
> 0)
3325 return wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3326 CMD_TEMPL_AP_PROBE_RESPONSE
,
3331 if (probe_rsp_len
+ bss_conf
->ssid_len
> WL1271_CMD_TEMPL_MAX_SIZE
) {
3332 wl1271_error("probe_rsp template too big");
3336 /* start searching from IE offset */
3337 ie_offset
= offsetof(struct ieee80211_mgmt
, u
.probe_resp
.variable
);
3339 ptr
= cfg80211_find_ie(WLAN_EID_SSID
, probe_rsp_data
+ ie_offset
,
3340 probe_rsp_len
- ie_offset
);
3342 wl1271_error("No SSID in beacon!");
3346 ssid_ie_offset
= ptr
- probe_rsp_data
;
3347 ptr
+= (ptr
[1] + 2);
3349 memcpy(probe_rsp_templ
, probe_rsp_data
, ssid_ie_offset
);
3351 /* insert SSID from bss_conf */
3352 probe_rsp_templ
[ssid_ie_offset
] = WLAN_EID_SSID
;
3353 probe_rsp_templ
[ssid_ie_offset
+ 1] = bss_conf
->ssid_len
;
3354 memcpy(probe_rsp_templ
+ ssid_ie_offset
+ 2,
3355 bss_conf
->ssid
, bss_conf
->ssid_len
);
3356 templ_len
= ssid_ie_offset
+ 2 + bss_conf
->ssid_len
;
3358 memcpy(probe_rsp_templ
+ ssid_ie_offset
+ 2 + bss_conf
->ssid_len
,
3359 ptr
, probe_rsp_len
- (ptr
- probe_rsp_data
));
3360 templ_len
+= probe_rsp_len
- (ptr
- probe_rsp_data
);
3362 return wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3363 CMD_TEMPL_AP_PROBE_RESPONSE
,
3369 static int wl1271_bss_erp_info_changed(struct wl1271
*wl
,
3370 struct ieee80211_vif
*vif
,
3371 struct ieee80211_bss_conf
*bss_conf
,
3374 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3377 if (changed
& BSS_CHANGED_ERP_SLOT
) {
3378 if (bss_conf
->use_short_slot
)
3379 ret
= wl1271_acx_slot(wl
, wlvif
, SLOT_TIME_SHORT
);
3381 ret
= wl1271_acx_slot(wl
, wlvif
, SLOT_TIME_LONG
);
3383 wl1271_warning("Set slot time failed %d", ret
);
3388 if (changed
& BSS_CHANGED_ERP_PREAMBLE
) {
3389 if (bss_conf
->use_short_preamble
)
3390 wl1271_acx_set_preamble(wl
, wlvif
, ACX_PREAMBLE_SHORT
);
3392 wl1271_acx_set_preamble(wl
, wlvif
, ACX_PREAMBLE_LONG
);
3395 if (changed
& BSS_CHANGED_ERP_CTS_PROT
) {
3396 if (bss_conf
->use_cts_prot
)
3397 ret
= wl1271_acx_cts_protect(wl
, wlvif
,
3400 ret
= wl1271_acx_cts_protect(wl
, wlvif
,
3401 CTSPROTECT_DISABLE
);
3403 wl1271_warning("Set ctsprotect failed %d", ret
);
3412 static int wlcore_set_beacon_template(struct wl1271
*wl
,
3413 struct ieee80211_vif
*vif
,
3416 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3417 struct ieee80211_hdr
*hdr
;
3420 int ieoffset
= offsetof(struct ieee80211_mgmt
,
3422 struct sk_buff
*beacon
= ieee80211_beacon_get(wl
->hw
, vif
);
3430 wl1271_debug(DEBUG_MASTER
, "beacon updated");
3432 ret
= wl1271_ssid_set(vif
, beacon
, ieoffset
);
3434 dev_kfree_skb(beacon
);
3437 min_rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
3438 tmpl_id
= is_ap
? CMD_TEMPL_AP_BEACON
:
3440 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
, tmpl_id
,
3445 dev_kfree_skb(beacon
);
3450 * In case we already have a probe-resp beacon set explicitly
3451 * by usermode, don't use the beacon data.
3453 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
, &wlvif
->flags
))
3456 /* remove TIM ie from probe response */
3457 wl12xx_remove_ie(beacon
, WLAN_EID_TIM
, ieoffset
);
3460 * remove p2p ie from probe response.
3461 * the fw reponds to probe requests that don't include
3462 * the p2p ie. probe requests with p2p ie will be passed,
3463 * and will be responded by the supplicant (the spec
3464 * forbids including the p2p ie when responding to probe
3465 * requests that didn't include it).
3467 wl12xx_remove_vendor_ie(beacon
, WLAN_OUI_WFA
,
3468 WLAN_OUI_TYPE_WFA_P2P
, ieoffset
);
3470 hdr
= (struct ieee80211_hdr
*) beacon
->data
;
3471 hdr
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_MGMT
|
3472 IEEE80211_STYPE_PROBE_RESP
);
3474 ret
= wl1271_ap_set_probe_resp_tmpl_legacy(wl
, vif
,
3479 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3480 CMD_TEMPL_PROBE_RESPONSE
,
3485 dev_kfree_skb(beacon
);
3493 static int wl1271_bss_beacon_info_changed(struct wl1271
*wl
,
3494 struct ieee80211_vif
*vif
,
3495 struct ieee80211_bss_conf
*bss_conf
,
3498 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3499 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
3502 if ((changed
& BSS_CHANGED_BEACON_INT
)) {
3503 wl1271_debug(DEBUG_MASTER
, "beacon interval updated: %d",
3504 bss_conf
->beacon_int
);
3506 wlvif
->beacon_int
= bss_conf
->beacon_int
;
3509 if ((changed
& BSS_CHANGED_AP_PROBE_RESP
) && is_ap
) {
3510 u32 rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
3512 wl1271_ap_set_probe_resp_tmpl(wl
, rate
, vif
);
3515 if ((changed
& BSS_CHANGED_BEACON
)) {
3516 ret
= wlcore_set_beacon_template(wl
, vif
, is_ap
);
3523 wl1271_error("beacon info change failed: %d", ret
);
3527 /* AP mode changes */
3528 static void wl1271_bss_info_changed_ap(struct wl1271
*wl
,
3529 struct ieee80211_vif
*vif
,
3530 struct ieee80211_bss_conf
*bss_conf
,
3533 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3536 if ((changed
& BSS_CHANGED_BASIC_RATES
)) {
3537 u32 rates
= bss_conf
->basic_rates
;
3539 wlvif
->basic_rate_set
= wl1271_tx_enabled_rates_get(wl
, rates
,
3541 wlvif
->basic_rate
= wl1271_tx_min_rate_get(wl
,
3542 wlvif
->basic_rate_set
);
3544 ret
= wl1271_init_ap_rates(wl
, wlvif
);
3546 wl1271_error("AP rate policy change failed %d", ret
);
3550 ret
= wl1271_ap_init_templates(wl
, vif
);
3554 ret
= wl1271_ap_set_probe_resp_tmpl(wl
, wlvif
->basic_rate
, vif
);
3558 ret
= wlcore_set_beacon_template(wl
, vif
, true);
3563 ret
= wl1271_bss_beacon_info_changed(wl
, vif
, bss_conf
, changed
);
3567 if ((changed
& BSS_CHANGED_BEACON_ENABLED
)) {
3568 if (bss_conf
->enable_beacon
) {
3569 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
3570 ret
= wl12xx_cmd_role_start_ap(wl
, wlvif
);
3574 ret
= wl1271_ap_init_hwenc(wl
, wlvif
);
3578 set_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
);
3579 wl1271_debug(DEBUG_AP
, "started AP");
3582 if (test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
3583 ret
= wl12xx_cmd_role_stop_ap(wl
, wlvif
);
3587 clear_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
);
3588 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
,
3590 wl1271_debug(DEBUG_AP
, "stopped AP");
3595 ret
= wl1271_bss_erp_info_changed(wl
, vif
, bss_conf
, changed
);
3599 /* Handle HT information change */
3600 if ((changed
& BSS_CHANGED_HT
) &&
3601 (bss_conf
->channel_type
!= NL80211_CHAN_NO_HT
)) {
3602 ret
= wl1271_acx_set_ht_information(wl
, wlvif
,
3603 bss_conf
->ht_operation_mode
);
3605 wl1271_warning("Set ht information failed %d", ret
);
3614 /* STA/IBSS mode changes */
3615 static void wl1271_bss_info_changed_sta(struct wl1271
*wl
,
3616 struct ieee80211_vif
*vif
,
3617 struct ieee80211_bss_conf
*bss_conf
,
3620 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3621 bool do_join
= false, set_assoc
= false;
3622 bool is_ibss
= (wlvif
->bss_type
== BSS_TYPE_IBSS
);
3623 bool ibss_joined
= false;
3624 u32 sta_rate_set
= 0;
3626 struct ieee80211_sta
*sta
;
3627 bool sta_exists
= false;
3628 struct ieee80211_sta_ht_cap sta_ht_cap
;
3631 ret
= wl1271_bss_beacon_info_changed(wl
, vif
, bss_conf
,
3637 if (changed
& BSS_CHANGED_IBSS
) {
3638 if (bss_conf
->ibss_joined
) {
3639 set_bit(WLVIF_FLAG_IBSS_JOINED
, &wlvif
->flags
);
3642 if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED
,
3644 wl1271_unjoin(wl
, wlvif
);
3648 if ((changed
& BSS_CHANGED_BEACON_INT
) && ibss_joined
)
3651 /* Need to update the SSID (for filtering etc) */
3652 if ((changed
& BSS_CHANGED_BEACON
) && ibss_joined
)
3655 if ((changed
& BSS_CHANGED_BEACON_ENABLED
) && ibss_joined
) {
3656 wl1271_debug(DEBUG_ADHOC
, "ad-hoc beaconing: %s",
3657 bss_conf
->enable_beacon
? "enabled" : "disabled");
3662 if (changed
& BSS_CHANGED_IDLE
&& !is_ibss
) {
3663 ret
= wl1271_sta_handle_idle(wl
, wlvif
, bss_conf
->idle
);
3665 wl1271_warning("idle mode change failed %d", ret
);
3668 if ((changed
& BSS_CHANGED_CQM
)) {
3669 bool enable
= false;
3670 if (bss_conf
->cqm_rssi_thold
)
3672 ret
= wl1271_acx_rssi_snr_trigger(wl
, wlvif
, enable
,
3673 bss_conf
->cqm_rssi_thold
,
3674 bss_conf
->cqm_rssi_hyst
);
3677 wlvif
->rssi_thold
= bss_conf
->cqm_rssi_thold
;
3680 if (changed
& BSS_CHANGED_BSSID
)
3681 if (!is_zero_ether_addr(bss_conf
->bssid
)) {
3682 ret
= wl12xx_cmd_build_null_data(wl
, wlvif
);
3686 ret
= wl1271_build_qos_null_data(wl
, vif
);
3691 if (changed
& (BSS_CHANGED_ASSOC
| BSS_CHANGED_HT
)) {
3693 sta
= ieee80211_find_sta(vif
, bss_conf
->bssid
);
3697 /* save the supp_rates of the ap */
3698 sta_rate_set
= sta
->supp_rates
[wl
->hw
->conf
.channel
->band
];
3699 if (sta
->ht_cap
.ht_supported
)
3701 (sta
->ht_cap
.mcs
.rx_mask
[0] << HW_HT_RATES_OFFSET
) |
3702 (sta
->ht_cap
.mcs
.rx_mask
[1] << HW_MIMO_RATES_OFFSET
);
3703 sta_ht_cap
= sta
->ht_cap
;
3710 if ((changed
& BSS_CHANGED_ASSOC
)) {
3711 if (bss_conf
->assoc
) {
3714 wlvif
->aid
= bss_conf
->aid
;
3715 wlvif
->channel_type
= bss_conf
->channel_type
;
3716 wlvif
->beacon_int
= bss_conf
->beacon_int
;
3721 * use basic rates from AP, and determine lowest rate
3722 * to use with control frames.
3724 rates
= bss_conf
->basic_rates
;
3725 wlvif
->basic_rate_set
=
3726 wl1271_tx_enabled_rates_get(wl
, rates
,
3729 wl1271_tx_min_rate_get(wl
,
3730 wlvif
->basic_rate_set
);
3733 wl1271_tx_enabled_rates_get(wl
,
3736 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
3741 * with wl1271, we don't need to update the
3742 * beacon_int and dtim_period, because the firmware
3743 * updates it by itself when the first beacon is
3744 * received after a join.
3746 ret
= wl1271_cmd_build_ps_poll(wl
, wlvif
, wlvif
->aid
);
3751 * Get a template for hardware connection maintenance
3753 dev_kfree_skb(wlvif
->probereq
);
3754 wlvif
->probereq
= wl1271_cmd_build_ap_probe_req(wl
,
3757 ieoffset
= offsetof(struct ieee80211_mgmt
,
3758 u
.probe_req
.variable
);
3759 wl1271_ssid_set(vif
, wlvif
->probereq
, ieoffset
);
3761 /* enable the connection monitoring feature */
3762 ret
= wl1271_acx_conn_monit_params(wl
, wlvif
, true);
3766 /* use defaults when not associated */
3768 !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED
,
3771 !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT
,
3775 /* free probe-request template */
3776 dev_kfree_skb(wlvif
->probereq
);
3777 wlvif
->probereq
= NULL
;
3779 /* revert back to minimum rates for the current band */
3780 wl1271_set_band_rate(wl
, wlvif
);
3782 wl1271_tx_min_rate_get(wl
,
3783 wlvif
->basic_rate_set
);
3784 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
3788 /* disable connection monitor features */
3789 ret
= wl1271_acx_conn_monit_params(wl
, wlvif
, false);
3791 /* Disable the keep-alive feature */
3792 ret
= wl1271_acx_keep_alive_mode(wl
, wlvif
, false);
3796 /* restore the bssid filter and go to dummy bssid */
3799 * we might have to disable roc, if there was
3800 * no IF_OPER_UP notification.
3803 ret
= wl12xx_croc(wl
, wlvif
->role_id
);
3808 * (we also need to disable roc in case of
3809 * roaming on the same channel. until we will
3810 * have a better flow...)
3812 if (test_bit(wlvif
->dev_role_id
, wl
->roc_map
)) {
3813 ret
= wl12xx_croc(wl
,
3814 wlvif
->dev_role_id
);
3819 wl1271_unjoin(wl
, wlvif
);
3820 if (!bss_conf
->idle
)
3821 wl12xx_start_dev(wl
, wlvif
);
3826 if (changed
& BSS_CHANGED_IBSS
) {
3827 wl1271_debug(DEBUG_ADHOC
, "ibss_joined: %d",
3828 bss_conf
->ibss_joined
);
3830 if (bss_conf
->ibss_joined
) {
3831 u32 rates
= bss_conf
->basic_rates
;
3832 wlvif
->basic_rate_set
=
3833 wl1271_tx_enabled_rates_get(wl
, rates
,
3836 wl1271_tx_min_rate_get(wl
,
3837 wlvif
->basic_rate_set
);
3839 /* by default, use 11b + OFDM rates */
3840 wlvif
->rate_set
= CONF_TX_IBSS_DEFAULT_RATES
;
3841 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
3847 ret
= wl1271_bss_erp_info_changed(wl
, vif
, bss_conf
, changed
);
3852 ret
= wl1271_join(wl
, wlvif
, set_assoc
);
3854 wl1271_warning("cmd join failed %d", ret
);
3858 /* ROC until connected (after EAPOL exchange) */
3860 ret
= wl12xx_roc(wl
, wlvif
, wlvif
->role_id
);
3864 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
))
3865 wl12xx_set_authorized(wl
, wlvif
);
3868 * stop device role if started (we might already be in
3871 if (wl12xx_dev_role_started(wlvif
)) {
3872 ret
= wl12xx_stop_dev(wl
, wlvif
);
3878 /* Handle new association with HT. Do this after join. */
3880 if ((changed
& BSS_CHANGED_HT
) &&
3881 (bss_conf
->channel_type
!= NL80211_CHAN_NO_HT
)) {
3882 ret
= wl1271_acx_set_ht_capabilities(wl
,
3887 wl1271_warning("Set ht cap true failed %d",
3892 /* handle new association without HT and disassociation */
3893 else if (changed
& BSS_CHANGED_ASSOC
) {
3894 ret
= wl1271_acx_set_ht_capabilities(wl
,
3899 wl1271_warning("Set ht cap false failed %d",
3906 /* Handle HT information change. Done after join. */
3907 if ((changed
& BSS_CHANGED_HT
) &&
3908 (bss_conf
->channel_type
!= NL80211_CHAN_NO_HT
)) {
3909 ret
= wl1271_acx_set_ht_information(wl
, wlvif
,
3910 bss_conf
->ht_operation_mode
);
3912 wl1271_warning("Set ht information failed %d", ret
);
3917 /* Handle arp filtering. Done after join. */
3918 if ((changed
& BSS_CHANGED_ARP_FILTER
) ||
3919 (!is_ibss
&& (changed
& BSS_CHANGED_QOS
))) {
3920 __be32 addr
= bss_conf
->arp_addr_list
[0];
3921 wlvif
->sta
.qos
= bss_conf
->qos
;
3922 WARN_ON(wlvif
->bss_type
!= BSS_TYPE_STA_BSS
);
3924 if (bss_conf
->arp_addr_cnt
== 1 &&
3925 bss_conf
->arp_filter_enabled
) {
3926 wlvif
->ip_addr
= addr
;
3928 * The template should have been configured only upon
3929 * association. however, it seems that the correct ip
3930 * isn't being set (when sending), so we have to
3931 * reconfigure the template upon every ip change.
3933 ret
= wl1271_cmd_build_arp_rsp(wl
, wlvif
);
3935 wl1271_warning("build arp rsp failed: %d", ret
);
3939 ret
= wl1271_acx_arp_ip_filter(wl
, wlvif
,
3940 (ACX_ARP_FILTER_ARP_FILTERING
|
3941 ACX_ARP_FILTER_AUTO_ARP
),
3945 ret
= wl1271_acx_arp_ip_filter(wl
, wlvif
, 0, addr
);
3956 static void wl1271_op_bss_info_changed(struct ieee80211_hw
*hw
,
3957 struct ieee80211_vif
*vif
,
3958 struct ieee80211_bss_conf
*bss_conf
,
3961 struct wl1271
*wl
= hw
->priv
;
3962 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3963 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
3966 wl1271_debug(DEBUG_MAC80211
, "mac80211 bss info changed 0x%x",
3970 * make sure to cancel pending disconnections if our association
3973 if (!is_ap
&& (changed
& BSS_CHANGED_ASSOC
))
3974 cancel_delayed_work_sync(&wl
->connection_loss_work
);
3976 if (is_ap
&& (changed
& BSS_CHANGED_BEACON_ENABLED
) &&
3977 !bss_conf
->enable_beacon
)
3978 wl1271_tx_flush(wl
);
3980 mutex_lock(&wl
->mutex
);
3982 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
3985 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)))
3988 ret
= wl1271_ps_elp_wakeup(wl
);
3993 wl1271_bss_info_changed_ap(wl
, vif
, bss_conf
, changed
);
3995 wl1271_bss_info_changed_sta(wl
, vif
, bss_conf
, changed
);
3997 wl1271_ps_elp_sleep(wl
);
4000 mutex_unlock(&wl
->mutex
);
4003 static int wl1271_op_conf_tx(struct ieee80211_hw
*hw
,
4004 struct ieee80211_vif
*vif
, u16 queue
,
4005 const struct ieee80211_tx_queue_params
*params
)
4007 struct wl1271
*wl
= hw
->priv
;
4008 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4012 mutex_lock(&wl
->mutex
);
4014 wl1271_debug(DEBUG_MAC80211
, "mac80211 conf tx %d", queue
);
4017 ps_scheme
= CONF_PS_SCHEME_UPSD_TRIGGER
;
4019 ps_scheme
= CONF_PS_SCHEME_LEGACY
;
4021 if (!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
4024 ret
= wl1271_ps_elp_wakeup(wl
);
4029 * the txop is confed in units of 32us by the mac80211,
4032 ret
= wl1271_acx_ac_cfg(wl
, wlvif
, wl1271_tx_get_queue(queue
),
4033 params
->cw_min
, params
->cw_max
,
4034 params
->aifs
, params
->txop
<< 5);
4038 ret
= wl1271_acx_tid_cfg(wl
, wlvif
, wl1271_tx_get_queue(queue
),
4039 CONF_CHANNEL_TYPE_EDCF
,
4040 wl1271_tx_get_queue(queue
),
4041 ps_scheme
, CONF_ACK_POLICY_LEGACY
,
4045 wl1271_ps_elp_sleep(wl
);
4048 mutex_unlock(&wl
->mutex
);
4053 static u64
wl1271_op_get_tsf(struct ieee80211_hw
*hw
,
4054 struct ieee80211_vif
*vif
)
4057 struct wl1271
*wl
= hw
->priv
;
4058 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4059 u64 mactime
= ULLONG_MAX
;
4062 wl1271_debug(DEBUG_MAC80211
, "mac80211 get tsf");
4064 mutex_lock(&wl
->mutex
);
4066 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
4069 ret
= wl1271_ps_elp_wakeup(wl
);
4073 ret
= wl12xx_acx_tsf_info(wl
, wlvif
, &mactime
);
4078 wl1271_ps_elp_sleep(wl
);
4081 mutex_unlock(&wl
->mutex
);
4085 static int wl1271_op_get_survey(struct ieee80211_hw
*hw
, int idx
,
4086 struct survey_info
*survey
)
4088 struct wl1271
*wl
= hw
->priv
;
4089 struct ieee80211_conf
*conf
= &hw
->conf
;
4094 survey
->channel
= conf
->channel
;
4095 survey
->filled
= SURVEY_INFO_NOISE_DBM
;
4096 survey
->noise
= wl
->noise
;
4101 static int wl1271_allocate_sta(struct wl1271
*wl
,
4102 struct wl12xx_vif
*wlvif
,
4103 struct ieee80211_sta
*sta
)
4105 struct wl1271_station
*wl_sta
;
4109 if (wl
->active_sta_count
>= AP_MAX_STATIONS
) {
4110 wl1271_warning("could not allocate HLID - too much stations");
4114 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4115 ret
= wl12xx_allocate_link(wl
, wlvif
, &wl_sta
->hlid
);
4117 wl1271_warning("could not allocate HLID - too many links");
4121 set_bit(wl_sta
->hlid
, wlvif
->ap
.sta_hlid_map
);
4122 memcpy(wl
->links
[wl_sta
->hlid
].addr
, sta
->addr
, ETH_ALEN
);
4123 wl
->active_sta_count
++;
4127 void wl1271_free_sta(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
, u8 hlid
)
4129 if (!test_bit(hlid
, wlvif
->ap
.sta_hlid_map
))
4132 clear_bit(hlid
, wlvif
->ap
.sta_hlid_map
);
4133 memset(wl
->links
[hlid
].addr
, 0, ETH_ALEN
);
4134 wl
->links
[hlid
].ba_bitmap
= 0;
4135 __clear_bit(hlid
, &wl
->ap_ps_map
);
4136 __clear_bit(hlid
, (unsigned long *)&wl
->ap_fw_ps_map
);
4137 wl12xx_free_link(wl
, wlvif
, &hlid
);
4138 wl
->active_sta_count
--;
4141 * rearm the tx watchdog when the last STA is freed - give the FW a
4142 * chance to return STA-buffered packets before complaining.
4144 if (wl
->active_sta_count
== 0)
4145 wl12xx_rearm_tx_watchdog_locked(wl
);
4148 static int wl12xx_sta_add(struct wl1271
*wl
,
4149 struct wl12xx_vif
*wlvif
,
4150 struct ieee80211_sta
*sta
)
4152 struct wl1271_station
*wl_sta
;
4156 wl1271_debug(DEBUG_MAC80211
, "mac80211 add sta %d", (int)sta
->aid
);
4158 ret
= wl1271_allocate_sta(wl
, wlvif
, sta
);
4162 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4163 hlid
= wl_sta
->hlid
;
4165 ret
= wl12xx_cmd_add_peer(wl
, wlvif
, sta
, hlid
);
4167 wl1271_free_sta(wl
, wlvif
, hlid
);
4172 static int wl12xx_sta_remove(struct wl1271
*wl
,
4173 struct wl12xx_vif
*wlvif
,
4174 struct ieee80211_sta
*sta
)
4176 struct wl1271_station
*wl_sta
;
4179 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove sta %d", (int)sta
->aid
);
4181 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4183 if (WARN_ON(!test_bit(id
, wlvif
->ap
.sta_hlid_map
)))
4186 ret
= wl12xx_cmd_remove_peer(wl
, wl_sta
->hlid
);
4190 wl1271_free_sta(wl
, wlvif
, wl_sta
->hlid
);
4194 static int wl12xx_update_sta_state(struct wl1271
*wl
,
4195 struct wl12xx_vif
*wlvif
,
4196 struct ieee80211_sta
*sta
,
4197 enum ieee80211_sta_state old_state
,
4198 enum ieee80211_sta_state new_state
)
4200 struct wl1271_station
*wl_sta
;
4202 bool is_ap
= wlvif
->bss_type
== BSS_TYPE_AP_BSS
;
4203 bool is_sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
4206 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4207 hlid
= wl_sta
->hlid
;
4209 /* Add station (AP mode) */
4211 old_state
== IEEE80211_STA_NOTEXIST
&&
4212 new_state
== IEEE80211_STA_NONE
)
4213 return wl12xx_sta_add(wl
, wlvif
, sta
);
4215 /* Remove station (AP mode) */
4217 old_state
== IEEE80211_STA_NONE
&&
4218 new_state
== IEEE80211_STA_NOTEXIST
) {
4220 wl12xx_sta_remove(wl
, wlvif
, sta
);
4224 /* Authorize station (AP mode) */
4226 new_state
== IEEE80211_STA_AUTHORIZED
) {
4227 ret
= wl12xx_cmd_set_peer_state(wl
, hlid
);
4231 ret
= wl1271_acx_set_ht_capabilities(wl
, &sta
->ht_cap
, true,
4236 /* Authorize station */
4238 new_state
== IEEE80211_STA_AUTHORIZED
) {
4239 set_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
);
4240 return wl12xx_set_authorized(wl
, wlvif
);
4244 old_state
== IEEE80211_STA_AUTHORIZED
&&
4245 new_state
== IEEE80211_STA_ASSOC
) {
4246 clear_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
);
4253 static int wl12xx_op_sta_state(struct ieee80211_hw
*hw
,
4254 struct ieee80211_vif
*vif
,
4255 struct ieee80211_sta
*sta
,
4256 enum ieee80211_sta_state old_state
,
4257 enum ieee80211_sta_state new_state
)
4259 struct wl1271
*wl
= hw
->priv
;
4260 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4263 wl1271_debug(DEBUG_MAC80211
, "mac80211 sta %d state=%d->%d",
4264 sta
->aid
, old_state
, new_state
);
4266 mutex_lock(&wl
->mutex
);
4268 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
4273 ret
= wl1271_ps_elp_wakeup(wl
);
4277 ret
= wl12xx_update_sta_state(wl
, wlvif
, sta
, old_state
, new_state
);
4279 wl1271_ps_elp_sleep(wl
);
4281 mutex_unlock(&wl
->mutex
);
4282 if (new_state
< old_state
)
4287 static int wl1271_op_ampdu_action(struct ieee80211_hw
*hw
,
4288 struct ieee80211_vif
*vif
,
4289 enum ieee80211_ampdu_mlme_action action
,
4290 struct ieee80211_sta
*sta
, u16 tid
, u16
*ssn
,
4293 struct wl1271
*wl
= hw
->priv
;
4294 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4296 u8 hlid
, *ba_bitmap
;
4298 wl1271_debug(DEBUG_MAC80211
, "mac80211 ampdu action %d tid %d", action
,
4301 /* sanity check - the fields in FW are only 8bits wide */
4302 if (WARN_ON(tid
> 0xFF))
4305 mutex_lock(&wl
->mutex
);
4307 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
4312 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
) {
4313 hlid
= wlvif
->sta
.hlid
;
4314 ba_bitmap
= &wlvif
->sta
.ba_rx_bitmap
;
4315 } else if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
) {
4316 struct wl1271_station
*wl_sta
;
4318 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4319 hlid
= wl_sta
->hlid
;
4320 ba_bitmap
= &wl
->links
[hlid
].ba_bitmap
;
4326 ret
= wl1271_ps_elp_wakeup(wl
);
4330 wl1271_debug(DEBUG_MAC80211
, "mac80211 ampdu: Rx tid %d action %d",
4334 case IEEE80211_AMPDU_RX_START
:
4335 if (!wlvif
->ba_support
|| !wlvif
->ba_allowed
) {
4340 if (wl
->ba_rx_session_count
>= RX_BA_MAX_SESSIONS
) {
4342 wl1271_error("exceeded max RX BA sessions");
4346 if (*ba_bitmap
& BIT(tid
)) {
4348 wl1271_error("cannot enable RX BA session on active "
4353 ret
= wl12xx_acx_set_ba_receiver_session(wl
, tid
, *ssn
, true,
4356 *ba_bitmap
|= BIT(tid
);
4357 wl
->ba_rx_session_count
++;
4361 case IEEE80211_AMPDU_RX_STOP
:
4362 if (!(*ba_bitmap
& BIT(tid
))) {
4364 wl1271_error("no active RX BA session on tid: %d",
4369 ret
= wl12xx_acx_set_ba_receiver_session(wl
, tid
, 0, false,
4372 *ba_bitmap
&= ~BIT(tid
);
4373 wl
->ba_rx_session_count
--;
4378 * The BA initiator session management in FW independently.
4379 * Falling break here on purpose for all TX APDU commands.
4381 case IEEE80211_AMPDU_TX_START
:
4382 case IEEE80211_AMPDU_TX_STOP
:
4383 case IEEE80211_AMPDU_TX_OPERATIONAL
:
4388 wl1271_error("Incorrect ampdu action id=%x\n", action
);
4392 wl1271_ps_elp_sleep(wl
);
4395 mutex_unlock(&wl
->mutex
);
4400 static int wl12xx_set_bitrate_mask(struct ieee80211_hw
*hw
,
4401 struct ieee80211_vif
*vif
,
4402 const struct cfg80211_bitrate_mask
*mask
)
4404 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4405 struct wl1271
*wl
= hw
->priv
;
4408 wl1271_debug(DEBUG_MAC80211
, "mac80211 set_bitrate_mask 0x%x 0x%x",
4409 mask
->control
[NL80211_BAND_2GHZ
].legacy
,
4410 mask
->control
[NL80211_BAND_5GHZ
].legacy
);
4412 mutex_lock(&wl
->mutex
);
4414 for (i
= 0; i
< IEEE80211_NUM_BANDS
; i
++)
4415 wlvif
->bitrate_masks
[i
] =
4416 wl1271_tx_enabled_rates_get(wl
,
4417 mask
->control
[i
].legacy
,
4420 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
4423 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
4424 !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
)) {
4426 ret
= wl1271_ps_elp_wakeup(wl
);
4430 wl1271_set_band_rate(wl
, wlvif
);
4432 wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
4433 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4435 wl1271_ps_elp_sleep(wl
);
4438 mutex_unlock(&wl
->mutex
);
4443 static void wl12xx_op_channel_switch(struct ieee80211_hw
*hw
,
4444 struct ieee80211_channel_switch
*ch_switch
)
4446 struct wl1271
*wl
= hw
->priv
;
4447 struct wl12xx_vif
*wlvif
;
4450 wl1271_debug(DEBUG_MAC80211
, "mac80211 channel switch");
4452 wl1271_tx_flush(wl
);
4454 mutex_lock(&wl
->mutex
);
4456 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
4457 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
4458 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
4459 ieee80211_chswitch_done(vif
, false);
4464 ret
= wl1271_ps_elp_wakeup(wl
);
4468 /* TODO: change mac80211 to pass vif as param */
4469 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
4470 ret
= wl12xx_cmd_channel_switch(wl
, wlvif
, ch_switch
);
4473 set_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
);
4476 wl1271_ps_elp_sleep(wl
);
4479 mutex_unlock(&wl
->mutex
);
4482 static bool wl1271_tx_frames_pending(struct ieee80211_hw
*hw
)
4484 struct wl1271
*wl
= hw
->priv
;
4487 mutex_lock(&wl
->mutex
);
4489 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
4492 /* packets are considered pending if in the TX queue or the FW */
4493 ret
= (wl1271_tx_total_queue_count(wl
) > 0) || (wl
->tx_frames_cnt
> 0);
4495 mutex_unlock(&wl
->mutex
);
4500 /* can't be const, mac80211 writes to this */
4501 static struct ieee80211_rate wl1271_rates
[] = {
4503 .hw_value
= CONF_HW_BIT_RATE_1MBPS
,
4504 .hw_value_short
= CONF_HW_BIT_RATE_1MBPS
, },
4506 .hw_value
= CONF_HW_BIT_RATE_2MBPS
,
4507 .hw_value_short
= CONF_HW_BIT_RATE_2MBPS
,
4508 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
4510 .hw_value
= CONF_HW_BIT_RATE_5_5MBPS
,
4511 .hw_value_short
= CONF_HW_BIT_RATE_5_5MBPS
,
4512 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
4514 .hw_value
= CONF_HW_BIT_RATE_11MBPS
,
4515 .hw_value_short
= CONF_HW_BIT_RATE_11MBPS
,
4516 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
4518 .hw_value
= CONF_HW_BIT_RATE_6MBPS
,
4519 .hw_value_short
= CONF_HW_BIT_RATE_6MBPS
, },
4521 .hw_value
= CONF_HW_BIT_RATE_9MBPS
,
4522 .hw_value_short
= CONF_HW_BIT_RATE_9MBPS
, },
4524 .hw_value
= CONF_HW_BIT_RATE_12MBPS
,
4525 .hw_value_short
= CONF_HW_BIT_RATE_12MBPS
, },
4527 .hw_value
= CONF_HW_BIT_RATE_18MBPS
,
4528 .hw_value_short
= CONF_HW_BIT_RATE_18MBPS
, },
4530 .hw_value
= CONF_HW_BIT_RATE_24MBPS
,
4531 .hw_value_short
= CONF_HW_BIT_RATE_24MBPS
, },
4533 .hw_value
= CONF_HW_BIT_RATE_36MBPS
,
4534 .hw_value_short
= CONF_HW_BIT_RATE_36MBPS
, },
4536 .hw_value
= CONF_HW_BIT_RATE_48MBPS
,
4537 .hw_value_short
= CONF_HW_BIT_RATE_48MBPS
, },
4539 .hw_value
= CONF_HW_BIT_RATE_54MBPS
,
4540 .hw_value_short
= CONF_HW_BIT_RATE_54MBPS
, },
4543 /* can't be const, mac80211 writes to this */
4544 static struct ieee80211_channel wl1271_channels
[] = {
4545 { .hw_value
= 1, .center_freq
= 2412, .max_power
= 25 },
4546 { .hw_value
= 2, .center_freq
= 2417, .max_power
= 25 },
4547 { .hw_value
= 3, .center_freq
= 2422, .max_power
= 25 },
4548 { .hw_value
= 4, .center_freq
= 2427, .max_power
= 25 },
4549 { .hw_value
= 5, .center_freq
= 2432, .max_power
= 25 },
4550 { .hw_value
= 6, .center_freq
= 2437, .max_power
= 25 },
4551 { .hw_value
= 7, .center_freq
= 2442, .max_power
= 25 },
4552 { .hw_value
= 8, .center_freq
= 2447, .max_power
= 25 },
4553 { .hw_value
= 9, .center_freq
= 2452, .max_power
= 25 },
4554 { .hw_value
= 10, .center_freq
= 2457, .max_power
= 25 },
4555 { .hw_value
= 11, .center_freq
= 2462, .max_power
= 25 },
4556 { .hw_value
= 12, .center_freq
= 2467, .max_power
= 25 },
4557 { .hw_value
= 13, .center_freq
= 2472, .max_power
= 25 },
4558 { .hw_value
= 14, .center_freq
= 2484, .max_power
= 25 },
4561 /* can't be const, mac80211 writes to this */
4562 static struct ieee80211_supported_band wl1271_band_2ghz
= {
4563 .channels
= wl1271_channels
,
4564 .n_channels
= ARRAY_SIZE(wl1271_channels
),
4565 .bitrates
= wl1271_rates
,
4566 .n_bitrates
= ARRAY_SIZE(wl1271_rates
),
4569 /* 5 GHz data rates for WL1273 */
4570 static struct ieee80211_rate wl1271_rates_5ghz
[] = {
4572 .hw_value
= CONF_HW_BIT_RATE_6MBPS
,
4573 .hw_value_short
= CONF_HW_BIT_RATE_6MBPS
, },
4575 .hw_value
= CONF_HW_BIT_RATE_9MBPS
,
4576 .hw_value_short
= CONF_HW_BIT_RATE_9MBPS
, },
4578 .hw_value
= CONF_HW_BIT_RATE_12MBPS
,
4579 .hw_value_short
= CONF_HW_BIT_RATE_12MBPS
, },
4581 .hw_value
= CONF_HW_BIT_RATE_18MBPS
,
4582 .hw_value_short
= CONF_HW_BIT_RATE_18MBPS
, },
4584 .hw_value
= CONF_HW_BIT_RATE_24MBPS
,
4585 .hw_value_short
= CONF_HW_BIT_RATE_24MBPS
, },
4587 .hw_value
= CONF_HW_BIT_RATE_36MBPS
,
4588 .hw_value_short
= CONF_HW_BIT_RATE_36MBPS
, },
4590 .hw_value
= CONF_HW_BIT_RATE_48MBPS
,
4591 .hw_value_short
= CONF_HW_BIT_RATE_48MBPS
, },
4593 .hw_value
= CONF_HW_BIT_RATE_54MBPS
,
4594 .hw_value_short
= CONF_HW_BIT_RATE_54MBPS
, },
4597 /* 5 GHz band channels for WL1273 */
4598 static struct ieee80211_channel wl1271_channels_5ghz
[] = {
4599 { .hw_value
= 7, .center_freq
= 5035, .max_power
= 25 },
4600 { .hw_value
= 8, .center_freq
= 5040, .max_power
= 25 },
4601 { .hw_value
= 9, .center_freq
= 5045, .max_power
= 25 },
4602 { .hw_value
= 11, .center_freq
= 5055, .max_power
= 25 },
4603 { .hw_value
= 12, .center_freq
= 5060, .max_power
= 25 },
4604 { .hw_value
= 16, .center_freq
= 5080, .max_power
= 25 },
4605 { .hw_value
= 34, .center_freq
= 5170, .max_power
= 25 },
4606 { .hw_value
= 36, .center_freq
= 5180, .max_power
= 25 },
4607 { .hw_value
= 38, .center_freq
= 5190, .max_power
= 25 },
4608 { .hw_value
= 40, .center_freq
= 5200, .max_power
= 25 },
4609 { .hw_value
= 42, .center_freq
= 5210, .max_power
= 25 },
4610 { .hw_value
= 44, .center_freq
= 5220, .max_power
= 25 },
4611 { .hw_value
= 46, .center_freq
= 5230, .max_power
= 25 },
4612 { .hw_value
= 48, .center_freq
= 5240, .max_power
= 25 },
4613 { .hw_value
= 52, .center_freq
= 5260, .max_power
= 25 },
4614 { .hw_value
= 56, .center_freq
= 5280, .max_power
= 25 },
4615 { .hw_value
= 60, .center_freq
= 5300, .max_power
= 25 },
4616 { .hw_value
= 64, .center_freq
= 5320, .max_power
= 25 },
4617 { .hw_value
= 100, .center_freq
= 5500, .max_power
= 25 },
4618 { .hw_value
= 104, .center_freq
= 5520, .max_power
= 25 },
4619 { .hw_value
= 108, .center_freq
= 5540, .max_power
= 25 },
4620 { .hw_value
= 112, .center_freq
= 5560, .max_power
= 25 },
4621 { .hw_value
= 116, .center_freq
= 5580, .max_power
= 25 },
4622 { .hw_value
= 120, .center_freq
= 5600, .max_power
= 25 },
4623 { .hw_value
= 124, .center_freq
= 5620, .max_power
= 25 },
4624 { .hw_value
= 128, .center_freq
= 5640, .max_power
= 25 },
4625 { .hw_value
= 132, .center_freq
= 5660, .max_power
= 25 },
4626 { .hw_value
= 136, .center_freq
= 5680, .max_power
= 25 },
4627 { .hw_value
= 140, .center_freq
= 5700, .max_power
= 25 },
4628 { .hw_value
= 149, .center_freq
= 5745, .max_power
= 25 },
4629 { .hw_value
= 153, .center_freq
= 5765, .max_power
= 25 },
4630 { .hw_value
= 157, .center_freq
= 5785, .max_power
= 25 },
4631 { .hw_value
= 161, .center_freq
= 5805, .max_power
= 25 },
4632 { .hw_value
= 165, .center_freq
= 5825, .max_power
= 25 },
4635 static struct ieee80211_supported_band wl1271_band_5ghz
= {
4636 .channels
= wl1271_channels_5ghz
,
4637 .n_channels
= ARRAY_SIZE(wl1271_channels_5ghz
),
4638 .bitrates
= wl1271_rates_5ghz
,
4639 .n_bitrates
= ARRAY_SIZE(wl1271_rates_5ghz
),
4642 static const struct ieee80211_ops wl1271_ops
= {
4643 .start
= wl1271_op_start
,
4644 .stop
= wl1271_op_stop
,
4645 .add_interface
= wl1271_op_add_interface
,
4646 .remove_interface
= wl1271_op_remove_interface
,
4647 .change_interface
= wl12xx_op_change_interface
,
4649 .suspend
= wl1271_op_suspend
,
4650 .resume
= wl1271_op_resume
,
4652 .config
= wl1271_op_config
,
4653 .prepare_multicast
= wl1271_op_prepare_multicast
,
4654 .configure_filter
= wl1271_op_configure_filter
,
4656 .set_key
= wlcore_op_set_key
,
4657 .hw_scan
= wl1271_op_hw_scan
,
4658 .cancel_hw_scan
= wl1271_op_cancel_hw_scan
,
4659 .sched_scan_start
= wl1271_op_sched_scan_start
,
4660 .sched_scan_stop
= wl1271_op_sched_scan_stop
,
4661 .bss_info_changed
= wl1271_op_bss_info_changed
,
4662 .set_frag_threshold
= wl1271_op_set_frag_threshold
,
4663 .set_rts_threshold
= wl1271_op_set_rts_threshold
,
4664 .conf_tx
= wl1271_op_conf_tx
,
4665 .get_tsf
= wl1271_op_get_tsf
,
4666 .get_survey
= wl1271_op_get_survey
,
4667 .sta_state
= wl12xx_op_sta_state
,
4668 .ampdu_action
= wl1271_op_ampdu_action
,
4669 .tx_frames_pending
= wl1271_tx_frames_pending
,
4670 .set_bitrate_mask
= wl12xx_set_bitrate_mask
,
4671 .channel_switch
= wl12xx_op_channel_switch
,
4672 CFG80211_TESTMODE_CMD(wl1271_tm_cmd
)
4676 u8
wlcore_rate_to_idx(struct wl1271
*wl
, u8 rate
, enum ieee80211_band band
)
4682 if (unlikely(rate
>= wl
->hw_tx_rate_tbl_size
)) {
4683 wl1271_error("Illegal RX rate from HW: %d", rate
);
4687 idx
= wl
->band_rate_to_idx
[band
][rate
];
4688 if (unlikely(idx
== CONF_HW_RXTX_RATE_UNSUPPORTED
)) {
4689 wl1271_error("Unsupported RX rate from HW: %d", rate
);
4696 static ssize_t
wl1271_sysfs_show_bt_coex_state(struct device
*dev
,
4697 struct device_attribute
*attr
,
4700 struct wl1271
*wl
= dev_get_drvdata(dev
);
4705 mutex_lock(&wl
->mutex
);
4706 len
= snprintf(buf
, len
, "%d\n\n0 - off\n1 - on\n",
4708 mutex_unlock(&wl
->mutex
);
4714 static ssize_t
wl1271_sysfs_store_bt_coex_state(struct device
*dev
,
4715 struct device_attribute
*attr
,
4716 const char *buf
, size_t count
)
4718 struct wl1271
*wl
= dev_get_drvdata(dev
);
4722 ret
= kstrtoul(buf
, 10, &res
);
4724 wl1271_warning("incorrect value written to bt_coex_mode");
4728 mutex_lock(&wl
->mutex
);
4732 if (res
== wl
->sg_enabled
)
4735 wl
->sg_enabled
= res
;
4737 if (wl
->state
== WL1271_STATE_OFF
)
4740 ret
= wl1271_ps_elp_wakeup(wl
);
4744 wl1271_acx_sg_enable(wl
, wl
->sg_enabled
);
4745 wl1271_ps_elp_sleep(wl
);
4748 mutex_unlock(&wl
->mutex
);
4752 static DEVICE_ATTR(bt_coex_state
, S_IRUGO
| S_IWUSR
,
4753 wl1271_sysfs_show_bt_coex_state
,
4754 wl1271_sysfs_store_bt_coex_state
);
4756 static ssize_t
wl1271_sysfs_show_hw_pg_ver(struct device
*dev
,
4757 struct device_attribute
*attr
,
4760 struct wl1271
*wl
= dev_get_drvdata(dev
);
4765 mutex_lock(&wl
->mutex
);
4766 if (wl
->hw_pg_ver
>= 0)
4767 len
= snprintf(buf
, len
, "%d\n", wl
->hw_pg_ver
);
4769 len
= snprintf(buf
, len
, "n/a\n");
4770 mutex_unlock(&wl
->mutex
);
4775 static DEVICE_ATTR(hw_pg_ver
, S_IRUGO
,
4776 wl1271_sysfs_show_hw_pg_ver
, NULL
);
4778 static ssize_t
wl1271_sysfs_read_fwlog(struct file
*filp
, struct kobject
*kobj
,
4779 struct bin_attribute
*bin_attr
,
4780 char *buffer
, loff_t pos
, size_t count
)
4782 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
4783 struct wl1271
*wl
= dev_get_drvdata(dev
);
4787 ret
= mutex_lock_interruptible(&wl
->mutex
);
4789 return -ERESTARTSYS
;
4791 /* Let only one thread read the log at a time, blocking others */
4792 while (wl
->fwlog_size
== 0) {
4795 prepare_to_wait_exclusive(&wl
->fwlog_waitq
,
4797 TASK_INTERRUPTIBLE
);
4799 if (wl
->fwlog_size
!= 0) {
4800 finish_wait(&wl
->fwlog_waitq
, &wait
);
4804 mutex_unlock(&wl
->mutex
);
4807 finish_wait(&wl
->fwlog_waitq
, &wait
);
4809 if (signal_pending(current
))
4810 return -ERESTARTSYS
;
4812 ret
= mutex_lock_interruptible(&wl
->mutex
);
4814 return -ERESTARTSYS
;
4817 /* Check if the fwlog is still valid */
4818 if (wl
->fwlog_size
< 0) {
4819 mutex_unlock(&wl
->mutex
);
4823 /* Seeking is not supported - old logs are not kept. Disregard pos. */
4824 len
= min(count
, (size_t)wl
->fwlog_size
);
4825 wl
->fwlog_size
-= len
;
4826 memcpy(buffer
, wl
->fwlog
, len
);
4828 /* Make room for new messages */
4829 memmove(wl
->fwlog
, wl
->fwlog
+ len
, wl
->fwlog_size
);
4831 mutex_unlock(&wl
->mutex
);
4836 static struct bin_attribute fwlog_attr
= {
4837 .attr
= {.name
= "fwlog", .mode
= S_IRUSR
},
4838 .read
= wl1271_sysfs_read_fwlog
,
4841 static void wl1271_connection_loss_work(struct work_struct
*work
)
4843 struct delayed_work
*dwork
;
4845 struct ieee80211_vif
*vif
;
4846 struct wl12xx_vif
*wlvif
;
4848 dwork
= container_of(work
, struct delayed_work
, work
);
4849 wl
= container_of(dwork
, struct wl1271
, connection_loss_work
);
4851 wl1271_info("Connection loss work.");
4853 mutex_lock(&wl
->mutex
);
4855 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
4858 /* Call mac80211 connection loss */
4859 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
4860 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
4862 vif
= wl12xx_wlvif_to_vif(wlvif
);
4863 ieee80211_connection_loss(vif
);
4866 mutex_unlock(&wl
->mutex
);
4869 static void wl12xx_derive_mac_addresses(struct wl1271
*wl
,
4870 u32 oui
, u32 nic
, int n
)
4874 wl1271_debug(DEBUG_PROBE
, "base address: oui %06x nic %06x, n %d",
4877 if (nic
+ n
- 1 > 0xffffff)
4878 wl1271_warning("NIC part of the MAC address wraps around!");
4880 for (i
= 0; i
< n
; i
++) {
4881 wl
->addresses
[i
].addr
[0] = (u8
)(oui
>> 16);
4882 wl
->addresses
[i
].addr
[1] = (u8
)(oui
>> 8);
4883 wl
->addresses
[i
].addr
[2] = (u8
) oui
;
4884 wl
->addresses
[i
].addr
[3] = (u8
)(nic
>> 16);
4885 wl
->addresses
[i
].addr
[4] = (u8
)(nic
>> 8);
4886 wl
->addresses
[i
].addr
[5] = (u8
) nic
;
4890 wl
->hw
->wiphy
->n_addresses
= n
;
4891 wl
->hw
->wiphy
->addresses
= wl
->addresses
;
4894 static int wl12xx_get_hw_info(struct wl1271
*wl
)
4898 ret
= wl12xx_set_power_on(wl
);
4902 wl
->chip
.id
= wlcore_read_reg(wl
, REG_CHIP_ID_B
);
4904 wl
->fuse_oui_addr
= 0;
4905 wl
->fuse_nic_addr
= 0;
4907 wl
->hw_pg_ver
= wl
->ops
->get_pg_ver(wl
);
4909 if (wl
->ops
->get_mac
)
4910 wl
->ops
->get_mac(wl
);
4912 wl1271_power_off(wl
);
4917 static int wl1271_register_hw(struct wl1271
*wl
)
4920 u32 oui_addr
= 0, nic_addr
= 0;
4922 if (wl
->mac80211_registered
)
4925 ret
= wl1271_fetch_nvs(wl
);
4927 /* NOTE: The wl->nvs->nvs element must be first, in
4928 * order to simplify the casting, we assume it is at
4929 * the beginning of the wl->nvs structure.
4931 u8
*nvs_ptr
= (u8
*)wl
->nvs
;
4934 (nvs_ptr
[11] << 16) + (nvs_ptr
[10] << 8) + nvs_ptr
[6];
4936 (nvs_ptr
[5] << 16) + (nvs_ptr
[4] << 8) + nvs_ptr
[3];
4939 /* if the MAC address is zeroed in the NVS derive from fuse */
4940 if (oui_addr
== 0 && nic_addr
== 0) {
4941 oui_addr
= wl
->fuse_oui_addr
;
4942 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
4943 nic_addr
= wl
->fuse_nic_addr
+ 1;
4946 wl12xx_derive_mac_addresses(wl
, oui_addr
, nic_addr
, 2);
4948 ret
= ieee80211_register_hw(wl
->hw
);
4950 wl1271_error("unable to register mac80211 hw: %d", ret
);
4954 wl
->mac80211_registered
= true;
4956 wl1271_debugfs_init(wl
);
4958 wl1271_notice("loaded");
4964 static void wl1271_unregister_hw(struct wl1271
*wl
)
4967 wl1271_plt_stop(wl
);
4969 ieee80211_unregister_hw(wl
->hw
);
4970 wl
->mac80211_registered
= false;
4974 static int wl1271_init_ieee80211(struct wl1271
*wl
)
4976 static const u32 cipher_suites
[] = {
4977 WLAN_CIPHER_SUITE_WEP40
,
4978 WLAN_CIPHER_SUITE_WEP104
,
4979 WLAN_CIPHER_SUITE_TKIP
,
4980 WLAN_CIPHER_SUITE_CCMP
,
4981 WL1271_CIPHER_SUITE_GEM
,
4984 /* The tx descriptor buffer */
4985 wl
->hw
->extra_tx_headroom
= sizeof(struct wl1271_tx_hw_descr
);
4987 if (wl
->quirks
& WLCORE_QUIRK_TKIP_HEADER_SPACE
)
4988 wl
->hw
->extra_tx_headroom
+= WL1271_EXTRA_SPACE_TKIP
;
4991 /* FIXME: find a proper value */
4992 wl
->hw
->channel_change_time
= 10000;
4993 wl
->hw
->max_listen_interval
= wl
->conf
.conn
.max_listen_interval
;
4995 wl
->hw
->flags
= IEEE80211_HW_SIGNAL_DBM
|
4996 IEEE80211_HW_SUPPORTS_PS
|
4997 IEEE80211_HW_SUPPORTS_DYNAMIC_PS
|
4998 IEEE80211_HW_SUPPORTS_UAPSD
|
4999 IEEE80211_HW_HAS_RATE_CONTROL
|
5000 IEEE80211_HW_CONNECTION_MONITOR
|
5001 IEEE80211_HW_REPORTS_TX_ACK_STATUS
|
5002 IEEE80211_HW_SPECTRUM_MGMT
|
5003 IEEE80211_HW_AP_LINK_PS
|
5004 IEEE80211_HW_AMPDU_AGGREGATION
|
5005 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW
|
5006 IEEE80211_HW_SCAN_WHILE_IDLE
;
5008 wl
->hw
->wiphy
->cipher_suites
= cipher_suites
;
5009 wl
->hw
->wiphy
->n_cipher_suites
= ARRAY_SIZE(cipher_suites
);
5011 wl
->hw
->wiphy
->interface_modes
= BIT(NL80211_IFTYPE_STATION
) |
5012 BIT(NL80211_IFTYPE_ADHOC
) | BIT(NL80211_IFTYPE_AP
) |
5013 BIT(NL80211_IFTYPE_P2P_CLIENT
) | BIT(NL80211_IFTYPE_P2P_GO
);
5014 wl
->hw
->wiphy
->max_scan_ssids
= 1;
5015 wl
->hw
->wiphy
->max_sched_scan_ssids
= 16;
5016 wl
->hw
->wiphy
->max_match_sets
= 16;
5018 * Maximum length of elements in scanning probe request templates
5019 * should be the maximum length possible for a template, without
5020 * the IEEE80211 header of the template
5022 wl
->hw
->wiphy
->max_scan_ie_len
= WL1271_CMD_TEMPL_MAX_SIZE
-
5023 sizeof(struct ieee80211_header
);
5025 wl
->hw
->wiphy
->max_sched_scan_ie_len
= WL1271_CMD_TEMPL_MAX_SIZE
-
5026 sizeof(struct ieee80211_header
);
5028 wl
->hw
->wiphy
->flags
|= WIPHY_FLAG_AP_UAPSD
|
5029 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL
;
5031 /* make sure all our channels fit in the scanned_ch bitmask */
5032 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels
) +
5033 ARRAY_SIZE(wl1271_channels_5ghz
) >
5034 WL1271_MAX_CHANNELS
);
5036 * We keep local copies of the band structs because we need to
5037 * modify them on a per-device basis.
5039 memcpy(&wl
->bands
[IEEE80211_BAND_2GHZ
], &wl1271_band_2ghz
,
5040 sizeof(wl1271_band_2ghz
));
5041 memcpy(&wl
->bands
[IEEE80211_BAND_2GHZ
].ht_cap
,
5042 &wl
->ht_cap
[IEEE80211_BAND_2GHZ
],
5043 sizeof(*wl
->ht_cap
));
5044 memcpy(&wl
->bands
[IEEE80211_BAND_5GHZ
], &wl1271_band_5ghz
,
5045 sizeof(wl1271_band_5ghz
));
5046 memcpy(&wl
->bands
[IEEE80211_BAND_5GHZ
].ht_cap
,
5047 &wl
->ht_cap
[IEEE80211_BAND_5GHZ
],
5048 sizeof(*wl
->ht_cap
));
5050 wl
->hw
->wiphy
->bands
[IEEE80211_BAND_2GHZ
] =
5051 &wl
->bands
[IEEE80211_BAND_2GHZ
];
5052 wl
->hw
->wiphy
->bands
[IEEE80211_BAND_5GHZ
] =
5053 &wl
->bands
[IEEE80211_BAND_5GHZ
];
5056 wl
->hw
->max_rates
= 1;
5058 wl
->hw
->wiphy
->reg_notifier
= wl1271_reg_notify
;
5060 /* the FW answers probe-requests in AP-mode */
5061 wl
->hw
->wiphy
->flags
|= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD
;
5062 wl
->hw
->wiphy
->probe_resp_offload
=
5063 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS
|
5064 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2
|
5065 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P
;
5067 SET_IEEE80211_DEV(wl
->hw
, wl
->dev
);
5069 wl
->hw
->sta_data_size
= sizeof(struct wl1271_station
);
5070 wl
->hw
->vif_data_size
= sizeof(struct wl12xx_vif
);
5072 wl
->hw
->max_rx_aggregation_subframes
= wl
->conf
.ht
.rx_ba_win_size
;
5077 #define WL1271_DEFAULT_CHANNEL 0
5079 struct ieee80211_hw
*wlcore_alloc_hw(size_t priv_size
)
5081 struct ieee80211_hw
*hw
;
5086 BUILD_BUG_ON(AP_MAX_STATIONS
> WL12XX_MAX_LINKS
);
5088 hw
= ieee80211_alloc_hw(sizeof(*wl
), &wl1271_ops
);
5090 wl1271_error("could not alloc ieee80211_hw");
5096 memset(wl
, 0, sizeof(*wl
));
5098 wl
->priv
= kzalloc(priv_size
, GFP_KERNEL
);
5100 wl1271_error("could not alloc wl priv");
5102 goto err_priv_alloc
;
5105 INIT_LIST_HEAD(&wl
->wlvif_list
);
5109 for (i
= 0; i
< NUM_TX_QUEUES
; i
++)
5110 for (j
= 0; j
< WL12XX_MAX_LINKS
; j
++)
5111 skb_queue_head_init(&wl
->links
[j
].tx_queue
[i
]);
5113 skb_queue_head_init(&wl
->deferred_rx_queue
);
5114 skb_queue_head_init(&wl
->deferred_tx_queue
);
5116 INIT_DELAYED_WORK(&wl
->elp_work
, wl1271_elp_work
);
5117 INIT_WORK(&wl
->netstack_work
, wl1271_netstack_work
);
5118 INIT_WORK(&wl
->tx_work
, wl1271_tx_work
);
5119 INIT_WORK(&wl
->recovery_work
, wl1271_recovery_work
);
5120 INIT_DELAYED_WORK(&wl
->scan_complete_work
, wl1271_scan_complete_work
);
5121 INIT_DELAYED_WORK(&wl
->tx_watchdog_work
, wl12xx_tx_watchdog_work
);
5122 INIT_DELAYED_WORK(&wl
->connection_loss_work
,
5123 wl1271_connection_loss_work
);
5125 wl
->freezable_wq
= create_freezable_workqueue("wl12xx_wq");
5126 if (!wl
->freezable_wq
) {
5131 wl
->channel
= WL1271_DEFAULT_CHANNEL
;
5133 wl
->power_level
= WL1271_DEFAULT_POWER_LEVEL
;
5134 wl
->band
= IEEE80211_BAND_2GHZ
;
5135 wl
->channel_type
= NL80211_CHAN_NO_HT
;
5137 wl
->sg_enabled
= true;
5140 wl
->ap_fw_ps_map
= 0;
5142 wl
->platform_quirks
= 0;
5143 wl
->sched_scanning
= false;
5144 wl
->system_hlid
= WL12XX_SYSTEM_HLID
;
5145 wl
->active_sta_count
= 0;
5147 init_waitqueue_head(&wl
->fwlog_waitq
);
5149 /* The system link is always allocated */
5150 __set_bit(WL12XX_SYSTEM_HLID
, wl
->links_map
);
5152 memset(wl
->tx_frames_map
, 0, sizeof(wl
->tx_frames_map
));
5153 for (i
= 0; i
< wl
->num_tx_desc
; i
++)
5154 wl
->tx_frames
[i
] = NULL
;
5156 spin_lock_init(&wl
->wl_lock
);
5158 wl
->state
= WL1271_STATE_OFF
;
5159 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
5160 mutex_init(&wl
->mutex
);
5161 mutex_init(&wl
->flush_mutex
);
5163 order
= get_order(WL1271_AGGR_BUFFER_SIZE
);
5164 wl
->aggr_buf
= (u8
*)__get_free_pages(GFP_KERNEL
, order
);
5165 if (!wl
->aggr_buf
) {
5170 wl
->dummy_packet
= wl12xx_alloc_dummy_packet(wl
);
5171 if (!wl
->dummy_packet
) {
5176 /* Allocate one page for the FW log */
5177 wl
->fwlog
= (u8
*)get_zeroed_page(GFP_KERNEL
);
5180 goto err_dummy_packet
;
5183 wl
->mbox
= kmalloc(sizeof(*wl
->mbox
), GFP_KERNEL
| GFP_DMA
);
5192 free_page((unsigned long)wl
->fwlog
);
5195 dev_kfree_skb(wl
->dummy_packet
);
5198 free_pages((unsigned long)wl
->aggr_buf
, order
);
5201 destroy_workqueue(wl
->freezable_wq
);
5204 wl1271_debugfs_exit(wl
);
5208 ieee80211_free_hw(hw
);
5212 return ERR_PTR(ret
);
5214 EXPORT_SYMBOL_GPL(wlcore_alloc_hw
);
5216 int wlcore_free_hw(struct wl1271
*wl
)
5218 /* Unblock any fwlog readers */
5219 mutex_lock(&wl
->mutex
);
5220 wl
->fwlog_size
= -1;
5221 wake_up_interruptible_all(&wl
->fwlog_waitq
);
5222 mutex_unlock(&wl
->mutex
);
5224 device_remove_bin_file(wl
->dev
, &fwlog_attr
);
5226 device_remove_file(wl
->dev
, &dev_attr_hw_pg_ver
);
5228 device_remove_file(wl
->dev
, &dev_attr_bt_coex_state
);
5229 free_page((unsigned long)wl
->fwlog
);
5230 dev_kfree_skb(wl
->dummy_packet
);
5231 free_pages((unsigned long)wl
->aggr_buf
,
5232 get_order(WL1271_AGGR_BUFFER_SIZE
));
5234 wl1271_debugfs_exit(wl
);
5238 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
5242 kfree(wl
->fw_status_1
);
5243 kfree(wl
->tx_res_if
);
5244 destroy_workqueue(wl
->freezable_wq
);
5247 ieee80211_free_hw(wl
->hw
);
5251 EXPORT_SYMBOL_GPL(wlcore_free_hw
);
5253 static irqreturn_t
wl12xx_hardirq(int irq
, void *cookie
)
5255 struct wl1271
*wl
= cookie
;
5256 unsigned long flags
;
5258 wl1271_debug(DEBUG_IRQ
, "IRQ");
5260 /* complete the ELP completion */
5261 spin_lock_irqsave(&wl
->wl_lock
, flags
);
5262 set_bit(WL1271_FLAG_IRQ_RUNNING
, &wl
->flags
);
5263 if (wl
->elp_compl
) {
5264 complete(wl
->elp_compl
);
5265 wl
->elp_compl
= NULL
;
5268 if (test_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
)) {
5269 /* don't enqueue a work right now. mark it as pending */
5270 set_bit(WL1271_FLAG_PENDING_WORK
, &wl
->flags
);
5271 wl1271_debug(DEBUG_IRQ
, "should not enqueue work");
5272 disable_irq_nosync(wl
->irq
);
5273 pm_wakeup_event(wl
->dev
, 0);
5274 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
5277 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
5279 return IRQ_WAKE_THREAD
;
5282 int __devinit
wlcore_probe(struct wl1271
*wl
, struct platform_device
*pdev
)
5284 struct wl12xx_platform_data
*pdata
= pdev
->dev
.platform_data
;
5285 unsigned long irqflags
;
5288 if (!wl
->ops
|| !wl
->ptable
) {
5293 BUG_ON(wl
->num_tx_desc
> WLCORE_MAX_TX_DESCRIPTORS
);
5295 /* adjust some runtime configuration parameters */
5296 wlcore_adjust_conf(wl
);
5298 wl
->irq
= platform_get_irq(pdev
, 0);
5299 wl
->platform_quirks
= pdata
->platform_quirks
;
5300 wl
->set_power
= pdata
->set_power
;
5301 wl
->dev
= &pdev
->dev
;
5302 wl
->if_ops
= pdata
->ops
;
5304 platform_set_drvdata(pdev
, wl
);
5306 if (wl
->platform_quirks
& WL12XX_PLATFORM_QUIRK_EDGE_IRQ
)
5307 irqflags
= IRQF_TRIGGER_RISING
;
5309 irqflags
= IRQF_TRIGGER_HIGH
| IRQF_ONESHOT
;
5311 ret
= request_threaded_irq(wl
->irq
, wl12xx_hardirq
, wl1271_irq
,
5315 wl1271_error("request_irq() failed: %d", ret
);
5319 ret
= enable_irq_wake(wl
->irq
);
5321 wl
->irq_wake_enabled
= true;
5322 device_init_wakeup(wl
->dev
, 1);
5323 if (pdata
->pwr_in_suspend
) {
5324 wl
->hw
->wiphy
->wowlan
.flags
= WIPHY_WOWLAN_ANY
;
5325 wl
->hw
->wiphy
->wowlan
.n_patterns
=
5326 WL1271_MAX_RX_FILTERS
;
5327 wl
->hw
->wiphy
->wowlan
.pattern_min_len
= 1;
5328 wl
->hw
->wiphy
->wowlan
.pattern_max_len
=
5329 WL1271_RX_FILTER_MAX_PATTERN_SIZE
;
5332 disable_irq(wl
->irq
);
5334 ret
= wl12xx_get_hw_info(wl
);
5336 wl1271_error("couldn't get hw info");
5340 ret
= wl
->ops
->identify_chip(wl
);
5344 ret
= wl1271_init_ieee80211(wl
);
5348 ret
= wl1271_register_hw(wl
);
5352 /* Create sysfs file to control bt coex state */
5353 ret
= device_create_file(wl
->dev
, &dev_attr_bt_coex_state
);
5355 wl1271_error("failed to create sysfs file bt_coex_state");
5359 /* Create sysfs file to get HW PG version */
5360 ret
= device_create_file(wl
->dev
, &dev_attr_hw_pg_ver
);
5362 wl1271_error("failed to create sysfs file hw_pg_ver");
5363 goto out_bt_coex_state
;
5366 /* Create sysfs file for the FW log */
5367 ret
= device_create_bin_file(wl
->dev
, &fwlog_attr
);
5369 wl1271_error("failed to create sysfs file fwlog");
5376 device_remove_file(wl
->dev
, &dev_attr_hw_pg_ver
);
5379 device_remove_file(wl
->dev
, &dev_attr_bt_coex_state
);
5382 free_irq(wl
->irq
, wl
);
5390 EXPORT_SYMBOL_GPL(wlcore_probe
);
5392 int __devexit
wlcore_remove(struct platform_device
*pdev
)
5394 struct wl1271
*wl
= platform_get_drvdata(pdev
);
5396 if (wl
->irq_wake_enabled
) {
5397 device_init_wakeup(wl
->dev
, 0);
5398 disable_irq_wake(wl
->irq
);
5400 wl1271_unregister_hw(wl
);
5401 free_irq(wl
->irq
, wl
);
5406 EXPORT_SYMBOL_GPL(wlcore_remove
);
5408 u32 wl12xx_debug_level
= DEBUG_NONE
;
5409 EXPORT_SYMBOL_GPL(wl12xx_debug_level
);
5410 module_param_named(debug_level
, wl12xx_debug_level
, uint
, S_IRUSR
| S_IWUSR
);
5411 MODULE_PARM_DESC(debug_level
, "wl12xx debugging level");
5413 module_param_named(fwlog
, fwlog_param
, charp
, 0);
5414 MODULE_PARM_DESC(fwlog
,
5415 "FW logger options: continuous, ondemand, dbgpins or disable");
5417 module_param(bug_on_recovery
, bool, S_IRUSR
| S_IWUSR
);
5418 MODULE_PARM_DESC(bug_on_recovery
, "BUG() on fw recovery");
5420 module_param(no_recovery
, bool, S_IRUSR
| S_IWUSR
);
5421 MODULE_PARM_DESC(no_recovery
, "Prevent HW recovery. FW will remain stuck.");
5423 MODULE_LICENSE("GPL");
5424 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
5425 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");