3 * This file is part of wl1271
5 * Copyright (C) 2008-2010 Nokia Corporation
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
40 #include "wl12xx_80211.h"
54 #define WL1271_BOOT_RETRIES 3
56 #define WL1271_BOOT_RETRIES 3
58 static char *fwlog_param
;
59 static bool bug_on_recovery
;
60 static bool no_recovery
;
62 static void __wl1271_op_remove_interface(struct wl1271
*wl
,
63 struct ieee80211_vif
*vif
,
64 bool reset_tx_queues
);
65 static void wl1271_op_stop(struct ieee80211_hw
*hw
);
66 static void wl1271_free_ap_keys(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
);
68 static int wl12xx_set_authorized(struct wl1271
*wl
,
69 struct wl12xx_vif
*wlvif
)
73 if (WARN_ON(wlvif
->bss_type
!= BSS_TYPE_STA_BSS
))
76 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
79 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT
, &wlvif
->flags
))
82 ret
= wl12xx_cmd_set_peer_state(wl
, wlvif
->sta
.hlid
);
86 wl12xx_croc(wl
, wlvif
->role_id
);
88 wl1271_info("Association completed.");
92 static int wl1271_reg_notify(struct wiphy
*wiphy
,
93 struct regulatory_request
*request
)
95 struct ieee80211_supported_band
*band
;
96 struct ieee80211_channel
*ch
;
99 band
= wiphy
->bands
[IEEE80211_BAND_5GHZ
];
100 for (i
= 0; i
< band
->n_channels
; i
++) {
101 ch
= &band
->channels
[i
];
102 if (ch
->flags
& IEEE80211_CHAN_DISABLED
)
105 if (ch
->flags
& IEEE80211_CHAN_RADAR
)
106 ch
->flags
|= IEEE80211_CHAN_NO_IBSS
|
107 IEEE80211_CHAN_PASSIVE_SCAN
;
114 static int wl1271_set_rx_streaming(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
119 /* we should hold wl->mutex */
120 ret
= wl1271_acx_ps_rx_streaming(wl
, wlvif
, enable
);
125 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
);
127 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
);
133 * this function is being called when the rx_streaming interval
134 * has beed changed or rx_streaming should be disabled
136 int wl1271_recalc_rx_streaming(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
139 int period
= wl
->conf
.rx_streaming
.interval
;
141 /* don't reconfigure if rx_streaming is disabled */
142 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
))
145 /* reconfigure/disable according to new streaming_period */
147 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) &&
148 (wl
->conf
.rx_streaming
.always
||
149 test_bit(WL1271_FLAG_SOFT_GEMINI
, &wl
->flags
)))
150 ret
= wl1271_set_rx_streaming(wl
, wlvif
, true);
152 ret
= wl1271_set_rx_streaming(wl
, wlvif
, false);
153 /* don't cancel_work_sync since we might deadlock */
154 del_timer_sync(&wlvif
->rx_streaming_timer
);
160 static void wl1271_rx_streaming_enable_work(struct work_struct
*work
)
163 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
164 rx_streaming_enable_work
);
165 struct wl1271
*wl
= wlvif
->wl
;
167 mutex_lock(&wl
->mutex
);
169 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
) ||
170 !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) ||
171 (!wl
->conf
.rx_streaming
.always
&&
172 !test_bit(WL1271_FLAG_SOFT_GEMINI
, &wl
->flags
)))
175 if (!wl
->conf
.rx_streaming
.interval
)
178 ret
= wl1271_ps_elp_wakeup(wl
);
182 ret
= wl1271_set_rx_streaming(wl
, wlvif
, true);
186 /* stop it after some time of inactivity */
187 mod_timer(&wlvif
->rx_streaming_timer
,
188 jiffies
+ msecs_to_jiffies(wl
->conf
.rx_streaming
.duration
));
191 wl1271_ps_elp_sleep(wl
);
193 mutex_unlock(&wl
->mutex
);
196 static void wl1271_rx_streaming_disable_work(struct work_struct
*work
)
199 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
200 rx_streaming_disable_work
);
201 struct wl1271
*wl
= wlvif
->wl
;
203 mutex_lock(&wl
->mutex
);
205 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
))
208 ret
= wl1271_ps_elp_wakeup(wl
);
212 ret
= wl1271_set_rx_streaming(wl
, wlvif
, false);
217 wl1271_ps_elp_sleep(wl
);
219 mutex_unlock(&wl
->mutex
);
222 static void wl1271_rx_streaming_timer(unsigned long data
)
224 struct wl12xx_vif
*wlvif
= (struct wl12xx_vif
*)data
;
225 struct wl1271
*wl
= wlvif
->wl
;
226 ieee80211_queue_work(wl
->hw
, &wlvif
->rx_streaming_disable_work
);
229 /* wl->mutex must be taken */
230 void wl12xx_rearm_tx_watchdog_locked(struct wl1271
*wl
)
232 /* if the watchdog is not armed, don't do anything */
233 if (wl
->tx_allocated_blocks
== 0)
236 cancel_delayed_work(&wl
->tx_watchdog_work
);
237 ieee80211_queue_delayed_work(wl
->hw
, &wl
->tx_watchdog_work
,
238 msecs_to_jiffies(wl
->conf
.tx
.tx_watchdog_timeout
));
241 static void wl12xx_tx_watchdog_work(struct work_struct
*work
)
243 struct delayed_work
*dwork
;
246 dwork
= container_of(work
, struct delayed_work
, work
);
247 wl
= container_of(dwork
, struct wl1271
, tx_watchdog_work
);
249 mutex_lock(&wl
->mutex
);
251 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
254 /* Tx went out in the meantime - everything is ok */
255 if (unlikely(wl
->tx_allocated_blocks
== 0))
259 * if a ROC is in progress, we might not have any Tx for a long
260 * time (e.g. pending Tx on the non-ROC channels)
262 if (find_first_bit(wl
->roc_map
, WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
) {
263 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms due to ROC",
264 wl
->conf
.tx
.tx_watchdog_timeout
);
265 wl12xx_rearm_tx_watchdog_locked(wl
);
270 * if a scan is in progress, we might not have any Tx for a long
273 if (wl
->scan
.state
!= WL1271_SCAN_STATE_IDLE
) {
274 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms due to scan",
275 wl
->conf
.tx
.tx_watchdog_timeout
);
276 wl12xx_rearm_tx_watchdog_locked(wl
);
281 * AP might cache a frame for a long time for a sleeping station,
282 * so rearm the timer if there's an AP interface with stations. If
283 * Tx is genuinely stuck we will most hopefully discover it when all
284 * stations are removed due to inactivity.
286 if (wl
->active_sta_count
) {
287 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms. AP has "
289 wl
->conf
.tx
.tx_watchdog_timeout
,
290 wl
->active_sta_count
);
291 wl12xx_rearm_tx_watchdog_locked(wl
);
295 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
296 wl
->conf
.tx
.tx_watchdog_timeout
);
297 wl12xx_queue_recovery_work(wl
);
300 mutex_unlock(&wl
->mutex
);
303 static void wlcore_adjust_conf(struct wl1271
*wl
)
305 /* Adjust settings according to optional module parameters */
307 if (!strcmp(fwlog_param
, "continuous")) {
308 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_CONTINUOUS
;
309 } else if (!strcmp(fwlog_param
, "ondemand")) {
310 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_ON_DEMAND
;
311 } else if (!strcmp(fwlog_param
, "dbgpins")) {
312 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_CONTINUOUS
;
313 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_DBG_PINS
;
314 } else if (!strcmp(fwlog_param
, "disable")) {
315 wl
->conf
.fwlog
.mem_blocks
= 0;
316 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_NONE
;
318 wl1271_error("Unknown fwlog parameter %s", fwlog_param
);
323 static int wl1271_plt_init(struct wl1271
*wl
)
327 ret
= wl
->ops
->hw_init(wl
);
331 ret
= wl1271_acx_init_mem_config(wl
);
335 ret
= wl12xx_acx_mem_cfg(wl
);
337 goto out_free_memmap
;
339 /* Enable data path */
340 ret
= wl1271_cmd_data_path(wl
, 1);
342 goto out_free_memmap
;
344 /* Configure for CAM power saving (ie. always active) */
345 ret
= wl1271_acx_sleep_auth(wl
, WL1271_PSM_CAM
);
347 goto out_free_memmap
;
350 ret
= wl1271_acx_pm_config(wl
);
352 goto out_free_memmap
;
357 kfree(wl
->target_mem_map
);
358 wl
->target_mem_map
= NULL
;
363 static void wl12xx_irq_ps_regulate_link(struct wl1271
*wl
,
364 struct wl12xx_vif
*wlvif
,
367 bool fw_ps
, single_sta
;
369 fw_ps
= test_bit(hlid
, (unsigned long *)&wl
->ap_fw_ps_map
);
370 single_sta
= (wl
->active_sta_count
== 1);
373 * Wake up from high level PS if the STA is asleep with too little
374 * packets in FW or if the STA is awake.
376 if (!fw_ps
|| tx_pkts
< WL1271_PS_STA_MAX_PACKETS
)
377 wl12xx_ps_link_end(wl
, wlvif
, hlid
);
380 * Start high-level PS if the STA is asleep with enough blocks in FW.
381 * Make an exception if this is the only connected station. In this
382 * case FW-memory congestion is not a problem.
384 else if (!single_sta
&& fw_ps
&& tx_pkts
>= WL1271_PS_STA_MAX_PACKETS
)
385 wl12xx_ps_link_start(wl
, wlvif
, hlid
, true);
388 static void wl12xx_irq_update_links_status(struct wl1271
*wl
,
389 struct wl12xx_vif
*wlvif
,
390 struct wl_fw_status
*status
)
392 struct wl1271_link
*lnk
;
396 /* TODO: also use link_fast_bitmap here */
398 cur_fw_ps_map
= le32_to_cpu(status
->link_ps_bitmap
);
399 if (wl
->ap_fw_ps_map
!= cur_fw_ps_map
) {
400 wl1271_debug(DEBUG_PSM
,
401 "link ps prev 0x%x cur 0x%x changed 0x%x",
402 wl
->ap_fw_ps_map
, cur_fw_ps_map
,
403 wl
->ap_fw_ps_map
^ cur_fw_ps_map
);
405 wl
->ap_fw_ps_map
= cur_fw_ps_map
;
408 for_each_set_bit(hlid
, wlvif
->ap
.sta_hlid_map
, WL12XX_MAX_LINKS
) {
409 lnk
= &wl
->links
[hlid
];
410 cnt
= status
->counters
.tx_lnk_free_pkts
[hlid
] -
411 lnk
->prev_freed_pkts
;
413 lnk
->prev_freed_pkts
= status
->counters
.tx_lnk_free_pkts
[hlid
];
414 lnk
->allocated_pkts
-= cnt
;
416 wl12xx_irq_ps_regulate_link(wl
, wlvif
, hlid
,
417 lnk
->allocated_pkts
);
421 static void wl12xx_fw_status(struct wl1271
*wl
,
422 struct wl_fw_status
*status
)
424 struct wl12xx_vif
*wlvif
;
426 u32 old_tx_blk_count
= wl
->tx_blocks_available
;
427 int avail
, freed_blocks
;
431 status_len
= sizeof(*status
) + wl
->fw_status_priv_len
;
433 wlcore_raw_read_data(wl
, REG_RAW_FW_STATUS_ADDR
, status
,
436 wl1271_debug(DEBUG_IRQ
, "intr: 0x%x (fw_rx_counter = %d, "
437 "drv_rx_counter = %d, tx_results_counter = %d)",
439 status
->fw_rx_counter
,
440 status
->drv_rx_counter
,
441 status
->tx_results_counter
);
443 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
444 /* prevent wrap-around in freed-packets counter */
445 wl
->tx_allocated_pkts
[i
] -=
446 (status
->counters
.tx_released_pkts
[i
] -
447 wl
->tx_pkts_freed
[i
]) & 0xff;
449 wl
->tx_pkts_freed
[i
] = status
->counters
.tx_released_pkts
[i
];
452 /* prevent wrap-around in total blocks counter */
453 if (likely(wl
->tx_blocks_freed
<=
454 le32_to_cpu(status
->total_released_blks
)))
455 freed_blocks
= le32_to_cpu(status
->total_released_blks
) -
458 freed_blocks
= 0x100000000LL
- wl
->tx_blocks_freed
+
459 le32_to_cpu(status
->total_released_blks
);
461 wl
->tx_blocks_freed
= le32_to_cpu(status
->total_released_blks
);
463 wl
->tx_allocated_blocks
-= freed_blocks
;
466 * If the FW freed some blocks:
467 * If we still have allocated blocks - re-arm the timer, Tx is
468 * not stuck. Otherwise, cancel the timer (no Tx currently).
471 if (wl
->tx_allocated_blocks
)
472 wl12xx_rearm_tx_watchdog_locked(wl
);
474 cancel_delayed_work(&wl
->tx_watchdog_work
);
477 avail
= le32_to_cpu(status
->tx_total
) - wl
->tx_allocated_blocks
;
480 * The FW might change the total number of TX memblocks before
481 * we get a notification about blocks being released. Thus, the
482 * available blocks calculation might yield a temporary result
483 * which is lower than the actual available blocks. Keeping in
484 * mind that only blocks that were allocated can be moved from
485 * TX to RX, tx_blocks_available should never decrease here.
487 wl
->tx_blocks_available
= max((int)wl
->tx_blocks_available
,
490 /* if more blocks are available now, tx work can be scheduled */
491 if (wl
->tx_blocks_available
> old_tx_blk_count
)
492 clear_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
);
494 /* for AP update num of allocated TX blocks per link and ps status */
495 wl12xx_for_each_wlvif_ap(wl
, wlvif
) {
496 wl12xx_irq_update_links_status(wl
, wlvif
, status
);
499 /* update the host-chipset time offset */
501 wl
->time_offset
= (timespec_to_ns(&ts
) >> 10) -
502 (s64
)le32_to_cpu(status
->fw_localtime
);
505 static void wl1271_flush_deferred_work(struct wl1271
*wl
)
509 /* Pass all received frames to the network stack */
510 while ((skb
= skb_dequeue(&wl
->deferred_rx_queue
)))
511 ieee80211_rx_ni(wl
->hw
, skb
);
513 /* Return sent skbs to the network stack */
514 while ((skb
= skb_dequeue(&wl
->deferred_tx_queue
)))
515 ieee80211_tx_status_ni(wl
->hw
, skb
);
518 static void wl1271_netstack_work(struct work_struct
*work
)
521 container_of(work
, struct wl1271
, netstack_work
);
524 wl1271_flush_deferred_work(wl
);
525 } while (skb_queue_len(&wl
->deferred_rx_queue
));
528 #define WL1271_IRQ_MAX_LOOPS 256
530 static irqreturn_t
wl1271_irq(int irq
, void *cookie
)
534 int loopcount
= WL1271_IRQ_MAX_LOOPS
;
535 struct wl1271
*wl
= (struct wl1271
*)cookie
;
537 unsigned int defer_count
;
540 /* TX might be handled here, avoid redundant work */
541 set_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
);
542 cancel_work_sync(&wl
->tx_work
);
545 * In case edge triggered interrupt must be used, we cannot iterate
546 * more than once without introducing race conditions with the hardirq.
548 if (wl
->platform_quirks
& WL12XX_PLATFORM_QUIRK_EDGE_IRQ
)
551 mutex_lock(&wl
->mutex
);
553 wl1271_debug(DEBUG_IRQ
, "IRQ work");
555 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
558 ret
= wl1271_ps_elp_wakeup(wl
);
562 while (!done
&& loopcount
--) {
564 * In order to avoid a race with the hardirq, clear the flag
565 * before acknowledging the chip. Since the mutex is held,
566 * wl1271_ps_elp_wakeup cannot be called concurrently.
568 clear_bit(WL1271_FLAG_IRQ_RUNNING
, &wl
->flags
);
569 smp_mb__after_clear_bit();
571 wl12xx_fw_status(wl
, wl
->fw_status
);
573 wlcore_hw_tx_immediate_compl(wl
);
575 intr
= le32_to_cpu(wl
->fw_status
->intr
);
576 intr
&= WL1271_INTR_MASK
;
582 if (unlikely(intr
& WL1271_ACX_INTR_WATCHDOG
)) {
583 wl1271_error("watchdog interrupt received! "
584 "starting recovery.");
585 wl12xx_queue_recovery_work(wl
);
587 /* restarting the chip. ignore any other interrupt. */
591 if (likely(intr
& WL1271_ACX_INTR_DATA
)) {
592 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_DATA");
594 wl12xx_rx(wl
, wl
->fw_status
);
596 /* Check if any tx blocks were freed */
597 spin_lock_irqsave(&wl
->wl_lock
, flags
);
598 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
599 wl1271_tx_total_queue_count(wl
) > 0) {
600 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
602 * In order to avoid starvation of the TX path,
603 * call the work function directly.
605 wl1271_tx_work_locked(wl
);
607 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
610 /* check for tx results */
611 wlcore_hw_tx_delayed_compl(wl
);
613 /* Make sure the deferred queues don't get too long */
614 defer_count
= skb_queue_len(&wl
->deferred_tx_queue
) +
615 skb_queue_len(&wl
->deferred_rx_queue
);
616 if (defer_count
> WL1271_DEFERRED_QUEUE_LIMIT
)
617 wl1271_flush_deferred_work(wl
);
620 if (intr
& WL1271_ACX_INTR_EVENT_A
) {
621 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_EVENT_A");
622 wl1271_event_handle(wl
, 0);
625 if (intr
& WL1271_ACX_INTR_EVENT_B
) {
626 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_EVENT_B");
627 wl1271_event_handle(wl
, 1);
630 if (intr
& WL1271_ACX_INTR_INIT_COMPLETE
)
631 wl1271_debug(DEBUG_IRQ
,
632 "WL1271_ACX_INTR_INIT_COMPLETE");
634 if (intr
& WL1271_ACX_INTR_HW_AVAILABLE
)
635 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_HW_AVAILABLE");
638 wl1271_ps_elp_sleep(wl
);
641 spin_lock_irqsave(&wl
->wl_lock
, flags
);
642 /* In case TX was not handled here, queue TX work */
643 clear_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
);
644 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
645 wl1271_tx_total_queue_count(wl
) > 0)
646 ieee80211_queue_work(wl
->hw
, &wl
->tx_work
);
647 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
649 mutex_unlock(&wl
->mutex
);
654 struct vif_counter_data
{
657 struct ieee80211_vif
*cur_vif
;
658 bool cur_vif_running
;
661 static void wl12xx_vif_count_iter(void *data
, u8
*mac
,
662 struct ieee80211_vif
*vif
)
664 struct vif_counter_data
*counter
= data
;
667 if (counter
->cur_vif
== vif
)
668 counter
->cur_vif_running
= true;
671 /* caller must not hold wl->mutex, as it might deadlock */
672 static void wl12xx_get_vif_count(struct ieee80211_hw
*hw
,
673 struct ieee80211_vif
*cur_vif
,
674 struct vif_counter_data
*data
)
676 memset(data
, 0, sizeof(*data
));
677 data
->cur_vif
= cur_vif
;
679 ieee80211_iterate_active_interfaces(hw
,
680 wl12xx_vif_count_iter
, data
);
683 static int wl12xx_fetch_firmware(struct wl1271
*wl
, bool plt
)
685 const struct firmware
*fw
;
687 enum wl12xx_fw_type fw_type
;
691 fw_type
= WL12XX_FW_TYPE_PLT
;
692 fw_name
= wl
->plt_fw_name
;
695 * we can't call wl12xx_get_vif_count() here because
696 * wl->mutex is taken, so use the cached last_vif_count value
698 if (wl
->last_vif_count
> 1) {
699 fw_type
= WL12XX_FW_TYPE_MULTI
;
700 fw_name
= wl
->mr_fw_name
;
702 fw_type
= WL12XX_FW_TYPE_NORMAL
;
703 fw_name
= wl
->sr_fw_name
;
707 if (wl
->fw_type
== fw_type
)
710 wl1271_debug(DEBUG_BOOT
, "booting firmware %s", fw_name
);
712 ret
= request_firmware(&fw
, fw_name
, wl
->dev
);
715 wl1271_error("could not get firmware %s: %d", fw_name
, ret
);
720 wl1271_error("firmware size is not multiple of 32 bits: %zu",
727 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
728 wl
->fw_len
= fw
->size
;
729 wl
->fw
= vmalloc(wl
->fw_len
);
732 wl1271_error("could not allocate memory for the firmware");
737 memcpy(wl
->fw
, fw
->data
, wl
->fw_len
);
739 wl
->fw_type
= fw_type
;
741 release_firmware(fw
);
746 static int wl1271_fetch_nvs(struct wl1271
*wl
)
748 const struct firmware
*fw
;
751 ret
= request_firmware(&fw
, WL12XX_NVS_NAME
, wl
->dev
);
754 wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME
,
759 wl
->nvs
= kmemdup(fw
->data
, fw
->size
, GFP_KERNEL
);
762 wl1271_error("could not allocate memory for the nvs file");
767 wl
->nvs_len
= fw
->size
;
770 release_firmware(fw
);
775 void wl12xx_queue_recovery_work(struct wl1271
*wl
)
777 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
))
778 ieee80211_queue_work(wl
->hw
, &wl
->recovery_work
);
781 size_t wl12xx_copy_fwlog(struct wl1271
*wl
, u8
*memblock
, size_t maxlen
)
785 /* The FW log is a length-value list, find where the log end */
786 while (len
< maxlen
) {
787 if (memblock
[len
] == 0)
789 if (len
+ memblock
[len
] + 1 > maxlen
)
791 len
+= memblock
[len
] + 1;
794 /* Make sure we have enough room */
795 len
= min(len
, (size_t)(PAGE_SIZE
- wl
->fwlog_size
));
797 /* Fill the FW log file, consumed by the sysfs fwlog entry */
798 memcpy(wl
->fwlog
+ wl
->fwlog_size
, memblock
, len
);
799 wl
->fwlog_size
+= len
;
804 static void wl12xx_read_fwlog_panic(struct wl1271
*wl
)
810 if ((wl
->quirks
& WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED
) ||
811 (wl
->conf
.fwlog
.mode
!= WL12XX_FWLOG_ON_DEMAND
) ||
812 (wl
->conf
.fwlog
.mem_blocks
== 0))
815 wl1271_info("Reading FW panic log");
817 block
= kmalloc(WL12XX_HW_BLOCK_SIZE
, GFP_KERNEL
);
822 * Make sure the chip is awake and the logger isn't active.
823 * This might fail if the firmware hanged.
825 if (!wl1271_ps_elp_wakeup(wl
))
826 wl12xx_cmd_stop_fwlog(wl
);
828 /* Read the first memory block address */
829 wl12xx_fw_status(wl
, wl
->fw_status
);
830 first_addr
= le32_to_cpu(wl
->fw_status
->log_start_addr
);
834 /* Traverse the memory blocks linked list */
837 memset(block
, 0, WL12XX_HW_BLOCK_SIZE
);
838 wl1271_read_hwaddr(wl
, addr
, block
, WL12XX_HW_BLOCK_SIZE
,
842 * Memory blocks are linked to one another. The first 4 bytes
843 * of each memory block hold the hardware address of the next
844 * one. The last memory block points to the first one.
846 addr
= le32_to_cpup((__le32
*)block
);
847 if (!wl12xx_copy_fwlog(wl
, block
+ sizeof(addr
),
848 WL12XX_HW_BLOCK_SIZE
- sizeof(addr
)))
850 } while (addr
&& (addr
!= first_addr
));
852 wake_up_interruptible(&wl
->fwlog_waitq
);
858 static void wl1271_recovery_work(struct work_struct
*work
)
861 container_of(work
, struct wl1271
, recovery_work
);
862 struct wl12xx_vif
*wlvif
;
863 struct ieee80211_vif
*vif
;
865 mutex_lock(&wl
->mutex
);
867 if (wl
->state
!= WL1271_STATE_ON
|| wl
->plt
)
870 /* Avoid a recursive recovery */
871 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
);
873 wl12xx_read_fwlog_panic(wl
);
875 wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x",
877 wlcore_read_reg(wl
, REG_PC_ON_RECOVERY
));
879 BUG_ON(bug_on_recovery
&&
880 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
));
883 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
884 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
);
888 BUG_ON(bug_on_recovery
);
891 * Advance security sequence number to overcome potential progress
892 * in the firmware during recovery. This doens't hurt if the network is
895 wl12xx_for_each_wlvif(wl
, wlvif
) {
896 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) ||
897 test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
))
898 wlvif
->tx_security_seq
+=
899 WL1271_TX_SQN_POST_RECOVERY_PADDING
;
902 /* Prevent spurious TX during FW restart */
903 ieee80211_stop_queues(wl
->hw
);
905 if (wl
->sched_scanning
) {
906 ieee80211_sched_scan_stopped(wl
->hw
);
907 wl
->sched_scanning
= false;
910 /* reboot the chipset */
911 while (!list_empty(&wl
->wlvif_list
)) {
912 wlvif
= list_first_entry(&wl
->wlvif_list
,
913 struct wl12xx_vif
, list
);
914 vif
= wl12xx_wlvif_to_vif(wlvif
);
915 __wl1271_op_remove_interface(wl
, vif
, false);
917 mutex_unlock(&wl
->mutex
);
918 wl1271_op_stop(wl
->hw
);
920 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
);
922 ieee80211_restart_hw(wl
->hw
);
925 * Its safe to enable TX now - the queues are stopped after a request
928 ieee80211_wake_queues(wl
->hw
);
931 mutex_unlock(&wl
->mutex
);
934 static void wl1271_fw_wakeup(struct wl1271
*wl
)
936 wl1271_raw_write32(wl
, HW_ACCESS_ELP_CTRL_REG
, ELPCTRL_WAKE_UP
);
939 static int wl1271_setup(struct wl1271
*wl
)
941 wl
->fw_status
= kmalloc(sizeof(*wl
->fw_status
), GFP_KERNEL
);
945 wl
->tx_res_if
= kmalloc(sizeof(*wl
->tx_res_if
), GFP_KERNEL
);
946 if (!wl
->tx_res_if
) {
947 kfree(wl
->fw_status
);
954 static int wl12xx_set_power_on(struct wl1271
*wl
)
958 msleep(WL1271_PRE_POWER_ON_SLEEP
);
959 ret
= wl1271_power_on(wl
);
962 msleep(WL1271_POWER_ON_SLEEP
);
966 wlcore_set_partition(wl
, &wl
->ptable
[PART_BOOT
]);
968 /* ELP module wake up */
969 wl1271_fw_wakeup(wl
);
975 static int wl12xx_chip_wakeup(struct wl1271
*wl
, bool plt
)
979 ret
= wl12xx_set_power_on(wl
);
984 * For wl127x based devices we could use the default block
985 * size (512 bytes), but due to a bug in the sdio driver, we
986 * need to set it explicitly after the chip is powered on. To
987 * simplify the code and since the performance impact is
988 * negligible, we use the same block size for all different
991 if (wl1271_set_block_size(wl
))
992 wl
->quirks
|= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN
;
994 ret
= wl
->ops
->identify_chip(wl
);
998 /* TODO: make sure the lower driver has set things up correctly */
1000 ret
= wl1271_setup(wl
);
1004 ret
= wl12xx_fetch_firmware(wl
, plt
);
1008 /* No NVS from netlink, try to get it from the filesystem */
1009 if (wl
->nvs
== NULL
) {
1010 ret
= wl1271_fetch_nvs(wl
);
1019 int wl1271_plt_start(struct wl1271
*wl
)
1021 int retries
= WL1271_BOOT_RETRIES
;
1022 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
1025 mutex_lock(&wl
->mutex
);
1027 wl1271_notice("power up");
1029 if (wl
->state
!= WL1271_STATE_OFF
) {
1030 wl1271_error("cannot go into PLT state because not "
1031 "in off state: %d", wl
->state
);
1038 ret
= wl12xx_chip_wakeup(wl
, true);
1042 ret
= wl
->ops
->boot(wl
);
1046 ret
= wl1271_plt_init(wl
);
1051 wl
->state
= WL1271_STATE_ON
;
1052 wl1271_notice("firmware booted in PLT mode (%s)",
1053 wl
->chip
.fw_ver_str
);
1055 /* update hw/fw version info in wiphy struct */
1056 wiphy
->hw_version
= wl
->chip
.id
;
1057 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
1058 sizeof(wiphy
->fw_version
));
1063 mutex_unlock(&wl
->mutex
);
1064 /* Unlocking the mutex in the middle of handling is
1065 inherently unsafe. In this case we deem it safe to do,
1066 because we need to let any possibly pending IRQ out of
1067 the system (and while we are WL1271_STATE_OFF the IRQ
1068 work function will not do anything.) Also, any other
1069 possible concurrent operations will fail due to the
1070 current state, hence the wl1271 struct should be safe. */
1071 wlcore_disable_interrupts(wl
);
1072 wl1271_flush_deferred_work(wl
);
1073 cancel_work_sync(&wl
->netstack_work
);
1074 mutex_lock(&wl
->mutex
);
1076 wl1271_power_off(wl
);
1079 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1080 WL1271_BOOT_RETRIES
);
1082 mutex_unlock(&wl
->mutex
);
1087 int wl1271_plt_stop(struct wl1271
*wl
)
1091 wl1271_notice("power down");
1094 * Interrupts must be disabled before setting the state to OFF.
1095 * Otherwise, the interrupt handler might be called and exit without
1096 * reading the interrupt status.
1098 wlcore_disable_interrupts(wl
);
1099 mutex_lock(&wl
->mutex
);
1101 mutex_unlock(&wl
->mutex
);
1104 * This will not necessarily enable interrupts as interrupts
1105 * may have been disabled when op_stop was called. It will,
1106 * however, balance the above call to disable_interrupts().
1108 wlcore_enable_interrupts(wl
);
1110 wl1271_error("cannot power down because not in PLT "
1111 "state: %d", wl
->state
);
1116 mutex_unlock(&wl
->mutex
);
1118 wl1271_flush_deferred_work(wl
);
1119 cancel_work_sync(&wl
->netstack_work
);
1120 cancel_work_sync(&wl
->recovery_work
);
1121 cancel_delayed_work_sync(&wl
->elp_work
);
1122 cancel_delayed_work_sync(&wl
->tx_watchdog_work
);
1123 cancel_delayed_work_sync(&wl
->connection_loss_work
);
1125 mutex_lock(&wl
->mutex
);
1126 wl1271_power_off(wl
);
1128 wl
->state
= WL1271_STATE_OFF
;
1131 mutex_unlock(&wl
->mutex
);
1137 static void wl1271_op_tx(struct ieee80211_hw
*hw
, struct sk_buff
*skb
)
1139 struct wl1271
*wl
= hw
->priv
;
1140 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
1141 struct ieee80211_vif
*vif
= info
->control
.vif
;
1142 struct wl12xx_vif
*wlvif
= NULL
;
1143 unsigned long flags
;
1148 wlvif
= wl12xx_vif_to_data(vif
);
1150 mapping
= skb_get_queue_mapping(skb
);
1151 q
= wl1271_tx_get_queue(mapping
);
1153 hlid
= wl12xx_tx_get_hlid(wl
, wlvif
, skb
);
1155 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1157 /* queue the packet */
1158 if (hlid
== WL12XX_INVALID_LINK_ID
||
1159 (wlvif
&& !test_bit(hlid
, wlvif
->links_map
))) {
1160 wl1271_debug(DEBUG_TX
, "DROP skb hlid %d q %d", hlid
, q
);
1161 ieee80211_free_txskb(hw
, skb
);
1165 wl1271_debug(DEBUG_TX
, "queue skb hlid %d q %d len %d",
1167 skb_queue_tail(&wl
->links
[hlid
].tx_queue
[q
], skb
);
1169 wl
->tx_queue_count
[q
]++;
1172 * The workqueue is slow to process the tx_queue and we need stop
1173 * the queue here, otherwise the queue will get too long.
1175 if (wl
->tx_queue_count
[q
] >= WL1271_TX_QUEUE_HIGH_WATERMARK
) {
1176 wl1271_debug(DEBUG_TX
, "op_tx: stopping queues for q %d", q
);
1177 ieee80211_stop_queue(wl
->hw
, mapping
);
1178 set_bit(q
, &wl
->stopped_queues_map
);
1182 * The chip specific setup must run before the first TX packet -
1183 * before that, the tx_work will not be initialized!
1186 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
1187 !test_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
))
1188 ieee80211_queue_work(wl
->hw
, &wl
->tx_work
);
1191 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1194 int wl1271_tx_dummy_packet(struct wl1271
*wl
)
1196 unsigned long flags
;
1199 /* no need to queue a new dummy packet if one is already pending */
1200 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING
, &wl
->flags
))
1203 q
= wl1271_tx_get_queue(skb_get_queue_mapping(wl
->dummy_packet
));
1205 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1206 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING
, &wl
->flags
);
1207 wl
->tx_queue_count
[q
]++;
1208 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1210 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1211 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
))
1212 wl1271_tx_work_locked(wl
);
1215 * If the FW TX is busy, TX work will be scheduled by the threaded
1216 * interrupt handler function
1222 * The size of the dummy packet should be at least 1400 bytes. However, in
1223 * order to minimize the number of bus transactions, aligning it to 512 bytes
1224 * boundaries could be beneficial, performance wise
1226 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1228 static struct sk_buff
*wl12xx_alloc_dummy_packet(struct wl1271
*wl
)
1230 struct sk_buff
*skb
;
1231 struct ieee80211_hdr_3addr
*hdr
;
1232 unsigned int dummy_packet_size
;
1234 dummy_packet_size
= TOTAL_TX_DUMMY_PACKET_SIZE
-
1235 sizeof(struct wl1271_tx_hw_descr
) - sizeof(*hdr
);
1237 skb
= dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE
);
1239 wl1271_warning("Failed to allocate a dummy packet skb");
1243 skb_reserve(skb
, sizeof(struct wl1271_tx_hw_descr
));
1245 hdr
= (struct ieee80211_hdr_3addr
*) skb_put(skb
, sizeof(*hdr
));
1246 memset(hdr
, 0, sizeof(*hdr
));
1247 hdr
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_DATA
|
1248 IEEE80211_STYPE_NULLFUNC
|
1249 IEEE80211_FCTL_TODS
);
1251 memset(skb_put(skb
, dummy_packet_size
), 0, dummy_packet_size
);
1253 /* Dummy packets require the TID to be management */
1254 skb
->priority
= WL1271_TID_MGMT
;
1256 /* Initialize all fields that might be used */
1257 skb_set_queue_mapping(skb
, 0);
1258 memset(IEEE80211_SKB_CB(skb
), 0, sizeof(struct ieee80211_tx_info
));
1266 wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern
*p
)
1268 int num_fields
= 0, in_field
= 0, fields_size
= 0;
1269 int i
, pattern_len
= 0;
1272 wl1271_warning("No mask in WoWLAN pattern");
1277 * The pattern is broken up into segments of bytes at different offsets
1278 * that need to be checked by the FW filter. Each segment is called
1279 * a field in the FW API. We verify that the total number of fields
1280 * required for this pattern won't exceed FW limits (8)
1281 * as well as the total fields buffer won't exceed the FW limit.
1282 * Note that if there's a pattern which crosses Ethernet/IP header
1283 * boundary a new field is required.
1285 for (i
= 0; i
< p
->pattern_len
; i
++) {
1286 if (test_bit(i
, (unsigned long *)p
->mask
)) {
1291 if (i
== WL1271_RX_FILTER_ETH_HEADER_SIZE
) {
1293 fields_size
+= pattern_len
+
1294 RX_FILTER_FIELD_OVERHEAD
;
1302 fields_size
+= pattern_len
+
1303 RX_FILTER_FIELD_OVERHEAD
;
1310 fields_size
+= pattern_len
+ RX_FILTER_FIELD_OVERHEAD
;
1314 if (num_fields
> WL1271_RX_FILTER_MAX_FIELDS
) {
1315 wl1271_warning("RX Filter too complex. Too many segments");
1319 if (fields_size
> WL1271_RX_FILTER_MAX_FIELDS_SIZE
) {
1320 wl1271_warning("RX filter pattern is too big");
1327 struct wl12xx_rx_filter
*wl1271_rx_filter_alloc(void)
1329 return kzalloc(sizeof(struct wl12xx_rx_filter
), GFP_KERNEL
);
1332 void wl1271_rx_filter_free(struct wl12xx_rx_filter
*filter
)
1339 for (i
= 0; i
< filter
->num_fields
; i
++)
1340 kfree(filter
->fields
[i
].pattern
);
1345 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter
*filter
,
1346 u16 offset
, u8 flags
,
1347 u8
*pattern
, u8 len
)
1349 struct wl12xx_rx_filter_field
*field
;
1351 if (filter
->num_fields
== WL1271_RX_FILTER_MAX_FIELDS
) {
1352 wl1271_warning("Max fields per RX filter. can't alloc another");
1356 field
= &filter
->fields
[filter
->num_fields
];
1358 field
->pattern
= kzalloc(len
, GFP_KERNEL
);
1359 if (!field
->pattern
) {
1360 wl1271_warning("Failed to allocate RX filter pattern");
1364 filter
->num_fields
++;
1366 field
->offset
= cpu_to_le16(offset
);
1367 field
->flags
= flags
;
1369 memcpy(field
->pattern
, pattern
, len
);
1374 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter
*filter
)
1376 int i
, fields_size
= 0;
1378 for (i
= 0; i
< filter
->num_fields
; i
++)
1379 fields_size
+= filter
->fields
[i
].len
+
1380 sizeof(struct wl12xx_rx_filter_field
) -
1386 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter
*filter
,
1390 struct wl12xx_rx_filter_field
*field
;
1392 for (i
= 0; i
< filter
->num_fields
; i
++) {
1393 field
= (struct wl12xx_rx_filter_field
*)buf
;
1395 field
->offset
= filter
->fields
[i
].offset
;
1396 field
->flags
= filter
->fields
[i
].flags
;
1397 field
->len
= filter
->fields
[i
].len
;
1399 memcpy(&field
->pattern
, filter
->fields
[i
].pattern
, field
->len
);
1400 buf
+= sizeof(struct wl12xx_rx_filter_field
) -
1401 sizeof(u8
*) + field
->len
;
1406 * Allocates an RX filter returned through f
1407 * which needs to be freed using rx_filter_free()
1409 static int wl1271_convert_wowlan_pattern_to_rx_filter(
1410 struct cfg80211_wowlan_trig_pkt_pattern
*p
,
1411 struct wl12xx_rx_filter
**f
)
1414 struct wl12xx_rx_filter
*filter
;
1418 filter
= wl1271_rx_filter_alloc();
1420 wl1271_warning("Failed to alloc rx filter");
1426 while (i
< p
->pattern_len
) {
1427 if (!test_bit(i
, (unsigned long *)p
->mask
)) {
1432 for (j
= i
; j
< p
->pattern_len
; j
++) {
1433 if (!test_bit(j
, (unsigned long *)p
->mask
))
1436 if (i
< WL1271_RX_FILTER_ETH_HEADER_SIZE
&&
1437 j
>= WL1271_RX_FILTER_ETH_HEADER_SIZE
)
1441 if (i
< WL1271_RX_FILTER_ETH_HEADER_SIZE
) {
1443 flags
= WL1271_RX_FILTER_FLAG_ETHERNET_HEADER
;
1445 offset
= i
- WL1271_RX_FILTER_ETH_HEADER_SIZE
;
1446 flags
= WL1271_RX_FILTER_FLAG_IP_HEADER
;
1451 ret
= wl1271_rx_filter_alloc_field(filter
,
1454 &p
->pattern
[i
], len
);
1461 filter
->action
= FILTER_SIGNAL
;
1467 wl1271_rx_filter_free(filter
);
1473 static int wl1271_configure_wowlan(struct wl1271
*wl
,
1474 struct cfg80211_wowlan
*wow
)
1478 if (!wow
|| wow
->any
|| !wow
->n_patterns
) {
1479 wl1271_acx_default_rx_filter_enable(wl
, 0, FILTER_SIGNAL
);
1480 wl1271_rx_filter_clear_all(wl
);
1484 if (WARN_ON(wow
->n_patterns
> WL1271_MAX_RX_FILTERS
))
1487 /* Validate all incoming patterns before clearing current FW state */
1488 for (i
= 0; i
< wow
->n_patterns
; i
++) {
1489 ret
= wl1271_validate_wowlan_pattern(&wow
->patterns
[i
]);
1491 wl1271_warning("Bad wowlan pattern %d", i
);
1496 wl1271_acx_default_rx_filter_enable(wl
, 0, FILTER_SIGNAL
);
1497 wl1271_rx_filter_clear_all(wl
);
1499 /* Translate WoWLAN patterns into filters */
1500 for (i
= 0; i
< wow
->n_patterns
; i
++) {
1501 struct cfg80211_wowlan_trig_pkt_pattern
*p
;
1502 struct wl12xx_rx_filter
*filter
= NULL
;
1504 p
= &wow
->patterns
[i
];
1506 ret
= wl1271_convert_wowlan_pattern_to_rx_filter(p
, &filter
);
1508 wl1271_warning("Failed to create an RX filter from "
1509 "wowlan pattern %d", i
);
1513 ret
= wl1271_rx_filter_enable(wl
, i
, 1, filter
);
1515 wl1271_rx_filter_free(filter
);
1520 ret
= wl1271_acx_default_rx_filter_enable(wl
, 1, FILTER_DROP
);
1526 static int wl1271_configure_suspend_sta(struct wl1271
*wl
,
1527 struct wl12xx_vif
*wlvif
,
1528 struct cfg80211_wowlan
*wow
)
1532 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
1535 ret
= wl1271_ps_elp_wakeup(wl
);
1539 wl1271_configure_wowlan(wl
, wow
);
1540 ret
= wl1271_acx_wake_up_conditions(wl
, wlvif
,
1541 wl
->conf
.conn
.suspend_wake_up_event
,
1542 wl
->conf
.conn
.suspend_listen_interval
);
1545 wl1271_error("suspend: set wake up conditions failed: %d", ret
);
1547 wl1271_ps_elp_sleep(wl
);
1554 static int wl1271_configure_suspend_ap(struct wl1271
*wl
,
1555 struct wl12xx_vif
*wlvif
)
1559 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
))
1562 ret
= wl1271_ps_elp_wakeup(wl
);
1566 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, true);
1568 wl1271_ps_elp_sleep(wl
);
1574 static int wl1271_configure_suspend(struct wl1271
*wl
,
1575 struct wl12xx_vif
*wlvif
,
1576 struct cfg80211_wowlan
*wow
)
1578 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
)
1579 return wl1271_configure_suspend_sta(wl
, wlvif
, wow
);
1580 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
1581 return wl1271_configure_suspend_ap(wl
, wlvif
);
1585 static void wl1271_configure_resume(struct wl1271
*wl
,
1586 struct wl12xx_vif
*wlvif
)
1589 bool is_ap
= wlvif
->bss_type
== BSS_TYPE_AP_BSS
;
1590 bool is_sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
1592 if ((!is_ap
) && (!is_sta
))
1595 ret
= wl1271_ps_elp_wakeup(wl
);
1600 wl1271_configure_wowlan(wl
, NULL
);
1602 ret
= wl1271_acx_wake_up_conditions(wl
, wlvif
,
1603 wl
->conf
.conn
.wake_up_event
,
1604 wl
->conf
.conn
.listen_interval
);
1607 wl1271_error("resume: wake up conditions failed: %d",
1611 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, false);
1614 wl1271_ps_elp_sleep(wl
);
1617 static int wl1271_op_suspend(struct ieee80211_hw
*hw
,
1618 struct cfg80211_wowlan
*wow
)
1620 struct wl1271
*wl
= hw
->priv
;
1621 struct wl12xx_vif
*wlvif
;
1624 wl1271_debug(DEBUG_MAC80211
, "mac80211 suspend wow=%d", !!wow
);
1627 wl1271_tx_flush(wl
);
1629 mutex_lock(&wl
->mutex
);
1630 wl
->wow_enabled
= true;
1631 wl12xx_for_each_wlvif(wl
, wlvif
) {
1632 ret
= wl1271_configure_suspend(wl
, wlvif
, wow
);
1634 mutex_unlock(&wl
->mutex
);
1635 wl1271_warning("couldn't prepare device to suspend");
1639 mutex_unlock(&wl
->mutex
);
1640 /* flush any remaining work */
1641 wl1271_debug(DEBUG_MAC80211
, "flushing remaining works");
1644 * disable and re-enable interrupts in order to flush
1647 wlcore_disable_interrupts(wl
);
1650 * set suspended flag to avoid triggering a new threaded_irq
1651 * work. no need for spinlock as interrupts are disabled.
1653 set_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
);
1655 wlcore_enable_interrupts(wl
);
1656 flush_work(&wl
->tx_work
);
1657 flush_delayed_work(&wl
->elp_work
);
1662 static int wl1271_op_resume(struct ieee80211_hw
*hw
)
1664 struct wl1271
*wl
= hw
->priv
;
1665 struct wl12xx_vif
*wlvif
;
1666 unsigned long flags
;
1667 bool run_irq_work
= false;
1669 wl1271_debug(DEBUG_MAC80211
, "mac80211 resume wow=%d",
1671 WARN_ON(!wl
->wow_enabled
);
1674 * re-enable irq_work enqueuing, and call irq_work directly if
1675 * there is a pending work.
1677 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1678 clear_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
);
1679 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK
, &wl
->flags
))
1680 run_irq_work
= true;
1681 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1684 wl1271_debug(DEBUG_MAC80211
,
1685 "run postponed irq_work directly");
1687 wlcore_enable_interrupts(wl
);
1690 mutex_lock(&wl
->mutex
);
1691 wl12xx_for_each_wlvif(wl
, wlvif
) {
1692 wl1271_configure_resume(wl
, wlvif
);
1694 wl
->wow_enabled
= false;
1695 mutex_unlock(&wl
->mutex
);
1701 static int wl1271_op_start(struct ieee80211_hw
*hw
)
1703 wl1271_debug(DEBUG_MAC80211
, "mac80211 start");
1706 * We have to delay the booting of the hardware because
1707 * we need to know the local MAC address before downloading and
1708 * initializing the firmware. The MAC address cannot be changed
1709 * after boot, and without the proper MAC address, the firmware
1710 * will not function properly.
1712 * The MAC address is first known when the corresponding interface
1713 * is added. That is where we will initialize the hardware.
1719 static void wl1271_op_stop(struct ieee80211_hw
*hw
)
1721 struct wl1271
*wl
= hw
->priv
;
1724 wl1271_debug(DEBUG_MAC80211
, "mac80211 stop");
1727 * Interrupts must be disabled before setting the state to OFF.
1728 * Otherwise, the interrupt handler might be called and exit without
1729 * reading the interrupt status.
1731 wlcore_disable_interrupts(wl
);
1732 mutex_lock(&wl
->mutex
);
1733 if (wl
->state
== WL1271_STATE_OFF
) {
1734 mutex_unlock(&wl
->mutex
);
1737 * This will not necessarily enable interrupts as interrupts
1738 * may have been disabled when op_stop was called. It will,
1739 * however, balance the above call to disable_interrupts().
1741 wlcore_enable_interrupts(wl
);
1746 * this must be before the cancel_work calls below, so that the work
1747 * functions don't perform further work.
1749 wl
->state
= WL1271_STATE_OFF
;
1750 mutex_unlock(&wl
->mutex
);
1752 wl1271_flush_deferred_work(wl
);
1753 cancel_delayed_work_sync(&wl
->scan_complete_work
);
1754 cancel_work_sync(&wl
->netstack_work
);
1755 cancel_work_sync(&wl
->tx_work
);
1756 cancel_delayed_work_sync(&wl
->elp_work
);
1757 cancel_delayed_work_sync(&wl
->tx_watchdog_work
);
1758 cancel_delayed_work_sync(&wl
->connection_loss_work
);
1760 /* let's notify MAC80211 about the remaining pending TX frames */
1761 wl12xx_tx_reset(wl
, true);
1762 mutex_lock(&wl
->mutex
);
1764 wl1271_power_off(wl
);
1766 wl
->band
= IEEE80211_BAND_2GHZ
;
1769 wl
->power_level
= WL1271_DEFAULT_POWER_LEVEL
;
1770 wl
->tx_blocks_available
= 0;
1771 wl
->tx_allocated_blocks
= 0;
1772 wl
->tx_results_count
= 0;
1773 wl
->tx_packets_count
= 0;
1774 wl
->time_offset
= 0;
1775 wl
->ap_fw_ps_map
= 0;
1777 wl
->sched_scanning
= false;
1778 memset(wl
->roles_map
, 0, sizeof(wl
->roles_map
));
1779 memset(wl
->links_map
, 0, sizeof(wl
->links_map
));
1780 memset(wl
->roc_map
, 0, sizeof(wl
->roc_map
));
1781 wl
->active_sta_count
= 0;
1783 /* The system link is always allocated */
1784 __set_bit(WL12XX_SYSTEM_HLID
, wl
->links_map
);
1787 * this is performed after the cancel_work calls and the associated
1788 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1789 * get executed before all these vars have been reset.
1793 wl
->tx_blocks_freed
= 0;
1795 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
1796 wl
->tx_pkts_freed
[i
] = 0;
1797 wl
->tx_allocated_pkts
[i
] = 0;
1800 wl1271_debugfs_reset(wl
);
1802 kfree(wl
->fw_status
);
1803 wl
->fw_status
= NULL
;
1804 kfree(wl
->tx_res_if
);
1805 wl
->tx_res_if
= NULL
;
1806 kfree(wl
->target_mem_map
);
1807 wl
->target_mem_map
= NULL
;
1809 mutex_unlock(&wl
->mutex
);
1812 static int wl12xx_allocate_rate_policy(struct wl1271
*wl
, u8
*idx
)
1814 u8 policy
= find_first_zero_bit(wl
->rate_policies_map
,
1815 WL12XX_MAX_RATE_POLICIES
);
1816 if (policy
>= WL12XX_MAX_RATE_POLICIES
)
1819 __set_bit(policy
, wl
->rate_policies_map
);
1824 static void wl12xx_free_rate_policy(struct wl1271
*wl
, u8
*idx
)
1826 if (WARN_ON(*idx
>= WL12XX_MAX_RATE_POLICIES
))
1829 __clear_bit(*idx
, wl
->rate_policies_map
);
1830 *idx
= WL12XX_MAX_RATE_POLICIES
;
1833 static u8
wl12xx_get_role_type(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
1835 switch (wlvif
->bss_type
) {
1836 case BSS_TYPE_AP_BSS
:
1838 return WL1271_ROLE_P2P_GO
;
1840 return WL1271_ROLE_AP
;
1842 case BSS_TYPE_STA_BSS
:
1844 return WL1271_ROLE_P2P_CL
;
1846 return WL1271_ROLE_STA
;
1849 return WL1271_ROLE_IBSS
;
1852 wl1271_error("invalid bss_type: %d", wlvif
->bss_type
);
1854 return WL12XX_INVALID_ROLE_TYPE
;
1857 static int wl12xx_init_vif_data(struct wl1271
*wl
, struct ieee80211_vif
*vif
)
1859 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
1862 /* clear everything but the persistent data */
1863 memset(wlvif
, 0, offsetof(struct wl12xx_vif
, persistent
));
1865 switch (ieee80211_vif_type_p2p(vif
)) {
1866 case NL80211_IFTYPE_P2P_CLIENT
:
1869 case NL80211_IFTYPE_STATION
:
1870 wlvif
->bss_type
= BSS_TYPE_STA_BSS
;
1872 case NL80211_IFTYPE_ADHOC
:
1873 wlvif
->bss_type
= BSS_TYPE_IBSS
;
1875 case NL80211_IFTYPE_P2P_GO
:
1878 case NL80211_IFTYPE_AP
:
1879 wlvif
->bss_type
= BSS_TYPE_AP_BSS
;
1882 wlvif
->bss_type
= MAX_BSS_TYPE
;
1886 wlvif
->role_id
= WL12XX_INVALID_ROLE_ID
;
1887 wlvif
->dev_role_id
= WL12XX_INVALID_ROLE_ID
;
1888 wlvif
->dev_hlid
= WL12XX_INVALID_LINK_ID
;
1890 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
1891 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
1892 /* init sta/ibss data */
1893 wlvif
->sta
.hlid
= WL12XX_INVALID_LINK_ID
;
1894 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.basic_rate_idx
);
1895 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.ap_rate_idx
);
1896 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.p2p_rate_idx
);
1899 wlvif
->ap
.bcast_hlid
= WL12XX_INVALID_LINK_ID
;
1900 wlvif
->ap
.global_hlid
= WL12XX_INVALID_LINK_ID
;
1901 wl12xx_allocate_rate_policy(wl
, &wlvif
->ap
.mgmt_rate_idx
);
1902 wl12xx_allocate_rate_policy(wl
, &wlvif
->ap
.bcast_rate_idx
);
1903 for (i
= 0; i
< CONF_TX_MAX_AC_COUNT
; i
++)
1904 wl12xx_allocate_rate_policy(wl
,
1905 &wlvif
->ap
.ucast_rate_idx
[i
]);
1908 wlvif
->bitrate_masks
[IEEE80211_BAND_2GHZ
] = wl
->conf
.tx
.basic_rate
;
1909 wlvif
->bitrate_masks
[IEEE80211_BAND_5GHZ
] = wl
->conf
.tx
.basic_rate_5
;
1910 wlvif
->basic_rate_set
= CONF_TX_RATE_MASK_BASIC
;
1911 wlvif
->basic_rate
= CONF_TX_RATE_MASK_BASIC
;
1912 wlvif
->rate_set
= CONF_TX_RATE_MASK_BASIC
;
1913 wlvif
->beacon_int
= WL1271_DEFAULT_BEACON_INT
;
1916 * mac80211 configures some values globally, while we treat them
1917 * per-interface. thus, on init, we have to copy them from wl
1919 wlvif
->band
= wl
->band
;
1920 wlvif
->channel
= wl
->channel
;
1921 wlvif
->power_level
= wl
->power_level
;
1923 INIT_WORK(&wlvif
->rx_streaming_enable_work
,
1924 wl1271_rx_streaming_enable_work
);
1925 INIT_WORK(&wlvif
->rx_streaming_disable_work
,
1926 wl1271_rx_streaming_disable_work
);
1927 INIT_LIST_HEAD(&wlvif
->list
);
1929 setup_timer(&wlvif
->rx_streaming_timer
, wl1271_rx_streaming_timer
,
1930 (unsigned long) wlvif
);
1934 static bool wl12xx_init_fw(struct wl1271
*wl
)
1936 int retries
= WL1271_BOOT_RETRIES
;
1937 bool booted
= false;
1938 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
1943 ret
= wl12xx_chip_wakeup(wl
, false);
1947 ret
= wl
->ops
->boot(wl
);
1951 ret
= wl1271_hw_init(wl
);
1959 mutex_unlock(&wl
->mutex
);
1960 /* Unlocking the mutex in the middle of handling is
1961 inherently unsafe. In this case we deem it safe to do,
1962 because we need to let any possibly pending IRQ out of
1963 the system (and while we are WL1271_STATE_OFF the IRQ
1964 work function will not do anything.) Also, any other
1965 possible concurrent operations will fail due to the
1966 current state, hence the wl1271 struct should be safe. */
1967 wlcore_disable_interrupts(wl
);
1968 wl1271_flush_deferred_work(wl
);
1969 cancel_work_sync(&wl
->netstack_work
);
1970 mutex_lock(&wl
->mutex
);
1972 wl1271_power_off(wl
);
1976 wl1271_error("firmware boot failed despite %d retries",
1977 WL1271_BOOT_RETRIES
);
1981 wl1271_info("firmware booted (%s)", wl
->chip
.fw_ver_str
);
1983 /* update hw/fw version info in wiphy struct */
1984 wiphy
->hw_version
= wl
->chip
.id
;
1985 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
1986 sizeof(wiphy
->fw_version
));
1989 * Now we know if 11a is supported (info from the NVS), so disable
1990 * 11a channels if not supported
1992 if (!wl
->enable_11a
)
1993 wiphy
->bands
[IEEE80211_BAND_5GHZ
]->n_channels
= 0;
1995 wl1271_debug(DEBUG_MAC80211
, "11a is %ssupported",
1996 wl
->enable_11a
? "" : "not ");
1998 wl
->state
= WL1271_STATE_ON
;
2003 static bool wl12xx_dev_role_started(struct wl12xx_vif
*wlvif
)
2005 return wlvif
->dev_hlid
!= WL12XX_INVALID_LINK_ID
;
2009 * Check whether a fw switch (i.e. moving from one loaded
2010 * fw to another) is needed. This function is also responsible
2011 * for updating wl->last_vif_count, so it must be called before
2012 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2015 static bool wl12xx_need_fw_change(struct wl1271
*wl
,
2016 struct vif_counter_data vif_counter_data
,
2019 enum wl12xx_fw_type current_fw
= wl
->fw_type
;
2020 u8 vif_count
= vif_counter_data
.counter
;
2022 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
))
2025 /* increase the vif count if this is a new vif */
2026 if (add
&& !vif_counter_data
.cur_vif_running
)
2029 wl
->last_vif_count
= vif_count
;
2031 /* no need for fw change if the device is OFF */
2032 if (wl
->state
== WL1271_STATE_OFF
)
2035 if (vif_count
> 1 && current_fw
== WL12XX_FW_TYPE_NORMAL
)
2037 if (vif_count
<= 1 && current_fw
== WL12XX_FW_TYPE_MULTI
)
2044 * Enter "forced psm". Make sure the sta is in psm against the ap,
2045 * to make the fw switch a bit more disconnection-persistent.
2047 static void wl12xx_force_active_psm(struct wl1271
*wl
)
2049 struct wl12xx_vif
*wlvif
;
2051 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
2052 wl1271_ps_set_mode(wl
, wlvif
, STATION_POWER_SAVE_MODE
);
2056 static int wl1271_op_add_interface(struct ieee80211_hw
*hw
,
2057 struct ieee80211_vif
*vif
)
2059 struct wl1271
*wl
= hw
->priv
;
2060 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2061 struct vif_counter_data vif_count
;
2064 bool booted
= false;
2066 vif
->driver_flags
|= IEEE80211_VIF_BEACON_FILTER
|
2067 IEEE80211_VIF_SUPPORTS_CQM_RSSI
;
2069 wl1271_debug(DEBUG_MAC80211
, "mac80211 add interface type %d mac %pM",
2070 ieee80211_vif_type_p2p(vif
), vif
->addr
);
2072 wl12xx_get_vif_count(hw
, vif
, &vif_count
);
2074 mutex_lock(&wl
->mutex
);
2075 ret
= wl1271_ps_elp_wakeup(wl
);
2080 * in some very corner case HW recovery scenarios its possible to
2081 * get here before __wl1271_op_remove_interface is complete, so
2082 * opt out if that is the case.
2084 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
) ||
2085 test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)) {
2091 ret
= wl12xx_init_vif_data(wl
, vif
);
2096 role_type
= wl12xx_get_role_type(wl
, wlvif
);
2097 if (role_type
== WL12XX_INVALID_ROLE_TYPE
) {
2102 if (wl12xx_need_fw_change(wl
, vif_count
, true)) {
2103 wl12xx_force_active_psm(wl
);
2104 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
);
2105 mutex_unlock(&wl
->mutex
);
2106 wl1271_recovery_work(&wl
->recovery_work
);
2111 * TODO: after the nvs issue will be solved, move this block
2112 * to start(), and make sure here the driver is ON.
2114 if (wl
->state
== WL1271_STATE_OFF
) {
2116 * we still need this in order to configure the fw
2117 * while uploading the nvs
2119 memcpy(wl
->addresses
[0].addr
, vif
->addr
, ETH_ALEN
);
2121 booted
= wl12xx_init_fw(wl
);
2128 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2129 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2131 * The device role is a special role used for
2132 * rx and tx frames prior to association (as
2133 * the STA role can get packets only from
2134 * its associated bssid)
2136 ret
= wl12xx_cmd_role_enable(wl
, vif
->addr
,
2138 &wlvif
->dev_role_id
);
2143 ret
= wl12xx_cmd_role_enable(wl
, vif
->addr
,
2144 role_type
, &wlvif
->role_id
);
2148 ret
= wl1271_init_vif_specific(wl
, vif
);
2152 list_add(&wlvif
->list
, &wl
->wlvif_list
);
2153 set_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
);
2155 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
2160 wl1271_ps_elp_sleep(wl
);
2162 mutex_unlock(&wl
->mutex
);
2167 static void __wl1271_op_remove_interface(struct wl1271
*wl
,
2168 struct ieee80211_vif
*vif
,
2169 bool reset_tx_queues
)
2171 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2174 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove interface");
2176 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
2179 /* because of hardware recovery, we may get here twice */
2180 if (wl
->state
!= WL1271_STATE_ON
)
2183 wl1271_info("down");
2185 if (wl
->scan
.state
!= WL1271_SCAN_STATE_IDLE
&&
2186 wl
->scan_vif
== vif
) {
2188 * Rearm the tx watchdog just before idling scan. This
2189 * prevents just-finished scans from triggering the watchdog
2191 wl12xx_rearm_tx_watchdog_locked(wl
);
2193 wl
->scan
.state
= WL1271_SCAN_STATE_IDLE
;
2194 memset(wl
->scan
.scanned_ch
, 0, sizeof(wl
->scan
.scanned_ch
));
2195 wl
->scan_vif
= NULL
;
2196 wl
->scan
.req
= NULL
;
2197 ieee80211_scan_completed(wl
->hw
, true);
2200 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
)) {
2201 /* disable active roles */
2202 ret
= wl1271_ps_elp_wakeup(wl
);
2206 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2207 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2208 if (wl12xx_dev_role_started(wlvif
))
2209 wl12xx_stop_dev(wl
, wlvif
);
2211 ret
= wl12xx_cmd_role_disable(wl
, &wlvif
->dev_role_id
);
2216 ret
= wl12xx_cmd_role_disable(wl
, &wlvif
->role_id
);
2220 wl1271_ps_elp_sleep(wl
);
2223 /* clear all hlids (except system_hlid) */
2224 wlvif
->dev_hlid
= WL12XX_INVALID_LINK_ID
;
2226 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2227 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2228 wlvif
->sta
.hlid
= WL12XX_INVALID_LINK_ID
;
2229 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.basic_rate_idx
);
2230 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.ap_rate_idx
);
2231 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.p2p_rate_idx
);
2233 wlvif
->ap
.bcast_hlid
= WL12XX_INVALID_LINK_ID
;
2234 wlvif
->ap
.global_hlid
= WL12XX_INVALID_LINK_ID
;
2235 wl12xx_free_rate_policy(wl
, &wlvif
->ap
.mgmt_rate_idx
);
2236 wl12xx_free_rate_policy(wl
, &wlvif
->ap
.bcast_rate_idx
);
2237 for (i
= 0; i
< CONF_TX_MAX_AC_COUNT
; i
++)
2238 wl12xx_free_rate_policy(wl
,
2239 &wlvif
->ap
.ucast_rate_idx
[i
]);
2240 wl1271_free_ap_keys(wl
, wlvif
);
2243 dev_kfree_skb(wlvif
->probereq
);
2244 wlvif
->probereq
= NULL
;
2245 wl12xx_tx_reset_wlvif(wl
, wlvif
);
2246 if (wl
->last_wlvif
== wlvif
)
2247 wl
->last_wlvif
= NULL
;
2248 list_del(&wlvif
->list
);
2249 memset(wlvif
->ap
.sta_hlid_map
, 0, sizeof(wlvif
->ap
.sta_hlid_map
));
2250 wlvif
->role_id
= WL12XX_INVALID_ROLE_ID
;
2251 wlvif
->dev_role_id
= WL12XX_INVALID_ROLE_ID
;
2253 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
2258 mutex_unlock(&wl
->mutex
);
2260 del_timer_sync(&wlvif
->rx_streaming_timer
);
2261 cancel_work_sync(&wlvif
->rx_streaming_enable_work
);
2262 cancel_work_sync(&wlvif
->rx_streaming_disable_work
);
2264 mutex_lock(&wl
->mutex
);
2267 static void wl1271_op_remove_interface(struct ieee80211_hw
*hw
,
2268 struct ieee80211_vif
*vif
)
2270 struct wl1271
*wl
= hw
->priv
;
2271 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2272 struct wl12xx_vif
*iter
;
2273 struct vif_counter_data vif_count
;
2274 bool cancel_recovery
= true;
2276 wl12xx_get_vif_count(hw
, vif
, &vif_count
);
2277 mutex_lock(&wl
->mutex
);
2279 if (wl
->state
== WL1271_STATE_OFF
||
2280 !test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
2284 * wl->vif can be null here if someone shuts down the interface
2285 * just when hardware recovery has been started.
2287 wl12xx_for_each_wlvif(wl
, iter
) {
2291 __wl1271_op_remove_interface(wl
, vif
, true);
2294 WARN_ON(iter
!= wlvif
);
2295 if (wl12xx_need_fw_change(wl
, vif_count
, false)) {
2296 wl12xx_force_active_psm(wl
);
2297 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
);
2298 wl12xx_queue_recovery_work(wl
);
2299 cancel_recovery
= false;
2302 mutex_unlock(&wl
->mutex
);
2303 if (cancel_recovery
)
2304 cancel_work_sync(&wl
->recovery_work
);
2307 static int wl12xx_op_change_interface(struct ieee80211_hw
*hw
,
2308 struct ieee80211_vif
*vif
,
2309 enum nl80211_iftype new_type
, bool p2p
)
2311 struct wl1271
*wl
= hw
->priv
;
2314 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
);
2315 wl1271_op_remove_interface(hw
, vif
);
2317 vif
->type
= new_type
;
2319 ret
= wl1271_op_add_interface(hw
, vif
);
2321 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
);
2325 static int wl1271_join(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2329 bool is_ibss
= (wlvif
->bss_type
== BSS_TYPE_IBSS
);
2332 * One of the side effects of the JOIN command is that is clears
2333 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2334 * to a WPA/WPA2 access point will therefore kill the data-path.
2335 * Currently the only valid scenario for JOIN during association
2336 * is on roaming, in which case we will also be given new keys.
2337 * Keep the below message for now, unless it starts bothering
2338 * users who really like to roam a lot :)
2340 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2341 wl1271_info("JOIN while associated.");
2343 /* clear encryption type */
2344 wlvif
->encryption_type
= KEY_NONE
;
2347 set_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
);
2350 ret
= wl12xx_cmd_role_start_ibss(wl
, wlvif
);
2352 ret
= wl12xx_cmd_role_start_sta(wl
, wlvif
);
2356 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2360 * The join command disable the keep-alive mode, shut down its process,
2361 * and also clear the template config, so we need to reset it all after
2362 * the join. The acx_aid starts the keep-alive process, and the order
2363 * of the commands below is relevant.
2365 ret
= wl1271_acx_keep_alive_mode(wl
, wlvif
, true);
2369 ret
= wl1271_acx_aid(wl
, wlvif
, wlvif
->aid
);
2373 ret
= wl12xx_cmd_build_klv_null_data(wl
, wlvif
);
2377 ret
= wl1271_acx_keep_alive_config(wl
, wlvif
,
2378 CMD_TEMPL_KLV_IDX_NULL_DATA
,
2379 ACX_KEEP_ALIVE_TPL_VALID
);
2387 static int wl1271_unjoin(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2391 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
)) {
2392 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
2394 wl12xx_cmd_stop_channel_switch(wl
);
2395 ieee80211_chswitch_done(vif
, false);
2398 /* to stop listening to a channel, we disconnect */
2399 ret
= wl12xx_cmd_role_stop_sta(wl
, wlvif
);
2403 /* reset TX security counters on a clean disconnect */
2404 wlvif
->tx_security_last_seq_lsb
= 0;
2405 wlvif
->tx_security_seq
= 0;
2411 static void wl1271_set_band_rate(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2413 wlvif
->basic_rate_set
= wlvif
->bitrate_masks
[wlvif
->band
];
2414 wlvif
->rate_set
= wlvif
->basic_rate_set
;
2417 static int wl1271_sta_handle_idle(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2421 bool cur_idle
= !test_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
2423 if (idle
== cur_idle
)
2427 /* no need to croc if we weren't busy (e.g. during boot) */
2428 if (wl12xx_dev_role_started(wlvif
)) {
2429 ret
= wl12xx_stop_dev(wl
, wlvif
);
2434 wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
2435 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
2438 ret
= wl1271_acx_keep_alive_config(
2439 wl
, wlvif
, CMD_TEMPL_KLV_IDX_NULL_DATA
,
2440 ACX_KEEP_ALIVE_TPL_INVALID
);
2443 clear_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
2445 /* The current firmware only supports sched_scan in idle */
2446 if (wl
->sched_scanning
) {
2447 wl1271_scan_sched_scan_stop(wl
);
2448 ieee80211_sched_scan_stopped(wl
->hw
);
2451 ret
= wl12xx_start_dev(wl
, wlvif
);
2454 set_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
2461 static int wl12xx_config_vif(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2462 struct ieee80211_conf
*conf
, u32 changed
)
2464 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
2467 channel
= ieee80211_frequency_to_channel(conf
->channel
->center_freq
);
2469 /* if the channel changes while joined, join again */
2470 if (changed
& IEEE80211_CONF_CHANGE_CHANNEL
&&
2471 ((wlvif
->band
!= conf
->channel
->band
) ||
2472 (wlvif
->channel
!= channel
))) {
2473 /* send all pending packets */
2474 wl1271_tx_work_locked(wl
);
2475 wlvif
->band
= conf
->channel
->band
;
2476 wlvif
->channel
= channel
;
2480 * FIXME: the mac80211 should really provide a fixed
2481 * rate to use here. for now, just use the smallest
2482 * possible rate for the band as a fixed rate for
2483 * association frames and other control messages.
2485 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2486 wl1271_set_band_rate(wl
, wlvif
);
2489 wl1271_tx_min_rate_get(wl
,
2490 wlvif
->basic_rate_set
);
2491 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
2493 wl1271_warning("rate policy for channel "
2497 * change the ROC channel. do it only if we are
2498 * not idle. otherwise, CROC will be called
2501 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
,
2503 wl12xx_dev_role_started(wlvif
) &&
2504 !(conf
->flags
& IEEE80211_CONF_IDLE
)) {
2505 ret
= wl12xx_stop_dev(wl
, wlvif
);
2509 ret
= wl12xx_start_dev(wl
, wlvif
);
2516 if ((changed
& IEEE80211_CONF_CHANGE_PS
) && !is_ap
) {
2518 if ((conf
->flags
& IEEE80211_CONF_PS
) &&
2519 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) &&
2520 !test_bit(WLVIF_FLAG_IN_PS
, &wlvif
->flags
)) {
2525 if (wl
->conf
.conn
.forced_ps
) {
2526 ps_mode
= STATION_POWER_SAVE_MODE
;
2527 ps_mode_str
= "forced";
2529 ps_mode
= STATION_AUTO_PS_MODE
;
2530 ps_mode_str
= "auto";
2533 wl1271_debug(DEBUG_PSM
, "%s ps enabled", ps_mode_str
);
2535 ret
= wl1271_ps_set_mode(wl
, wlvif
, ps_mode
);
2538 wl1271_warning("enter %s ps failed %d",
2541 } else if (!(conf
->flags
& IEEE80211_CONF_PS
) &&
2542 test_bit(WLVIF_FLAG_IN_PS
, &wlvif
->flags
)) {
2544 wl1271_debug(DEBUG_PSM
, "auto ps disabled");
2546 ret
= wl1271_ps_set_mode(wl
, wlvif
,
2547 STATION_ACTIVE_MODE
);
2549 wl1271_warning("exit auto ps failed %d", ret
);
2553 if (conf
->power_level
!= wlvif
->power_level
) {
2554 ret
= wl1271_acx_tx_power(wl
, wlvif
, conf
->power_level
);
2558 wlvif
->power_level
= conf
->power_level
;
2564 static int wl1271_op_config(struct ieee80211_hw
*hw
, u32 changed
)
2566 struct wl1271
*wl
= hw
->priv
;
2567 struct wl12xx_vif
*wlvif
;
2568 struct ieee80211_conf
*conf
= &hw
->conf
;
2569 int channel
, ret
= 0;
2571 channel
= ieee80211_frequency_to_channel(conf
->channel
->center_freq
);
2573 wl1271_debug(DEBUG_MAC80211
, "mac80211 config ch %d psm %s power %d %s"
2576 conf
->flags
& IEEE80211_CONF_PS
? "on" : "off",
2578 conf
->flags
& IEEE80211_CONF_IDLE
? "idle" : "in use",
2582 * mac80211 will go to idle nearly immediately after transmitting some
2583 * frames, such as the deauth. To make sure those frames reach the air,
2584 * wait here until the TX queue is fully flushed.
2586 if ((changed
& IEEE80211_CONF_CHANGE_IDLE
) &&
2587 (conf
->flags
& IEEE80211_CONF_IDLE
))
2588 wl1271_tx_flush(wl
);
2590 mutex_lock(&wl
->mutex
);
2592 /* we support configuring the channel and band even while off */
2593 if (changed
& IEEE80211_CONF_CHANGE_CHANNEL
) {
2594 wl
->band
= conf
->channel
->band
;
2595 wl
->channel
= channel
;
2598 if (changed
& IEEE80211_CONF_CHANGE_POWER
)
2599 wl
->power_level
= conf
->power_level
;
2601 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
2604 ret
= wl1271_ps_elp_wakeup(wl
);
2608 /* configure each interface */
2609 wl12xx_for_each_wlvif(wl
, wlvif
) {
2610 ret
= wl12xx_config_vif(wl
, wlvif
, conf
, changed
);
2616 wl1271_ps_elp_sleep(wl
);
2619 mutex_unlock(&wl
->mutex
);
2624 struct wl1271_filter_params
{
2627 u8 mc_list
[ACX_MC_ADDRESS_GROUP_MAX
][ETH_ALEN
];
2630 static u64
wl1271_op_prepare_multicast(struct ieee80211_hw
*hw
,
2631 struct netdev_hw_addr_list
*mc_list
)
2633 struct wl1271_filter_params
*fp
;
2634 struct netdev_hw_addr
*ha
;
2635 struct wl1271
*wl
= hw
->priv
;
2637 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
2640 fp
= kzalloc(sizeof(*fp
), GFP_ATOMIC
);
2642 wl1271_error("Out of memory setting filters.");
2646 /* update multicast filtering parameters */
2647 fp
->mc_list_length
= 0;
2648 if (netdev_hw_addr_list_count(mc_list
) > ACX_MC_ADDRESS_GROUP_MAX
) {
2649 fp
->enabled
= false;
2652 netdev_hw_addr_list_for_each(ha
, mc_list
) {
2653 memcpy(fp
->mc_list
[fp
->mc_list_length
],
2654 ha
->addr
, ETH_ALEN
);
2655 fp
->mc_list_length
++;
2659 return (u64
)(unsigned long)fp
;
2662 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
2665 FIF_BCN_PRBRESP_PROMISC | \
2669 static void wl1271_op_configure_filter(struct ieee80211_hw
*hw
,
2670 unsigned int changed
,
2671 unsigned int *total
, u64 multicast
)
2673 struct wl1271_filter_params
*fp
= (void *)(unsigned long)multicast
;
2674 struct wl1271
*wl
= hw
->priv
;
2675 struct wl12xx_vif
*wlvif
;
2679 wl1271_debug(DEBUG_MAC80211
, "mac80211 configure filter changed %x"
2680 " total %x", changed
, *total
);
2682 mutex_lock(&wl
->mutex
);
2684 *total
&= WL1271_SUPPORTED_FILTERS
;
2685 changed
&= WL1271_SUPPORTED_FILTERS
;
2687 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
2690 ret
= wl1271_ps_elp_wakeup(wl
);
2694 wl12xx_for_each_wlvif(wl
, wlvif
) {
2695 if (wlvif
->bss_type
!= BSS_TYPE_AP_BSS
) {
2696 if (*total
& FIF_ALLMULTI
)
2697 ret
= wl1271_acx_group_address_tbl(wl
, wlvif
,
2701 ret
= wl1271_acx_group_address_tbl(wl
, wlvif
,
2704 fp
->mc_list_length
);
2711 * the fw doesn't provide an api to configure the filters. instead,
2712 * the filters configuration is based on the active roles / ROC
2717 wl1271_ps_elp_sleep(wl
);
2720 mutex_unlock(&wl
->mutex
);
2724 static int wl1271_record_ap_key(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2725 u8 id
, u8 key_type
, u8 key_size
,
2726 const u8
*key
, u8 hlid
, u32 tx_seq_32
,
2729 struct wl1271_ap_key
*ap_key
;
2732 wl1271_debug(DEBUG_CRYPT
, "record ap key id %d", (int)id
);
2734 if (key_size
> MAX_KEY_SIZE
)
2738 * Find next free entry in ap_keys. Also check we are not replacing
2741 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
2742 if (wlvif
->ap
.recorded_keys
[i
] == NULL
)
2745 if (wlvif
->ap
.recorded_keys
[i
]->id
== id
) {
2746 wl1271_warning("trying to record key replacement");
2751 if (i
== MAX_NUM_KEYS
)
2754 ap_key
= kzalloc(sizeof(*ap_key
), GFP_KERNEL
);
2759 ap_key
->key_type
= key_type
;
2760 ap_key
->key_size
= key_size
;
2761 memcpy(ap_key
->key
, key
, key_size
);
2762 ap_key
->hlid
= hlid
;
2763 ap_key
->tx_seq_32
= tx_seq_32
;
2764 ap_key
->tx_seq_16
= tx_seq_16
;
2766 wlvif
->ap
.recorded_keys
[i
] = ap_key
;
2770 static void wl1271_free_ap_keys(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2774 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
2775 kfree(wlvif
->ap
.recorded_keys
[i
]);
2776 wlvif
->ap
.recorded_keys
[i
] = NULL
;
2780 static int wl1271_ap_init_hwenc(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2783 struct wl1271_ap_key
*key
;
2784 bool wep_key_added
= false;
2786 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
2788 if (wlvif
->ap
.recorded_keys
[i
] == NULL
)
2791 key
= wlvif
->ap
.recorded_keys
[i
];
2793 if (hlid
== WL12XX_INVALID_LINK_ID
)
2794 hlid
= wlvif
->ap
.bcast_hlid
;
2796 ret
= wl1271_cmd_set_ap_key(wl
, wlvif
, KEY_ADD_OR_REPLACE
,
2797 key
->id
, key
->key_type
,
2798 key
->key_size
, key
->key
,
2799 hlid
, key
->tx_seq_32
,
2804 if (key
->key_type
== KEY_WEP
)
2805 wep_key_added
= true;
2808 if (wep_key_added
) {
2809 ret
= wl12xx_cmd_set_default_wep_key(wl
, wlvif
->default_key
,
2810 wlvif
->ap
.bcast_hlid
);
2816 wl1271_free_ap_keys(wl
, wlvif
);
2820 static int wl1271_set_key(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2821 u16 action
, u8 id
, u8 key_type
,
2822 u8 key_size
, const u8
*key
, u32 tx_seq_32
,
2823 u16 tx_seq_16
, struct ieee80211_sta
*sta
)
2826 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
2829 * A role set to GEM cipher requires different Tx settings (namely
2830 * spare blocks). Note when we are in this mode so the HW can adjust.
2832 if (key_type
== KEY_GEM
) {
2833 if (action
== KEY_ADD_OR_REPLACE
)
2834 wlvif
->is_gem
= true;
2835 else if (action
== KEY_REMOVE
)
2836 wlvif
->is_gem
= false;
2840 struct wl1271_station
*wl_sta
;
2844 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
2845 hlid
= wl_sta
->hlid
;
2847 hlid
= wlvif
->ap
.bcast_hlid
;
2850 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
2852 * We do not support removing keys after AP shutdown.
2853 * Pretend we do to make mac80211 happy.
2855 if (action
!= KEY_ADD_OR_REPLACE
)
2858 ret
= wl1271_record_ap_key(wl
, wlvif
, id
,
2860 key
, hlid
, tx_seq_32
,
2863 ret
= wl1271_cmd_set_ap_key(wl
, wlvif
, action
,
2864 id
, key_type
, key_size
,
2865 key
, hlid
, tx_seq_32
,
2873 static const u8 bcast_addr
[ETH_ALEN
] = {
2874 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2877 addr
= sta
? sta
->addr
: bcast_addr
;
2879 if (is_zero_ether_addr(addr
)) {
2880 /* We dont support TX only encryption */
2884 /* The wl1271 does not allow to remove unicast keys - they
2885 will be cleared automatically on next CMD_JOIN. Ignore the
2886 request silently, as we dont want the mac80211 to emit
2887 an error message. */
2888 if (action
== KEY_REMOVE
&& !is_broadcast_ether_addr(addr
))
2891 /* don't remove key if hlid was already deleted */
2892 if (action
== KEY_REMOVE
&&
2893 wlvif
->sta
.hlid
== WL12XX_INVALID_LINK_ID
)
2896 ret
= wl1271_cmd_set_sta_key(wl
, wlvif
, action
,
2897 id
, key_type
, key_size
,
2898 key
, addr
, tx_seq_32
,
2903 /* the default WEP key needs to be configured at least once */
2904 if (key_type
== KEY_WEP
) {
2905 ret
= wl12xx_cmd_set_default_wep_key(wl
,
2916 static int wl1271_op_set_key(struct ieee80211_hw
*hw
, enum set_key_cmd cmd
,
2917 struct ieee80211_vif
*vif
,
2918 struct ieee80211_sta
*sta
,
2919 struct ieee80211_key_conf
*key_conf
)
2921 struct wl1271
*wl
= hw
->priv
;
2922 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2928 wl1271_debug(DEBUG_MAC80211
, "mac80211 set key");
2930 wl1271_debug(DEBUG_CRYPT
, "CMD: 0x%x sta: %p", cmd
, sta
);
2931 wl1271_debug(DEBUG_CRYPT
, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
2932 key_conf
->cipher
, key_conf
->keyidx
,
2933 key_conf
->keylen
, key_conf
->flags
);
2934 wl1271_dump(DEBUG_CRYPT
, "KEY: ", key_conf
->key
, key_conf
->keylen
);
2936 mutex_lock(&wl
->mutex
);
2938 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
2943 ret
= wl1271_ps_elp_wakeup(wl
);
2947 switch (key_conf
->cipher
) {
2948 case WLAN_CIPHER_SUITE_WEP40
:
2949 case WLAN_CIPHER_SUITE_WEP104
:
2952 key_conf
->hw_key_idx
= key_conf
->keyidx
;
2954 case WLAN_CIPHER_SUITE_TKIP
:
2955 key_type
= KEY_TKIP
;
2957 key_conf
->hw_key_idx
= key_conf
->keyidx
;
2958 tx_seq_32
= WL1271_TX_SECURITY_HI32(wlvif
->tx_security_seq
);
2959 tx_seq_16
= WL1271_TX_SECURITY_LO16(wlvif
->tx_security_seq
);
2961 case WLAN_CIPHER_SUITE_CCMP
:
2964 key_conf
->flags
|= IEEE80211_KEY_FLAG_PUT_IV_SPACE
;
2965 tx_seq_32
= WL1271_TX_SECURITY_HI32(wlvif
->tx_security_seq
);
2966 tx_seq_16
= WL1271_TX_SECURITY_LO16(wlvif
->tx_security_seq
);
2968 case WL1271_CIPHER_SUITE_GEM
:
2970 tx_seq_32
= WL1271_TX_SECURITY_HI32(wlvif
->tx_security_seq
);
2971 tx_seq_16
= WL1271_TX_SECURITY_LO16(wlvif
->tx_security_seq
);
2974 wl1271_error("Unknown key algo 0x%x", key_conf
->cipher
);
2982 ret
= wl1271_set_key(wl
, wlvif
, KEY_ADD_OR_REPLACE
,
2983 key_conf
->keyidx
, key_type
,
2984 key_conf
->keylen
, key_conf
->key
,
2985 tx_seq_32
, tx_seq_16
, sta
);
2987 wl1271_error("Could not add or replace key");
2992 * reconfiguring arp response if the unicast (or common)
2993 * encryption key type was changed
2995 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
2996 (sta
|| key_type
== KEY_WEP
) &&
2997 wlvif
->encryption_type
!= key_type
) {
2998 wlvif
->encryption_type
= key_type
;
2999 ret
= wl1271_cmd_build_arp_rsp(wl
, wlvif
);
3001 wl1271_warning("build arp rsp failed: %d", ret
);
3008 ret
= wl1271_set_key(wl
, wlvif
, KEY_REMOVE
,
3009 key_conf
->keyidx
, key_type
,
3010 key_conf
->keylen
, key_conf
->key
,
3013 wl1271_error("Could not remove key");
3019 wl1271_error("Unsupported key cmd 0x%x", cmd
);
3025 wl1271_ps_elp_sleep(wl
);
3028 mutex_unlock(&wl
->mutex
);
3033 static int wl1271_op_hw_scan(struct ieee80211_hw
*hw
,
3034 struct ieee80211_vif
*vif
,
3035 struct cfg80211_scan_request
*req
)
3037 struct wl1271
*wl
= hw
->priv
;
3042 wl1271_debug(DEBUG_MAC80211
, "mac80211 hw scan");
3045 ssid
= req
->ssids
[0].ssid
;
3046 len
= req
->ssids
[0].ssid_len
;
3049 mutex_lock(&wl
->mutex
);
3051 if (wl
->state
== WL1271_STATE_OFF
) {
3053 * We cannot return -EBUSY here because cfg80211 will expect
3054 * a call to ieee80211_scan_completed if we do - in this case
3055 * there won't be any call.
3061 ret
= wl1271_ps_elp_wakeup(wl
);
3065 /* fail if there is any role in ROC */
3066 if (find_first_bit(wl
->roc_map
, WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
) {
3067 /* don't allow scanning right now */
3072 ret
= wl1271_scan(hw
->priv
, vif
, ssid
, len
, req
);
3074 wl1271_ps_elp_sleep(wl
);
3076 mutex_unlock(&wl
->mutex
);
3081 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw
*hw
,
3082 struct ieee80211_vif
*vif
)
3084 struct wl1271
*wl
= hw
->priv
;
3087 wl1271_debug(DEBUG_MAC80211
, "mac80211 cancel hw scan");
3089 mutex_lock(&wl
->mutex
);
3091 if (wl
->state
== WL1271_STATE_OFF
)
3094 if (wl
->scan
.state
== WL1271_SCAN_STATE_IDLE
)
3097 ret
= wl1271_ps_elp_wakeup(wl
);
3101 if (wl
->scan
.state
!= WL1271_SCAN_STATE_DONE
) {
3102 ret
= wl1271_scan_stop(wl
);
3108 * Rearm the tx watchdog just before idling scan. This
3109 * prevents just-finished scans from triggering the watchdog
3111 wl12xx_rearm_tx_watchdog_locked(wl
);
3113 wl
->scan
.state
= WL1271_SCAN_STATE_IDLE
;
3114 memset(wl
->scan
.scanned_ch
, 0, sizeof(wl
->scan
.scanned_ch
));
3115 wl
->scan_vif
= NULL
;
3116 wl
->scan
.req
= NULL
;
3117 ieee80211_scan_completed(wl
->hw
, true);
3120 wl1271_ps_elp_sleep(wl
);
3122 mutex_unlock(&wl
->mutex
);
3124 cancel_delayed_work_sync(&wl
->scan_complete_work
);
3127 static int wl1271_op_sched_scan_start(struct ieee80211_hw
*hw
,
3128 struct ieee80211_vif
*vif
,
3129 struct cfg80211_sched_scan_request
*req
,
3130 struct ieee80211_sched_scan_ies
*ies
)
3132 struct wl1271
*wl
= hw
->priv
;
3133 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3136 wl1271_debug(DEBUG_MAC80211
, "wl1271_op_sched_scan_start");
3138 mutex_lock(&wl
->mutex
);
3140 if (wl
->state
== WL1271_STATE_OFF
) {
3145 ret
= wl1271_ps_elp_wakeup(wl
);
3149 ret
= wl1271_scan_sched_scan_config(wl
, wlvif
, req
, ies
);
3153 ret
= wl1271_scan_sched_scan_start(wl
, wlvif
);
3157 wl
->sched_scanning
= true;
3160 wl1271_ps_elp_sleep(wl
);
3162 mutex_unlock(&wl
->mutex
);
3166 static void wl1271_op_sched_scan_stop(struct ieee80211_hw
*hw
,
3167 struct ieee80211_vif
*vif
)
3169 struct wl1271
*wl
= hw
->priv
;
3172 wl1271_debug(DEBUG_MAC80211
, "wl1271_op_sched_scan_stop");
3174 mutex_lock(&wl
->mutex
);
3176 if (wl
->state
== WL1271_STATE_OFF
)
3179 ret
= wl1271_ps_elp_wakeup(wl
);
3183 wl1271_scan_sched_scan_stop(wl
);
3185 wl1271_ps_elp_sleep(wl
);
3187 mutex_unlock(&wl
->mutex
);
3190 static int wl1271_op_set_frag_threshold(struct ieee80211_hw
*hw
, u32 value
)
3192 struct wl1271
*wl
= hw
->priv
;
3195 mutex_lock(&wl
->mutex
);
3197 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
3202 ret
= wl1271_ps_elp_wakeup(wl
);
3206 ret
= wl1271_acx_frag_threshold(wl
, value
);
3208 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret
);
3210 wl1271_ps_elp_sleep(wl
);
3213 mutex_unlock(&wl
->mutex
);
3218 static int wl1271_op_set_rts_threshold(struct ieee80211_hw
*hw
, u32 value
)
3220 struct wl1271
*wl
= hw
->priv
;
3221 struct wl12xx_vif
*wlvif
;
3224 mutex_lock(&wl
->mutex
);
3226 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
3231 ret
= wl1271_ps_elp_wakeup(wl
);
3235 wl12xx_for_each_wlvif(wl
, wlvif
) {
3236 ret
= wl1271_acx_rts_threshold(wl
, wlvif
, value
);
3238 wl1271_warning("set rts threshold failed: %d", ret
);
3240 wl1271_ps_elp_sleep(wl
);
3243 mutex_unlock(&wl
->mutex
);
3248 static int wl1271_ssid_set(struct ieee80211_vif
*vif
, struct sk_buff
*skb
,
3251 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3253 const u8
*ptr
= cfg80211_find_ie(WLAN_EID_SSID
, skb
->data
+ offset
,
3257 wl1271_error("No SSID in IEs!");
3262 if (ssid_len
> IEEE80211_MAX_SSID_LEN
) {
3263 wl1271_error("SSID is too long!");
3267 wlvif
->ssid_len
= ssid_len
;
3268 memcpy(wlvif
->ssid
, ptr
+2, ssid_len
);
3272 static void wl12xx_remove_ie(struct sk_buff
*skb
, u8 eid
, int ieoffset
)
3275 const u8
*next
, *end
= skb
->data
+ skb
->len
;
3276 u8
*ie
= (u8
*)cfg80211_find_ie(eid
, skb
->data
+ ieoffset
,
3277 skb
->len
- ieoffset
);
3282 memmove(ie
, next
, end
- next
);
3283 skb_trim(skb
, skb
->len
- len
);
3286 static void wl12xx_remove_vendor_ie(struct sk_buff
*skb
,
3287 unsigned int oui
, u8 oui_type
,
3291 const u8
*next
, *end
= skb
->data
+ skb
->len
;
3292 u8
*ie
= (u8
*)cfg80211_find_vendor_ie(oui
, oui_type
,
3293 skb
->data
+ ieoffset
,
3294 skb
->len
- ieoffset
);
3299 memmove(ie
, next
, end
- next
);
3300 skb_trim(skb
, skb
->len
- len
);
3303 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271
*wl
, u32 rates
,
3304 struct ieee80211_vif
*vif
)
3306 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3307 struct sk_buff
*skb
;
3310 skb
= ieee80211_proberesp_get(wl
->hw
, vif
);
3314 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3315 CMD_TEMPL_AP_PROBE_RESPONSE
,
3324 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271
*wl
,
3325 struct ieee80211_vif
*vif
,
3327 size_t probe_rsp_len
,
3330 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3331 struct ieee80211_bss_conf
*bss_conf
= &vif
->bss_conf
;
3332 u8 probe_rsp_templ
[WL1271_CMD_TEMPL_MAX_SIZE
];
3333 int ssid_ie_offset
, ie_offset
, templ_len
;
3336 /* no need to change probe response if the SSID is set correctly */
3337 if (wlvif
->ssid_len
> 0)
3338 return wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3339 CMD_TEMPL_AP_PROBE_RESPONSE
,
3344 if (probe_rsp_len
+ bss_conf
->ssid_len
> WL1271_CMD_TEMPL_MAX_SIZE
) {
3345 wl1271_error("probe_rsp template too big");
3349 /* start searching from IE offset */
3350 ie_offset
= offsetof(struct ieee80211_mgmt
, u
.probe_resp
.variable
);
3352 ptr
= cfg80211_find_ie(WLAN_EID_SSID
, probe_rsp_data
+ ie_offset
,
3353 probe_rsp_len
- ie_offset
);
3355 wl1271_error("No SSID in beacon!");
3359 ssid_ie_offset
= ptr
- probe_rsp_data
;
3360 ptr
+= (ptr
[1] + 2);
3362 memcpy(probe_rsp_templ
, probe_rsp_data
, ssid_ie_offset
);
3364 /* insert SSID from bss_conf */
3365 probe_rsp_templ
[ssid_ie_offset
] = WLAN_EID_SSID
;
3366 probe_rsp_templ
[ssid_ie_offset
+ 1] = bss_conf
->ssid_len
;
3367 memcpy(probe_rsp_templ
+ ssid_ie_offset
+ 2,
3368 bss_conf
->ssid
, bss_conf
->ssid_len
);
3369 templ_len
= ssid_ie_offset
+ 2 + bss_conf
->ssid_len
;
3371 memcpy(probe_rsp_templ
+ ssid_ie_offset
+ 2 + bss_conf
->ssid_len
,
3372 ptr
, probe_rsp_len
- (ptr
- probe_rsp_data
));
3373 templ_len
+= probe_rsp_len
- (ptr
- probe_rsp_data
);
3375 return wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3376 CMD_TEMPL_AP_PROBE_RESPONSE
,
3382 static int wl1271_bss_erp_info_changed(struct wl1271
*wl
,
3383 struct ieee80211_vif
*vif
,
3384 struct ieee80211_bss_conf
*bss_conf
,
3387 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3390 if (changed
& BSS_CHANGED_ERP_SLOT
) {
3391 if (bss_conf
->use_short_slot
)
3392 ret
= wl1271_acx_slot(wl
, wlvif
, SLOT_TIME_SHORT
);
3394 ret
= wl1271_acx_slot(wl
, wlvif
, SLOT_TIME_LONG
);
3396 wl1271_warning("Set slot time failed %d", ret
);
3401 if (changed
& BSS_CHANGED_ERP_PREAMBLE
) {
3402 if (bss_conf
->use_short_preamble
)
3403 wl1271_acx_set_preamble(wl
, wlvif
, ACX_PREAMBLE_SHORT
);
3405 wl1271_acx_set_preamble(wl
, wlvif
, ACX_PREAMBLE_LONG
);
3408 if (changed
& BSS_CHANGED_ERP_CTS_PROT
) {
3409 if (bss_conf
->use_cts_prot
)
3410 ret
= wl1271_acx_cts_protect(wl
, wlvif
,
3413 ret
= wl1271_acx_cts_protect(wl
, wlvif
,
3414 CTSPROTECT_DISABLE
);
3416 wl1271_warning("Set ctsprotect failed %d", ret
);
3425 static int wl1271_bss_beacon_info_changed(struct wl1271
*wl
,
3426 struct ieee80211_vif
*vif
,
3427 struct ieee80211_bss_conf
*bss_conf
,
3430 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3431 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
3434 if ((changed
& BSS_CHANGED_BEACON_INT
)) {
3435 wl1271_debug(DEBUG_MASTER
, "beacon interval updated: %d",
3436 bss_conf
->beacon_int
);
3438 wlvif
->beacon_int
= bss_conf
->beacon_int
;
3441 if ((changed
& BSS_CHANGED_AP_PROBE_RESP
) && is_ap
) {
3442 u32 rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
3443 if (!wl1271_ap_set_probe_resp_tmpl(wl
, rate
, vif
)) {
3444 wl1271_debug(DEBUG_AP
, "probe response updated");
3445 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
, &wlvif
->flags
);
3449 if ((changed
& BSS_CHANGED_BEACON
)) {
3450 struct ieee80211_hdr
*hdr
;
3452 int ieoffset
= offsetof(struct ieee80211_mgmt
,
3454 struct sk_buff
*beacon
= ieee80211_beacon_get(wl
->hw
, vif
);
3462 wl1271_debug(DEBUG_MASTER
, "beacon updated");
3464 ret
= wl1271_ssid_set(vif
, beacon
, ieoffset
);
3466 dev_kfree_skb(beacon
);
3469 min_rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
3470 tmpl_id
= is_ap
? CMD_TEMPL_AP_BEACON
:
3472 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
, tmpl_id
,
3477 dev_kfree_skb(beacon
);
3482 * In case we already have a probe-resp beacon set explicitly
3483 * by usermode, don't use the beacon data.
3485 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
, &wlvif
->flags
))
3488 /* remove TIM ie from probe response */
3489 wl12xx_remove_ie(beacon
, WLAN_EID_TIM
, ieoffset
);
3492 * remove p2p ie from probe response.
3493 * the fw reponds to probe requests that don't include
3494 * the p2p ie. probe requests with p2p ie will be passed,
3495 * and will be responded by the supplicant (the spec
3496 * forbids including the p2p ie when responding to probe
3497 * requests that didn't include it).
3499 wl12xx_remove_vendor_ie(beacon
, WLAN_OUI_WFA
,
3500 WLAN_OUI_TYPE_WFA_P2P
, ieoffset
);
3502 hdr
= (struct ieee80211_hdr
*) beacon
->data
;
3503 hdr
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_MGMT
|
3504 IEEE80211_STYPE_PROBE_RESP
);
3506 ret
= wl1271_ap_set_probe_resp_tmpl_legacy(wl
, vif
,
3511 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3512 CMD_TEMPL_PROBE_RESPONSE
,
3517 dev_kfree_skb(beacon
);
3524 wl1271_error("beacon info change failed: %d", ret
);
3528 /* AP mode changes */
3529 static void wl1271_bss_info_changed_ap(struct wl1271
*wl
,
3530 struct ieee80211_vif
*vif
,
3531 struct ieee80211_bss_conf
*bss_conf
,
3534 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3537 if ((changed
& BSS_CHANGED_BASIC_RATES
)) {
3538 u32 rates
= bss_conf
->basic_rates
;
3540 wlvif
->basic_rate_set
= wl1271_tx_enabled_rates_get(wl
, rates
,
3542 wlvif
->basic_rate
= wl1271_tx_min_rate_get(wl
,
3543 wlvif
->basic_rate_set
);
3545 ret
= wl1271_init_ap_rates(wl
, wlvif
);
3547 wl1271_error("AP rate policy change failed %d", ret
);
3551 ret
= wl1271_ap_init_templates(wl
, vif
);
3556 ret
= wl1271_bss_beacon_info_changed(wl
, vif
, bss_conf
, changed
);
3560 if ((changed
& BSS_CHANGED_BEACON_ENABLED
)) {
3561 if (bss_conf
->enable_beacon
) {
3562 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
3563 ret
= wl12xx_cmd_role_start_ap(wl
, wlvif
);
3567 ret
= wl1271_ap_init_hwenc(wl
, wlvif
);
3571 set_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
);
3572 wl1271_debug(DEBUG_AP
, "started AP");
3575 if (test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
3576 ret
= wl12xx_cmd_role_stop_ap(wl
, wlvif
);
3580 clear_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
);
3581 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
,
3583 wl1271_debug(DEBUG_AP
, "stopped AP");
3588 ret
= wl1271_bss_erp_info_changed(wl
, vif
, bss_conf
, changed
);
3592 /* Handle HT information change */
3593 if ((changed
& BSS_CHANGED_HT
) &&
3594 (bss_conf
->channel_type
!= NL80211_CHAN_NO_HT
)) {
3595 ret
= wl1271_acx_set_ht_information(wl
, wlvif
,
3596 bss_conf
->ht_operation_mode
);
3598 wl1271_warning("Set ht information failed %d", ret
);
3607 /* STA/IBSS mode changes */
3608 static void wl1271_bss_info_changed_sta(struct wl1271
*wl
,
3609 struct ieee80211_vif
*vif
,
3610 struct ieee80211_bss_conf
*bss_conf
,
3613 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3614 bool do_join
= false, set_assoc
= false;
3615 bool is_ibss
= (wlvif
->bss_type
== BSS_TYPE_IBSS
);
3616 bool ibss_joined
= false;
3617 u32 sta_rate_set
= 0;
3619 struct ieee80211_sta
*sta
;
3620 bool sta_exists
= false;
3621 struct ieee80211_sta_ht_cap sta_ht_cap
;
3624 ret
= wl1271_bss_beacon_info_changed(wl
, vif
, bss_conf
,
3630 if (changed
& BSS_CHANGED_IBSS
) {
3631 if (bss_conf
->ibss_joined
) {
3632 set_bit(WLVIF_FLAG_IBSS_JOINED
, &wlvif
->flags
);
3635 if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED
,
3637 wl1271_unjoin(wl
, wlvif
);
3641 if ((changed
& BSS_CHANGED_BEACON_INT
) && ibss_joined
)
3644 /* Need to update the SSID (for filtering etc) */
3645 if ((changed
& BSS_CHANGED_BEACON
) && ibss_joined
)
3648 if ((changed
& BSS_CHANGED_BEACON_ENABLED
) && ibss_joined
) {
3649 wl1271_debug(DEBUG_ADHOC
, "ad-hoc beaconing: %s",
3650 bss_conf
->enable_beacon
? "enabled" : "disabled");
3655 if (changed
& BSS_CHANGED_IDLE
&& !is_ibss
) {
3656 ret
= wl1271_sta_handle_idle(wl
, wlvif
, bss_conf
->idle
);
3658 wl1271_warning("idle mode change failed %d", ret
);
3661 if ((changed
& BSS_CHANGED_CQM
)) {
3662 bool enable
= false;
3663 if (bss_conf
->cqm_rssi_thold
)
3665 ret
= wl1271_acx_rssi_snr_trigger(wl
, wlvif
, enable
,
3666 bss_conf
->cqm_rssi_thold
,
3667 bss_conf
->cqm_rssi_hyst
);
3670 wlvif
->rssi_thold
= bss_conf
->cqm_rssi_thold
;
3673 if (changed
& BSS_CHANGED_BSSID
)
3674 if (!is_zero_ether_addr(bss_conf
->bssid
)) {
3675 ret
= wl12xx_cmd_build_null_data(wl
, wlvif
);
3679 ret
= wl1271_build_qos_null_data(wl
, vif
);
3684 if (changed
& (BSS_CHANGED_ASSOC
| BSS_CHANGED_HT
)) {
3686 sta
= ieee80211_find_sta(vif
, bss_conf
->bssid
);
3690 /* save the supp_rates of the ap */
3691 sta_rate_set
= sta
->supp_rates
[wl
->hw
->conf
.channel
->band
];
3692 if (sta
->ht_cap
.ht_supported
)
3694 (sta
->ht_cap
.mcs
.rx_mask
[0] << HW_HT_RATES_OFFSET
);
3695 sta_ht_cap
= sta
->ht_cap
;
3702 if ((changed
& BSS_CHANGED_ASSOC
)) {
3703 if (bss_conf
->assoc
) {
3706 wlvif
->aid
= bss_conf
->aid
;
3707 wlvif
->beacon_int
= bss_conf
->beacon_int
;
3711 /* Cancel connection_loss_work */
3712 cancel_delayed_work_sync(&wl
->connection_loss_work
);
3715 * use basic rates from AP, and determine lowest rate
3716 * to use with control frames.
3718 rates
= bss_conf
->basic_rates
;
3719 wlvif
->basic_rate_set
=
3720 wl1271_tx_enabled_rates_get(wl
, rates
,
3723 wl1271_tx_min_rate_get(wl
,
3724 wlvif
->basic_rate_set
);
3727 wl1271_tx_enabled_rates_get(wl
,
3730 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
3735 * with wl1271, we don't need to update the
3736 * beacon_int and dtim_period, because the firmware
3737 * updates it by itself when the first beacon is
3738 * received after a join.
3740 ret
= wl1271_cmd_build_ps_poll(wl
, wlvif
, wlvif
->aid
);
3745 * Get a template for hardware connection maintenance
3747 dev_kfree_skb(wlvif
->probereq
);
3748 wlvif
->probereq
= wl1271_cmd_build_ap_probe_req(wl
,
3751 ieoffset
= offsetof(struct ieee80211_mgmt
,
3752 u
.probe_req
.variable
);
3753 wl1271_ssid_set(vif
, wlvif
->probereq
, ieoffset
);
3755 /* enable the connection monitoring feature */
3756 ret
= wl1271_acx_conn_monit_params(wl
, wlvif
, true);
3760 /* use defaults when not associated */
3762 !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED
,
3765 !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT
,
3769 /* free probe-request template */
3770 dev_kfree_skb(wlvif
->probereq
);
3771 wlvif
->probereq
= NULL
;
3773 /* revert back to minimum rates for the current band */
3774 wl1271_set_band_rate(wl
, wlvif
);
3776 wl1271_tx_min_rate_get(wl
,
3777 wlvif
->basic_rate_set
);
3778 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
3782 /* disable connection monitor features */
3783 ret
= wl1271_acx_conn_monit_params(wl
, wlvif
, false);
3785 /* Disable the keep-alive feature */
3786 ret
= wl1271_acx_keep_alive_mode(wl
, wlvif
, false);
3790 /* restore the bssid filter and go to dummy bssid */
3793 * we might have to disable roc, if there was
3794 * no IF_OPER_UP notification.
3797 ret
= wl12xx_croc(wl
, wlvif
->role_id
);
3802 * (we also need to disable roc in case of
3803 * roaming on the same channel. until we will
3804 * have a better flow...)
3806 if (test_bit(wlvif
->dev_role_id
, wl
->roc_map
)) {
3807 ret
= wl12xx_croc(wl
,
3808 wlvif
->dev_role_id
);
3813 wl1271_unjoin(wl
, wlvif
);
3814 if (!bss_conf
->idle
)
3815 wl12xx_start_dev(wl
, wlvif
);
3820 if (changed
& BSS_CHANGED_IBSS
) {
3821 wl1271_debug(DEBUG_ADHOC
, "ibss_joined: %d",
3822 bss_conf
->ibss_joined
);
3824 if (bss_conf
->ibss_joined
) {
3825 u32 rates
= bss_conf
->basic_rates
;
3826 wlvif
->basic_rate_set
=
3827 wl1271_tx_enabled_rates_get(wl
, rates
,
3830 wl1271_tx_min_rate_get(wl
,
3831 wlvif
->basic_rate_set
);
3833 /* by default, use 11b + OFDM rates */
3834 wlvif
->rate_set
= CONF_TX_IBSS_DEFAULT_RATES
;
3835 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
3841 ret
= wl1271_bss_erp_info_changed(wl
, vif
, bss_conf
, changed
);
3846 ret
= wl1271_join(wl
, wlvif
, set_assoc
);
3848 wl1271_warning("cmd join failed %d", ret
);
3852 /* ROC until connected (after EAPOL exchange) */
3854 ret
= wl12xx_roc(wl
, wlvif
, wlvif
->role_id
);
3858 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
))
3859 wl12xx_set_authorized(wl
, wlvif
);
3862 * stop device role if started (we might already be in
3865 if (wl12xx_dev_role_started(wlvif
)) {
3866 ret
= wl12xx_stop_dev(wl
, wlvif
);
3872 /* Handle new association with HT. Do this after join. */
3874 if ((changed
& BSS_CHANGED_HT
) &&
3875 (bss_conf
->channel_type
!= NL80211_CHAN_NO_HT
)) {
3876 ret
= wl1271_acx_set_ht_capabilities(wl
,
3881 wl1271_warning("Set ht cap true failed %d",
3886 /* handle new association without HT and disassociation */
3887 else if (changed
& BSS_CHANGED_ASSOC
) {
3888 ret
= wl1271_acx_set_ht_capabilities(wl
,
3893 wl1271_warning("Set ht cap false failed %d",
3900 /* Handle HT information change. Done after join. */
3901 if ((changed
& BSS_CHANGED_HT
) &&
3902 (bss_conf
->channel_type
!= NL80211_CHAN_NO_HT
)) {
3903 ret
= wl1271_acx_set_ht_information(wl
, wlvif
,
3904 bss_conf
->ht_operation_mode
);
3906 wl1271_warning("Set ht information failed %d", ret
);
3911 /* Handle arp filtering. Done after join. */
3912 if ((changed
& BSS_CHANGED_ARP_FILTER
) ||
3913 (!is_ibss
&& (changed
& BSS_CHANGED_QOS
))) {
3914 __be32 addr
= bss_conf
->arp_addr_list
[0];
3915 wlvif
->sta
.qos
= bss_conf
->qos
;
3916 WARN_ON(wlvif
->bss_type
!= BSS_TYPE_STA_BSS
);
3918 if (bss_conf
->arp_addr_cnt
== 1 &&
3919 bss_conf
->arp_filter_enabled
) {
3920 wlvif
->ip_addr
= addr
;
3922 * The template should have been configured only upon
3923 * association. however, it seems that the correct ip
3924 * isn't being set (when sending), so we have to
3925 * reconfigure the template upon every ip change.
3927 ret
= wl1271_cmd_build_arp_rsp(wl
, wlvif
);
3929 wl1271_warning("build arp rsp failed: %d", ret
);
3933 ret
= wl1271_acx_arp_ip_filter(wl
, wlvif
,
3934 (ACX_ARP_FILTER_ARP_FILTERING
|
3935 ACX_ARP_FILTER_AUTO_ARP
),
3939 ret
= wl1271_acx_arp_ip_filter(wl
, wlvif
, 0, addr
);
3950 static void wl1271_op_bss_info_changed(struct ieee80211_hw
*hw
,
3951 struct ieee80211_vif
*vif
,
3952 struct ieee80211_bss_conf
*bss_conf
,
3955 struct wl1271
*wl
= hw
->priv
;
3956 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3957 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
3960 wl1271_debug(DEBUG_MAC80211
, "mac80211 bss info changed 0x%x",
3963 mutex_lock(&wl
->mutex
);
3965 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
3968 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)))
3971 ret
= wl1271_ps_elp_wakeup(wl
);
3976 wl1271_bss_info_changed_ap(wl
, vif
, bss_conf
, changed
);
3978 wl1271_bss_info_changed_sta(wl
, vif
, bss_conf
, changed
);
3980 wl1271_ps_elp_sleep(wl
);
3983 mutex_unlock(&wl
->mutex
);
3986 static int wl1271_op_conf_tx(struct ieee80211_hw
*hw
,
3987 struct ieee80211_vif
*vif
, u16 queue
,
3988 const struct ieee80211_tx_queue_params
*params
)
3990 struct wl1271
*wl
= hw
->priv
;
3991 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3995 mutex_lock(&wl
->mutex
);
3997 wl1271_debug(DEBUG_MAC80211
, "mac80211 conf tx %d", queue
);
4000 ps_scheme
= CONF_PS_SCHEME_UPSD_TRIGGER
;
4002 ps_scheme
= CONF_PS_SCHEME_LEGACY
;
4004 if (!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
4007 ret
= wl1271_ps_elp_wakeup(wl
);
4012 * the txop is confed in units of 32us by the mac80211,
4015 ret
= wl1271_acx_ac_cfg(wl
, wlvif
, wl1271_tx_get_queue(queue
),
4016 params
->cw_min
, params
->cw_max
,
4017 params
->aifs
, params
->txop
<< 5);
4021 ret
= wl1271_acx_tid_cfg(wl
, wlvif
, wl1271_tx_get_queue(queue
),
4022 CONF_CHANNEL_TYPE_EDCF
,
4023 wl1271_tx_get_queue(queue
),
4024 ps_scheme
, CONF_ACK_POLICY_LEGACY
,
4028 wl1271_ps_elp_sleep(wl
);
4031 mutex_unlock(&wl
->mutex
);
4036 static u64
wl1271_op_get_tsf(struct ieee80211_hw
*hw
,
4037 struct ieee80211_vif
*vif
)
4040 struct wl1271
*wl
= hw
->priv
;
4041 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4042 u64 mactime
= ULLONG_MAX
;
4045 wl1271_debug(DEBUG_MAC80211
, "mac80211 get tsf");
4047 mutex_lock(&wl
->mutex
);
4049 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
4052 ret
= wl1271_ps_elp_wakeup(wl
);
4056 ret
= wl12xx_acx_tsf_info(wl
, wlvif
, &mactime
);
4061 wl1271_ps_elp_sleep(wl
);
4064 mutex_unlock(&wl
->mutex
);
4068 static int wl1271_op_get_survey(struct ieee80211_hw
*hw
, int idx
,
4069 struct survey_info
*survey
)
4071 struct wl1271
*wl
= hw
->priv
;
4072 struct ieee80211_conf
*conf
= &hw
->conf
;
4077 survey
->channel
= conf
->channel
;
4078 survey
->filled
= SURVEY_INFO_NOISE_DBM
;
4079 survey
->noise
= wl
->noise
;
4084 static int wl1271_allocate_sta(struct wl1271
*wl
,
4085 struct wl12xx_vif
*wlvif
,
4086 struct ieee80211_sta
*sta
)
4088 struct wl1271_station
*wl_sta
;
4092 if (wl
->active_sta_count
>= AP_MAX_STATIONS
) {
4093 wl1271_warning("could not allocate HLID - too much stations");
4097 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4098 ret
= wl12xx_allocate_link(wl
, wlvif
, &wl_sta
->hlid
);
4100 wl1271_warning("could not allocate HLID - too many links");
4104 set_bit(wl_sta
->hlid
, wlvif
->ap
.sta_hlid_map
);
4105 memcpy(wl
->links
[wl_sta
->hlid
].addr
, sta
->addr
, ETH_ALEN
);
4106 wl
->active_sta_count
++;
4110 void wl1271_free_sta(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
, u8 hlid
)
4112 if (!test_bit(hlid
, wlvif
->ap
.sta_hlid_map
))
4115 clear_bit(hlid
, wlvif
->ap
.sta_hlid_map
);
4116 memset(wl
->links
[hlid
].addr
, 0, ETH_ALEN
);
4117 wl
->links
[hlid
].ba_bitmap
= 0;
4118 __clear_bit(hlid
, &wl
->ap_ps_map
);
4119 __clear_bit(hlid
, (unsigned long *)&wl
->ap_fw_ps_map
);
4120 wl12xx_free_link(wl
, wlvif
, &hlid
);
4121 wl
->active_sta_count
--;
4124 * rearm the tx watchdog when the last STA is freed - give the FW a
4125 * chance to return STA-buffered packets before complaining.
4127 if (wl
->active_sta_count
== 0)
4128 wl12xx_rearm_tx_watchdog_locked(wl
);
4131 static int wl12xx_sta_add(struct wl1271
*wl
,
4132 struct wl12xx_vif
*wlvif
,
4133 struct ieee80211_sta
*sta
)
4135 struct wl1271_station
*wl_sta
;
4139 wl1271_debug(DEBUG_MAC80211
, "mac80211 add sta %d", (int)sta
->aid
);
4141 ret
= wl1271_allocate_sta(wl
, wlvif
, sta
);
4145 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4146 hlid
= wl_sta
->hlid
;
4148 ret
= wl12xx_cmd_add_peer(wl
, wlvif
, sta
, hlid
);
4150 wl1271_free_sta(wl
, wlvif
, hlid
);
4155 static int wl12xx_sta_remove(struct wl1271
*wl
,
4156 struct wl12xx_vif
*wlvif
,
4157 struct ieee80211_sta
*sta
)
4159 struct wl1271_station
*wl_sta
;
4162 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove sta %d", (int)sta
->aid
);
4164 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4166 if (WARN_ON(!test_bit(id
, wlvif
->ap
.sta_hlid_map
)))
4169 ret
= wl12xx_cmd_remove_peer(wl
, wl_sta
->hlid
);
4173 wl1271_free_sta(wl
, wlvif
, wl_sta
->hlid
);
4177 static int wl12xx_update_sta_state(struct wl1271
*wl
,
4178 struct wl12xx_vif
*wlvif
,
4179 struct ieee80211_sta
*sta
,
4180 enum ieee80211_sta_state old_state
,
4181 enum ieee80211_sta_state new_state
)
4183 struct wl1271_station
*wl_sta
;
4185 bool is_ap
= wlvif
->bss_type
== BSS_TYPE_AP_BSS
;
4186 bool is_sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
4189 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4190 hlid
= wl_sta
->hlid
;
4192 /* Add station (AP mode) */
4194 old_state
== IEEE80211_STA_NOTEXIST
&&
4195 new_state
== IEEE80211_STA_NONE
)
4196 return wl12xx_sta_add(wl
, wlvif
, sta
);
4198 /* Remove station (AP mode) */
4200 old_state
== IEEE80211_STA_NONE
&&
4201 new_state
== IEEE80211_STA_NOTEXIST
) {
4203 wl12xx_sta_remove(wl
, wlvif
, sta
);
4207 /* Authorize station (AP mode) */
4209 new_state
== IEEE80211_STA_AUTHORIZED
) {
4210 ret
= wl12xx_cmd_set_peer_state(wl
, hlid
);
4214 ret
= wl1271_acx_set_ht_capabilities(wl
, &sta
->ht_cap
, true,
4219 /* Authorize station */
4221 new_state
== IEEE80211_STA_AUTHORIZED
) {
4222 set_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
);
4223 return wl12xx_set_authorized(wl
, wlvif
);
4227 old_state
== IEEE80211_STA_AUTHORIZED
&&
4228 new_state
== IEEE80211_STA_ASSOC
) {
4229 clear_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
);
4236 static int wl12xx_op_sta_state(struct ieee80211_hw
*hw
,
4237 struct ieee80211_vif
*vif
,
4238 struct ieee80211_sta
*sta
,
4239 enum ieee80211_sta_state old_state
,
4240 enum ieee80211_sta_state new_state
)
4242 struct wl1271
*wl
= hw
->priv
;
4243 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4246 wl1271_debug(DEBUG_MAC80211
, "mac80211 sta %d state=%d->%d",
4247 sta
->aid
, old_state
, new_state
);
4249 mutex_lock(&wl
->mutex
);
4251 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
4256 ret
= wl1271_ps_elp_wakeup(wl
);
4260 ret
= wl12xx_update_sta_state(wl
, wlvif
, sta
, old_state
, new_state
);
4262 wl1271_ps_elp_sleep(wl
);
4264 mutex_unlock(&wl
->mutex
);
4265 if (new_state
< old_state
)
4270 static int wl1271_op_ampdu_action(struct ieee80211_hw
*hw
,
4271 struct ieee80211_vif
*vif
,
4272 enum ieee80211_ampdu_mlme_action action
,
4273 struct ieee80211_sta
*sta
, u16 tid
, u16
*ssn
,
4276 struct wl1271
*wl
= hw
->priv
;
4277 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4279 u8 hlid
, *ba_bitmap
;
4281 wl1271_debug(DEBUG_MAC80211
, "mac80211 ampdu action %d tid %d", action
,
4284 /* sanity check - the fields in FW are only 8bits wide */
4285 if (WARN_ON(tid
> 0xFF))
4288 mutex_lock(&wl
->mutex
);
4290 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
4295 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
) {
4296 hlid
= wlvif
->sta
.hlid
;
4297 ba_bitmap
= &wlvif
->sta
.ba_rx_bitmap
;
4298 } else if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
) {
4299 struct wl1271_station
*wl_sta
;
4301 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4302 hlid
= wl_sta
->hlid
;
4303 ba_bitmap
= &wl
->links
[hlid
].ba_bitmap
;
4309 ret
= wl1271_ps_elp_wakeup(wl
);
4313 wl1271_debug(DEBUG_MAC80211
, "mac80211 ampdu: Rx tid %d action %d",
4317 case IEEE80211_AMPDU_RX_START
:
4318 if (!wlvif
->ba_support
|| !wlvif
->ba_allowed
) {
4323 if (wl
->ba_rx_session_count
>= RX_BA_MAX_SESSIONS
) {
4325 wl1271_error("exceeded max RX BA sessions");
4329 if (*ba_bitmap
& BIT(tid
)) {
4331 wl1271_error("cannot enable RX BA session on active "
4336 ret
= wl12xx_acx_set_ba_receiver_session(wl
, tid
, *ssn
, true,
4339 *ba_bitmap
|= BIT(tid
);
4340 wl
->ba_rx_session_count
++;
4344 case IEEE80211_AMPDU_RX_STOP
:
4345 if (!(*ba_bitmap
& BIT(tid
))) {
4347 wl1271_error("no active RX BA session on tid: %d",
4352 ret
= wl12xx_acx_set_ba_receiver_session(wl
, tid
, 0, false,
4355 *ba_bitmap
&= ~BIT(tid
);
4356 wl
->ba_rx_session_count
--;
4361 * The BA initiator session management in FW independently.
4362 * Falling break here on purpose for all TX APDU commands.
4364 case IEEE80211_AMPDU_TX_START
:
4365 case IEEE80211_AMPDU_TX_STOP
:
4366 case IEEE80211_AMPDU_TX_OPERATIONAL
:
4371 wl1271_error("Incorrect ampdu action id=%x\n", action
);
4375 wl1271_ps_elp_sleep(wl
);
4378 mutex_unlock(&wl
->mutex
);
4383 static int wl12xx_set_bitrate_mask(struct ieee80211_hw
*hw
,
4384 struct ieee80211_vif
*vif
,
4385 const struct cfg80211_bitrate_mask
*mask
)
4387 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4388 struct wl1271
*wl
= hw
->priv
;
4391 wl1271_debug(DEBUG_MAC80211
, "mac80211 set_bitrate_mask 0x%x 0x%x",
4392 mask
->control
[NL80211_BAND_2GHZ
].legacy
,
4393 mask
->control
[NL80211_BAND_5GHZ
].legacy
);
4395 mutex_lock(&wl
->mutex
);
4397 for (i
= 0; i
< IEEE80211_NUM_BANDS
; i
++)
4398 wlvif
->bitrate_masks
[i
] =
4399 wl1271_tx_enabled_rates_get(wl
,
4400 mask
->control
[i
].legacy
,
4403 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
4406 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
4407 !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
)) {
4409 ret
= wl1271_ps_elp_wakeup(wl
);
4413 wl1271_set_band_rate(wl
, wlvif
);
4415 wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
4416 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4418 wl1271_ps_elp_sleep(wl
);
4421 mutex_unlock(&wl
->mutex
);
4426 static void wl12xx_op_channel_switch(struct ieee80211_hw
*hw
,
4427 struct ieee80211_channel_switch
*ch_switch
)
4429 struct wl1271
*wl
= hw
->priv
;
4430 struct wl12xx_vif
*wlvif
;
4433 wl1271_debug(DEBUG_MAC80211
, "mac80211 channel switch");
4435 wl1271_tx_flush(wl
);
4437 mutex_lock(&wl
->mutex
);
4439 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
4440 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
4441 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
4442 ieee80211_chswitch_done(vif
, false);
4447 ret
= wl1271_ps_elp_wakeup(wl
);
4451 /* TODO: change mac80211 to pass vif as param */
4452 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
4453 ret
= wl12xx_cmd_channel_switch(wl
, wlvif
, ch_switch
);
4456 set_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
);
4459 wl1271_ps_elp_sleep(wl
);
4462 mutex_unlock(&wl
->mutex
);
4465 static bool wl1271_tx_frames_pending(struct ieee80211_hw
*hw
)
4467 struct wl1271
*wl
= hw
->priv
;
4470 mutex_lock(&wl
->mutex
);
4472 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
4475 /* packets are considered pending if in the TX queue or the FW */
4476 ret
= (wl1271_tx_total_queue_count(wl
) > 0) || (wl
->tx_frames_cnt
> 0);
4478 mutex_unlock(&wl
->mutex
);
4483 /* can't be const, mac80211 writes to this */
4484 static struct ieee80211_rate wl1271_rates
[] = {
4486 .hw_value
= CONF_HW_BIT_RATE_1MBPS
,
4487 .hw_value_short
= CONF_HW_BIT_RATE_1MBPS
, },
4489 .hw_value
= CONF_HW_BIT_RATE_2MBPS
,
4490 .hw_value_short
= CONF_HW_BIT_RATE_2MBPS
,
4491 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
4493 .hw_value
= CONF_HW_BIT_RATE_5_5MBPS
,
4494 .hw_value_short
= CONF_HW_BIT_RATE_5_5MBPS
,
4495 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
4497 .hw_value
= CONF_HW_BIT_RATE_11MBPS
,
4498 .hw_value_short
= CONF_HW_BIT_RATE_11MBPS
,
4499 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
4501 .hw_value
= CONF_HW_BIT_RATE_6MBPS
,
4502 .hw_value_short
= CONF_HW_BIT_RATE_6MBPS
, },
4504 .hw_value
= CONF_HW_BIT_RATE_9MBPS
,
4505 .hw_value_short
= CONF_HW_BIT_RATE_9MBPS
, },
4507 .hw_value
= CONF_HW_BIT_RATE_12MBPS
,
4508 .hw_value_short
= CONF_HW_BIT_RATE_12MBPS
, },
4510 .hw_value
= CONF_HW_BIT_RATE_18MBPS
,
4511 .hw_value_short
= CONF_HW_BIT_RATE_18MBPS
, },
4513 .hw_value
= CONF_HW_BIT_RATE_24MBPS
,
4514 .hw_value_short
= CONF_HW_BIT_RATE_24MBPS
, },
4516 .hw_value
= CONF_HW_BIT_RATE_36MBPS
,
4517 .hw_value_short
= CONF_HW_BIT_RATE_36MBPS
, },
4519 .hw_value
= CONF_HW_BIT_RATE_48MBPS
,
4520 .hw_value_short
= CONF_HW_BIT_RATE_48MBPS
, },
4522 .hw_value
= CONF_HW_BIT_RATE_54MBPS
,
4523 .hw_value_short
= CONF_HW_BIT_RATE_54MBPS
, },
4526 /* can't be const, mac80211 writes to this */
4527 static struct ieee80211_channel wl1271_channels
[] = {
4528 { .hw_value
= 1, .center_freq
= 2412, .max_power
= 25 },
4529 { .hw_value
= 2, .center_freq
= 2417, .max_power
= 25 },
4530 { .hw_value
= 3, .center_freq
= 2422, .max_power
= 25 },
4531 { .hw_value
= 4, .center_freq
= 2427, .max_power
= 25 },
4532 { .hw_value
= 5, .center_freq
= 2432, .max_power
= 25 },
4533 { .hw_value
= 6, .center_freq
= 2437, .max_power
= 25 },
4534 { .hw_value
= 7, .center_freq
= 2442, .max_power
= 25 },
4535 { .hw_value
= 8, .center_freq
= 2447, .max_power
= 25 },
4536 { .hw_value
= 9, .center_freq
= 2452, .max_power
= 25 },
4537 { .hw_value
= 10, .center_freq
= 2457, .max_power
= 25 },
4538 { .hw_value
= 11, .center_freq
= 2462, .max_power
= 25 },
4539 { .hw_value
= 12, .center_freq
= 2467, .max_power
= 25 },
4540 { .hw_value
= 13, .center_freq
= 2472, .max_power
= 25 },
4541 { .hw_value
= 14, .center_freq
= 2484, .max_power
= 25 },
4544 /* can't be const, mac80211 writes to this */
4545 static struct ieee80211_supported_band wl1271_band_2ghz
= {
4546 .channels
= wl1271_channels
,
4547 .n_channels
= ARRAY_SIZE(wl1271_channels
),
4548 .bitrates
= wl1271_rates
,
4549 .n_bitrates
= ARRAY_SIZE(wl1271_rates
),
4552 /* 5 GHz data rates for WL1273 */
4553 static struct ieee80211_rate wl1271_rates_5ghz
[] = {
4555 .hw_value
= CONF_HW_BIT_RATE_6MBPS
,
4556 .hw_value_short
= CONF_HW_BIT_RATE_6MBPS
, },
4558 .hw_value
= CONF_HW_BIT_RATE_9MBPS
,
4559 .hw_value_short
= CONF_HW_BIT_RATE_9MBPS
, },
4561 .hw_value
= CONF_HW_BIT_RATE_12MBPS
,
4562 .hw_value_short
= CONF_HW_BIT_RATE_12MBPS
, },
4564 .hw_value
= CONF_HW_BIT_RATE_18MBPS
,
4565 .hw_value_short
= CONF_HW_BIT_RATE_18MBPS
, },
4567 .hw_value
= CONF_HW_BIT_RATE_24MBPS
,
4568 .hw_value_short
= CONF_HW_BIT_RATE_24MBPS
, },
4570 .hw_value
= CONF_HW_BIT_RATE_36MBPS
,
4571 .hw_value_short
= CONF_HW_BIT_RATE_36MBPS
, },
4573 .hw_value
= CONF_HW_BIT_RATE_48MBPS
,
4574 .hw_value_short
= CONF_HW_BIT_RATE_48MBPS
, },
4576 .hw_value
= CONF_HW_BIT_RATE_54MBPS
,
4577 .hw_value_short
= CONF_HW_BIT_RATE_54MBPS
, },
4580 /* 5 GHz band channels for WL1273 */
4581 static struct ieee80211_channel wl1271_channels_5ghz
[] = {
4582 { .hw_value
= 7, .center_freq
= 5035, .max_power
= 25 },
4583 { .hw_value
= 8, .center_freq
= 5040, .max_power
= 25 },
4584 { .hw_value
= 9, .center_freq
= 5045, .max_power
= 25 },
4585 { .hw_value
= 11, .center_freq
= 5055, .max_power
= 25 },
4586 { .hw_value
= 12, .center_freq
= 5060, .max_power
= 25 },
4587 { .hw_value
= 16, .center_freq
= 5080, .max_power
= 25 },
4588 { .hw_value
= 34, .center_freq
= 5170, .max_power
= 25 },
4589 { .hw_value
= 36, .center_freq
= 5180, .max_power
= 25 },
4590 { .hw_value
= 38, .center_freq
= 5190, .max_power
= 25 },
4591 { .hw_value
= 40, .center_freq
= 5200, .max_power
= 25 },
4592 { .hw_value
= 42, .center_freq
= 5210, .max_power
= 25 },
4593 { .hw_value
= 44, .center_freq
= 5220, .max_power
= 25 },
4594 { .hw_value
= 46, .center_freq
= 5230, .max_power
= 25 },
4595 { .hw_value
= 48, .center_freq
= 5240, .max_power
= 25 },
4596 { .hw_value
= 52, .center_freq
= 5260, .max_power
= 25 },
4597 { .hw_value
= 56, .center_freq
= 5280, .max_power
= 25 },
4598 { .hw_value
= 60, .center_freq
= 5300, .max_power
= 25 },
4599 { .hw_value
= 64, .center_freq
= 5320, .max_power
= 25 },
4600 { .hw_value
= 100, .center_freq
= 5500, .max_power
= 25 },
4601 { .hw_value
= 104, .center_freq
= 5520, .max_power
= 25 },
4602 { .hw_value
= 108, .center_freq
= 5540, .max_power
= 25 },
4603 { .hw_value
= 112, .center_freq
= 5560, .max_power
= 25 },
4604 { .hw_value
= 116, .center_freq
= 5580, .max_power
= 25 },
4605 { .hw_value
= 120, .center_freq
= 5600, .max_power
= 25 },
4606 { .hw_value
= 124, .center_freq
= 5620, .max_power
= 25 },
4607 { .hw_value
= 128, .center_freq
= 5640, .max_power
= 25 },
4608 { .hw_value
= 132, .center_freq
= 5660, .max_power
= 25 },
4609 { .hw_value
= 136, .center_freq
= 5680, .max_power
= 25 },
4610 { .hw_value
= 140, .center_freq
= 5700, .max_power
= 25 },
4611 { .hw_value
= 149, .center_freq
= 5745, .max_power
= 25 },
4612 { .hw_value
= 153, .center_freq
= 5765, .max_power
= 25 },
4613 { .hw_value
= 157, .center_freq
= 5785, .max_power
= 25 },
4614 { .hw_value
= 161, .center_freq
= 5805, .max_power
= 25 },
4615 { .hw_value
= 165, .center_freq
= 5825, .max_power
= 25 },
4618 static struct ieee80211_supported_band wl1271_band_5ghz
= {
4619 .channels
= wl1271_channels_5ghz
,
4620 .n_channels
= ARRAY_SIZE(wl1271_channels_5ghz
),
4621 .bitrates
= wl1271_rates_5ghz
,
4622 .n_bitrates
= ARRAY_SIZE(wl1271_rates_5ghz
),
4625 static const struct ieee80211_ops wl1271_ops
= {
4626 .start
= wl1271_op_start
,
4627 .stop
= wl1271_op_stop
,
4628 .add_interface
= wl1271_op_add_interface
,
4629 .remove_interface
= wl1271_op_remove_interface
,
4630 .change_interface
= wl12xx_op_change_interface
,
4632 .suspend
= wl1271_op_suspend
,
4633 .resume
= wl1271_op_resume
,
4635 .config
= wl1271_op_config
,
4636 .prepare_multicast
= wl1271_op_prepare_multicast
,
4637 .configure_filter
= wl1271_op_configure_filter
,
4639 .set_key
= wl1271_op_set_key
,
4640 .hw_scan
= wl1271_op_hw_scan
,
4641 .cancel_hw_scan
= wl1271_op_cancel_hw_scan
,
4642 .sched_scan_start
= wl1271_op_sched_scan_start
,
4643 .sched_scan_stop
= wl1271_op_sched_scan_stop
,
4644 .bss_info_changed
= wl1271_op_bss_info_changed
,
4645 .set_frag_threshold
= wl1271_op_set_frag_threshold
,
4646 .set_rts_threshold
= wl1271_op_set_rts_threshold
,
4647 .conf_tx
= wl1271_op_conf_tx
,
4648 .get_tsf
= wl1271_op_get_tsf
,
4649 .get_survey
= wl1271_op_get_survey
,
4650 .sta_state
= wl12xx_op_sta_state
,
4651 .ampdu_action
= wl1271_op_ampdu_action
,
4652 .tx_frames_pending
= wl1271_tx_frames_pending
,
4653 .set_bitrate_mask
= wl12xx_set_bitrate_mask
,
4654 .channel_switch
= wl12xx_op_channel_switch
,
4655 CFG80211_TESTMODE_CMD(wl1271_tm_cmd
)
4659 u8
wlcore_rate_to_idx(struct wl1271
*wl
, u8 rate
, enum ieee80211_band band
)
4665 if (unlikely(rate
>= wl
->hw_tx_rate_tbl_size
)) {
4666 wl1271_error("Illegal RX rate from HW: %d", rate
);
4670 idx
= wl
->band_rate_to_idx
[band
][rate
];
4671 if (unlikely(idx
== CONF_HW_RXTX_RATE_UNSUPPORTED
)) {
4672 wl1271_error("Unsupported RX rate from HW: %d", rate
);
4679 static ssize_t
wl1271_sysfs_show_bt_coex_state(struct device
*dev
,
4680 struct device_attribute
*attr
,
4683 struct wl1271
*wl
= dev_get_drvdata(dev
);
4688 mutex_lock(&wl
->mutex
);
4689 len
= snprintf(buf
, len
, "%d\n\n0 - off\n1 - on\n",
4691 mutex_unlock(&wl
->mutex
);
4697 static ssize_t
wl1271_sysfs_store_bt_coex_state(struct device
*dev
,
4698 struct device_attribute
*attr
,
4699 const char *buf
, size_t count
)
4701 struct wl1271
*wl
= dev_get_drvdata(dev
);
4705 ret
= kstrtoul(buf
, 10, &res
);
4707 wl1271_warning("incorrect value written to bt_coex_mode");
4711 mutex_lock(&wl
->mutex
);
4715 if (res
== wl
->sg_enabled
)
4718 wl
->sg_enabled
= res
;
4720 if (wl
->state
== WL1271_STATE_OFF
)
4723 ret
= wl1271_ps_elp_wakeup(wl
);
4727 wl1271_acx_sg_enable(wl
, wl
->sg_enabled
);
4728 wl1271_ps_elp_sleep(wl
);
4731 mutex_unlock(&wl
->mutex
);
4735 static DEVICE_ATTR(bt_coex_state
, S_IRUGO
| S_IWUSR
,
4736 wl1271_sysfs_show_bt_coex_state
,
4737 wl1271_sysfs_store_bt_coex_state
);
4739 static ssize_t
wl1271_sysfs_show_hw_pg_ver(struct device
*dev
,
4740 struct device_attribute
*attr
,
4743 struct wl1271
*wl
= dev_get_drvdata(dev
);
4748 mutex_lock(&wl
->mutex
);
4749 if (wl
->hw_pg_ver
>= 0)
4750 len
= snprintf(buf
, len
, "%d\n", wl
->hw_pg_ver
);
4752 len
= snprintf(buf
, len
, "n/a\n");
4753 mutex_unlock(&wl
->mutex
);
static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
		   wl1271_sysfs_show_hw_pg_ver, NULL);
4761 static ssize_t
wl1271_sysfs_read_fwlog(struct file
*filp
, struct kobject
*kobj
,
4762 struct bin_attribute
*bin_attr
,
4763 char *buffer
, loff_t pos
, size_t count
)
4765 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
4766 struct wl1271
*wl
= dev_get_drvdata(dev
);
4770 ret
= mutex_lock_interruptible(&wl
->mutex
);
4772 return -ERESTARTSYS
;
4774 /* Let only one thread read the log at a time, blocking others */
4775 while (wl
->fwlog_size
== 0) {
4778 prepare_to_wait_exclusive(&wl
->fwlog_waitq
,
4780 TASK_INTERRUPTIBLE
);
4782 if (wl
->fwlog_size
!= 0) {
4783 finish_wait(&wl
->fwlog_waitq
, &wait
);
4787 mutex_unlock(&wl
->mutex
);
4790 finish_wait(&wl
->fwlog_waitq
, &wait
);
4792 if (signal_pending(current
))
4793 return -ERESTARTSYS
;
4795 ret
= mutex_lock_interruptible(&wl
->mutex
);
4797 return -ERESTARTSYS
;
4800 /* Check if the fwlog is still valid */
4801 if (wl
->fwlog_size
< 0) {
4802 mutex_unlock(&wl
->mutex
);
4806 /* Seeking is not supported - old logs are not kept. Disregard pos. */
4807 len
= min(count
, (size_t)wl
->fwlog_size
);
4808 wl
->fwlog_size
-= len
;
4809 memcpy(buffer
, wl
->fwlog
, len
);
4811 /* Make room for new messages */
4812 memmove(wl
->fwlog
, wl
->fwlog
+ len
, wl
->fwlog_size
);
4814 mutex_unlock(&wl
->mutex
);
4819 static struct bin_attribute fwlog_attr
= {
4820 .attr
= {.name
= "fwlog", .mode
= S_IRUSR
},
4821 .read
= wl1271_sysfs_read_fwlog
,
4824 static void wl1271_connection_loss_work(struct work_struct
*work
)
4826 struct delayed_work
*dwork
;
4828 struct ieee80211_vif
*vif
;
4829 struct wl12xx_vif
*wlvif
;
4831 dwork
= container_of(work
, struct delayed_work
, work
);
4832 wl
= container_of(dwork
, struct wl1271
, connection_loss_work
);
4834 wl1271_info("Connection loss work.");
4836 mutex_lock(&wl
->mutex
);
4838 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
4841 /* Call mac80211 connection loss */
4842 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
4843 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
4845 vif
= wl12xx_wlvif_to_vif(wlvif
);
4846 ieee80211_connection_loss(vif
);
4849 mutex_unlock(&wl
->mutex
);
4852 static void wl12xx_derive_mac_addresses(struct wl1271
*wl
,
4853 u32 oui
, u32 nic
, int n
)
4857 wl1271_debug(DEBUG_PROBE
, "base address: oui %06x nic %06x, n %d",
4860 if (nic
+ n
- 1 > 0xffffff)
4861 wl1271_warning("NIC part of the MAC address wraps around!");
4863 for (i
= 0; i
< n
; i
++) {
4864 wl
->addresses
[i
].addr
[0] = (u8
)(oui
>> 16);
4865 wl
->addresses
[i
].addr
[1] = (u8
)(oui
>> 8);
4866 wl
->addresses
[i
].addr
[2] = (u8
) oui
;
4867 wl
->addresses
[i
].addr
[3] = (u8
)(nic
>> 16);
4868 wl
->addresses
[i
].addr
[4] = (u8
)(nic
>> 8);
4869 wl
->addresses
[i
].addr
[5] = (u8
) nic
;
4873 wl
->hw
->wiphy
->n_addresses
= n
;
4874 wl
->hw
->wiphy
->addresses
= wl
->addresses
;
4877 static int wl12xx_get_hw_info(struct wl1271
*wl
)
4881 ret
= wl12xx_set_power_on(wl
);
4885 wl
->chip
.id
= wlcore_read_reg(wl
, REG_CHIP_ID_B
);
4887 wl
->fuse_oui_addr
= 0;
4888 wl
->fuse_nic_addr
= 0;
4890 wl
->hw_pg_ver
= wl
->ops
->get_pg_ver(wl
);
4892 if (wl
->ops
->get_mac
)
4893 wl
->ops
->get_mac(wl
);
4895 wl1271_power_off(wl
);
4900 static int wl1271_register_hw(struct wl1271
*wl
)
4903 u32 oui_addr
= 0, nic_addr
= 0;
4905 if (wl
->mac80211_registered
)
4908 ret
= wl12xx_get_hw_info(wl
);
4910 wl1271_error("couldn't get hw info");
4914 ret
= wl1271_fetch_nvs(wl
);
4916 /* NOTE: The wl->nvs->nvs element must be first, in
4917 * order to simplify the casting, we assume it is at
4918 * the beginning of the wl->nvs structure.
4920 u8
*nvs_ptr
= (u8
*)wl
->nvs
;
4923 (nvs_ptr
[11] << 16) + (nvs_ptr
[10] << 8) + nvs_ptr
[6];
4925 (nvs_ptr
[5] << 16) + (nvs_ptr
[4] << 8) + nvs_ptr
[3];
4928 /* if the MAC address is zeroed in the NVS derive from fuse */
4929 if (oui_addr
== 0 && nic_addr
== 0) {
4930 oui_addr
= wl
->fuse_oui_addr
;
4931 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
4932 nic_addr
= wl
->fuse_nic_addr
+ 1;
4935 wl12xx_derive_mac_addresses(wl
, oui_addr
, nic_addr
, 2);
4937 ret
= ieee80211_register_hw(wl
->hw
);
4939 wl1271_error("unable to register mac80211 hw: %d", ret
);
4943 wl
->mac80211_registered
= true;
4945 wl1271_debugfs_init(wl
);
4947 wl1271_notice("loaded");
4953 static void wl1271_unregister_hw(struct wl1271
*wl
)
4956 wl1271_plt_stop(wl
);
4958 ieee80211_unregister_hw(wl
->hw
);
4959 wl
->mac80211_registered
= false;
4963 static int wl1271_init_ieee80211(struct wl1271
*wl
)
4965 static const u32 cipher_suites
[] = {
4966 WLAN_CIPHER_SUITE_WEP40
,
4967 WLAN_CIPHER_SUITE_WEP104
,
4968 WLAN_CIPHER_SUITE_TKIP
,
4969 WLAN_CIPHER_SUITE_CCMP
,
4970 WL1271_CIPHER_SUITE_GEM
,
4973 /* The tx descriptor buffer and the TKIP space. */
4974 wl
->hw
->extra_tx_headroom
= WL1271_EXTRA_SPACE_TKIP
+
4975 sizeof(struct wl1271_tx_hw_descr
);
4978 /* FIXME: find a proper value */
4979 wl
->hw
->channel_change_time
= 10000;
4980 wl
->hw
->max_listen_interval
= wl
->conf
.conn
.max_listen_interval
;
4982 wl
->hw
->flags
= IEEE80211_HW_SIGNAL_DBM
|
4983 IEEE80211_HW_SUPPORTS_PS
|
4984 IEEE80211_HW_SUPPORTS_DYNAMIC_PS
|
4985 IEEE80211_HW_SUPPORTS_UAPSD
|
4986 IEEE80211_HW_HAS_RATE_CONTROL
|
4987 IEEE80211_HW_CONNECTION_MONITOR
|
4988 IEEE80211_HW_REPORTS_TX_ACK_STATUS
|
4989 IEEE80211_HW_SPECTRUM_MGMT
|
4990 IEEE80211_HW_AP_LINK_PS
|
4991 IEEE80211_HW_AMPDU_AGGREGATION
|
4992 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW
|
4993 IEEE80211_HW_SCAN_WHILE_IDLE
;
4995 wl
->hw
->wiphy
->cipher_suites
= cipher_suites
;
4996 wl
->hw
->wiphy
->n_cipher_suites
= ARRAY_SIZE(cipher_suites
);
4998 wl
->hw
->wiphy
->interface_modes
= BIT(NL80211_IFTYPE_STATION
) |
4999 BIT(NL80211_IFTYPE_ADHOC
) | BIT(NL80211_IFTYPE_AP
) |
5000 BIT(NL80211_IFTYPE_P2P_CLIENT
) | BIT(NL80211_IFTYPE_P2P_GO
);
5001 wl
->hw
->wiphy
->max_scan_ssids
= 1;
5002 wl
->hw
->wiphy
->max_sched_scan_ssids
= 16;
5003 wl
->hw
->wiphy
->max_match_sets
= 16;
5005 * Maximum length of elements in scanning probe request templates
5006 * should be the maximum length possible for a template, without
5007 * the IEEE80211 header of the template
5009 wl
->hw
->wiphy
->max_scan_ie_len
= WL1271_CMD_TEMPL_MAX_SIZE
-
5010 sizeof(struct ieee80211_header
);
5012 wl
->hw
->wiphy
->max_sched_scan_ie_len
= WL1271_CMD_TEMPL_MAX_SIZE
-
5013 sizeof(struct ieee80211_header
);
5015 wl
->hw
->wiphy
->flags
|= WIPHY_FLAG_AP_UAPSD
|
5016 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL
;
5018 /* make sure all our channels fit in the scanned_ch bitmask */
5019 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels
) +
5020 ARRAY_SIZE(wl1271_channels_5ghz
) >
5021 WL1271_MAX_CHANNELS
);
5023 * We keep local copies of the band structs because we need to
5024 * modify them on a per-device basis.
5026 memcpy(&wl
->bands
[IEEE80211_BAND_2GHZ
], &wl1271_band_2ghz
,
5027 sizeof(wl1271_band_2ghz
));
5028 memcpy(&wl
->bands
[IEEE80211_BAND_2GHZ
].ht_cap
, &wl
->ht_cap
,
5029 sizeof(wl
->ht_cap
));
5030 memcpy(&wl
->bands
[IEEE80211_BAND_5GHZ
], &wl1271_band_5ghz
,
5031 sizeof(wl1271_band_5ghz
));
5032 memcpy(&wl
->bands
[IEEE80211_BAND_5GHZ
].ht_cap
, &wl
->ht_cap
,
5033 sizeof(wl
->ht_cap
));
5035 wl
->hw
->wiphy
->bands
[IEEE80211_BAND_2GHZ
] =
5036 &wl
->bands
[IEEE80211_BAND_2GHZ
];
5037 wl
->hw
->wiphy
->bands
[IEEE80211_BAND_5GHZ
] =
5038 &wl
->bands
[IEEE80211_BAND_5GHZ
];
5041 wl
->hw
->max_rates
= 1;
5043 wl
->hw
->wiphy
->reg_notifier
= wl1271_reg_notify
;
5045 /* the FW answers probe-requests in AP-mode */
5046 wl
->hw
->wiphy
->flags
|= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD
;
5047 wl
->hw
->wiphy
->probe_resp_offload
=
5048 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS
|
5049 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2
|
5050 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P
;
5052 SET_IEEE80211_DEV(wl
->hw
, wl
->dev
);
5054 wl
->hw
->sta_data_size
= sizeof(struct wl1271_station
);
5055 wl
->hw
->vif_data_size
= sizeof(struct wl12xx_vif
);
5057 wl
->hw
->max_rx_aggregation_subframes
= wl
->conf
.ht
.rx_ba_win_size
;
5062 #define WL1271_DEFAULT_CHANNEL 0
5064 struct ieee80211_hw
*wlcore_alloc_hw(size_t priv_size
)
5066 struct ieee80211_hw
*hw
;
5071 BUILD_BUG_ON(AP_MAX_STATIONS
> WL12XX_MAX_LINKS
);
5073 hw
= ieee80211_alloc_hw(sizeof(*wl
), &wl1271_ops
);
5075 wl1271_error("could not alloc ieee80211_hw");
5081 memset(wl
, 0, sizeof(*wl
));
5083 wl
->priv
= kzalloc(priv_size
, GFP_KERNEL
);
5085 wl1271_error("could not alloc wl priv");
5087 goto err_priv_alloc
;
5090 INIT_LIST_HEAD(&wl
->wlvif_list
);
5094 for (i
= 0; i
< NUM_TX_QUEUES
; i
++)
5095 for (j
= 0; j
< WL12XX_MAX_LINKS
; j
++)
5096 skb_queue_head_init(&wl
->links
[j
].tx_queue
[i
]);
5098 skb_queue_head_init(&wl
->deferred_rx_queue
);
5099 skb_queue_head_init(&wl
->deferred_tx_queue
);
5101 INIT_DELAYED_WORK(&wl
->elp_work
, wl1271_elp_work
);
5102 INIT_WORK(&wl
->netstack_work
, wl1271_netstack_work
);
5103 INIT_WORK(&wl
->tx_work
, wl1271_tx_work
);
5104 INIT_WORK(&wl
->recovery_work
, wl1271_recovery_work
);
5105 INIT_DELAYED_WORK(&wl
->scan_complete_work
, wl1271_scan_complete_work
);
5106 INIT_DELAYED_WORK(&wl
->tx_watchdog_work
, wl12xx_tx_watchdog_work
);
5107 INIT_DELAYED_WORK(&wl
->connection_loss_work
,
5108 wl1271_connection_loss_work
);
5110 wl
->freezable_wq
= create_freezable_workqueue("wl12xx_wq");
5111 if (!wl
->freezable_wq
) {
5116 wl
->channel
= WL1271_DEFAULT_CHANNEL
;
5118 wl
->power_level
= WL1271_DEFAULT_POWER_LEVEL
;
5119 wl
->band
= IEEE80211_BAND_2GHZ
;
5121 wl
->sg_enabled
= true;
5124 wl
->ap_fw_ps_map
= 0;
5126 wl
->platform_quirks
= 0;
5127 wl
->sched_scanning
= false;
5128 wl
->system_hlid
= WL12XX_SYSTEM_HLID
;
5129 wl
->active_sta_count
= 0;
5131 init_waitqueue_head(&wl
->fwlog_waitq
);
5133 /* The system link is always allocated */
5134 __set_bit(WL12XX_SYSTEM_HLID
, wl
->links_map
);
5136 memset(wl
->tx_frames_map
, 0, sizeof(wl
->tx_frames_map
));
5137 for (i
= 0; i
< wl
->num_tx_desc
; i
++)
5138 wl
->tx_frames
[i
] = NULL
;
5140 spin_lock_init(&wl
->wl_lock
);
5142 wl
->state
= WL1271_STATE_OFF
;
5143 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
5144 mutex_init(&wl
->mutex
);
5146 order
= get_order(WL1271_AGGR_BUFFER_SIZE
);
5147 wl
->aggr_buf
= (u8
*)__get_free_pages(GFP_KERNEL
, order
);
5148 if (!wl
->aggr_buf
) {
5153 wl
->dummy_packet
= wl12xx_alloc_dummy_packet(wl
);
5154 if (!wl
->dummy_packet
) {
5159 /* Allocate one page for the FW log */
5160 wl
->fwlog
= (u8
*)get_zeroed_page(GFP_KERNEL
);
5163 goto err_dummy_packet
;
5166 wl
->mbox
= kmalloc(sizeof(*wl
->mbox
), GFP_KERNEL
| GFP_DMA
);
5175 free_page((unsigned long)wl
->fwlog
);
5178 dev_kfree_skb(wl
->dummy_packet
);
5181 free_pages((unsigned long)wl
->aggr_buf
, order
);
5184 destroy_workqueue(wl
->freezable_wq
);
5187 wl1271_debugfs_exit(wl
);
5191 ieee80211_free_hw(hw
);
5195 return ERR_PTR(ret
);
5197 EXPORT_SYMBOL_GPL(wlcore_alloc_hw
);
5199 int wlcore_free_hw(struct wl1271
*wl
)
5201 /* Unblock any fwlog readers */
5202 mutex_lock(&wl
->mutex
);
5203 wl
->fwlog_size
= -1;
5204 wake_up_interruptible_all(&wl
->fwlog_waitq
);
5205 mutex_unlock(&wl
->mutex
);
5207 device_remove_bin_file(wl
->dev
, &fwlog_attr
);
5209 device_remove_file(wl
->dev
, &dev_attr_hw_pg_ver
);
5211 device_remove_file(wl
->dev
, &dev_attr_bt_coex_state
);
5212 free_page((unsigned long)wl
->fwlog
);
5213 dev_kfree_skb(wl
->dummy_packet
);
5214 free_pages((unsigned long)wl
->aggr_buf
,
5215 get_order(WL1271_AGGR_BUFFER_SIZE
));
5217 wl1271_debugfs_exit(wl
);
5221 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
5225 kfree(wl
->fw_status
);
5226 kfree(wl
->tx_res_if
);
5227 destroy_workqueue(wl
->freezable_wq
);
5230 ieee80211_free_hw(wl
->hw
);
5234 EXPORT_SYMBOL_GPL(wlcore_free_hw
);
5236 static irqreturn_t
wl12xx_hardirq(int irq
, void *cookie
)
5238 struct wl1271
*wl
= cookie
;
5239 unsigned long flags
;
5241 wl1271_debug(DEBUG_IRQ
, "IRQ");
5243 /* complete the ELP completion */
5244 spin_lock_irqsave(&wl
->wl_lock
, flags
);
5245 set_bit(WL1271_FLAG_IRQ_RUNNING
, &wl
->flags
);
5246 if (wl
->elp_compl
) {
5247 complete(wl
->elp_compl
);
5248 wl
->elp_compl
= NULL
;
5251 if (test_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
)) {
5252 /* don't enqueue a work right now. mark it as pending */
5253 set_bit(WL1271_FLAG_PENDING_WORK
, &wl
->flags
);
5254 wl1271_debug(DEBUG_IRQ
, "should not enqueue work");
5255 disable_irq_nosync(wl
->irq
);
5256 pm_wakeup_event(wl
->dev
, 0);
5257 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
5260 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
5262 return IRQ_WAKE_THREAD
;
5265 int __devinit
wlcore_probe(struct wl1271
*wl
, struct platform_device
*pdev
)
5267 struct wl12xx_platform_data
*pdata
= pdev
->dev
.platform_data
;
5268 unsigned long irqflags
;
5271 if (!wl
->ops
|| !wl
->ptable
) {
5276 BUG_ON(wl
->num_tx_desc
> WLCORE_MAX_TX_DESCRIPTORS
);
5278 /* adjust some runtime configuration parameters */
5279 wlcore_adjust_conf(wl
);
5281 wl
->irq
= platform_get_irq(pdev
, 0);
5282 wl
->ref_clock
= pdata
->board_ref_clock
;
5283 wl
->tcxo_clock
= pdata
->board_tcxo_clock
;
5284 wl
->platform_quirks
= pdata
->platform_quirks
;
5285 wl
->set_power
= pdata
->set_power
;
5286 wl
->dev
= &pdev
->dev
;
5287 wl
->if_ops
= pdata
->ops
;
5289 platform_set_drvdata(pdev
, wl
);
5291 if (wl
->platform_quirks
& WL12XX_PLATFORM_QUIRK_EDGE_IRQ
)
5292 irqflags
= IRQF_TRIGGER_RISING
;
5294 irqflags
= IRQF_TRIGGER_HIGH
| IRQF_ONESHOT
;
5296 ret
= request_threaded_irq(wl
->irq
, wl12xx_hardirq
, wl1271_irq
,
5300 wl1271_error("request_irq() failed: %d", ret
);
5304 ret
= enable_irq_wake(wl
->irq
);
5306 wl
->irq_wake_enabled
= true;
5307 device_init_wakeup(wl
->dev
, 1);
5308 if (pdata
->pwr_in_suspend
) {
5309 wl
->hw
->wiphy
->wowlan
.flags
= WIPHY_WOWLAN_ANY
;
5310 wl
->hw
->wiphy
->wowlan
.n_patterns
=
5311 WL1271_MAX_RX_FILTERS
;
5312 wl
->hw
->wiphy
->wowlan
.pattern_min_len
= 1;
5313 wl
->hw
->wiphy
->wowlan
.pattern_max_len
=
5314 WL1271_RX_FILTER_MAX_PATTERN_SIZE
;
5317 disable_irq(wl
->irq
);
5319 ret
= wl1271_init_ieee80211(wl
);
5323 ret
= wl1271_register_hw(wl
);
5327 /* Create sysfs file to control bt coex state */
5328 ret
= device_create_file(wl
->dev
, &dev_attr_bt_coex_state
);
5330 wl1271_error("failed to create sysfs file bt_coex_state");
5334 /* Create sysfs file to get HW PG version */
5335 ret
= device_create_file(wl
->dev
, &dev_attr_hw_pg_ver
);
5337 wl1271_error("failed to create sysfs file hw_pg_ver");
5338 goto out_bt_coex_state
;
5341 /* Create sysfs file for the FW log */
5342 ret
= device_create_bin_file(wl
->dev
, &fwlog_attr
);
5344 wl1271_error("failed to create sysfs file fwlog");
5351 device_remove_file(wl
->dev
, &dev_attr_hw_pg_ver
);
5354 device_remove_file(wl
->dev
, &dev_attr_bt_coex_state
);
5357 free_irq(wl
->irq
, wl
);
5365 EXPORT_SYMBOL_GPL(wlcore_probe
);
5367 int __devexit
wlcore_remove(struct platform_device
*pdev
)
5369 struct wl1271
*wl
= platform_get_drvdata(pdev
);
5371 if (wl
->irq_wake_enabled
) {
5372 device_init_wakeup(wl
->dev
, 0);
5373 disable_irq_wake(wl
->irq
);
5375 wl1271_unregister_hw(wl
);
5376 free_irq(wl
->irq
, wl
);
5381 EXPORT_SYMBOL_GPL(wlcore_remove
);
5383 u32 wl12xx_debug_level
= DEBUG_NONE
;
5384 EXPORT_SYMBOL_GPL(wl12xx_debug_level
);
5385 module_param_named(debug_level
, wl12xx_debug_level
, uint
, S_IRUSR
| S_IWUSR
);
5386 MODULE_PARM_DESC(debug_level
, "wl12xx debugging level");
5388 module_param_named(fwlog
, fwlog_param
, charp
, 0);
5389 MODULE_PARM_DESC(fwlog
,
5390 "FW logger options: continuous, ondemand, dbgpins or disable");
5392 module_param(bug_on_recovery
, bool, S_IRUSR
| S_IWUSR
);
5393 MODULE_PARM_DESC(bug_on_recovery
, "BUG() on fw recovery");
5395 module_param(no_recovery
, bool, S_IRUSR
| S_IWUSR
);
5396 MODULE_PARM_DESC(no_recovery
, "Prevent HW recovery. FW will remain stuck.");
5398 MODULE_LICENSE("GPL");
5399 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
5400 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");