drivers/net/wireless/ti/wlcore/main.c
1
2 /*
3 * This file is part of wl1271
4 *
5 * Copyright (C) 2008-2010 Nokia Corporation
6 *
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21 * 02110-1301 USA
22 *
23 */
24
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
37
38 #include "wlcore.h"
39 #include "debug.h"
40 #include "wl12xx_80211.h"
41 #include "io.h"
42 #include "event.h"
43 #include "tx.h"
44 #include "rx.h"
45 #include "ps.h"
46 #include "init.h"
47 #include "debugfs.h"
48 #include "cmd.h"
49 #include "boot.h"
50 #include "testmode.h"
51 #include "scan.h"
52 #include "hw_ops.h"
53
54 #define WL1271_BOOT_RETRIES 3
55
58 static char *fwlog_param;
59 static bool bug_on_recovery;
60 static bool no_recovery;
61
62 static void __wl1271_op_remove_interface(struct wl1271 *wl,
63 struct ieee80211_vif *vif,
64 bool reset_tx_queues);
65 static void wl1271_op_stop(struct ieee80211_hw *hw);
66 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
67
68 static int wl12xx_set_authorized(struct wl1271 *wl,
69 struct wl12xx_vif *wlvif)
70 {
71 int ret;
72
73 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
74 return -EINVAL;
75
76 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
77 return 0;
78
79 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
80 return 0;
81
82 ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid);
83 if (ret < 0)
84 return ret;
85
86 wl12xx_croc(wl, wlvif->role_id);
87
88 wl1271_info("Association completed.");
89 return 0;
90 }
91
92 static int wl1271_reg_notify(struct wiphy *wiphy,
93 struct regulatory_request *request)
94 {
95 struct ieee80211_supported_band *band;
96 struct ieee80211_channel *ch;
97 int i;
98
99 band = wiphy->bands[IEEE80211_BAND_5GHZ];
100 for (i = 0; i < band->n_channels; i++) {
101 ch = &band->channels[i];
102 if (ch->flags & IEEE80211_CHAN_DISABLED)
103 continue;
104
105 if (ch->flags & IEEE80211_CHAN_RADAR)
106 ch->flags |= IEEE80211_CHAN_NO_IBSS |
107 IEEE80211_CHAN_PASSIVE_SCAN;
108
109 }
110
111 return 0;
112 }
113
114 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
115 bool enable)
116 {
117 int ret = 0;
118
119 /* we should hold wl->mutex */
120 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
121 if (ret < 0)
122 goto out;
123
124 if (enable)
125 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
126 else
127 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
128 out:
129 return ret;
130 }
131
132 /*
133 * this function is called when the rx_streaming interval
134 * has been changed or rx_streaming should be disabled
135 */
136 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
137 {
138 int ret = 0;
139 int period = wl->conf.rx_streaming.interval;
140
141 /* don't reconfigure if rx_streaming is disabled */
142 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
143 goto out;
144
145 /* reconfigure/disable according to new streaming_period */
146 if (period &&
147 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
148 (wl->conf.rx_streaming.always ||
149 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
150 ret = wl1271_set_rx_streaming(wl, wlvif, true);
151 else {
152 ret = wl1271_set_rx_streaming(wl, wlvif, false);
153 /* don't cancel_work_sync since we might deadlock */
154 del_timer_sync(&wlvif->rx_streaming_timer);
155 }
156 out:
157 return ret;
158 }
159
160 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
161 {
162 int ret;
163 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
164 rx_streaming_enable_work);
165 struct wl1271 *wl = wlvif->wl;
166
167 mutex_lock(&wl->mutex);
168
169 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
170 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
171 (!wl->conf.rx_streaming.always &&
172 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
173 goto out;
174
175 if (!wl->conf.rx_streaming.interval)
176 goto out;
177
178 ret = wl1271_ps_elp_wakeup(wl);
179 if (ret < 0)
180 goto out;
181
182 ret = wl1271_set_rx_streaming(wl, wlvif, true);
183 if (ret < 0)
184 goto out_sleep;
185
186 /* stop it after some time of inactivity */
187 mod_timer(&wlvif->rx_streaming_timer,
188 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
189
190 out_sleep:
191 wl1271_ps_elp_sleep(wl);
192 out:
193 mutex_unlock(&wl->mutex);
194 }
195
196 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
197 {
198 int ret;
199 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
200 rx_streaming_disable_work);
201 struct wl1271 *wl = wlvif->wl;
202
203 mutex_lock(&wl->mutex);
204
205 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
206 goto out;
207
208 ret = wl1271_ps_elp_wakeup(wl);
209 if (ret < 0)
210 goto out;
211
212 ret = wl1271_set_rx_streaming(wl, wlvif, false);
213 if (ret)
214 goto out_sleep;
215
216 out_sleep:
217 wl1271_ps_elp_sleep(wl);
218 out:
219 mutex_unlock(&wl->mutex);
220 }
221
222 static void wl1271_rx_streaming_timer(unsigned long data)
223 {
224 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
225 struct wl1271 *wl = wlvif->wl;
226 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
227 }
228
229 /* wl->mutex must be taken */
230 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
231 {
232 /* if the watchdog is not armed, don't do anything */
233 if (wl->tx_allocated_blocks == 0)
234 return;
235
236 cancel_delayed_work(&wl->tx_watchdog_work);
237 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
238 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
239 }
240
241 static void wl12xx_tx_watchdog_work(struct work_struct *work)
242 {
243 struct delayed_work *dwork;
244 struct wl1271 *wl;
245
246 dwork = container_of(work, struct delayed_work, work);
247 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
248
249 mutex_lock(&wl->mutex);
250
251 if (unlikely(wl->state == WL1271_STATE_OFF))
252 goto out;
253
254 /* Tx went out in the meantime - everything is ok */
255 if (unlikely(wl->tx_allocated_blocks == 0))
256 goto out;
257
258 /*
259 * if a ROC is in progress, we might not have any Tx for a long
260 * time (e.g. pending Tx on the non-ROC channels)
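* (a set bit in roc_map - i.e. find_first_bit() returning a value below
* WL12XX_MAX_ROLES - means some role currently has a ROC running)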
261 */
262 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
263 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
264 wl->conf.tx.tx_watchdog_timeout);
265 wl12xx_rearm_tx_watchdog_locked(wl);
266 goto out;
267 }
268
269 /*
270 * if a scan is in progress, we might not have any Tx for a long
271 * time
272 */
273 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
274 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
275 wl->conf.tx.tx_watchdog_timeout);
276 wl12xx_rearm_tx_watchdog_locked(wl);
277 goto out;
278 }
279
280 /*
281 * AP might cache a frame for a long time for a sleeping station,
282 * so rearm the timer if there's an AP interface with stations. If
283 * Tx is genuinely stuck we will hopefully discover it when all
284 * stations are removed due to inactivity.
285 */
286 if (wl->active_sta_count) {
287 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has"
288 " %d stations",
289 wl->conf.tx.tx_watchdog_timeout,
290 wl->active_sta_count);
291 wl12xx_rearm_tx_watchdog_locked(wl);
292 goto out;
293 }
294
295 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
296 wl->conf.tx.tx_watchdog_timeout);
297 wl12xx_queue_recovery_work(wl);
298
299 out:
300 mutex_unlock(&wl->mutex);
301 }
302
303 static void wlcore_adjust_conf(struct wl1271 *wl)
304 {
305 /* Adjust settings according to optional module parameters */
306 if (fwlog_param) {
307 if (!strcmp(fwlog_param, "continuous")) {
308 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
309 } else if (!strcmp(fwlog_param, "ondemand")) {
310 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
311 } else if (!strcmp(fwlog_param, "dbgpins")) {
312 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
313 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
314 } else if (!strcmp(fwlog_param, "disable")) {
315 wl->conf.fwlog.mem_blocks = 0;
316 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
317 } else {
318 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
319 }
320 }
321 }
322
323 static int wl1271_plt_init(struct wl1271 *wl)
324 {
325 int ret;
326
327 ret = wl->ops->hw_init(wl);
328 if (ret < 0)
329 return ret;
330
331 ret = wl1271_acx_init_mem_config(wl);
332 if (ret < 0)
333 return ret;
334
335 ret = wl12xx_acx_mem_cfg(wl);
336 if (ret < 0)
337 goto out_free_memmap;
338
339 /* Enable data path */
340 ret = wl1271_cmd_data_path(wl, 1);
341 if (ret < 0)
342 goto out_free_memmap;
343
344 /* Configure for CAM power saving (i.e. always active) */
345 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
346 if (ret < 0)
347 goto out_free_memmap;
348
349 /* configure PM */
350 ret = wl1271_acx_pm_config(wl);
351 if (ret < 0)
352 goto out_free_memmap;
353
354 return 0;
355
356 out_free_memmap:
357 kfree(wl->target_mem_map);
358 wl->target_mem_map = NULL;
359
360 return ret;
361 }
362
363 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
364 struct wl12xx_vif *wlvif,
365 u8 hlid, u8 tx_pkts)
366 {
367 bool fw_ps, single_sta;
368
369 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
370 single_sta = (wl->active_sta_count == 1);
371
372 /*
373 * Wake up from high-level PS if the STA is asleep with too few
374 * packets in FW or if the STA is awake.
375 */
376 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
377 wl12xx_ps_link_end(wl, wlvif, hlid);
378
379 /*
380 * Start high-level PS if the STA is asleep with enough blocks in FW.
381 * Make an exception if this is the only connected station. In this
382 * case FW-memory congestion is not a problem.
383 */
384 else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
385 wl12xx_ps_link_start(wl, wlvif, hlid, true);
386 }
387
388 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
389 struct wl12xx_vif *wlvif,
390 struct wl_fw_status *status)
391 {
392 struct wl1271_link *lnk;
393 u32 cur_fw_ps_map;
394 u8 hlid, cnt;
395
396 /* TODO: also use link_fast_bitmap here */
397
398 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
399 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
400 wl1271_debug(DEBUG_PSM,
401 "link ps prev 0x%x cur 0x%x changed 0x%x",
402 wl->ap_fw_ps_map, cur_fw_ps_map,
403 wl->ap_fw_ps_map ^ cur_fw_ps_map);
404
405 wl->ap_fw_ps_map = cur_fw_ps_map;
406 }
407
408 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
409 lnk = &wl->links[hlid];
410 cnt = status->counters.tx_lnk_free_pkts[hlid] -
411 lnk->prev_freed_pkts;
412
413 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[hlid];
414 lnk->allocated_pkts -= cnt;
415
416 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
417 lnk->allocated_pkts);
418 }
419 }
420
421 static void wl12xx_fw_status(struct wl1271 *wl,
422 struct wl_fw_status *status)
423 {
424 struct wl12xx_vif *wlvif;
425 struct timespec ts;
426 u32 old_tx_blk_count = wl->tx_blocks_available;
427 int avail, freed_blocks;
428 int i;
429 size_t status_len;
430
431 status_len = sizeof(*status) + wl->fw_status_priv_len;
432
433 wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status,
434 status_len, false);
435
436 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
437 "drv_rx_counter = %d, tx_results_counter = %d)",
438 status->intr,
439 status->fw_rx_counter,
440 status->drv_rx_counter,
441 status->tx_results_counter);
442
443 for (i = 0; i < NUM_TX_QUEUES; i++) {
444 /* prevent wrap-around in freed-packets counter */
445 wl->tx_allocated_pkts[i] -=
446 (status->counters.tx_released_pkts[i] -
447 wl->tx_pkts_freed[i]) & 0xff;
448
449 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
450 }
451
452 /* prevent wrap-around in total blocks counter */
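/*
* total_released_blks is a 32-bit counter, so when it wraps the number of
* newly freed blocks is computed as 2^32 - tx_blocks_freed +
* total_released_blks (the 0x100000000LL term below).
*/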
453 if (likely(wl->tx_blocks_freed <=
454 le32_to_cpu(status->total_released_blks)))
455 freed_blocks = le32_to_cpu(status->total_released_blks) -
456 wl->tx_blocks_freed;
457 else
458 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
459 le32_to_cpu(status->total_released_blks);
460
461 wl->tx_blocks_freed = le32_to_cpu(status->total_released_blks);
462
463 wl->tx_allocated_blocks -= freed_blocks;
464
465 /*
466 * If the FW freed some blocks:
467 * If we still have allocated blocks - re-arm the timer, Tx is
468 * not stuck. Otherwise, cancel the timer (no Tx currently).
469 */
470 if (freed_blocks) {
471 if (wl->tx_allocated_blocks)
472 wl12xx_rearm_tx_watchdog_locked(wl);
473 else
474 cancel_delayed_work(&wl->tx_watchdog_work);
475 }
476
477 avail = le32_to_cpu(status->tx_total) - wl->tx_allocated_blocks;
478
479 /*
480 * The FW might change the total number of TX memblocks before
481 * we get a notification about blocks being released. Thus, the
482 * available blocks calculation might yield a temporary result
483 * which is lower than the actual available blocks. Keeping in
484 * mind that only blocks that were allocated can be moved from
485 * TX to RX, tx_blocks_available should never decrease here.
486 */
487 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
488 avail);
489
490 /* if more blocks are available now, tx work can be scheduled */
491 if (wl->tx_blocks_available > old_tx_blk_count)
492 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
493
494 /* for AP update num of allocated TX blocks per link and ps status */
495 wl12xx_for_each_wlvif_ap(wl, wlvif) {
496 wl12xx_irq_update_links_status(wl, wlvif, status);
497 }
498
499 /* update the host-chipset time offset */
500 getnstimeofday(&ts);
501 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
502 (s64)le32_to_cpu(status->fw_localtime);
503 }
504
505 static void wl1271_flush_deferred_work(struct wl1271 *wl)
506 {
507 struct sk_buff *skb;
508
509 /* Pass all received frames to the network stack */
510 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
511 ieee80211_rx_ni(wl->hw, skb);
512
513 /* Return sent skbs to the network stack */
514 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
515 ieee80211_tx_status_ni(wl->hw, skb);
516 }
517
518 static void wl1271_netstack_work(struct work_struct *work)
519 {
520 struct wl1271 *wl =
521 container_of(work, struct wl1271, netstack_work);
522
523 do {
524 wl1271_flush_deferred_work(wl);
525 } while (skb_queue_len(&wl->deferred_rx_queue));
526 }
527
528 #define WL1271_IRQ_MAX_LOOPS 256
529
530 static irqreturn_t wl1271_irq(int irq, void *cookie)
531 {
532 int ret;
533 u32 intr;
534 int loopcount = WL1271_IRQ_MAX_LOOPS;
535 struct wl1271 *wl = (struct wl1271 *)cookie;
536 bool done = false;
537 unsigned int defer_count;
538 unsigned long flags;
539
540 /* TX might be handled here, avoid redundant work */
541 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
542 cancel_work_sync(&wl->tx_work);
543
544 /*
545 * If an edge-triggered interrupt must be used, we cannot iterate
546 * more than once without introducing race conditions with the hardirq.
547 */
548 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
549 loopcount = 1;
550
551 mutex_lock(&wl->mutex);
552
553 wl1271_debug(DEBUG_IRQ, "IRQ work");
554
555 if (unlikely(wl->state == WL1271_STATE_OFF))
556 goto out;
557
558 ret = wl1271_ps_elp_wakeup(wl);
559 if (ret < 0)
560 goto out;
561
562 while (!done && loopcount--) {
563 /*
564 * In order to avoid a race with the hardirq, clear the flag
565 * before acknowledging the chip. Since the mutex is held,
566 * wl1271_ps_elp_wakeup cannot be called concurrently.
567 */
568 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
569 smp_mb__after_clear_bit();
570
571 wl12xx_fw_status(wl, wl->fw_status);
572
573 wlcore_hw_tx_immediate_compl(wl);
574
575 intr = le32_to_cpu(wl->fw_status->intr);
576 intr &= WL1271_INTR_MASK;
577 if (!intr) {
578 done = true;
579 continue;
580 }
581
582 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
583 wl1271_error("watchdog interrupt received! "
584 "starting recovery.");
585 wl12xx_queue_recovery_work(wl);
586
587 /* restarting the chip. ignore any other interrupt. */
588 goto out;
589 }
590
591 if (likely(intr & WL1271_ACX_INTR_DATA)) {
592 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
593
594 wl12xx_rx(wl, wl->fw_status);
595
596 /* Check if any tx blocks were freed */
597 spin_lock_irqsave(&wl->wl_lock, flags);
598 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
599 wl1271_tx_total_queue_count(wl) > 0) {
600 spin_unlock_irqrestore(&wl->wl_lock, flags);
601 /*
602 * In order to avoid starvation of the TX path,
603 * call the work function directly.
604 */
605 wl1271_tx_work_locked(wl);
606 } else {
607 spin_unlock_irqrestore(&wl->wl_lock, flags);
608 }
609
610 /* check for tx results */
611 wlcore_hw_tx_delayed_compl(wl);
612
613 /* Make sure the deferred queues don't get too long */
614 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
615 skb_queue_len(&wl->deferred_rx_queue);
616 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
617 wl1271_flush_deferred_work(wl);
618 }
619
620 if (intr & WL1271_ACX_INTR_EVENT_A) {
621 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
622 wl1271_event_handle(wl, 0);
623 }
624
625 if (intr & WL1271_ACX_INTR_EVENT_B) {
626 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
627 wl1271_event_handle(wl, 1);
628 }
629
630 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
631 wl1271_debug(DEBUG_IRQ,
632 "WL1271_ACX_INTR_INIT_COMPLETE");
633
634 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
635 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
636 }
637
638 wl1271_ps_elp_sleep(wl);
639
640 out:
641 spin_lock_irqsave(&wl->wl_lock, flags);
642 /* In case TX was not handled here, queue TX work */
643 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
644 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
645 wl1271_tx_total_queue_count(wl) > 0)
646 ieee80211_queue_work(wl->hw, &wl->tx_work);
647 spin_unlock_irqrestore(&wl->wl_lock, flags);
648
649 mutex_unlock(&wl->mutex);
650
651 return IRQ_HANDLED;
652 }
653
654 struct vif_counter_data {
655 u8 counter;
656
657 struct ieee80211_vif *cur_vif;
658 bool cur_vif_running;
659 };
660
661 static void wl12xx_vif_count_iter(void *data, u8 *mac,
662 struct ieee80211_vif *vif)
663 {
664 struct vif_counter_data *counter = data;
665
666 counter->counter++;
667 if (counter->cur_vif == vif)
668 counter->cur_vif_running = true;
669 }
670
671 /* caller must not hold wl->mutex, as it might deadlock */
672 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
673 struct ieee80211_vif *cur_vif,
674 struct vif_counter_data *data)
675 {
676 memset(data, 0, sizeof(*data));
677 data->cur_vif = cur_vif;
678
679 ieee80211_iterate_active_interfaces(hw,
680 wl12xx_vif_count_iter, data);
681 }
682
683 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
684 {
685 const struct firmware *fw;
686 const char *fw_name;
687 enum wl12xx_fw_type fw_type;
688 int ret;
689
690 if (plt) {
691 fw_type = WL12XX_FW_TYPE_PLT;
692 fw_name = wl->plt_fw_name;
693 } else {
694 /*
695 * we can't call wl12xx_get_vif_count() here because
696 * wl->mutex is taken, so use the cached last_vif_count value
697 */
698 if (wl->last_vif_count > 1) {
699 fw_type = WL12XX_FW_TYPE_MULTI;
700 fw_name = wl->mr_fw_name;
701 } else {
702 fw_type = WL12XX_FW_TYPE_NORMAL;
703 fw_name = wl->sr_fw_name;
704 }
705 }
706
707 if (wl->fw_type == fw_type)
708 return 0;
709
710 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
711
712 ret = request_firmware(&fw, fw_name, wl->dev);
713
714 if (ret < 0) {
715 wl1271_error("could not get firmware %s: %d", fw_name, ret);
716 return ret;
717 }
718
719 if (fw->size % 4) {
720 wl1271_error("firmware size is not a multiple of 32 bits: %zu",
721 fw->size);
722 ret = -EILSEQ;
723 goto out;
724 }
725
726 vfree(wl->fw);
727 wl->fw_type = WL12XX_FW_TYPE_NONE;
728 wl->fw_len = fw->size;
729 wl->fw = vmalloc(wl->fw_len);
730
731 if (!wl->fw) {
732 wl1271_error("could not allocate memory for the firmware");
733 ret = -ENOMEM;
734 goto out;
735 }
736
737 memcpy(wl->fw, fw->data, wl->fw_len);
738 ret = 0;
739 wl->fw_type = fw_type;
740 out:
741 release_firmware(fw);
742
743 return ret;
744 }
745
746 static int wl1271_fetch_nvs(struct wl1271 *wl)
747 {
748 const struct firmware *fw;
749 int ret;
750
751 ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
752
753 if (ret < 0) {
754 wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME,
755 ret);
756 return ret;
757 }
758
759 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
760
761 if (!wl->nvs) {
762 wl1271_error("could not allocate memory for the nvs file");
763 ret = -ENOMEM;
764 goto out;
765 }
766
767 wl->nvs_len = fw->size;
768
769 out:
770 release_firmware(fw);
771
772 return ret;
773 }
774
775 void wl12xx_queue_recovery_work(struct wl1271 *wl)
776 {
777 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
778 ieee80211_queue_work(wl->hw, &wl->recovery_work);
779 }
780
781 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
782 {
783 size_t len = 0;
784
785 /* The FW log is a length-value list, find where the log ends */
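/*
* Each entry is assumed to be a length byte followed by that many value
* bytes; a zero length byte marks the end of the log.
*/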
786 while (len < maxlen) {
787 if (memblock[len] == 0)
788 break;
789 if (len + memblock[len] + 1 > maxlen)
790 break;
791 len += memblock[len] + 1;
792 }
793
794 /* Make sure we have enough room */
795 len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
796
797 /* Fill the FW log file, consumed by the sysfs fwlog entry */
798 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
799 wl->fwlog_size += len;
800
801 return len;
802 }
803
804 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
805 {
806 u32 addr;
807 u32 first_addr;
808 u8 *block;
809
810 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
811 (wl->conf.fwlog.mode != WL12XX_FWLOG_ON_DEMAND) ||
812 (wl->conf.fwlog.mem_blocks == 0))
813 return;
814
815 wl1271_info("Reading FW panic log");
816
817 block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
818 if (!block)
819 return;
820
821 /*
822 * Make sure the chip is awake and the logger isn't active.
823 * This might fail if the firmware has hung.
824 */
825 if (!wl1271_ps_elp_wakeup(wl))
826 wl12xx_cmd_stop_fwlog(wl);
827
828 /* Read the first memory block address */
829 wl12xx_fw_status(wl, wl->fw_status);
830 first_addr = le32_to_cpu(wl->fw_status->log_start_addr);
831 if (!first_addr)
832 goto out;
833
834 /* Traverse the memory blocks linked list */
835 addr = first_addr;
836 do {
837 memset(block, 0, WL12XX_HW_BLOCK_SIZE);
838 wl1271_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
839 false);
840
841 /*
842 * Memory blocks are linked to one another. The first 4 bytes
843 * of each memory block hold the hardware address of the next
844 * one. The last memory block points to the first one.
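* That is why the loop below stops once the address read back is zero or
* wraps around to first_addr again.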
845 */
846 addr = le32_to_cpup((__le32 *)block);
847 if (!wl12xx_copy_fwlog(wl, block + sizeof(addr),
848 WL12XX_HW_BLOCK_SIZE - sizeof(addr)))
849 break;
850 } while (addr && (addr != first_addr));
851
852 wake_up_interruptible(&wl->fwlog_waitq);
853
854 out:
855 kfree(block);
856 }
857
858 static void wl1271_recovery_work(struct work_struct *work)
859 {
860 struct wl1271 *wl =
861 container_of(work, struct wl1271, recovery_work);
862 struct wl12xx_vif *wlvif;
863 struct ieee80211_vif *vif;
864
865 mutex_lock(&wl->mutex);
866
867 if (wl->state != WL1271_STATE_ON || wl->plt)
868 goto out_unlock;
869
870 /* Avoid a recursive recovery */
871 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
872
873 wl12xx_read_fwlog_panic(wl);
874
875 wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x",
876 wl->chip.fw_ver_str,
877 wlcore_read_reg(wl, REG_PC_ON_RECOVERY));
878
879 BUG_ON(bug_on_recovery &&
880 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
881
882 if (no_recovery) {
883 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
884 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
885 goto out_unlock;
886 }
887
888 BUG_ON(bug_on_recovery);
889
890 /*
891 * Advance security sequence number to overcome potential progress
892 * in the firmware during recovery. This doesn't hurt if the network is
893 * not encrypted.
894 */
895 wl12xx_for_each_wlvif(wl, wlvif) {
896 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
897 test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
898 wlvif->tx_security_seq +=
899 WL1271_TX_SQN_POST_RECOVERY_PADDING;
900 }
901
902 /* Prevent spurious TX during FW restart */
903 ieee80211_stop_queues(wl->hw);
904
905 if (wl->sched_scanning) {
906 ieee80211_sched_scan_stopped(wl->hw);
907 wl->sched_scanning = false;
908 }
909
910 /* reboot the chipset */
911 while (!list_empty(&wl->wlvif_list)) {
912 wlvif = list_first_entry(&wl->wlvif_list,
913 struct wl12xx_vif, list);
914 vif = wl12xx_wlvif_to_vif(wlvif);
915 __wl1271_op_remove_interface(wl, vif, false);
916 }
917 mutex_unlock(&wl->mutex);
918 wl1271_op_stop(wl->hw);
919
920 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
921
922 ieee80211_restart_hw(wl->hw);
923
924 /*
925 * It's safe to enable TX now - the queues are stopped after a request
926 * to restart the HW.
927 */
928 ieee80211_wake_queues(wl->hw);
929 return;
930 out_unlock:
931 mutex_unlock(&wl->mutex);
932 }
933
934 static void wl1271_fw_wakeup(struct wl1271 *wl)
935 {
936 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
937 }
938
939 static int wl1271_setup(struct wl1271 *wl)
940 {
941 wl->fw_status = kmalloc(sizeof(*wl->fw_status), GFP_KERNEL);
942 if (!wl->fw_status)
943 return -ENOMEM;
944
945 wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
946 if (!wl->tx_res_if) {
947 kfree(wl->fw_status);
948 return -ENOMEM;
949 }
950
951 return 0;
952 }
953
954 static int wl12xx_set_power_on(struct wl1271 *wl)
955 {
956 int ret;
957
958 msleep(WL1271_PRE_POWER_ON_SLEEP);
959 ret = wl1271_power_on(wl);
960 if (ret < 0)
961 goto out;
962 msleep(WL1271_POWER_ON_SLEEP);
963 wl1271_io_reset(wl);
964 wl1271_io_init(wl);
965
966 wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
967
968 /* ELP module wake up */
969 wl1271_fw_wakeup(wl);
970
971 out:
972 return ret;
973 }
974
975 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
976 {
977 int ret = 0;
978
979 ret = wl12xx_set_power_on(wl);
980 if (ret < 0)
981 goto out;
982
983 /*
984 * For wl127x based devices we could use the default block
985 * size (512 bytes), but due to a bug in the sdio driver, we
986 * need to set it explicitly after the chip is powered on. To
987 * simplify the code and since the performance impact is
988 * negligible, we use the same block size for all different
989 * chip types.
990 */
991 if (wl1271_set_block_size(wl))
992 wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
993
994 ret = wl->ops->identify_chip(wl);
995 if (ret < 0)
996 goto out;
997
998 /* TODO: make sure the lower driver has set things up correctly */
999
1000 ret = wl1271_setup(wl);
1001 if (ret < 0)
1002 goto out;
1003
1004 ret = wl12xx_fetch_firmware(wl, plt);
1005 if (ret < 0)
1006 goto out;
1007
1008 /* No NVS from netlink, try to get it from the filesystem */
1009 if (wl->nvs == NULL) {
1010 ret = wl1271_fetch_nvs(wl);
1011 if (ret < 0)
1012 goto out;
1013 }
1014
1015 out:
1016 return ret;
1017 }
1018
1019 int wl1271_plt_start(struct wl1271 *wl)
1020 {
1021 int retries = WL1271_BOOT_RETRIES;
1022 struct wiphy *wiphy = wl->hw->wiphy;
1023 int ret;
1024
1025 mutex_lock(&wl->mutex);
1026
1027 wl1271_notice("power up");
1028
1029 if (wl->state != WL1271_STATE_OFF) {
1030 wl1271_error("cannot go into PLT state because not "
1031 "in off state: %d", wl->state);
1032 ret = -EBUSY;
1033 goto out;
1034 }
1035
1036 while (retries) {
1037 retries--;
1038 ret = wl12xx_chip_wakeup(wl, true);
1039 if (ret < 0)
1040 goto power_off;
1041
1042 ret = wl->ops->boot(wl);
1043 if (ret < 0)
1044 goto power_off;
1045
1046 ret = wl1271_plt_init(wl);
1047 if (ret < 0)
1048 goto irq_disable;
1049
1050 wl->plt = true;
1051 wl->state = WL1271_STATE_ON;
1052 wl1271_notice("firmware booted in PLT mode (%s)",
1053 wl->chip.fw_ver_str);
1054
1055 /* update hw/fw version info in wiphy struct */
1056 wiphy->hw_version = wl->chip.id;
1057 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1058 sizeof(wiphy->fw_version));
1059
1060 goto out;
1061
1062 irq_disable:
1063 mutex_unlock(&wl->mutex);
1064 /* Unlocking the mutex in the middle of handling is
1065 inherently unsafe. In this case we deem it safe to do,
1066 because we need to let any possibly pending IRQ out of
1067 the system (and while we are WL1271_STATE_OFF the IRQ
1068 work function will not do anything.) Also, any other
1069 possible concurrent operations will fail due to the
1070 current state, hence the wl1271 struct should be safe. */
1071 wlcore_disable_interrupts(wl);
1072 wl1271_flush_deferred_work(wl);
1073 cancel_work_sync(&wl->netstack_work);
1074 mutex_lock(&wl->mutex);
1075 power_off:
1076 wl1271_power_off(wl);
1077 }
1078
1079 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1080 WL1271_BOOT_RETRIES);
1081 out:
1082 mutex_unlock(&wl->mutex);
1083
1084 return ret;
1085 }
1086
1087 int wl1271_plt_stop(struct wl1271 *wl)
1088 {
1089 int ret = 0;
1090
1091 wl1271_notice("power down");
1092
1093 /*
1094 * Interrupts must be disabled before setting the state to OFF.
1095 * Otherwise, the interrupt handler might be called and exit without
1096 * reading the interrupt status.
1097 */
1098 wlcore_disable_interrupts(wl);
1099 mutex_lock(&wl->mutex);
1100 if (!wl->plt) {
1101 mutex_unlock(&wl->mutex);
1102
1103 /*
1104 * This will not necessarily enable interrupts as interrupts
1105 * may have been disabled when op_stop was called. It will,
1106 * however, balance the above call to disable_interrupts().
1107 */
1108 wlcore_enable_interrupts(wl);
1109
1110 wl1271_error("cannot power down because not in PLT "
1111 "state: %d", wl->state);
1112 ret = -EBUSY;
1113 goto out;
1114 }
1115
1116 mutex_unlock(&wl->mutex);
1117
1118 wl1271_flush_deferred_work(wl);
1119 cancel_work_sync(&wl->netstack_work);
1120 cancel_work_sync(&wl->recovery_work);
1121 cancel_delayed_work_sync(&wl->elp_work);
1122 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1123 cancel_delayed_work_sync(&wl->connection_loss_work);
1124
1125 mutex_lock(&wl->mutex);
1126 wl1271_power_off(wl);
1127 wl->flags = 0;
1128 wl->state = WL1271_STATE_OFF;
1129 wl->plt = false;
1130 wl->rx_counter = 0;
1131 mutex_unlock(&wl->mutex);
1132
1133 out:
1134 return ret;
1135 }
1136
1137 static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1138 {
1139 struct wl1271 *wl = hw->priv;
1140 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1141 struct ieee80211_vif *vif = info->control.vif;
1142 struct wl12xx_vif *wlvif = NULL;
1143 unsigned long flags;
1144 int q, mapping;
1145 u8 hlid;
1146
1147 if (vif)
1148 wlvif = wl12xx_vif_to_data(vif);
1149
1150 mapping = skb_get_queue_mapping(skb);
1151 q = wl1271_tx_get_queue(mapping);
1152
1153 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
1154
1155 spin_lock_irqsave(&wl->wl_lock, flags);
1156
1157 /* queue the packet */
1158 if (hlid == WL12XX_INVALID_LINK_ID ||
1159 (wlvif && !test_bit(hlid, wlvif->links_map))) {
1160 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1161 ieee80211_free_txskb(hw, skb);
1162 goto out;
1163 }
1164
1165 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1166 hlid, q, skb->len);
1167 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1168
1169 wl->tx_queue_count[q]++;
1170
1171 /*
1172 * The workqueue is slow to process the tx_queue and we need to stop
1173 * the queue here, otherwise the queue will get too long.
1174 */
1175 if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
1176 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1177 ieee80211_stop_queue(wl->hw, mapping);
1178 set_bit(q, &wl->stopped_queues_map);
1179 }
1180
1181 /*
1182 * The chip specific setup must run before the first TX packet -
1183 * before that, the tx_work will not be initialized!
1184 */
1185
1186 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1187 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1188 ieee80211_queue_work(wl->hw, &wl->tx_work);
1189
1190 out:
1191 spin_unlock_irqrestore(&wl->wl_lock, flags);
1192 }
1193
1194 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1195 {
1196 unsigned long flags;
1197 int q;
1198
1199 /* no need to queue a new dummy packet if one is already pending */
1200 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1201 return 0;
1202
1203 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1204
1205 spin_lock_irqsave(&wl->wl_lock, flags);
1206 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1207 wl->tx_queue_count[q]++;
1208 spin_unlock_irqrestore(&wl->wl_lock, flags);
1209
1210 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1211 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1212 wl1271_tx_work_locked(wl);
1213
1214 /*
1215 * If the FW TX is busy, TX work will be scheduled by the threaded
1216 * interrupt handler function
1217 */
1218 return 0;
1219 }
1220
1221 /*
1222 * The size of the dummy packet should be at least 1400 bytes. However, in
1223 * order to minimize the number of bus transactions, aligning it to 512-byte
1224 * boundaries could be beneficial, performance-wise.
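* With this alignment, ALIGN(1400, 512) evaluates to 1536 bytes in total.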
1225 */
1226 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1227
1228 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1229 {
1230 struct sk_buff *skb;
1231 struct ieee80211_hdr_3addr *hdr;
1232 unsigned int dummy_packet_size;
1233
1234 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1235 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1236
1237 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1238 if (!skb) {
1239 wl1271_warning("Failed to allocate a dummy packet skb");
1240 return NULL;
1241 }
1242
1243 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1244
1245 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1246 memset(hdr, 0, sizeof(*hdr));
1247 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1248 IEEE80211_STYPE_NULLFUNC |
1249 IEEE80211_FCTL_TODS);
1250
1251 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1252
1253 /* Dummy packets require the TID to be management */
1254 skb->priority = WL1271_TID_MGMT;
1255
1256 /* Initialize all fields that might be used */
1257 skb_set_queue_mapping(skb, 0);
1258 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1259
1260 return skb;
1261 }
1262
1263
1264 #ifdef CONFIG_PM
1265 static int
1266 wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
1267 {
1268 int num_fields = 0, in_field = 0, fields_size = 0;
1269 int i, pattern_len = 0;
1270
1271 if (!p->mask) {
1272 wl1271_warning("No mask in WoWLAN pattern");
1273 return -EINVAL;
1274 }
1275
1276 /*
1277 * The pattern is broken up into segments of bytes at different offsets
1278 * that need to be checked by the FW filter. Each segment is called
1279 * a field in the FW API. We verify that the total number of fields
1280 * required for this pattern won't exceed FW limits (8),
1281 * and that the total fields buffer won't exceed the FW limit.
1282 * Note that if there's a pattern which crosses the Ethernet/IP header
1283 * boundary, a new field is required.
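* For example (assuming WL1271_RX_FILTER_ETH_HEADER_SIZE is the usual
* 14-byte Ethernet header length), a mask covering bytes 12-13 (EtherType)
* and byte 23 (the IP protocol field) yields two fields: one relative to
* the Ethernet header and one at IP-header offset 23 - 14 = 9.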
1284 */
1285 for (i = 0; i < p->pattern_len; i++) {
1286 if (test_bit(i, (unsigned long *)p->mask)) {
1287 if (!in_field) {
1288 in_field = 1;
1289 pattern_len = 1;
1290 } else {
1291 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1292 num_fields++;
1293 fields_size += pattern_len +
1294 RX_FILTER_FIELD_OVERHEAD;
1295 pattern_len = 1;
1296 } else
1297 pattern_len++;
1298 }
1299 } else {
1300 if (in_field) {
1301 in_field = 0;
1302 fields_size += pattern_len +
1303 RX_FILTER_FIELD_OVERHEAD;
1304 num_fields++;
1305 }
1306 }
1307 }
1308
1309 if (in_field) {
1310 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1311 num_fields++;
1312 }
1313
1314 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1315 wl1271_warning("RX Filter too complex. Too many segments");
1316 return -EINVAL;
1317 }
1318
1319 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1320 wl1271_warning("RX filter pattern is too big");
1321 return -E2BIG;
1322 }
1323
1324 return 0;
1325 }
1326
1327 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1328 {
1329 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1330 }
1331
1332 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1333 {
1334 int i;
1335
1336 if (filter == NULL)
1337 return;
1338
1339 for (i = 0; i < filter->num_fields; i++)
1340 kfree(filter->fields[i].pattern);
1341
1342 kfree(filter);
1343 }
1344
1345 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1346 u16 offset, u8 flags,
1347 u8 *pattern, u8 len)
1348 {
1349 struct wl12xx_rx_filter_field *field;
1350
1351 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1352 wl1271_warning("Max fields per RX filter. can't alloc another");
1353 return -EINVAL;
1354 }
1355
1356 field = &filter->fields[filter->num_fields];
1357
1358 field->pattern = kzalloc(len, GFP_KERNEL);
1359 if (!field->pattern) {
1360 wl1271_warning("Failed to allocate RX filter pattern");
1361 return -ENOMEM;
1362 }
1363
1364 filter->num_fields++;
1365
1366 field->offset = cpu_to_le16(offset);
1367 field->flags = flags;
1368 field->len = len;
1369 memcpy(field->pattern, pattern, len);
1370
1371 return 0;
1372 }
1373
1374 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1375 {
1376 int i, fields_size = 0;
1377
1378 for (i = 0; i < filter->num_fields; i++)
1379 fields_size += filter->fields[i].len +
1380 sizeof(struct wl12xx_rx_filter_field) -
1381 sizeof(u8 *);
1382
1383 return fields_size;
1384 }
1385
1386 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1387 u8 *buf)
1388 {
1389 int i;
1390 struct wl12xx_rx_filter_field *field;
1391
1392 for (i = 0; i < filter->num_fields; i++) {
1393 field = (struct wl12xx_rx_filter_field *)buf;
1394
1395 field->offset = filter->fields[i].offset;
1396 field->flags = filter->fields[i].flags;
1397 field->len = filter->fields[i].len;
1398
1399 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1400 buf += sizeof(struct wl12xx_rx_filter_field) -
1401 sizeof(u8 *) + field->len;
1402 }
1403 }
1404
1405 /*
1406 * Allocates an RX filter returned through f
1407 * which needs to be freed using rx_filter_free()
1408 */
1409 static int wl1271_convert_wowlan_pattern_to_rx_filter(
1410 struct cfg80211_wowlan_trig_pkt_pattern *p,
1411 struct wl12xx_rx_filter **f)
1412 {
1413 int i, j, ret = 0;
1414 struct wl12xx_rx_filter *filter;
1415 u16 offset;
1416 u8 flags, len;
1417
1418 filter = wl1271_rx_filter_alloc();
1419 if (!filter) {
1420 wl1271_warning("Failed to alloc rx filter");
1421 ret = -ENOMEM;
1422 goto err;
1423 }
1424
1425 i = 0;
1426 while (i < p->pattern_len) {
1427 if (!test_bit(i, (unsigned long *)p->mask)) {
1428 i++;
1429 continue;
1430 }
1431
1432 for (j = i; j < p->pattern_len; j++) {
1433 if (!test_bit(j, (unsigned long *)p->mask))
1434 break;
1435
1436 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1437 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1438 break;
1439 }
1440
1441 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1442 offset = i;
1443 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1444 } else {
1445 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1446 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1447 }
1448
1449 len = j - i;
1450
1451 ret = wl1271_rx_filter_alloc_field(filter,
1452 offset,
1453 flags,
1454 &p->pattern[i], len);
1455 if (ret)
1456 goto err;
1457
1458 i = j;
1459 }
1460
1461 filter->action = FILTER_SIGNAL;
1462
1463 *f = filter;
1464 return 0;
1465
1466 err:
1467 wl1271_rx_filter_free(filter);
1468 *f = NULL;
1469
1470 return ret;
1471 }
1472
1473 static int wl1271_configure_wowlan(struct wl1271 *wl,
1474 struct cfg80211_wowlan *wow)
1475 {
1476 int i, ret;
1477
1478 if (!wow || wow->any || !wow->n_patterns) {
1479 wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1480 wl1271_rx_filter_clear_all(wl);
1481 return 0;
1482 }
1483
1484 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1485 return -EINVAL;
1486
1487 /* Validate all incoming patterns before clearing current FW state */
1488 for (i = 0; i < wow->n_patterns; i++) {
1489 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1490 if (ret) {
1491 wl1271_warning("Bad wowlan pattern %d", i);
1492 return ret;
1493 }
1494 }
1495
1496 wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1497 wl1271_rx_filter_clear_all(wl);
1498
1499 /* Translate WoWLAN patterns into filters */
1500 for (i = 0; i < wow->n_patterns; i++) {
1501 struct cfg80211_wowlan_trig_pkt_pattern *p;
1502 struct wl12xx_rx_filter *filter = NULL;
1503
1504 p = &wow->patterns[i];
1505
1506 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1507 if (ret) {
1508 wl1271_warning("Failed to create an RX filter from "
1509 "wowlan pattern %d", i);
1510 goto out;
1511 }
1512
1513 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1514
1515 wl1271_rx_filter_free(filter);
1516 if (ret)
1517 goto out;
1518 }
1519
1520 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1521
1522 out:
1523 return ret;
1524 }
1525
1526 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1527 struct wl12xx_vif *wlvif,
1528 struct cfg80211_wowlan *wow)
1529 {
1530 int ret = 0;
1531
1532 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1533 goto out;
1534
1535 ret = wl1271_ps_elp_wakeup(wl);
1536 if (ret < 0)
1537 goto out;
1538
1539 wl1271_configure_wowlan(wl, wow);
1540 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1541 wl->conf.conn.suspend_wake_up_event,
1542 wl->conf.conn.suspend_listen_interval);
1543
1544 if (ret < 0)
1545 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1546
1547 wl1271_ps_elp_sleep(wl);
1548
1549 out:
1550 return ret;
1551
1552 }
1553
1554 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1555 struct wl12xx_vif *wlvif)
1556 {
1557 int ret = 0;
1558
1559 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1560 goto out;
1561
1562 ret = wl1271_ps_elp_wakeup(wl);
1563 if (ret < 0)
1564 goto out;
1565
1566 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1567
1568 wl1271_ps_elp_sleep(wl);
1569 out:
1570 return ret;
1571
1572 }
1573
1574 static int wl1271_configure_suspend(struct wl1271 *wl,
1575 struct wl12xx_vif *wlvif,
1576 struct cfg80211_wowlan *wow)
1577 {
1578 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1579 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1580 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1581 return wl1271_configure_suspend_ap(wl, wlvif);
1582 return 0;
1583 }
1584
1585 static void wl1271_configure_resume(struct wl1271 *wl,
1586 struct wl12xx_vif *wlvif)
1587 {
1588 int ret = 0;
1589 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1590 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1591
1592 if ((!is_ap) && (!is_sta))
1593 return;
1594
1595 ret = wl1271_ps_elp_wakeup(wl);
1596 if (ret < 0)
1597 return;
1598
1599 if (is_sta) {
1600 wl1271_configure_wowlan(wl, NULL);
1601
1602 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1603 wl->conf.conn.wake_up_event,
1604 wl->conf.conn.listen_interval);
1605
1606 if (ret < 0)
1607 wl1271_error("resume: wake up conditions failed: %d",
1608 ret);
1609
1610 } else if (is_ap) {
1611 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1612 }
1613
1614 wl1271_ps_elp_sleep(wl);
1615 }
1616
1617 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1618 struct cfg80211_wowlan *wow)
1619 {
1620 struct wl1271 *wl = hw->priv;
1621 struct wl12xx_vif *wlvif;
1622 int ret;
1623
1624 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1625 WARN_ON(!wow);
1626
1627 wl1271_tx_flush(wl);
1628
1629 mutex_lock(&wl->mutex);
1630 wl->wow_enabled = true;
1631 wl12xx_for_each_wlvif(wl, wlvif) {
1632 ret = wl1271_configure_suspend(wl, wlvif, wow);
1633 if (ret < 0) {
1634 mutex_unlock(&wl->mutex);
1635 wl1271_warning("couldn't prepare device to suspend");
1636 return ret;
1637 }
1638 }
1639 mutex_unlock(&wl->mutex);
1640 /* flush any remaining work */
1641 wl1271_debug(DEBUG_MAC80211, "flushing remaining work");
1642
1643 /*
1644 * disable and re-enable interrupts in order to flush
1645 * the threaded_irq
1646 */
1647 wlcore_disable_interrupts(wl);
1648
1649 /*
1650 * set suspended flag to avoid triggering a new threaded_irq
1651 * work. no need for spinlock as interrupts are disabled.
1652 */
1653 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1654
1655 wlcore_enable_interrupts(wl);
1656 flush_work(&wl->tx_work);
1657 flush_delayed_work(&wl->elp_work);
1658
1659 return 0;
1660 }
1661
1662 static int wl1271_op_resume(struct ieee80211_hw *hw)
1663 {
1664 struct wl1271 *wl = hw->priv;
1665 struct wl12xx_vif *wlvif;
1666 unsigned long flags;
1667 bool run_irq_work = false;
1668
1669 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1670 wl->wow_enabled);
1671 WARN_ON(!wl->wow_enabled);
1672
1673 /*
1674 * re-enable irq_work enqueuing, and call irq_work directly if
1675 * there is pending work.
1676 */
1677 spin_lock_irqsave(&wl->wl_lock, flags);
1678 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1679 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1680 run_irq_work = true;
1681 spin_unlock_irqrestore(&wl->wl_lock, flags);
1682
1683 if (run_irq_work) {
1684 wl1271_debug(DEBUG_MAC80211,
1685 "run postponed irq_work directly");
1686 wl1271_irq(0, wl);
1687 wlcore_enable_interrupts(wl);
1688 }
1689
1690 mutex_lock(&wl->mutex);
1691 wl12xx_for_each_wlvif(wl, wlvif) {
1692 wl1271_configure_resume(wl, wlvif);
1693 }
1694 wl->wow_enabled = false;
1695 mutex_unlock(&wl->mutex);
1696
1697 return 0;
1698 }
1699 #endif
1700
1701 static int wl1271_op_start(struct ieee80211_hw *hw)
1702 {
1703 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1704
1705 /*
1706 * We have to delay the booting of the hardware because
1707 * we need to know the local MAC address before downloading and
1708 * initializing the firmware. The MAC address cannot be changed
1709 * after boot, and without the proper MAC address, the firmware
1710 * will not function properly.
1711 *
1712 * The MAC address is first known when the corresponding interface
1713 * is added. That is where we will initialize the hardware.
1714 */
1715
1716 return 0;
1717 }
1718
1719 static void wl1271_op_stop(struct ieee80211_hw *hw)
1720 {
1721 struct wl1271 *wl = hw->priv;
1722 int i;
1723
1724 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1725
1726 /*
1727 * Interrupts must be disabled before setting the state to OFF.
1728 * Otherwise, the interrupt handler might be called and exit without
1729 * reading the interrupt status.
1730 */
1731 wlcore_disable_interrupts(wl);
1732 mutex_lock(&wl->mutex);
1733 if (wl->state == WL1271_STATE_OFF) {
1734 mutex_unlock(&wl->mutex);
1735
1736 /*
1737 * This will not necessarily enable interrupts as interrupts
1738 * may have been disabled when op_stop was called. It will,
1739 * however, balance the above call to disable_interrupts().
1740 */
1741 wlcore_enable_interrupts(wl);
1742 return;
1743 }
1744
1745 /*
1746 * this must be before the cancel_work calls below, so that the work
1747 * functions don't perform further work.
1748 */
1749 wl->state = WL1271_STATE_OFF;
1750 mutex_unlock(&wl->mutex);
1751
1752 wl1271_flush_deferred_work(wl);
1753 cancel_delayed_work_sync(&wl->scan_complete_work);
1754 cancel_work_sync(&wl->netstack_work);
1755 cancel_work_sync(&wl->tx_work);
1756 cancel_delayed_work_sync(&wl->elp_work);
1757 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1758 cancel_delayed_work_sync(&wl->connection_loss_work);
1759
1760 /* let's notify MAC80211 about the remaining pending TX frames */
1761 wl12xx_tx_reset(wl, true);
1762 mutex_lock(&wl->mutex);
1763
1764 wl1271_power_off(wl);
1765
1766 wl->band = IEEE80211_BAND_2GHZ;
1767
1768 wl->rx_counter = 0;
1769 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1770 wl->tx_blocks_available = 0;
1771 wl->tx_allocated_blocks = 0;
1772 wl->tx_results_count = 0;
1773 wl->tx_packets_count = 0;
1774 wl->time_offset = 0;
1775 wl->ap_fw_ps_map = 0;
1776 wl->ap_ps_map = 0;
1777 wl->sched_scanning = false;
1778 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1779 memset(wl->links_map, 0, sizeof(wl->links_map));
1780 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1781 wl->active_sta_count = 0;
1782
1783 /* The system link is always allocated */
1784 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1785
1786 /*
1787 * this is performed after the cancel_work calls and the associated
1788 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1789 * get executed before all these vars have been reset.
1790 */
1791 wl->flags = 0;
1792
1793 wl->tx_blocks_freed = 0;
1794
1795 for (i = 0; i < NUM_TX_QUEUES; i++) {
1796 wl->tx_pkts_freed[i] = 0;
1797 wl->tx_allocated_pkts[i] = 0;
1798 }
1799
1800 wl1271_debugfs_reset(wl);
1801
1802 kfree(wl->fw_status);
1803 wl->fw_status = NULL;
1804 kfree(wl->tx_res_if);
1805 wl->tx_res_if = NULL;
1806 kfree(wl->target_mem_map);
1807 wl->target_mem_map = NULL;
1808
1809 mutex_unlock(&wl->mutex);
1810 }
1811
1812 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
1813 {
1814 u8 policy = find_first_zero_bit(wl->rate_policies_map,
1815 WL12XX_MAX_RATE_POLICIES);
1816 if (policy >= WL12XX_MAX_RATE_POLICIES)
1817 return -EBUSY;
1818
1819 __set_bit(policy, wl->rate_policies_map);
1820 *idx = policy;
1821 return 0;
1822 }
1823
1824 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
1825 {
1826 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
1827 return;
1828
1829 __clear_bit(*idx, wl->rate_policies_map);
1830 *idx = WL12XX_MAX_RATE_POLICIES;
1831 }
1832
1833 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1834 {
1835 switch (wlvif->bss_type) {
1836 case BSS_TYPE_AP_BSS:
1837 if (wlvif->p2p)
1838 return WL1271_ROLE_P2P_GO;
1839 else
1840 return WL1271_ROLE_AP;
1841
1842 case BSS_TYPE_STA_BSS:
1843 if (wlvif->p2p)
1844 return WL1271_ROLE_P2P_CL;
1845 else
1846 return WL1271_ROLE_STA;
1847
1848 case BSS_TYPE_IBSS:
1849 return WL1271_ROLE_IBSS;
1850
1851 default:
1852 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
1853 }
1854 return WL12XX_INVALID_ROLE_TYPE;
1855 }
1856
1857 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
1858 {
1859 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
1860 int i;
1861
1862 /* clear everything but the persistent data */
1863 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
1864
1865 switch (ieee80211_vif_type_p2p(vif)) {
1866 case NL80211_IFTYPE_P2P_CLIENT:
1867 wlvif->p2p = 1;
1868 /* fall-through */
1869 case NL80211_IFTYPE_STATION:
1870 wlvif->bss_type = BSS_TYPE_STA_BSS;
1871 break;
1872 case NL80211_IFTYPE_ADHOC:
1873 wlvif->bss_type = BSS_TYPE_IBSS;
1874 break;
1875 case NL80211_IFTYPE_P2P_GO:
1876 wlvif->p2p = 1;
1877 /* fall-through */
1878 case NL80211_IFTYPE_AP:
1879 wlvif->bss_type = BSS_TYPE_AP_BSS;
1880 break;
1881 default:
1882 wlvif->bss_type = MAX_BSS_TYPE;
1883 return -EOPNOTSUPP;
1884 }
1885
1886 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
1887 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
1888 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
1889
1890 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
1891 wlvif->bss_type == BSS_TYPE_IBSS) {
1892 /* init sta/ibss data */
1893 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
1894 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
1895 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
1896 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
1897 } else {
1898 /* init ap data */
1899 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
1900 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
1901 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
1902 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
1903 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
1904 wl12xx_allocate_rate_policy(wl,
1905 &wlvif->ap.ucast_rate_idx[i]);
1906 }
1907
1908 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
1909 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
1910 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
1911 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
1912 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
1913 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
1914
1915 /*
1916 * mac80211 configures some values globally, while we treat them
1917 * per-interface. thus, on init, we have to copy them from wl
1918 */
1919 wlvif->band = wl->band;
1920 wlvif->channel = wl->channel;
1921 wlvif->power_level = wl->power_level;
1922
1923 INIT_WORK(&wlvif->rx_streaming_enable_work,
1924 wl1271_rx_streaming_enable_work);
1925 INIT_WORK(&wlvif->rx_streaming_disable_work,
1926 wl1271_rx_streaming_disable_work);
1927 INIT_LIST_HEAD(&wlvif->list);
1928
1929 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
1930 (unsigned long) wlvif);
1931 return 0;
1932 }
1933
1934 static bool wl12xx_init_fw(struct wl1271 *wl)
1935 {
1936 int retries = WL1271_BOOT_RETRIES;
1937 bool booted = false;
1938 struct wiphy *wiphy = wl->hw->wiphy;
1939 int ret;
1940
1941 while (retries) {
1942 retries--;
1943 ret = wl12xx_chip_wakeup(wl, false);
1944 if (ret < 0)
1945 goto power_off;
1946
1947 ret = wl->ops->boot(wl);
1948 if (ret < 0)
1949 goto power_off;
1950
1951 ret = wl1271_hw_init(wl);
1952 if (ret < 0)
1953 goto irq_disable;
1954
1955 booted = true;
1956 break;
1957
1958 irq_disable:
1959 mutex_unlock(&wl->mutex);
1960 /* Unlocking the mutex in the middle of handling is
1961 inherently unsafe. In this case we deem it safe to do,
1962 because we need to let any possibly pending IRQ out of
1963 the system (and while we are WL1271_STATE_OFF the IRQ
1964 work function will not do anything.) Also, any other
1965 possible concurrent operations will fail due to the
1966 current state, hence the wl1271 struct should be safe. */
1967 wlcore_disable_interrupts(wl);
1968 wl1271_flush_deferred_work(wl);
1969 cancel_work_sync(&wl->netstack_work);
1970 mutex_lock(&wl->mutex);
1971 power_off:
1972 wl1271_power_off(wl);
1973 }
1974
1975 if (!booted) {
1976 wl1271_error("firmware boot failed despite %d retries",
1977 WL1271_BOOT_RETRIES);
1978 goto out;
1979 }
1980
1981 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
1982
1983 /* update hw/fw version info in wiphy struct */
1984 wiphy->hw_version = wl->chip.id;
1985 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1986 sizeof(wiphy->fw_version));
1987
1988 /*
1989 * Now we know if 11a is supported (info from the NVS), so disable
1990 * 11a channels if not supported
1991 */
1992 if (!wl->enable_11a)
1993 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
1994
1995 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
1996 wl->enable_11a ? "" : "not ");
1997
1998 wl->state = WL1271_STATE_ON;
1999 out:
2000 return booted;
2001 }
2002
2003 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2004 {
2005 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2006 }
2007
2008 /*
2009 * Check whether a fw switch (i.e. moving from one loaded
2010 * fw to another) is needed. This function is also responsible
2011 * for updating wl->last_vif_count, so it must be called before
2012 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2013 * will be used).
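* For example, adding a second vif while the single-role fw is loaded, or
* dropping back to a single vif while the multi-role fw is loaded, both
* require a fw switch.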
2014 */
2015 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2016 struct vif_counter_data vif_counter_data,
2017 bool add)
2018 {
2019 enum wl12xx_fw_type current_fw = wl->fw_type;
2020 u8 vif_count = vif_counter_data.counter;
2021
2022 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2023 return false;
2024
2025 /* increase the vif count if this is a new vif */
2026 if (add && !vif_counter_data.cur_vif_running)
2027 vif_count++;
2028
2029 wl->last_vif_count = vif_count;
2030
2031 /* no need for fw change if the device is OFF */
2032 if (wl->state == WL1271_STATE_OFF)
2033 return false;
2034
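/*
 * A single active vif runs on the single-role firmware, two or more
 * need the multi-role image; any mismatch with the currently loaded
 * fw type means a switch is required.
 */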
2035 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2036 return true;
2037 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2038 return true;
2039
2040 return false;
2041 }
2042
2043 /*
2044 * Enter "forced psm". Make sure the sta is in psm against the ap,
2045 * to make the fw switch a bit more resilient to disconnection.
2046 */
2047 static void wl12xx_force_active_psm(struct wl1271 *wl)
2048 {
2049 struct wl12xx_vif *wlvif;
2050
2051 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2052 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2053 }
2054 }
2055
2056 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2057 struct ieee80211_vif *vif)
2058 {
2059 struct wl1271 *wl = hw->priv;
2060 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2061 struct vif_counter_data vif_count;
2062 int ret = 0;
2063 u8 role_type;
2064 bool booted = false;
2065
2066 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2067 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2068
2069 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2070 ieee80211_vif_type_p2p(vif), vif->addr);
2071
2072 wl12xx_get_vif_count(hw, vif, &vif_count);
2073
2074 mutex_lock(&wl->mutex);
2075 ret = wl1271_ps_elp_wakeup(wl);
2076 if (ret < 0)
2077 goto out_unlock;
2078
2079 /*
2080 * in some corner-case HW recovery scenarios it's possible to
2081 * get here before __wl1271_op_remove_interface has completed, so
2082 * opt out if that is the case.
2083 */
2084 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2085 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2086 ret = -EBUSY;
2087 goto out;
2088 }
2089
2090
2091 ret = wl12xx_init_vif_data(wl, vif);
2092 if (ret < 0)
2093 goto out;
2094
2095 wlvif->wl = wl;
2096 role_type = wl12xx_get_role_type(wl, wlvif);
2097 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2098 ret = -EINVAL;
2099 goto out;
2100 }
2101
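/*
 * A required change of fw type (single-role vs. multi-role) is handled
 * as an intended recovery: force the STA into psm, flag the recovery as
 * intentional and run the recovery work directly, which reloads the
 * firmware with the type chosen above by wl12xx_need_fw_change().
 */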
2102 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2103 wl12xx_force_active_psm(wl);
2104 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2105 mutex_unlock(&wl->mutex);
2106 wl1271_recovery_work(&wl->recovery_work);
2107 return 0;
2108 }
2109
2110 /*
2111 * TODO: once the nvs issue is solved, move this block
2112 * to start(), and make sure the driver is ON here.
2113 */
2114 if (wl->state == WL1271_STATE_OFF) {
2115 /*
2116 * we still need this in order to configure the fw
2117 * while uploading the nvs
2118 */
2119 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2120
2121 booted = wl12xx_init_fw(wl);
2122 if (!booted) {
2123 ret = -EINVAL;
2124 goto out;
2125 }
2126 }
2127
2128 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2129 wlvif->bss_type == BSS_TYPE_IBSS) {
2130 /*
2131 * The device role is a special role used for
2132 * rx and tx frames prior to association (as
2133 * the STA role can get packets only from
2134 * its associated bssid)
2135 */
2136 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2137 WL1271_ROLE_DEVICE,
2138 &wlvif->dev_role_id);
2139 if (ret < 0)
2140 goto out;
2141 }
2142
2143 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2144 role_type, &wlvif->role_id);
2145 if (ret < 0)
2146 goto out;
2147
2148 ret = wl1271_init_vif_specific(wl, vif);
2149 if (ret < 0)
2150 goto out;
2151
2152 list_add(&wlvif->list, &wl->wlvif_list);
2153 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2154
2155 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2156 wl->ap_count++;
2157 else
2158 wl->sta_count++;
2159 out:
2160 wl1271_ps_elp_sleep(wl);
2161 out_unlock:
2162 mutex_unlock(&wl->mutex);
2163
2164 return ret;
2165 }
2166
2167 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2168 struct ieee80211_vif *vif,
2169 bool reset_tx_queues)
2170 {
2171 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2172 int i, ret;
2173
2174 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2175
2176 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2177 return;
2178
2179 /* because of hardware recovery, we may get here twice */
2180 if (wl->state != WL1271_STATE_ON)
2181 return;
2182
2183 wl1271_info("down");
2184
2185 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2186 wl->scan_vif == vif) {
2187 /*
2188 * Rearm the tx watchdog just before idling scan. This
2189 * prevents just-finished scans from triggering the watchdog
2190 */
2191 wl12xx_rearm_tx_watchdog_locked(wl);
2192
2193 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2194 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2195 wl->scan_vif = NULL;
2196 wl->scan.req = NULL;
2197 ieee80211_scan_completed(wl->hw, true);
2198 }
2199
2200 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2201 /* disable active roles */
2202 ret = wl1271_ps_elp_wakeup(wl);
2203 if (ret < 0)
2204 goto deinit;
2205
2206 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2207 wlvif->bss_type == BSS_TYPE_IBSS) {
2208 if (wl12xx_dev_role_started(wlvif))
2209 wl12xx_stop_dev(wl, wlvif);
2210
2211 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2212 if (ret < 0)
2213 goto deinit;
2214 }
2215
2216 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2217 if (ret < 0)
2218 goto deinit;
2219
2220 wl1271_ps_elp_sleep(wl);
2221 }
2222 deinit:
2223 /* clear all hlids (except system_hlid) */
2224 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2225
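/*
 * STA/IBSS and AP roles hold different sets of rate-policy indices and
 * hlids; return whichever set this vif was using back to the pool.
 */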
2226 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2227 wlvif->bss_type == BSS_TYPE_IBSS) {
2228 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2229 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2230 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2231 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2232 } else {
2233 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2234 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2235 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2236 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2237 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2238 wl12xx_free_rate_policy(wl,
2239 &wlvif->ap.ucast_rate_idx[i]);
2240 wl1271_free_ap_keys(wl, wlvif);
2241 }
2242
2243 dev_kfree_skb(wlvif->probereq);
2244 wlvif->probereq = NULL;
2245 wl12xx_tx_reset_wlvif(wl, wlvif);
2246 if (wl->last_wlvif == wlvif)
2247 wl->last_wlvif = NULL;
2248 list_del(&wlvif->list);
2249 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2250 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2251 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2252
2253 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2254 wl->ap_count--;
2255 else
2256 wl->sta_count--;
2257
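/*
 * Drop the mutex while tearing down the rx-streaming timer and works:
 * cancel_work_sync() under wl->mutex could otherwise deadlock, since
 * those work functions presumably take wl->mutex themselves.
 */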
2258 mutex_unlock(&wl->mutex);
2259
2260 del_timer_sync(&wlvif->rx_streaming_timer);
2261 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2262 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2263
2264 mutex_lock(&wl->mutex);
2265 }
2266
2267 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2268 struct ieee80211_vif *vif)
2269 {
2270 struct wl1271 *wl = hw->priv;
2271 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2272 struct wl12xx_vif *iter;
2273 struct vif_counter_data vif_count;
2274 bool cancel_recovery = true;
2275
2276 wl12xx_get_vif_count(hw, vif, &vif_count);
2277 mutex_lock(&wl->mutex);
2278
2279 if (wl->state == WL1271_STATE_OFF ||
2280 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2281 goto out;
2282
2283 /*
2284 * wl->vif can be null here if someone shuts down the interface
2285 * just when hardware recovery has been started.
2286 */
2287 wl12xx_for_each_wlvif(wl, iter) {
2288 if (iter != wlvif)
2289 continue;
2290
2291 __wl1271_op_remove_interface(wl, vif, true);
2292 break;
2293 }
2294 WARN_ON(iter != wlvif);
2295 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2296 wl12xx_force_active_psm(wl);
2297 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2298 wl12xx_queue_recovery_work(wl);
2299 cancel_recovery = false;
2300 }
2301 out:
2302 mutex_unlock(&wl->mutex);
2303 if (cancel_recovery)
2304 cancel_work_sync(&wl->recovery_work);
2305 }
2306
2307 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2308 struct ieee80211_vif *vif,
2309 enum nl80211_iftype new_type, bool p2p)
2310 {
2311 struct wl1271 *wl = hw->priv;
2312 int ret;
2313
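/*
 * Flag the vif change so wl12xx_need_fw_change() doesn't trigger a fw
 * switch while the interface is removed and re-added below.
 */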
2314 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2315 wl1271_op_remove_interface(hw, vif);
2316
2317 vif->type = new_type;
2318 vif->p2p = p2p;
2319 ret = wl1271_op_add_interface(hw, vif);
2320
2321 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2322 return ret;
2323 }
2324
2325 static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2326 bool set_assoc)
2327 {
2328 int ret;
2329 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2330
2331 /*
2332 * One of the side effects of the JOIN command is that it clears
2333 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2334 * to a WPA/WPA2 access point will therefore kill the data-path.
2335 * Currently the only valid scenario for JOIN during association
2336 * is on roaming, in which case we will also be given new keys.
2337 * Keep the below message for now, unless it starts bothering
2338 * users who really like to roam a lot :)
2339 */
2340 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2341 wl1271_info("JOIN while associated.");
2342
2343 /* clear encryption type */
2344 wlvif->encryption_type = KEY_NONE;
2345
2346 if (set_assoc)
2347 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2348
2349 if (is_ibss)
2350 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2351 else
2352 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2353 if (ret < 0)
2354 goto out;
2355
2356 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2357 goto out;
2358
2359 /*
2360 * The join command disables the keep-alive mode, shuts down its process,
2361 * and also clears the template config, so we need to reset it all after
2362 * the join. The acx_aid starts the keep-alive process, and the order
2363 * of the commands below is relevant.
2364 */
2365 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2366 if (ret < 0)
2367 goto out;
2368
2369 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2370 if (ret < 0)
2371 goto out;
2372
2373 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2374 if (ret < 0)
2375 goto out;
2376
2377 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2378 CMD_TEMPL_KLV_IDX_NULL_DATA,
2379 ACX_KEEP_ALIVE_TPL_VALID);
2380 if (ret < 0)
2381 goto out;
2382
2383 out:
2384 return ret;
2385 }
2386
2387 static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2388 {
2389 int ret;
2390
2391 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2392 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2393
2394 wl12xx_cmd_stop_channel_switch(wl);
2395 ieee80211_chswitch_done(vif, false);
2396 }
2397
2398 /* to stop listening to a channel, we disconnect */
2399 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
2400 if (ret < 0)
2401 goto out;
2402
2403 /* reset TX security counters on a clean disconnect */
2404 wlvif->tx_security_last_seq_lsb = 0;
2405 wlvif->tx_security_seq = 0;
2406
2407 out:
2408 return ret;
2409 }
2410
2411 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2412 {
2413 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2414 wlvif->rate_set = wlvif->basic_rate_set;
2415 }
2416
2417 static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2418 bool idle)
2419 {
2420 int ret;
2421 bool cur_idle = !test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
2422
2423 if (idle == cur_idle)
2424 return 0;
2425
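/*
 * Going idle: stop the device role (croc), drop to the minimum rate and
 * invalidate the keep-alive template. Leaving idle: stop any sched scan
 * (the current fw only runs it while idle) and restart the device role.
 */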
2426 if (idle) {
2427 /* no need to croc if we weren't busy (e.g. during boot) */
2428 if (wl12xx_dev_role_started(wlvif)) {
2429 ret = wl12xx_stop_dev(wl, wlvif);
2430 if (ret < 0)
2431 goto out;
2432 }
2433 wlvif->rate_set =
2434 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2435 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2436 if (ret < 0)
2437 goto out;
2438 ret = wl1271_acx_keep_alive_config(
2439 wl, wlvif, CMD_TEMPL_KLV_IDX_NULL_DATA,
2440 ACX_KEEP_ALIVE_TPL_INVALID);
2441 if (ret < 0)
2442 goto out;
2443 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
2444 } else {
2445 /* The current firmware only supports sched_scan in idle */
2446 if (wl->sched_scanning) {
2447 wl1271_scan_sched_scan_stop(wl);
2448 ieee80211_sched_scan_stopped(wl->hw);
2449 }
2450
2451 ret = wl12xx_start_dev(wl, wlvif);
2452 if (ret < 0)
2453 goto out;
2454 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
2455 }
2456
2457 out:
2458 return ret;
2459 }
2460
2461 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2462 struct ieee80211_conf *conf, u32 changed)
2463 {
2464 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2465 int channel, ret;
2466
2467 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
2468
2469 /* if the channel changes while joined, join again */
2470 if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
2471 ((wlvif->band != conf->channel->band) ||
2472 (wlvif->channel != channel))) {
2473 /* send all pending packets */
2474 wl1271_tx_work_locked(wl);
2475 wlvif->band = conf->channel->band;
2476 wlvif->channel = channel;
2477
2478 if (!is_ap) {
2479 /*
2480 * FIXME: the mac80211 should really provide a fixed
2481 * rate to use here. for now, just use the smallest
2482 * possible rate for the band as a fixed rate for
2483 * association frames and other control messages.
2484 */
2485 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2486 wl1271_set_band_rate(wl, wlvif);
2487
2488 wlvif->basic_rate =
2489 wl1271_tx_min_rate_get(wl,
2490 wlvif->basic_rate_set);
2491 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2492 if (ret < 0)
2493 wl1271_warning("rate policy for channel "
2494 "failed %d", ret);
2495
2496 /*
2497 * change the ROC channel. do it only if we are
2498 * not idle. otherwise, CROC will be called
2499 * anyway.
2500 */
2501 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED,
2502 &wlvif->flags) &&
2503 wl12xx_dev_role_started(wlvif) &&
2504 !(conf->flags & IEEE80211_CONF_IDLE)) {
2505 ret = wl12xx_stop_dev(wl, wlvif);
2506 if (ret < 0)
2507 return ret;
2508
2509 ret = wl12xx_start_dev(wl, wlvif);
2510 if (ret < 0)
2511 return ret;
2512 }
2513 }
2514 }
2515
2516 if ((changed & IEEE80211_CONF_CHANGE_PS) && !is_ap) {
2517
2518 if ((conf->flags & IEEE80211_CONF_PS) &&
2519 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
2520 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
2521
2522 int ps_mode;
2523 char *ps_mode_str;
2524
2525 if (wl->conf.conn.forced_ps) {
2526 ps_mode = STATION_POWER_SAVE_MODE;
2527 ps_mode_str = "forced";
2528 } else {
2529 ps_mode = STATION_AUTO_PS_MODE;
2530 ps_mode_str = "auto";
2531 }
2532
2533 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
2534
2535 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
2536
2537 if (ret < 0)
2538 wl1271_warning("enter %s ps failed %d",
2539 ps_mode_str, ret);
2540
2541 } else if (!(conf->flags & IEEE80211_CONF_PS) &&
2542 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
2543
2544 wl1271_debug(DEBUG_PSM, "auto ps disabled");
2545
2546 ret = wl1271_ps_set_mode(wl, wlvif,
2547 STATION_ACTIVE_MODE);
2548 if (ret < 0)
2549 wl1271_warning("exit auto ps failed %d", ret);
2550 }
2551 }
2552
2553 if (conf->power_level != wlvif->power_level) {
2554 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2555 if (ret < 0)
2556 return ret;
2557
2558 wlvif->power_level = conf->power_level;
2559 }
2560
2561 return 0;
2562 }
2563
2564 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2565 {
2566 struct wl1271 *wl = hw->priv;
2567 struct wl12xx_vif *wlvif;
2568 struct ieee80211_conf *conf = &hw->conf;
2569 int channel, ret = 0;
2570
2571 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
2572
2573 wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
2574 " changed 0x%x",
2575 channel,
2576 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
2577 conf->power_level,
2578 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
2579 changed);
2580
2581 /*
2582 * mac80211 will go to idle nearly immediately after transmitting some
2583 * frames, such as the deauth. To make sure those frames reach the air,
2584 * wait here until the TX queue is fully flushed.
2585 */
2586 if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
2587 (conf->flags & IEEE80211_CONF_IDLE))
2588 wl1271_tx_flush(wl);
2589
2590 mutex_lock(&wl->mutex);
2591
2592 /* we support configuring the channel and band even while off */
2593 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2594 wl->band = conf->channel->band;
2595 wl->channel = channel;
2596 }
2597
2598 if (changed & IEEE80211_CONF_CHANGE_POWER)
2599 wl->power_level = conf->power_level;
2600
2601 if (unlikely(wl->state == WL1271_STATE_OFF))
2602 goto out;
2603
2604 ret = wl1271_ps_elp_wakeup(wl);
2605 if (ret < 0)
2606 goto out;
2607
2608 /* configure each interface */
2609 wl12xx_for_each_wlvif(wl, wlvif) {
2610 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
2611 if (ret < 0)
2612 goto out_sleep;
2613 }
2614
2615 out_sleep:
2616 wl1271_ps_elp_sleep(wl);
2617
2618 out:
2619 mutex_unlock(&wl->mutex);
2620
2621 return ret;
2622 }
2623
2624 struct wl1271_filter_params {
2625 bool enabled;
2626 int mc_list_length;
2627 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
2628 };
2629
2630 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
2631 struct netdev_hw_addr_list *mc_list)
2632 {
2633 struct wl1271_filter_params *fp;
2634 struct netdev_hw_addr *ha;
2635 struct wl1271 *wl = hw->priv;
2636
2637 if (unlikely(wl->state == WL1271_STATE_OFF))
2638 return 0;
2639
2640 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
2641 if (!fp) {
2642 wl1271_error("Out of memory setting filters.");
2643 return 0;
2644 }
2645
2646 /* update multicast filtering parameters */
2647 fp->mc_list_length = 0;
2648 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
2649 fp->enabled = false;
2650 } else {
2651 fp->enabled = true;
2652 netdev_hw_addr_list_for_each(ha, mc_list) {
2653 memcpy(fp->mc_list[fp->mc_list_length],
2654 ha->addr, ETH_ALEN);
2655 fp->mc_list_length++;
2656 }
2657 }
2658
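/*
 * The allocated filter params are passed to
 * wl1271_op_configure_filter() through the u64 "multicast" argument,
 * which casts them back to a pointer and frees them.
 */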
2659 return (u64)(unsigned long)fp;
2660 }
2661
2662 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
2663 FIF_ALLMULTI | \
2664 FIF_FCSFAIL | \
2665 FIF_BCN_PRBRESP_PROMISC | \
2666 FIF_CONTROL | \
2667 FIF_OTHER_BSS)
2668
2669 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
2670 unsigned int changed,
2671 unsigned int *total, u64 multicast)
2672 {
2673 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
2674 struct wl1271 *wl = hw->priv;
2675 struct wl12xx_vif *wlvif;
2676
2677 int ret;
2678
2679 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
2680 " total %x", changed, *total);
2681
2682 mutex_lock(&wl->mutex);
2683
2684 *total &= WL1271_SUPPORTED_FILTERS;
2685 changed &= WL1271_SUPPORTED_FILTERS;
2686
2687 if (unlikely(wl->state == WL1271_STATE_OFF))
2688 goto out;
2689
2690 ret = wl1271_ps_elp_wakeup(wl);
2691 if (ret < 0)
2692 goto out;
2693
2694 wl12xx_for_each_wlvif(wl, wlvif) {
2695 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
2696 if (*total & FIF_ALLMULTI)
2697 ret = wl1271_acx_group_address_tbl(wl, wlvif,
2698 false,
2699 NULL, 0);
2700 else if (fp)
2701 ret = wl1271_acx_group_address_tbl(wl, wlvif,
2702 fp->enabled,
2703 fp->mc_list,
2704 fp->mc_list_length);
2705 if (ret < 0)
2706 goto out_sleep;
2707 }
2708 }
2709
2710 /*
2711 * the fw doesn't provide an API to configure the filters. Instead,
2712 * the filter configuration is based on the active roles / ROC
2713 * state.
2714 */
2715
2716 out_sleep:
2717 wl1271_ps_elp_sleep(wl);
2718
2719 out:
2720 mutex_unlock(&wl->mutex);
2721 kfree(fp);
2722 }
2723
2724 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2725 u8 id, u8 key_type, u8 key_size,
2726 const u8 *key, u8 hlid, u32 tx_seq_32,
2727 u16 tx_seq_16)
2728 {
2729 struct wl1271_ap_key *ap_key;
2730 int i;
2731
2732 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
2733
2734 if (key_size > MAX_KEY_SIZE)
2735 return -EINVAL;
2736
2737 /*
2738 * Find next free entry in ap_keys. Also check we are not replacing
2739 * an existing key.
2740 */
2741 for (i = 0; i < MAX_NUM_KEYS; i++) {
2742 if (wlvif->ap.recorded_keys[i] == NULL)
2743 break;
2744
2745 if (wlvif->ap.recorded_keys[i]->id == id) {
2746 wl1271_warning("trying to record key replacement");
2747 return -EINVAL;
2748 }
2749 }
2750
2751 if (i == MAX_NUM_KEYS)
2752 return -EBUSY;
2753
2754 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
2755 if (!ap_key)
2756 return -ENOMEM;
2757
2758 ap_key->id = id;
2759 ap_key->key_type = key_type;
2760 ap_key->key_size = key_size;
2761 memcpy(ap_key->key, key, key_size);
2762 ap_key->hlid = hlid;
2763 ap_key->tx_seq_32 = tx_seq_32;
2764 ap_key->tx_seq_16 = tx_seq_16;
2765
2766 wlvif->ap.recorded_keys[i] = ap_key;
2767 return 0;
2768 }
2769
2770 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2771 {
2772 int i;
2773
2774 for (i = 0; i < MAX_NUM_KEYS; i++) {
2775 kfree(wlvif->ap.recorded_keys[i]);
2776 wlvif->ap.recorded_keys[i] = NULL;
2777 }
2778 }
2779
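/*
 * Push the keys recorded before the AP role was started (see
 * wl1271_record_ap_key()) down to the fw and free the recorded list.
 * A key recorded with an invalid hlid is installed on the broadcast
 * hlid.
 */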
2780 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2781 {
2782 int i, ret = 0;
2783 struct wl1271_ap_key *key;
2784 bool wep_key_added = false;
2785
2786 for (i = 0; i < MAX_NUM_KEYS; i++) {
2787 u8 hlid;
2788 if (wlvif->ap.recorded_keys[i] == NULL)
2789 break;
2790
2791 key = wlvif->ap.recorded_keys[i];
2792 hlid = key->hlid;
2793 if (hlid == WL12XX_INVALID_LINK_ID)
2794 hlid = wlvif->ap.bcast_hlid;
2795
2796 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
2797 key->id, key->key_type,
2798 key->key_size, key->key,
2799 hlid, key->tx_seq_32,
2800 key->tx_seq_16);
2801 if (ret < 0)
2802 goto out;
2803
2804 if (key->key_type == KEY_WEP)
2805 wep_key_added = true;
2806 }
2807
2808 if (wep_key_added) {
2809 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
2810 wlvif->ap.bcast_hlid);
2811 if (ret < 0)
2812 goto out;
2813 }
2814
2815 out:
2816 wl1271_free_ap_keys(wl, wlvif);
2817 return ret;
2818 }
2819
2820 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2821 u16 action, u8 id, u8 key_type,
2822 u8 key_size, const u8 *key, u32 tx_seq_32,
2823 u16 tx_seq_16, struct ieee80211_sta *sta)
2824 {
2825 int ret;
2826 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2827
2828 /*
2829 * A role set to GEM cipher requires different Tx settings (namely
2830 * spare blocks). Record when we are in this mode so the HW can adjust.
2831 */
2832 if (key_type == KEY_GEM) {
2833 if (action == KEY_ADD_OR_REPLACE)
2834 wlvif->is_gem = true;
2835 else if (action == KEY_REMOVE)
2836 wlvif->is_gem = false;
2837 }
2838
2839 if (is_ap) {
2840 struct wl1271_station *wl_sta;
2841 u8 hlid;
2842
2843 if (sta) {
2844 wl_sta = (struct wl1271_station *)sta->drv_priv;
2845 hlid = wl_sta->hlid;
2846 } else {
2847 hlid = wlvif->ap.bcast_hlid;
2848 }
2849
2850 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
2851 /*
2852 * We do not support removing keys after AP shutdown.
2853 * Pretend we do to make mac80211 happy.
2854 */
2855 if (action != KEY_ADD_OR_REPLACE)
2856 return 0;
2857
2858 ret = wl1271_record_ap_key(wl, wlvif, id,
2859 key_type, key_size,
2860 key, hlid, tx_seq_32,
2861 tx_seq_16);
2862 } else {
2863 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
2864 id, key_type, key_size,
2865 key, hlid, tx_seq_32,
2866 tx_seq_16);
2867 }
2868
2869 if (ret < 0)
2870 return ret;
2871 } else {
2872 const u8 *addr;
2873 static const u8 bcast_addr[ETH_ALEN] = {
2874 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2875 };
2876
2877 addr = sta ? sta->addr : bcast_addr;
2878
2879 if (is_zero_ether_addr(addr)) {
2880 /* We don't support TX-only encryption */
2881 return -EOPNOTSUPP;
2882 }
2883
2884 /* The wl1271 does not allow removing unicast keys - they
2885 will be cleared automatically on the next CMD_JOIN. Ignore the
2886 request silently, as we don't want mac80211 to emit
2887 an error message. */
2888 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
2889 return 0;
2890
2891 /* don't remove key if hlid was already deleted */
2892 if (action == KEY_REMOVE &&
2893 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
2894 return 0;
2895
2896 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
2897 id, key_type, key_size,
2898 key, addr, tx_seq_32,
2899 tx_seq_16);
2900 if (ret < 0)
2901 return ret;
2902
2903 /* the default WEP key needs to be configured at least once */
2904 if (key_type == KEY_WEP) {
2905 ret = wl12xx_cmd_set_default_wep_key(wl,
2906 wlvif->default_key,
2907 wlvif->sta.hlid);
2908 if (ret < 0)
2909 return ret;
2910 }
2911 }
2912
2913 return 0;
2914 }
2915
2916 static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2917 struct ieee80211_vif *vif,
2918 struct ieee80211_sta *sta,
2919 struct ieee80211_key_conf *key_conf)
2920 {
2921 struct wl1271 *wl = hw->priv;
2922 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2923 int ret;
2924 u32 tx_seq_32 = 0;
2925 u16 tx_seq_16 = 0;
2926 u8 key_type;
2927
2928 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
2929
2930 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
2931 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
2932 key_conf->cipher, key_conf->keyidx,
2933 key_conf->keylen, key_conf->flags);
2934 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
2935
2936 mutex_lock(&wl->mutex);
2937
2938 if (unlikely(wl->state == WL1271_STATE_OFF)) {
2939 ret = -EAGAIN;
2940 goto out_unlock;
2941 }
2942
2943 ret = wl1271_ps_elp_wakeup(wl);
2944 if (ret < 0)
2945 goto out_unlock;
2946
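/*
 * TKIP, CCMP and GEM keys are installed together with the current TX
 * sequence counter, presumably so the PN continues across key
 * reconfiguration. The WL1271_TX_SECURITY_* macros are assumed to split
 * the counter roughly as:
 *
 *   tx_seq_16 = (u16)(wlvif->tx_security_seq & 0xffff);
 *   tx_seq_32 = (u32)(wlvif->tx_security_seq >> 16);
 */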
2947 switch (key_conf->cipher) {
2948 case WLAN_CIPHER_SUITE_WEP40:
2949 case WLAN_CIPHER_SUITE_WEP104:
2950 key_type = KEY_WEP;
2951
2952 key_conf->hw_key_idx = key_conf->keyidx;
2953 break;
2954 case WLAN_CIPHER_SUITE_TKIP:
2955 key_type = KEY_TKIP;
2956
2957 key_conf->hw_key_idx = key_conf->keyidx;
2958 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
2959 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
2960 break;
2961 case WLAN_CIPHER_SUITE_CCMP:
2962 key_type = KEY_AES;
2963
2964 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2965 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
2966 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
2967 break;
2968 case WL1271_CIPHER_SUITE_GEM:
2969 key_type = KEY_GEM;
2970 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
2971 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
2972 break;
2973 default:
2974 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
2975
2976 ret = -EOPNOTSUPP;
2977 goto out_sleep;
2978 }
2979
2980 switch (cmd) {
2981 case SET_KEY:
2982 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
2983 key_conf->keyidx, key_type,
2984 key_conf->keylen, key_conf->key,
2985 tx_seq_32, tx_seq_16, sta);
2986 if (ret < 0) {
2987 wl1271_error("Could not add or replace key");
2988 goto out_sleep;
2989 }
2990
2991 /*
2992 * reconfigure the arp response if the unicast (or common)
2993 * encryption key type was changed
2994 */
2995 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
2996 (sta || key_type == KEY_WEP) &&
2997 wlvif->encryption_type != key_type) {
2998 wlvif->encryption_type = key_type;
2999 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3000 if (ret < 0) {
3001 wl1271_warning("build arp rsp failed: %d", ret);
3002 goto out_sleep;
3003 }
3004 }
3005 break;
3006
3007 case DISABLE_KEY:
3008 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3009 key_conf->keyidx, key_type,
3010 key_conf->keylen, key_conf->key,
3011 0, 0, sta);
3012 if (ret < 0) {
3013 wl1271_error("Could not remove key");
3014 goto out_sleep;
3015 }
3016 break;
3017
3018 default:
3019 wl1271_error("Unsupported key cmd 0x%x", cmd);
3020 ret = -EOPNOTSUPP;
3021 break;
3022 }
3023
3024 out_sleep:
3025 wl1271_ps_elp_sleep(wl);
3026
3027 out_unlock:
3028 mutex_unlock(&wl->mutex);
3029
3030 return ret;
3031 }
3032
3033 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3034 struct ieee80211_vif *vif,
3035 struct cfg80211_scan_request *req)
3036 {
3037 struct wl1271 *wl = hw->priv;
3038 int ret;
3039 u8 *ssid = NULL;
3040 size_t len = 0;
3041
3042 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3043
3044 if (req->n_ssids) {
3045 ssid = req->ssids[0].ssid;
3046 len = req->ssids[0].ssid_len;
3047 }
3048
3049 mutex_lock(&wl->mutex);
3050
3051 if (wl->state == WL1271_STATE_OFF) {
3052 /*
3053 * We cannot return -EBUSY here because cfg80211 will expect
3054 * a call to ieee80211_scan_completed if we do - in this case
3055 * there won't be any call.
3056 */
3057 ret = -EAGAIN;
3058 goto out;
3059 }
3060
3061 ret = wl1271_ps_elp_wakeup(wl);
3062 if (ret < 0)
3063 goto out;
3064
3065 /* fail if there is any role in ROC */
3066 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3067 /* don't allow scanning right now */
3068 ret = -EBUSY;
3069 goto out_sleep;
3070 }
3071
3072 ret = wl1271_scan(hw->priv, vif, ssid, len, req);
3073 out_sleep:
3074 wl1271_ps_elp_sleep(wl);
3075 out:
3076 mutex_unlock(&wl->mutex);
3077
3078 return ret;
3079 }
3080
3081 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3082 struct ieee80211_vif *vif)
3083 {
3084 struct wl1271 *wl = hw->priv;
3085 int ret;
3086
3087 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3088
3089 mutex_lock(&wl->mutex);
3090
3091 if (wl->state == WL1271_STATE_OFF)
3092 goto out;
3093
3094 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3095 goto out;
3096
3097 ret = wl1271_ps_elp_wakeup(wl);
3098 if (ret < 0)
3099 goto out;
3100
3101 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3102 ret = wl1271_scan_stop(wl);
3103 if (ret < 0)
3104 goto out_sleep;
3105 }
3106
3107 /*
3108 * Rearm the tx watchdog just before idling scan. This
3109 * prevents just-finished scans from triggering the watchdog
3110 */
3111 wl12xx_rearm_tx_watchdog_locked(wl);
3112
3113 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3114 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3115 wl->scan_vif = NULL;
3116 wl->scan.req = NULL;
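/* report the cancelled scan back to mac80211 as aborted */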
3117 ieee80211_scan_completed(wl->hw, true);
3118
3119 out_sleep:
3120 wl1271_ps_elp_sleep(wl);
3121 out:
3122 mutex_unlock(&wl->mutex);
3123
3124 cancel_delayed_work_sync(&wl->scan_complete_work);
3125 }
3126
3127 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3128 struct ieee80211_vif *vif,
3129 struct cfg80211_sched_scan_request *req,
3130 struct ieee80211_sched_scan_ies *ies)
3131 {
3132 struct wl1271 *wl = hw->priv;
3133 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3134 int ret;
3135
3136 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3137
3138 mutex_lock(&wl->mutex);
3139
3140 if (wl->state == WL1271_STATE_OFF) {
3141 ret = -EAGAIN;
3142 goto out;
3143 }
3144
3145 ret = wl1271_ps_elp_wakeup(wl);
3146 if (ret < 0)
3147 goto out;
3148
3149 ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
3150 if (ret < 0)
3151 goto out_sleep;
3152
3153 ret = wl1271_scan_sched_scan_start(wl, wlvif);
3154 if (ret < 0)
3155 goto out_sleep;
3156
3157 wl->sched_scanning = true;
3158
3159 out_sleep:
3160 wl1271_ps_elp_sleep(wl);
3161 out:
3162 mutex_unlock(&wl->mutex);
3163 return ret;
3164 }
3165
3166 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3167 struct ieee80211_vif *vif)
3168 {
3169 struct wl1271 *wl = hw->priv;
3170 int ret;
3171
3172 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3173
3174 mutex_lock(&wl->mutex);
3175
3176 if (wl->state == WL1271_STATE_OFF)
3177 goto out;
3178
3179 ret = wl1271_ps_elp_wakeup(wl);
3180 if (ret < 0)
3181 goto out;
3182
3183 wl1271_scan_sched_scan_stop(wl);
3184
3185 wl1271_ps_elp_sleep(wl);
3186 out:
3187 mutex_unlock(&wl->mutex);
3188 }
3189
3190 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3191 {
3192 struct wl1271 *wl = hw->priv;
3193 int ret = 0;
3194
3195 mutex_lock(&wl->mutex);
3196
3197 if (unlikely(wl->state == WL1271_STATE_OFF)) {
3198 ret = -EAGAIN;
3199 goto out;
3200 }
3201
3202 ret = wl1271_ps_elp_wakeup(wl);
3203 if (ret < 0)
3204 goto out;
3205
3206 ret = wl1271_acx_frag_threshold(wl, value);
3207 if (ret < 0)
3208 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3209
3210 wl1271_ps_elp_sleep(wl);
3211
3212 out:
3213 mutex_unlock(&wl->mutex);
3214
3215 return ret;
3216 }
3217
3218 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3219 {
3220 struct wl1271 *wl = hw->priv;
3221 struct wl12xx_vif *wlvif;
3222 int ret = 0;
3223
3224 mutex_lock(&wl->mutex);
3225
3226 if (unlikely(wl->state == WL1271_STATE_OFF)) {
3227 ret = -EAGAIN;
3228 goto out;
3229 }
3230
3231 ret = wl1271_ps_elp_wakeup(wl);
3232 if (ret < 0)
3233 goto out;
3234
3235 wl12xx_for_each_wlvif(wl, wlvif) {
3236 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3237 if (ret < 0)
3238 wl1271_warning("set rts threshold failed: %d", ret);
3239 }
3240 wl1271_ps_elp_sleep(wl);
3241
3242 out:
3243 mutex_unlock(&wl->mutex);
3244
3245 return ret;
3246 }
3247
3248 static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb,
3249 int offset)
3250 {
3251 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3252 u8 ssid_len;
3253 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
3254 skb->len - offset);
3255
3256 if (!ptr) {
3257 wl1271_error("No SSID in IEs!");
3258 return -ENOENT;
3259 }
3260
3261 ssid_len = ptr[1];
3262 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
3263 wl1271_error("SSID is too long!");
3264 return -EINVAL;
3265 }
3266
3267 wlvif->ssid_len = ssid_len;
3268 memcpy(wlvif->ssid, ptr+2, ssid_len);
3269 return 0;
3270 }
3271
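/*
 * Strip a single IE from a template skb in place: the data following
 * the IE is shifted down over it and the skb is trimmed by the IE's
 * total length (header + payload).
 */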
3272 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3273 {
3274 int len;
3275 const u8 *next, *end = skb->data + skb->len;
3276 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3277 skb->len - ieoffset);
3278 if (!ie)
3279 return;
3280 len = ie[1] + 2;
3281 next = ie + len;
3282 memmove(ie, next, end - next);
3283 skb_trim(skb, skb->len - len);
3284 }
3285
3286 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3287 unsigned int oui, u8 oui_type,
3288 int ieoffset)
3289 {
3290 int len;
3291 const u8 *next, *end = skb->data + skb->len;
3292 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3293 skb->data + ieoffset,
3294 skb->len - ieoffset);
3295 if (!ie)
3296 return;
3297 len = ie[1] + 2;
3298 next = ie + len;
3299 memmove(ie, next, end - next);
3300 skb_trim(skb, skb->len - len);
3301 }
3302
3303 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3304 struct ieee80211_vif *vif)
3305 {
3306 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3307 struct sk_buff *skb;
3308 int ret;
3309
3310 skb = ieee80211_proberesp_get(wl->hw, vif);
3311 if (!skb)
3312 return -EOPNOTSUPP;
3313
3314 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3315 CMD_TEMPL_AP_PROBE_RESPONSE,
3316 skb->data,
3317 skb->len, 0,
3318 rates);
3319
3320 dev_kfree_skb(skb);
3321 return ret;
3322 }
3323
3324 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3325 struct ieee80211_vif *vif,
3326 u8 *probe_rsp_data,
3327 size_t probe_rsp_len,
3328 u32 rates)
3329 {
3330 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3331 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3332 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3333 int ssid_ie_offset, ie_offset, templ_len;
3334 const u8 *ptr;
3335
3336 /* no need to change probe response if the SSID is set correctly */
3337 if (wlvif->ssid_len > 0)
3338 return wl1271_cmd_template_set(wl, wlvif->role_id,
3339 CMD_TEMPL_AP_PROBE_RESPONSE,
3340 probe_rsp_data,
3341 probe_rsp_len, 0,
3342 rates);
3343
3344 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3345 wl1271_error("probe_rsp template too big");
3346 return -EINVAL;
3347 }
3348
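/*
 * The legacy path rebuilds the template as
 *   [ hdr + IEs up to the SSID IE ][ SSID from bss_conf ][ remaining IEs ]
 * presumably because this path is taken when the beacon carries a
 * hidden (zero-length) SSID.
 */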
3349 /* start searching from IE offset */
3350 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3351
3352 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3353 probe_rsp_len - ie_offset);
3354 if (!ptr) {
3355 wl1271_error("No SSID in beacon!");
3356 return -EINVAL;
3357 }
3358
3359 ssid_ie_offset = ptr - probe_rsp_data;
3360 ptr += (ptr[1] + 2);
3361
3362 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3363
3364 /* insert SSID from bss_conf */
3365 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3366 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3367 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3368 bss_conf->ssid, bss_conf->ssid_len);
3369 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3370
3371 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3372 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3373 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3374
3375 return wl1271_cmd_template_set(wl, wlvif->role_id,
3376 CMD_TEMPL_AP_PROBE_RESPONSE,
3377 probe_rsp_templ,
3378 templ_len, 0,
3379 rates);
3380 }
3381
3382 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3383 struct ieee80211_vif *vif,
3384 struct ieee80211_bss_conf *bss_conf,
3385 u32 changed)
3386 {
3387 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3388 int ret = 0;
3389
3390 if (changed & BSS_CHANGED_ERP_SLOT) {
3391 if (bss_conf->use_short_slot)
3392 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3393 else
3394 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3395 if (ret < 0) {
3396 wl1271_warning("Set slot time failed %d", ret);
3397 goto out;
3398 }
3399 }
3400
3401 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3402 if (bss_conf->use_short_preamble)
3403 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3404 else
3405 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3406 }
3407
3408 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3409 if (bss_conf->use_cts_prot)
3410 ret = wl1271_acx_cts_protect(wl, wlvif,
3411 CTSPROTECT_ENABLE);
3412 else
3413 ret = wl1271_acx_cts_protect(wl, wlvif,
3414 CTSPROTECT_DISABLE);
3415 if (ret < 0) {
3416 wl1271_warning("Set ctsprotect failed %d", ret);
3417 goto out;
3418 }
3419 }
3420
3421 out:
3422 return ret;
3423 }
3424
3425 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3426 struct ieee80211_vif *vif,
3427 struct ieee80211_bss_conf *bss_conf,
3428 u32 changed)
3429 {
3430 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3431 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3432 int ret = 0;
3433
3434 if ((changed & BSS_CHANGED_BEACON_INT)) {
3435 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
3436 bss_conf->beacon_int);
3437
3438 wlvif->beacon_int = bss_conf->beacon_int;
3439 }
3440
3441 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3442 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3443 if (!wl1271_ap_set_probe_resp_tmpl(wl, rate, vif)) {
3444 wl1271_debug(DEBUG_AP, "probe response updated");
3445 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3446 }
3447 }
3448
3449 if ((changed & BSS_CHANGED_BEACON)) {
3450 struct ieee80211_hdr *hdr;
3451 u32 min_rate;
3452 int ieoffset = offsetof(struct ieee80211_mgmt,
3453 u.beacon.variable);
3454 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3455 u16 tmpl_id;
3456
3457 if (!beacon) {
3458 ret = -EINVAL;
3459 goto out;
3460 }
3461
3462 wl1271_debug(DEBUG_MASTER, "beacon updated");
3463
3464 ret = wl1271_ssid_set(vif, beacon, ieoffset);
3465 if (ret < 0) {
3466 dev_kfree_skb(beacon);
3467 goto out;
3468 }
3469 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3470 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3471 CMD_TEMPL_BEACON;
3472 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3473 beacon->data,
3474 beacon->len, 0,
3475 min_rate);
3476 if (ret < 0) {
3477 dev_kfree_skb(beacon);
3478 goto out;
3479 }
3480
3481 /*
3482 * In case userspace has already set a probe-resp template
3483 * explicitly, don't use the beacon data.
3484 */
3485 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3486 goto end_bcn;
3487
3488 /* remove TIM ie from probe response */
3489 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3490
3491 /*
3492 * remove p2p ie from probe response.
3493 * the fw responds to probe requests that don't include
3494 * the p2p ie. probe requests with the p2p ie will be passed up,
3495 * and will be answered by the supplicant (the spec
3496 * forbids including the p2p ie when responding to probe
3497 * requests that didn't include it).
3498 */
3499 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3500 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
3501
3502 hdr = (struct ieee80211_hdr *) beacon->data;
3503 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3504 IEEE80211_STYPE_PROBE_RESP);
3505 if (is_ap)
3506 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3507 beacon->data,
3508 beacon->len,
3509 min_rate);
3510 else
3511 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3512 CMD_TEMPL_PROBE_RESPONSE,
3513 beacon->data,
3514 beacon->len, 0,
3515 min_rate);
3516 end_bcn:
3517 dev_kfree_skb(beacon);
3518 if (ret < 0)
3519 goto out;
3520 }
3521
3522 out:
3523 if (ret != 0)
3524 wl1271_error("beacon info change failed: %d", ret);
3525 return ret;
3526 }
3527
3528 /* AP mode changes */
3529 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3530 struct ieee80211_vif *vif,
3531 struct ieee80211_bss_conf *bss_conf,
3532 u32 changed)
3533 {
3534 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3535 int ret = 0;
3536
3537 if ((changed & BSS_CHANGED_BASIC_RATES)) {
3538 u32 rates = bss_conf->basic_rates;
3539
3540 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
3541 wlvif->band);
3542 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
3543 wlvif->basic_rate_set);
3544
3545 ret = wl1271_init_ap_rates(wl, wlvif);
3546 if (ret < 0) {
3547 wl1271_error("AP rate policy change failed %d", ret);
3548 goto out;
3549 }
3550
3551 ret = wl1271_ap_init_templates(wl, vif);
3552 if (ret < 0)
3553 goto out;
3554 }
3555
3556 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
3557 if (ret < 0)
3558 goto out;
3559
3560 if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
3561 if (bss_conf->enable_beacon) {
3562 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3563 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
3564 if (ret < 0)
3565 goto out;
3566
3567 ret = wl1271_ap_init_hwenc(wl, wlvif);
3568 if (ret < 0)
3569 goto out;
3570
3571 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3572 wl1271_debug(DEBUG_AP, "started AP");
3573 }
3574 } else {
3575 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3576 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
3577 if (ret < 0)
3578 goto out;
3579
3580 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3581 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
3582 &wlvif->flags);
3583 wl1271_debug(DEBUG_AP, "stopped AP");
3584 }
3585 }
3586 }
3587
3588 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
3589 if (ret < 0)
3590 goto out;
3591
3592 /* Handle HT information change */
3593 if ((changed & BSS_CHANGED_HT) &&
3594 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
3595 ret = wl1271_acx_set_ht_information(wl, wlvif,
3596 bss_conf->ht_operation_mode);
3597 if (ret < 0) {
3598 wl1271_warning("Set ht information failed %d", ret);
3599 goto out;
3600 }
3601 }
3602
3603 out:
3604 return;
3605 }
3606
3607 /* STA/IBSS mode changes */
3608 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3609 struct ieee80211_vif *vif,
3610 struct ieee80211_bss_conf *bss_conf,
3611 u32 changed)
3612 {
3613 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3614 bool do_join = false, set_assoc = false;
3615 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
3616 bool ibss_joined = false;
3617 u32 sta_rate_set = 0;
3618 int ret;
3619 struct ieee80211_sta *sta;
3620 bool sta_exists = false;
3621 struct ieee80211_sta_ht_cap sta_ht_cap;
3622
3623 if (is_ibss) {
3624 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
3625 changed);
3626 if (ret < 0)
3627 goto out;
3628 }
3629
3630 if (changed & BSS_CHANGED_IBSS) {
3631 if (bss_conf->ibss_joined) {
3632 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
3633 ibss_joined = true;
3634 } else {
3635 if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED,
3636 &wlvif->flags))
3637 wl1271_unjoin(wl, wlvif);
3638 }
3639 }
3640
3641 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
3642 do_join = true;
3643
3644 /* Need to update the SSID (for filtering etc) */
3645 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
3646 do_join = true;
3647
3648 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
3649 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
3650 bss_conf->enable_beacon ? "enabled" : "disabled");
3651
3652 do_join = true;
3653 }
3654
3655 if (changed & BSS_CHANGED_IDLE && !is_ibss) {
3656 ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
3657 if (ret < 0)
3658 wl1271_warning("idle mode change failed %d", ret);
3659 }
3660
3661 if ((changed & BSS_CHANGED_CQM)) {
3662 bool enable = false;
3663 if (bss_conf->cqm_rssi_thold)
3664 enable = true;
3665 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
3666 bss_conf->cqm_rssi_thold,
3667 bss_conf->cqm_rssi_hyst);
3668 if (ret < 0)
3669 goto out;
3670 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
3671 }
3672
3673 if (changed & BSS_CHANGED_BSSID)
3674 if (!is_zero_ether_addr(bss_conf->bssid)) {
3675 ret = wl12xx_cmd_build_null_data(wl, wlvif);
3676 if (ret < 0)
3677 goto out;
3678
3679 ret = wl1271_build_qos_null_data(wl, vif);
3680 if (ret < 0)
3681 goto out;
3682 }
3683
3684 if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
3685 rcu_read_lock();
3686 sta = ieee80211_find_sta(vif, bss_conf->bssid);
3687 if (!sta)
3688 goto sta_not_found;
3689
3690 /* save the supp_rates of the ap */
3691 sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
3692 if (sta->ht_cap.ht_supported)
3693 sta_rate_set |=
3694 (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET);
3695 sta_ht_cap = sta->ht_cap;
3696 sta_exists = true;
3697
3698 sta_not_found:
3699 rcu_read_unlock();
3700 }
3701
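/*
 * The AP's rate set and HT capabilities are copied out here because the
 * sta pointer is only valid under rcu_read_lock(), while the values are
 * needed further down after the lock has been dropped.
 */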
3702 if ((changed & BSS_CHANGED_ASSOC)) {
3703 if (bss_conf->assoc) {
3704 u32 rates;
3705 int ieoffset;
3706 wlvif->aid = bss_conf->aid;
3707 wlvif->beacon_int = bss_conf->beacon_int;
3708 do_join = true;
3709 set_assoc = true;
3710
3711 /* Cancel connection_loss_work */
3712 cancel_delayed_work_sync(&wl->connection_loss_work);
3713
3714 /*
3715 * use basic rates from AP, and determine lowest rate
3716 * to use with control frames.
3717 */
3718 rates = bss_conf->basic_rates;
3719 wlvif->basic_rate_set =
3720 wl1271_tx_enabled_rates_get(wl, rates,
3721 wlvif->band);
3722 wlvif->basic_rate =
3723 wl1271_tx_min_rate_get(wl,
3724 wlvif->basic_rate_set);
3725 if (sta_rate_set)
3726 wlvif->rate_set =
3727 wl1271_tx_enabled_rates_get(wl,
3728 sta_rate_set,
3729 wlvif->band);
3730 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3731 if (ret < 0)
3732 goto out;
3733
3734 /*
3735 * with wl1271, we don't need to update the
3736 * beacon_int and dtim_period, because the firmware
3737 * updates them by itself when the first beacon is
3738 * received after a join.
3739 */
3740 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
3741 if (ret < 0)
3742 goto out;
3743
3744 /*
3745 * Get a template for hardware connection maintenance
3746 */
3747 dev_kfree_skb(wlvif->probereq);
3748 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
3749 wlvif,
3750 NULL);
3751 ieoffset = offsetof(struct ieee80211_mgmt,
3752 u.probe_req.variable);
3753 wl1271_ssid_set(vif, wlvif->probereq, ieoffset);
3754
3755 /* enable the connection monitoring feature */
3756 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
3757 if (ret < 0)
3758 goto out;
3759 } else {
3760 /* use defaults when not associated */
3761 bool was_assoc =
3762 !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED,
3763 &wlvif->flags);
3764 bool was_ifup =
3765 !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT,
3766 &wlvif->flags);
3767 wlvif->aid = 0;
3768
3769 /* free probe-request template */
3770 dev_kfree_skb(wlvif->probereq);
3771 wlvif->probereq = NULL;
3772
3773 /* revert back to minimum rates for the current band */
3774 wl1271_set_band_rate(wl, wlvif);
3775 wlvif->basic_rate =
3776 wl1271_tx_min_rate_get(wl,
3777 wlvif->basic_rate_set);
3778 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3779 if (ret < 0)
3780 goto out;
3781
3782 /* disable connection monitor features */
3783 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3784
3785 /* Disable the keep-alive feature */
3786 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3787 if (ret < 0)
3788 goto out;
3789
3790 /* restore the bssid filter and go to dummy bssid */
3791 if (was_assoc) {
3792 /*
3793 * we might have to disable roc, if there was
3794 * no IF_OPER_UP notification.
3795 */
3796 if (!was_ifup) {
3797 ret = wl12xx_croc(wl, wlvif->role_id);
3798 if (ret < 0)
3799 goto out;
3800 }
3801 /*
3802 * (we also need to disable roc in case of
3803 * roaming on the same channel, until we
3804 * have a better flow...)
3805 */
3806 if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
3807 ret = wl12xx_croc(wl,
3808 wlvif->dev_role_id);
3809 if (ret < 0)
3810 goto out;
3811 }
3812
3813 wl1271_unjoin(wl, wlvif);
3814 if (!bss_conf->idle)
3815 wl12xx_start_dev(wl, wlvif);
3816 }
3817 }
3818 }
3819
3820 if (changed & BSS_CHANGED_IBSS) {
3821 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
3822 bss_conf->ibss_joined);
3823
3824 if (bss_conf->ibss_joined) {
3825 u32 rates = bss_conf->basic_rates;
3826 wlvif->basic_rate_set =
3827 wl1271_tx_enabled_rates_get(wl, rates,
3828 wlvif->band);
3829 wlvif->basic_rate =
3830 wl1271_tx_min_rate_get(wl,
3831 wlvif->basic_rate_set);
3832
3833 /* by default, use 11b + OFDM rates */
3834 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
3835 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3836 if (ret < 0)
3837 goto out;
3838 }
3839 }
3840
3841 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
3842 if (ret < 0)
3843 goto out;
3844
3845 if (do_join) {
3846 ret = wl1271_join(wl, wlvif, set_assoc);
3847 if (ret < 0) {
3848 wl1271_warning("cmd join failed %d", ret);
3849 goto out;
3850 }
3851
3852 /* ROC until connected (after EAPOL exchange) */
3853 if (!is_ibss) {
3854 ret = wl12xx_roc(wl, wlvif, wlvif->role_id);
3855 if (ret < 0)
3856 goto out;
3857
3858 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
3859 wl12xx_set_authorized(wl, wlvif);
3860 }
3861 /*
3862 * stop device role if started (we might already be in
3863 * STA/IBSS role).
3864 */
3865 if (wl12xx_dev_role_started(wlvif)) {
3866 ret = wl12xx_stop_dev(wl, wlvif);
3867 if (ret < 0)
3868 goto out;
3869 }
3870 }
3871
3872 /* Handle new association with HT. Do this after join. */
3873 if (sta_exists) {
3874 if ((changed & BSS_CHANGED_HT) &&
3875 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
3876 ret = wl1271_acx_set_ht_capabilities(wl,
3877 &sta_ht_cap,
3878 true,
3879 wlvif->sta.hlid);
3880 if (ret < 0) {
3881 wl1271_warning("Set ht cap true failed %d",
3882 ret);
3883 goto out;
3884 }
3885 }
3886 /* handle new association without HT and disassociation */
3887 else if (changed & BSS_CHANGED_ASSOC) {
3888 ret = wl1271_acx_set_ht_capabilities(wl,
3889 &sta_ht_cap,
3890 false,
3891 wlvif->sta.hlid);
3892 if (ret < 0) {
3893 wl1271_warning("Set ht cap false failed %d",
3894 ret);
3895 goto out;
3896 }
3897 }
3898 }
3899
3900 /* Handle HT information change. Done after join. */
3901 if ((changed & BSS_CHANGED_HT) &&
3902 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
3903 ret = wl1271_acx_set_ht_information(wl, wlvif,
3904 bss_conf->ht_operation_mode);
3905 if (ret < 0) {
3906 wl1271_warning("Set ht information failed %d", ret);
3907 goto out;
3908 }
3909 }
3910
3911 /* Handle arp filtering. Done after join. */
3912 if ((changed & BSS_CHANGED_ARP_FILTER) ||
3913 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
3914 __be32 addr = bss_conf->arp_addr_list[0];
3915 wlvif->sta.qos = bss_conf->qos;
3916 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
3917
3918 if (bss_conf->arp_addr_cnt == 1 &&
3919 bss_conf->arp_filter_enabled) {
3920 wlvif->ip_addr = addr;
3921 /*
3922 * The template should have been configured only upon
3923 * association. However, it seems that the correct IP
3924 * isn't being set (when sending), so we have to
3925 * reconfigure the template upon every IP change.
3926 */
3927 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3928 if (ret < 0) {
3929 wl1271_warning("build arp rsp failed: %d", ret);
3930 goto out;
3931 }
3932
3933 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
3934 (ACX_ARP_FILTER_ARP_FILTERING |
3935 ACX_ARP_FILTER_AUTO_ARP),
3936 addr);
3937 } else {
3938 wlvif->ip_addr = 0;
3939 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
3940 }
3941
3942 if (ret < 0)
3943 goto out;
3944 }
3945
3946 out:
3947 return;
3948 }
3949
3950 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
3951 struct ieee80211_vif *vif,
3952 struct ieee80211_bss_conf *bss_conf,
3953 u32 changed)
3954 {
3955 struct wl1271 *wl = hw->priv;
3956 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3957 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3958 int ret;
3959
3960 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
3961 (int)changed);
3962
3963 mutex_lock(&wl->mutex);
3964
3965 if (unlikely(wl->state == WL1271_STATE_OFF))
3966 goto out;
3967
3968 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
3969 goto out;
3970
3971 ret = wl1271_ps_elp_wakeup(wl);
3972 if (ret < 0)
3973 goto out;
3974
3975 if (is_ap)
3976 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
3977 else
3978 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
3979
3980 wl1271_ps_elp_sleep(wl);
3981
3982 out:
3983 mutex_unlock(&wl->mutex);
3984 }
3985
3986 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
3987 struct ieee80211_vif *vif, u16 queue,
3988 const struct ieee80211_tx_queue_params *params)
3989 {
3990 struct wl1271 *wl = hw->priv;
3991 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3992 u8 ps_scheme;
3993 int ret = 0;
3994
3995 mutex_lock(&wl->mutex);
3996
3997 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
3998
3999 if (params->uapsd)
4000 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4001 else
4002 ps_scheme = CONF_PS_SCHEME_LEGACY;
4003
4004 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4005 goto out;
4006
4007 ret = wl1271_ps_elp_wakeup(wl);
4008 if (ret < 0)
4009 goto out;
4010
4011 /*
4012 * mac80211 configures the txop in units of 32us, while the fw
4013 * expects microseconds, hence the << 5 conversion below.
4014 */
4015 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4016 params->cw_min, params->cw_max,
4017 params->aifs, params->txop << 5);
4018 if (ret < 0)
4019 goto out_sleep;
4020
4021 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4022 CONF_CHANNEL_TYPE_EDCF,
4023 wl1271_tx_get_queue(queue),
4024 ps_scheme, CONF_ACK_POLICY_LEGACY,
4025 0, 0);
4026
4027 out_sleep:
4028 wl1271_ps_elp_sleep(wl);
4029
4030 out:
4031 mutex_unlock(&wl->mutex);
4032
4033 return ret;
4034 }
4035
4036 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4037 struct ieee80211_vif *vif)
4038 {
4039
4040 struct wl1271 *wl = hw->priv;
4041 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4042 u64 mactime = ULLONG_MAX;
4043 int ret;
4044
4045 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4046
4047 mutex_lock(&wl->mutex);
4048
4049 if (unlikely(wl->state == WL1271_STATE_OFF))
4050 goto out;
4051
4052 ret = wl1271_ps_elp_wakeup(wl);
4053 if (ret < 0)
4054 goto out;
4055
4056 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4057 if (ret < 0)
4058 goto out_sleep;
4059
4060 out_sleep:
4061 wl1271_ps_elp_sleep(wl);
4062
4063 out:
4064 mutex_unlock(&wl->mutex);
4065 return mactime;
4066 }
4067
4068 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4069 struct survey_info *survey)
4070 {
4071 struct wl1271 *wl = hw->priv;
4072 struct ieee80211_conf *conf = &hw->conf;
4073
4074 if (idx != 0)
4075 return -ENOENT;
4076
4077 survey->channel = conf->channel;
4078 survey->filled = SURVEY_INFO_NOISE_DBM;
4079 survey->noise = wl->noise;
4080
4081 return 0;
4082 }
4083
4084 static int wl1271_allocate_sta(struct wl1271 *wl,
4085 struct wl12xx_vif *wlvif,
4086 struct ieee80211_sta *sta)
4087 {
4088 struct wl1271_station *wl_sta;
4089 int ret;
4090
4091
4092 if (wl->active_sta_count >= AP_MAX_STATIONS) {
4093 wl1271_warning("could not allocate HLID - too many stations");
4094 return -EBUSY;
4095 }
4096
4097 wl_sta = (struct wl1271_station *)sta->drv_priv;
4098 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4099 if (ret < 0) {
4100 wl1271_warning("could not allocate HLID - too many links");
4101 return -EBUSY;
4102 }
4103
4104 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4105 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4106 wl->active_sta_count++;
4107 return 0;
4108 }
4109
4110 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4111 {
4112 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4113 return;
4114
4115 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4116 memset(wl->links[hlid].addr, 0, ETH_ALEN);
4117 wl->links[hlid].ba_bitmap = 0;
4118 __clear_bit(hlid, &wl->ap_ps_map);
4119 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4120 wl12xx_free_link(wl, wlvif, &hlid);
4121 wl->active_sta_count--;
4122
4123 /*
4124 * rearm the tx watchdog when the last STA is freed - give the FW a
4125 * chance to return STA-buffered packets before complaining.
4126 */
4127 if (wl->active_sta_count == 0)
4128 wl12xx_rearm_tx_watchdog_locked(wl);
4129 }
4130
4131 static int wl12xx_sta_add(struct wl1271 *wl,
4132 struct wl12xx_vif *wlvif,
4133 struct ieee80211_sta *sta)
4134 {
4135 struct wl1271_station *wl_sta;
4136 int ret = 0;
4137 u8 hlid;
4138
4139 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4140
4141 ret = wl1271_allocate_sta(wl, wlvif, sta);
4142 if (ret < 0)
4143 return ret;
4144
4145 wl_sta = (struct wl1271_station *)sta->drv_priv;
4146 hlid = wl_sta->hlid;
4147
4148 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4149 if (ret < 0)
4150 wl1271_free_sta(wl, wlvif, hlid);
4151
4152 return ret;
4153 }
4154
4155 static int wl12xx_sta_remove(struct wl1271 *wl,
4156 struct wl12xx_vif *wlvif,
4157 struct ieee80211_sta *sta)
4158 {
4159 struct wl1271_station *wl_sta;
4160 int ret = 0, id;
4161
4162 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4163
4164 wl_sta = (struct wl1271_station *)sta->drv_priv;
4165 id = wl_sta->hlid;
4166 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4167 return -EINVAL;
4168
4169 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4170 if (ret < 0)
4171 return ret;
4172
4173 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
4174 return ret;
4175 }
4176
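/*
 * Map mac80211 station state transitions onto firmware commands:
 *   AP:  NOTEXIST -> NONE         add the peer (allocates an HLID)
 *   AP:  NONE -> NOTEXIST         remove the peer (must not fail)
 *   AP:  * -> AUTHORIZED          set peer state and HT capabilities
 *   STA: * -> AUTHORIZED          mark the vif authorized via
 *                                 wl12xx_set_authorized()
 *   STA: AUTHORIZED -> ASSOC      clear the authorized flag
 * All other transitions are accepted without touching the firmware.
 */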
4177 static int wl12xx_update_sta_state(struct wl1271 *wl,
4178 struct wl12xx_vif *wlvif,
4179 struct ieee80211_sta *sta,
4180 enum ieee80211_sta_state old_state,
4181 enum ieee80211_sta_state new_state)
4182 {
4183 struct wl1271_station *wl_sta;
4184 u8 hlid;
4185 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4186 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4187 int ret;
4188
4189 wl_sta = (struct wl1271_station *)sta->drv_priv;
4190 hlid = wl_sta->hlid;
4191
4192 /* Add station (AP mode) */
4193 if (is_ap &&
4194 old_state == IEEE80211_STA_NOTEXIST &&
4195 new_state == IEEE80211_STA_NONE)
4196 return wl12xx_sta_add(wl, wlvif, sta);
4197
4198 /* Remove station (AP mode) */
4199 if (is_ap &&
4200 old_state == IEEE80211_STA_NONE &&
4201 new_state == IEEE80211_STA_NOTEXIST) {
4202 /* must not fail */
4203 wl12xx_sta_remove(wl, wlvif, sta);
4204 return 0;
4205 }
4206
4207 /* Authorize station (AP mode) */
4208 if (is_ap &&
4209 new_state == IEEE80211_STA_AUTHORIZED) {
4210 ret = wl12xx_cmd_set_peer_state(wl, hlid);
4211 if (ret < 0)
4212 return ret;
4213
4214 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4215 hlid);
4216 return ret;
4217 }
4218
4219 /* Authorize station */
4220 if (is_sta &&
4221 new_state == IEEE80211_STA_AUTHORIZED) {
4222 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4223 return wl12xx_set_authorized(wl, wlvif);
4224 }
4225
4226 if (is_sta &&
4227 old_state == IEEE80211_STA_AUTHORIZED &&
4228 new_state == IEEE80211_STA_ASSOC) {
4229 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4230 return 0;
4231 }
4232
4233 return 0;
4234 }
4235
4236 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4237 struct ieee80211_vif *vif,
4238 struct ieee80211_sta *sta,
4239 enum ieee80211_sta_state old_state,
4240 enum ieee80211_sta_state new_state)
4241 {
4242 struct wl1271 *wl = hw->priv;
4243 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4244 int ret;
4245
4246 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4247 sta->aid, old_state, new_state);
4248
4249 mutex_lock(&wl->mutex);
4250
4251 if (unlikely(wl->state == WL1271_STATE_OFF)) {
4252 ret = -EBUSY;
4253 goto out;
4254 }
4255
4256 ret = wl1271_ps_elp_wakeup(wl);
4257 if (ret < 0)
4258 goto out;
4259
4260 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
4261
4262 wl1271_ps_elp_sleep(wl);
4263 out:
4264 mutex_unlock(&wl->mutex);
4265 if (new_state < old_state)
4266 return 0;
4267 return ret;
4268 }
4269
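/*
 * mac80211 ->ampdu_action handler. Only RX BlockAck sessions are driven
 * from the host: RX_START/RX_STOP program the firmware through
 * wl12xx_acx_set_ba_receiver_session() and track active TIDs in a
 * per-link bitmap, capped at RX_BA_MAX_SESSIONS. TX aggregation setup is
 * owned by the firmware, so all TX actions report -EINVAL.
 */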
4270 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4271 struct ieee80211_vif *vif,
4272 enum ieee80211_ampdu_mlme_action action,
4273 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4274 u8 buf_size)
4275 {
4276 struct wl1271 *wl = hw->priv;
4277 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4278 int ret;
4279 u8 hlid, *ba_bitmap;
4280
4281 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
4282 tid);
4283
4284 /* sanity check - the fields in the FW are only 8 bits wide */
4285 if (WARN_ON(tid > 0xFF))
4286 return -ENOTSUPP;
4287
4288 mutex_lock(&wl->mutex);
4289
4290 if (unlikely(wl->state == WL1271_STATE_OFF)) {
4291 ret = -EAGAIN;
4292 goto out;
4293 }
4294
4295 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4296 hlid = wlvif->sta.hlid;
4297 ba_bitmap = &wlvif->sta.ba_rx_bitmap;
4298 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
4299 struct wl1271_station *wl_sta;
4300
4301 wl_sta = (struct wl1271_station *)sta->drv_priv;
4302 hlid = wl_sta->hlid;
4303 ba_bitmap = &wl->links[hlid].ba_bitmap;
4304 } else {
4305 ret = -EINVAL;
4306 goto out;
4307 }
4308
4309 ret = wl1271_ps_elp_wakeup(wl);
4310 if (ret < 0)
4311 goto out;
4312
4313 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
4314 tid, action);
4315
4316 switch (action) {
4317 case IEEE80211_AMPDU_RX_START:
4318 if (!wlvif->ba_support || !wlvif->ba_allowed) {
4319 ret = -ENOTSUPP;
4320 break;
4321 }
4322
4323 if (wl->ba_rx_session_count >= RX_BA_MAX_SESSIONS) {
4324 ret = -EBUSY;
4325 wl1271_error("exceeded max RX BA sessions");
4326 break;
4327 }
4328
4329 if (*ba_bitmap & BIT(tid)) {
4330 ret = -EINVAL;
4331 wl1271_error("cannot enable RX BA session on active "
4332 "tid: %d", tid);
4333 break;
4334 }
4335
4336 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
4337 hlid);
4338 if (!ret) {
4339 *ba_bitmap |= BIT(tid);
4340 wl->ba_rx_session_count++;
4341 }
4342 break;
4343
4344 case IEEE80211_AMPDU_RX_STOP:
4345 if (!(*ba_bitmap & BIT(tid))) {
4346 ret = -EINVAL;
4347 wl1271_error("no active RX BA session on tid: %d",
4348 tid);
4349 break;
4350 }
4351
4352 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
4353 hlid);
4354 if (!ret) {
4355 *ba_bitmap &= ~BIT(tid);
4356 wl->ba_rx_session_count--;
4357 }
4358 break;
4359
4360 /*
4361 * The BA initiator session is managed by the FW independently,
4362 * so all TX AMPDU actions intentionally fall through to -EINVAL.
4363 */
4364 case IEEE80211_AMPDU_TX_START:
4365 case IEEE80211_AMPDU_TX_STOP:
4366 case IEEE80211_AMPDU_TX_OPERATIONAL:
4367 ret = -EINVAL;
4368 break;
4369
4370 default:
4371 wl1271_error("Incorrect ampdu action id=%x\n", action);
4372 ret = -EINVAL;
4373 }
4374
4375 wl1271_ps_elp_sleep(wl);
4376
4377 out:
4378 mutex_unlock(&wl->mutex);
4379
4380 return ret;
4381 }
4382
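/*
 * mac80211 ->set_bitrate_mask handler: cache the requested legacy rate
 * mask per band and, for a STA vif that is not yet associated, push the
 * updated rate policy to the firmware right away.
 */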
4383 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
4384 struct ieee80211_vif *vif,
4385 const struct cfg80211_bitrate_mask *mask)
4386 {
4387 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4388 struct wl1271 *wl = hw->priv;
4389 int i, ret = 0;
4390
4391 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
4392 mask->control[NL80211_BAND_2GHZ].legacy,
4393 mask->control[NL80211_BAND_5GHZ].legacy);
4394
4395 mutex_lock(&wl->mutex);
4396
4397 for (i = 0; i < IEEE80211_NUM_BANDS; i++)
4398 wlvif->bitrate_masks[i] =
4399 wl1271_tx_enabled_rates_get(wl,
4400 mask->control[i].legacy,
4401 i);
4402
4403 if (unlikely(wl->state == WL1271_STATE_OFF))
4404 goto out;
4405
4406 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4407 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
4408
4409 ret = wl1271_ps_elp_wakeup(wl);
4410 if (ret < 0)
4411 goto out;
4412
4413 wl1271_set_band_rate(wl, wlvif);
4414 wlvif->basic_rate =
4415 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4416 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4417
4418 wl1271_ps_elp_sleep(wl);
4419 }
4420 out:
4421 mutex_unlock(&wl->mutex);
4422
4423 return ret;
4424 }
4425
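/*
 * mac80211 ->channel_switch handler: flush pending TX and issue a channel
 * switch command for every STA vif (mac80211 does not pass the vif here,
 * see the TODO below). If the chip is off, the switch is reported as
 * failed via ieee80211_chswitch_done().
 */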
4426 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
4427 struct ieee80211_channel_switch *ch_switch)
4428 {
4429 struct wl1271 *wl = hw->priv;
4430 struct wl12xx_vif *wlvif;
4431 int ret;
4432
4433 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
4434
4435 wl1271_tx_flush(wl);
4436
4437 mutex_lock(&wl->mutex);
4438
4439 if (unlikely(wl->state == WL1271_STATE_OFF)) {
4440 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4441 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4442 ieee80211_chswitch_done(vif, false);
4443 }
4444 goto out;
4445 }
4446
4447 ret = wl1271_ps_elp_wakeup(wl);
4448 if (ret < 0)
4449 goto out;
4450
4451 /* TODO: change mac80211 to pass vif as param */
4452 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4453 ret = wl12xx_cmd_channel_switch(wl, wlvif, ch_switch);
4454
4455 if (!ret)
4456 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
4457 }
4458
4459 wl1271_ps_elp_sleep(wl);
4460
4461 out:
4462 mutex_unlock(&wl->mutex);
4463 }
4464
4465 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
4466 {
4467 struct wl1271 *wl = hw->priv;
4468 bool ret = false;
4469
4470 mutex_lock(&wl->mutex);
4471
4472 if (unlikely(wl->state == WL1271_STATE_OFF))
4473 goto out;
4474
4475 /* packets are considered pending if in the TX queue or the FW */
4476 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
4477 out:
4478 mutex_unlock(&wl->mutex);
4479
4480 return ret;
4481 }
4482
4483 /* can't be const, mac80211 writes to this */
4484 static struct ieee80211_rate wl1271_rates[] = {
4485 { .bitrate = 10,
4486 .hw_value = CONF_HW_BIT_RATE_1MBPS,
4487 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
4488 { .bitrate = 20,
4489 .hw_value = CONF_HW_BIT_RATE_2MBPS,
4490 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
4491 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
4492 { .bitrate = 55,
4493 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
4494 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
4495 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
4496 { .bitrate = 110,
4497 .hw_value = CONF_HW_BIT_RATE_11MBPS,
4498 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
4499 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
4500 { .bitrate = 60,
4501 .hw_value = CONF_HW_BIT_RATE_6MBPS,
4502 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
4503 { .bitrate = 90,
4504 .hw_value = CONF_HW_BIT_RATE_9MBPS,
4505 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
4506 { .bitrate = 120,
4507 .hw_value = CONF_HW_BIT_RATE_12MBPS,
4508 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
4509 { .bitrate = 180,
4510 .hw_value = CONF_HW_BIT_RATE_18MBPS,
4511 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
4512 { .bitrate = 240,
4513 .hw_value = CONF_HW_BIT_RATE_24MBPS,
4514 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
4515 { .bitrate = 360,
4516 .hw_value = CONF_HW_BIT_RATE_36MBPS,
4517 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
4518 { .bitrate = 480,
4519 .hw_value = CONF_HW_BIT_RATE_48MBPS,
4520 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
4521 { .bitrate = 540,
4522 .hw_value = CONF_HW_BIT_RATE_54MBPS,
4523 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
4524 };
4525
4526 /* can't be const, mac80211 writes to this */
4527 static struct ieee80211_channel wl1271_channels[] = {
4528 { .hw_value = 1, .center_freq = 2412, .max_power = 25 },
4529 { .hw_value = 2, .center_freq = 2417, .max_power = 25 },
4530 { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
4531 { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
4532 { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
4533 { .hw_value = 6, .center_freq = 2437, .max_power = 25 },
4534 { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
4535 { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
4536 { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
4537 { .hw_value = 10, .center_freq = 2457, .max_power = 25 },
4538 { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
4539 { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
4540 { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
4541 { .hw_value = 14, .center_freq = 2484, .max_power = 25 },
4542 };
4543
4544 /* can't be const, mac80211 writes to this */
4545 static struct ieee80211_supported_band wl1271_band_2ghz = {
4546 .channels = wl1271_channels,
4547 .n_channels = ARRAY_SIZE(wl1271_channels),
4548 .bitrates = wl1271_rates,
4549 .n_bitrates = ARRAY_SIZE(wl1271_rates),
4550 };
4551
4552 /* 5 GHz data rates for WL1273 */
4553 static struct ieee80211_rate wl1271_rates_5ghz[] = {
4554 { .bitrate = 60,
4555 .hw_value = CONF_HW_BIT_RATE_6MBPS,
4556 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
4557 { .bitrate = 90,
4558 .hw_value = CONF_HW_BIT_RATE_9MBPS,
4559 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
4560 { .bitrate = 120,
4561 .hw_value = CONF_HW_BIT_RATE_12MBPS,
4562 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
4563 { .bitrate = 180,
4564 .hw_value = CONF_HW_BIT_RATE_18MBPS,
4565 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
4566 { .bitrate = 240,
4567 .hw_value = CONF_HW_BIT_RATE_24MBPS,
4568 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
4569 { .bitrate = 360,
4570 .hw_value = CONF_HW_BIT_RATE_36MBPS,
4571 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
4572 { .bitrate = 480,
4573 .hw_value = CONF_HW_BIT_RATE_48MBPS,
4574 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
4575 { .bitrate = 540,
4576 .hw_value = CONF_HW_BIT_RATE_54MBPS,
4577 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
4578 };
4579
4580 /* 5 GHz band channels for WL1273 */
4581 static struct ieee80211_channel wl1271_channels_5ghz[] = {
4582 { .hw_value = 7, .center_freq = 5035, .max_power = 25 },
4583 { .hw_value = 8, .center_freq = 5040, .max_power = 25 },
4584 { .hw_value = 9, .center_freq = 5045, .max_power = 25 },
4585 { .hw_value = 11, .center_freq = 5055, .max_power = 25 },
4586 { .hw_value = 12, .center_freq = 5060, .max_power = 25 },
4587 { .hw_value = 16, .center_freq = 5080, .max_power = 25 },
4588 { .hw_value = 34, .center_freq = 5170, .max_power = 25 },
4589 { .hw_value = 36, .center_freq = 5180, .max_power = 25 },
4590 { .hw_value = 38, .center_freq = 5190, .max_power = 25 },
4591 { .hw_value = 40, .center_freq = 5200, .max_power = 25 },
4592 { .hw_value = 42, .center_freq = 5210, .max_power = 25 },
4593 { .hw_value = 44, .center_freq = 5220, .max_power = 25 },
4594 { .hw_value = 46, .center_freq = 5230, .max_power = 25 },
4595 { .hw_value = 48, .center_freq = 5240, .max_power = 25 },
4596 { .hw_value = 52, .center_freq = 5260, .max_power = 25 },
4597 { .hw_value = 56, .center_freq = 5280, .max_power = 25 },
4598 { .hw_value = 60, .center_freq = 5300, .max_power = 25 },
4599 { .hw_value = 64, .center_freq = 5320, .max_power = 25 },
4600 { .hw_value = 100, .center_freq = 5500, .max_power = 25 },
4601 { .hw_value = 104, .center_freq = 5520, .max_power = 25 },
4602 { .hw_value = 108, .center_freq = 5540, .max_power = 25 },
4603 { .hw_value = 112, .center_freq = 5560, .max_power = 25 },
4604 { .hw_value = 116, .center_freq = 5580, .max_power = 25 },
4605 { .hw_value = 120, .center_freq = 5600, .max_power = 25 },
4606 { .hw_value = 124, .center_freq = 5620, .max_power = 25 },
4607 { .hw_value = 128, .center_freq = 5640, .max_power = 25 },
4608 { .hw_value = 132, .center_freq = 5660, .max_power = 25 },
4609 { .hw_value = 136, .center_freq = 5680, .max_power = 25 },
4610 { .hw_value = 140, .center_freq = 5700, .max_power = 25 },
4611 { .hw_value = 149, .center_freq = 5745, .max_power = 25 },
4612 { .hw_value = 153, .center_freq = 5765, .max_power = 25 },
4613 { .hw_value = 157, .center_freq = 5785, .max_power = 25 },
4614 { .hw_value = 161, .center_freq = 5805, .max_power = 25 },
4615 { .hw_value = 165, .center_freq = 5825, .max_power = 25 },
4616 };
4617
4618 static struct ieee80211_supported_band wl1271_band_5ghz = {
4619 .channels = wl1271_channels_5ghz,
4620 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
4621 .bitrates = wl1271_rates_5ghz,
4622 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
4623 };
4624
4625 static const struct ieee80211_ops wl1271_ops = {
4626 .start = wl1271_op_start,
4627 .stop = wl1271_op_stop,
4628 .add_interface = wl1271_op_add_interface,
4629 .remove_interface = wl1271_op_remove_interface,
4630 .change_interface = wl12xx_op_change_interface,
4631 #ifdef CONFIG_PM
4632 .suspend = wl1271_op_suspend,
4633 .resume = wl1271_op_resume,
4634 #endif
4635 .config = wl1271_op_config,
4636 .prepare_multicast = wl1271_op_prepare_multicast,
4637 .configure_filter = wl1271_op_configure_filter,
4638 .tx = wl1271_op_tx,
4639 .set_key = wl1271_op_set_key,
4640 .hw_scan = wl1271_op_hw_scan,
4641 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
4642 .sched_scan_start = wl1271_op_sched_scan_start,
4643 .sched_scan_stop = wl1271_op_sched_scan_stop,
4644 .bss_info_changed = wl1271_op_bss_info_changed,
4645 .set_frag_threshold = wl1271_op_set_frag_threshold,
4646 .set_rts_threshold = wl1271_op_set_rts_threshold,
4647 .conf_tx = wl1271_op_conf_tx,
4648 .get_tsf = wl1271_op_get_tsf,
4649 .get_survey = wl1271_op_get_survey,
4650 .sta_state = wl12xx_op_sta_state,
4651 .ampdu_action = wl1271_op_ampdu_action,
4652 .tx_frames_pending = wl1271_tx_frames_pending,
4653 .set_bitrate_mask = wl12xx_set_bitrate_mask,
4654 .channel_switch = wl12xx_op_channel_switch,
4655 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
4656 };
4657
4658
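/*
 * Translate a raw rate index reported by the hardware into the mac80211
 * rate index for the given band, using the chip's band_rate_to_idx table.
 * Out-of-range or unsupported rates are logged and mapped to index 0.
 */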
4659 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
4660 {
4661 u8 idx;
4662
4663 BUG_ON(band >= 2);
4664
4665 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
4666 wl1271_error("Illegal RX rate from HW: %d", rate);
4667 return 0;
4668 }
4669
4670 idx = wl->band_rate_to_idx[band][rate];
4671 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
4672 wl1271_error("Unsupported RX rate from HW: %d", rate);
4673 return 0;
4674 }
4675
4676 return idx;
4677 }
4678
4679 static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
4680 struct device_attribute *attr,
4681 char *buf)
4682 {
4683 struct wl1271 *wl = dev_get_drvdata(dev);
4684 ssize_t len;
4685
4686 len = PAGE_SIZE;
4687
4688 mutex_lock(&wl->mutex);
4689 len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
4690 wl->sg_enabled);
4691 mutex_unlock(&wl->mutex);
4692
4693 return len;
4694
4695 }
4696
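/*
 * sysfs store handler for bt_coex_state: writing "0" or "1" (for example
 * 'echo 1 > bt_coex_state' in the device's sysfs directory) toggles the
 * sg_enabled (BT coexistence) setting and pushes it to the firmware when
 * the chip is powered on.
 */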
4697 static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
4698 struct device_attribute *attr,
4699 const char *buf, size_t count)
4700 {
4701 struct wl1271 *wl = dev_get_drvdata(dev);
4702 unsigned long res;
4703 int ret;
4704
4705 ret = kstrtoul(buf, 10, &res);
4706 if (ret < 0) {
4707 wl1271_warning("incorrect value written to bt_coex_mode");
4708 return count;
4709 }
4710
4711 mutex_lock(&wl->mutex);
4712
4713 res = !!res;
4714
4715 if (res == wl->sg_enabled)
4716 goto out;
4717
4718 wl->sg_enabled = res;
4719
4720 if (wl->state == WL1271_STATE_OFF)
4721 goto out;
4722
4723 ret = wl1271_ps_elp_wakeup(wl);
4724 if (ret < 0)
4725 goto out;
4726
4727 wl1271_acx_sg_enable(wl, wl->sg_enabled);
4728 wl1271_ps_elp_sleep(wl);
4729
4730 out:
4731 mutex_unlock(&wl->mutex);
4732 return count;
4733 }
4734
4735 static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
4736 wl1271_sysfs_show_bt_coex_state,
4737 wl1271_sysfs_store_bt_coex_state);
4738
4739 static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
4740 struct device_attribute *attr,
4741 char *buf)
4742 {
4743 struct wl1271 *wl = dev_get_drvdata(dev);
4744 ssize_t len;
4745
4746 len = PAGE_SIZE;
4747
4748 mutex_lock(&wl->mutex);
4749 if (wl->hw_pg_ver >= 0)
4750 len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
4751 else
4752 len = snprintf(buf, len, "n/a\n");
4753 mutex_unlock(&wl->mutex);
4754
4755 return len;
4756 }
4757
4758 static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
4759 wl1271_sysfs_show_hw_pg_ver, NULL);
4760
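/*
 * Blocking read handler for the "fwlog" binary sysfs attribute. A reader
 * sleeps on fwlog_waitq until log data arrives (or the log is invalidated
 * by fwlog_size going negative, see wlcore_free_hw()), then drains up to
 * 'count' bytes from the head of the buffer. Seeking is not supported.
 */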
4761 static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
4762 struct bin_attribute *bin_attr,
4763 char *buffer, loff_t pos, size_t count)
4764 {
4765 struct device *dev = container_of(kobj, struct device, kobj);
4766 struct wl1271 *wl = dev_get_drvdata(dev);
4767 ssize_t len;
4768 int ret;
4769
4770 ret = mutex_lock_interruptible(&wl->mutex);
4771 if (ret < 0)
4772 return -ERESTARTSYS;
4773
4774 /* Let only one thread read the log at a time, blocking others */
4775 while (wl->fwlog_size == 0) {
4776 DEFINE_WAIT(wait);
4777
4778 prepare_to_wait_exclusive(&wl->fwlog_waitq,
4779 &wait,
4780 TASK_INTERRUPTIBLE);
4781
4782 if (wl->fwlog_size != 0) {
4783 finish_wait(&wl->fwlog_waitq, &wait);
4784 break;
4785 }
4786
4787 mutex_unlock(&wl->mutex);
4788
4789 schedule();
4790 finish_wait(&wl->fwlog_waitq, &wait);
4791
4792 if (signal_pending(current))
4793 return -ERESTARTSYS;
4794
4795 ret = mutex_lock_interruptible(&wl->mutex);
4796 if (ret < 0)
4797 return -ERESTARTSYS;
4798 }
4799
4800 /* Check if the fwlog is still valid */
4801 if (wl->fwlog_size < 0) {
4802 mutex_unlock(&wl->mutex);
4803 return 0;
4804 }
4805
4806 /* Seeking is not supported - old logs are not kept. Disregard pos. */
4807 len = min(count, (size_t)wl->fwlog_size);
4808 wl->fwlog_size -= len;
4809 memcpy(buffer, wl->fwlog, len);
4810
4811 /* Make room for new messages */
4812 memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
4813
4814 mutex_unlock(&wl->mutex);
4815
4816 return len;
4817 }
4818
4819 static struct bin_attribute fwlog_attr = {
4820 .attr = {.name = "fwlog", .mode = S_IRUSR},
4821 .read = wl1271_sysfs_read_fwlog,
4822 };
4823
4824 static void wl1271_connection_loss_work(struct work_struct *work)
4825 {
4826 struct delayed_work *dwork;
4827 struct wl1271 *wl;
4828 struct ieee80211_vif *vif;
4829 struct wl12xx_vif *wlvif;
4830
4831 dwork = container_of(work, struct delayed_work, work);
4832 wl = container_of(dwork, struct wl1271, connection_loss_work);
4833
4834 wl1271_info("Connection loss work.");
4835
4836 mutex_lock(&wl->mutex);
4837
4838 if (unlikely(wl->state == WL1271_STATE_OFF))
4839 goto out;
4840
4841 /* Call mac80211 connection loss */
4842 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4843 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
4844 goto out;
4845 vif = wl12xx_wlvif_to_vif(wlvif);
4846 ieee80211_connection_loss(vif);
4847 }
4848 out:
4849 mutex_unlock(&wl->mutex);
4850 }
4851
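/*
 * Build 'n' consecutive MAC addresses from a 24-bit OUI and a 24-bit NIC
 * base and publish them through wiphy->addresses. For example (values
 * purely illustrative), oui = 0x080028, nic = 0x000001, n = 2 yields
 * 08:00:28:00:00:01 and 08:00:28:00:00:02.
 */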
4852 static void wl12xx_derive_mac_addresses(struct wl1271 *wl,
4853 u32 oui, u32 nic, int n)
4854 {
4855 int i;
4856
4857 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x, n %d",
4858 oui, nic, n);
4859
4860 if (nic + n - 1 > 0xffffff)
4861 wl1271_warning("NIC part of the MAC address wraps around!");
4862
4863 for (i = 0; i < n; i++) {
4864 wl->addresses[i].addr[0] = (u8)(oui >> 16);
4865 wl->addresses[i].addr[1] = (u8)(oui >> 8);
4866 wl->addresses[i].addr[2] = (u8) oui;
4867 wl->addresses[i].addr[3] = (u8)(nic >> 16);
4868 wl->addresses[i].addr[4] = (u8)(nic >> 8);
4869 wl->addresses[i].addr[5] = (u8) nic;
4870 nic++;
4871 }
4872
4873 wl->hw->wiphy->n_addresses = n;
4874 wl->hw->wiphy->addresses = wl->addresses;
4875 }
4876
4877 static int wl12xx_get_hw_info(struct wl1271 *wl)
4878 {
4879 int ret;
4880
4881 ret = wl12xx_set_power_on(wl);
4882 if (ret < 0)
4883 goto out;
4884
4885 wl->chip.id = wlcore_read_reg(wl, REG_CHIP_ID_B);
4886
4887 wl->fuse_oui_addr = 0;
4888 wl->fuse_nic_addr = 0;
4889
4890 wl->hw_pg_ver = wl->ops->get_pg_ver(wl);
4891
4892 if (wl->ops->get_mac)
4893 wl->ops->get_mac(wl);
4894
4895 wl1271_power_off(wl);
4896 out:
4897 return ret;
4898 }
4899
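/*
 * Register the device with mac80211. The OUI/NIC parts of the base MAC
 * address come from the NVS file when one is available; if the NVS
 * address is all zeros they fall back to the values fused into the chip,
 * with the NIC part offset by one because the fuse holds the BD_ADDR.
 */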
4900 static int wl1271_register_hw(struct wl1271 *wl)
4901 {
4902 int ret;
4903 u32 oui_addr = 0, nic_addr = 0;
4904
4905 if (wl->mac80211_registered)
4906 return 0;
4907
4908 ret = wl12xx_get_hw_info(wl);
4909 if (ret < 0) {
4910 wl1271_error("couldn't get hw info");
4911 goto out;
4912 }
4913
4914 ret = wl1271_fetch_nvs(wl);
4915 if (ret == 0) {
4916 /* NOTE: The wl->nvs->nvs element must be first, in
4917 * order to simplify the casting, we assume it is at
4918 * the beginning of the wl->nvs structure.
4919 */
4920 u8 *nvs_ptr = (u8 *)wl->nvs;
4921
4922 oui_addr =
4923 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
4924 nic_addr =
4925 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
4926 }
4927
4928 /* if the MAC address is zeroed in the NVS derive from fuse */
4929 if (oui_addr == 0 && nic_addr == 0) {
4930 oui_addr = wl->fuse_oui_addr;
4931 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
4932 nic_addr = wl->fuse_nic_addr + 1;
4933 }
4934
4935 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr, 2);
4936
4937 ret = ieee80211_register_hw(wl->hw);
4938 if (ret < 0) {
4939 wl1271_error("unable to register mac80211 hw: %d", ret);
4940 goto out;
4941 }
4942
4943 wl->mac80211_registered = true;
4944
4945 wl1271_debugfs_init(wl);
4946
4947 wl1271_notice("loaded");
4948
4949 out:
4950 return ret;
4951 }
4952
4953 static void wl1271_unregister_hw(struct wl1271 *wl)
4954 {
4955 if (wl->plt)
4956 wl1271_plt_stop(wl);
4957
4958 ieee80211_unregister_hw(wl->hw);
4959 wl->mac80211_registered = false;
4960
4961 }
4962
4963 static int wl1271_init_ieee80211(struct wl1271 *wl)
4964 {
4965 static const u32 cipher_suites[] = {
4966 WLAN_CIPHER_SUITE_WEP40,
4967 WLAN_CIPHER_SUITE_WEP104,
4968 WLAN_CIPHER_SUITE_TKIP,
4969 WLAN_CIPHER_SUITE_CCMP,
4970 WL1271_CIPHER_SUITE_GEM,
4971 };
4972
4973 /* The tx descriptor buffer and the TKIP space. */
4974 wl->hw->extra_tx_headroom = WL1271_EXTRA_SPACE_TKIP +
4975 sizeof(struct wl1271_tx_hw_descr);
4976
4977 /* unit us */
4978 /* FIXME: find a proper value */
4979 wl->hw->channel_change_time = 10000;
4980 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
4981
4982 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
4983 IEEE80211_HW_SUPPORTS_PS |
4984 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
4985 IEEE80211_HW_SUPPORTS_UAPSD |
4986 IEEE80211_HW_HAS_RATE_CONTROL |
4987 IEEE80211_HW_CONNECTION_MONITOR |
4988 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
4989 IEEE80211_HW_SPECTRUM_MGMT |
4990 IEEE80211_HW_AP_LINK_PS |
4991 IEEE80211_HW_AMPDU_AGGREGATION |
4992 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
4993 IEEE80211_HW_SCAN_WHILE_IDLE;
4994
4995 wl->hw->wiphy->cipher_suites = cipher_suites;
4996 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
4997
4998 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
4999 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5000 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
5001 wl->hw->wiphy->max_scan_ssids = 1;
5002 wl->hw->wiphy->max_sched_scan_ssids = 16;
5003 wl->hw->wiphy->max_match_sets = 16;
5004 /*
5005 * Maximum length of elements in scanning probe request templates
5006 * should be the maximum length possible for a template, without
5007 * the IEEE80211 header of the template
5008 */
5009 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5010 sizeof(struct ieee80211_header);
5011
5012 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5013 sizeof(struct ieee80211_header);
5014
5015 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5016 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
5017
5018 /* make sure all our channels fit in the scanned_ch bitmask */
5019 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5020 ARRAY_SIZE(wl1271_channels_5ghz) >
5021 WL1271_MAX_CHANNELS);
5022 /*
5023 * We keep local copies of the band structs because we need to
5024 * modify them on a per-device basis.
5025 */
5026 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5027 sizeof(wl1271_band_2ghz));
5028 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap, &wl->ht_cap,
5029 sizeof(wl->ht_cap));
5030 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5031 sizeof(wl1271_band_5ghz));
5032 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap, &wl->ht_cap,
5033 sizeof(wl->ht_cap));
5034
5035 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5036 &wl->bands[IEEE80211_BAND_2GHZ];
5037 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5038 &wl->bands[IEEE80211_BAND_5GHZ];
5039
5040 wl->hw->queues = 4;
5041 wl->hw->max_rates = 1;
5042
5043 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5044
5045 /* the FW answers probe-requests in AP-mode */
5046 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5047 wl->hw->wiphy->probe_resp_offload =
5048 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5049 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5050 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5051
5052 SET_IEEE80211_DEV(wl->hw, wl->dev);
5053
5054 wl->hw->sta_data_size = sizeof(struct wl1271_station);
5055 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5056
5057 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
5058
5059 return 0;
5060 }
5061
5062 #define WL1271_DEFAULT_CHANNEL 0
5063
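/*
 * Allocate the ieee80211_hw and the wlcore state: a chip specific private
 * area of 'priv_size' bytes, the per-link TX queues, the work items, the
 * freezable workqueue, the aggregation buffer, the dummy packet and one
 * page for the firmware log. Failures unwind in reverse allocation order.
 */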
5064 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
5065 {
5066 struct ieee80211_hw *hw;
5067 struct wl1271 *wl;
5068 int i, j, ret;
5069 unsigned int order;
5070
5071 BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
5072
5073 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5074 if (!hw) {
5075 wl1271_error("could not alloc ieee80211_hw");
5076 ret = -ENOMEM;
5077 goto err_hw_alloc;
5078 }
5079
5080 wl = hw->priv;
5081 memset(wl, 0, sizeof(*wl));
5082
5083 wl->priv = kzalloc(priv_size, GFP_KERNEL);
5084 if (!wl->priv) {
5085 wl1271_error("could not alloc wl priv");
5086 ret = -ENOMEM;
5087 goto err_priv_alloc;
5088 }
5089
5090 INIT_LIST_HEAD(&wl->wlvif_list);
5091
5092 wl->hw = hw;
5093
5094 for (i = 0; i < NUM_TX_QUEUES; i++)
5095 for (j = 0; j < WL12XX_MAX_LINKS; j++)
5096 skb_queue_head_init(&wl->links[j].tx_queue[i]);
5097
5098 skb_queue_head_init(&wl->deferred_rx_queue);
5099 skb_queue_head_init(&wl->deferred_tx_queue);
5100
5101 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5102 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5103 INIT_WORK(&wl->tx_work, wl1271_tx_work);
5104 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5105 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5106 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
5107 INIT_DELAYED_WORK(&wl->connection_loss_work,
5108 wl1271_connection_loss_work);
5109
5110 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5111 if (!wl->freezable_wq) {
5112 ret = -ENOMEM;
5113 goto err_hw;
5114 }
5115
5116 wl->channel = WL1271_DEFAULT_CHANNEL;
5117 wl->rx_counter = 0;
5118 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5119 wl->band = IEEE80211_BAND_2GHZ;
5120 wl->flags = 0;
5121 wl->sg_enabled = true;
5122 wl->hw_pg_ver = -1;
5123 wl->ap_ps_map = 0;
5124 wl->ap_fw_ps_map = 0;
5125 wl->quirks = 0;
5126 wl->platform_quirks = 0;
5127 wl->sched_scanning = false;
5128 wl->system_hlid = WL12XX_SYSTEM_HLID;
5129 wl->active_sta_count = 0;
5130 wl->fwlog_size = 0;
5131 init_waitqueue_head(&wl->fwlog_waitq);
5132
5133 /* The system link is always allocated */
5134 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5135
5136 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5137 for (i = 0; i < wl->num_tx_desc; i++)
5138 wl->tx_frames[i] = NULL;
5139
5140 spin_lock_init(&wl->wl_lock);
5141
5142 wl->state = WL1271_STATE_OFF;
5143 wl->fw_type = WL12XX_FW_TYPE_NONE;
5144 mutex_init(&wl->mutex);
5145
5146 order = get_order(WL1271_AGGR_BUFFER_SIZE);
5147 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5148 if (!wl->aggr_buf) {
5149 ret = -ENOMEM;
5150 goto err_wq;
5151 }
5152
5153 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5154 if (!wl->dummy_packet) {
5155 ret = -ENOMEM;
5156 goto err_aggr;
5157 }
5158
5159 /* Allocate one page for the FW log */
5160 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5161 if (!wl->fwlog) {
5162 ret = -ENOMEM;
5163 goto err_dummy_packet;
5164 }
5165
5166 wl->mbox = kmalloc(sizeof(*wl->mbox), GFP_KERNEL | GFP_DMA);
5167 if (!wl->mbox) {
5168 ret = -ENOMEM;
5169 goto err_fwlog;
5170 }
5171
5172 return hw;
5173
5174 err_fwlog:
5175 free_page((unsigned long)wl->fwlog);
5176
5177 err_dummy_packet:
5178 dev_kfree_skb(wl->dummy_packet);
5179
5180 err_aggr:
5181 free_pages((unsigned long)wl->aggr_buf, order);
5182
5183 err_wq:
5184 destroy_workqueue(wl->freezable_wq);
5185
5186 err_hw:
5187 wl1271_debugfs_exit(wl);
5188 kfree(wl->priv);
5189
5190 err_priv_alloc:
5191 ieee80211_free_hw(hw);
5192
5193 err_hw_alloc:
5194
5195 return ERR_PTR(ret);
5196 }
5197 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
5198
5199 int wlcore_free_hw(struct wl1271 *wl)
5200 {
5201 /* Unblock any fwlog readers */
5202 mutex_lock(&wl->mutex);
5203 wl->fwlog_size = -1;
5204 wake_up_interruptible_all(&wl->fwlog_waitq);
5205 mutex_unlock(&wl->mutex);
5206
5207 device_remove_bin_file(wl->dev, &fwlog_attr);
5208
5209 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5210
5211 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5212 free_page((unsigned long)wl->fwlog);
5213 dev_kfree_skb(wl->dummy_packet);
5214 free_pages((unsigned long)wl->aggr_buf,
5215 get_order(WL1271_AGGR_BUFFER_SIZE));
5216
5217 wl1271_debugfs_exit(wl);
5218
5219 vfree(wl->fw);
5220 wl->fw = NULL;
5221 wl->fw_type = WL12XX_FW_TYPE_NONE;
5222 kfree(wl->nvs);
5223 wl->nvs = NULL;
5224
5225 kfree(wl->fw_status);
5226 kfree(wl->tx_res_if);
5227 destroy_workqueue(wl->freezable_wq);
5228
5229 kfree(wl->priv);
5230 ieee80211_free_hw(wl->hw);
5231
5232 return 0;
5233 }
5234 EXPORT_SYMBOL_GPL(wlcore_free_hw);
5235
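/*
 * Hard IRQ half of the threaded interrupt handler: complete a pending ELP
 * wakeup and, while the device is suspended, mark the work as pending and
 * disable the IRQ instead of waking the thread.
 */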
5236 static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
5237 {
5238 struct wl1271 *wl = cookie;
5239 unsigned long flags;
5240
5241 wl1271_debug(DEBUG_IRQ, "IRQ");
5242
5243 /* complete the ELP completion */
5244 spin_lock_irqsave(&wl->wl_lock, flags);
5245 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
5246 if (wl->elp_compl) {
5247 complete(wl->elp_compl);
5248 wl->elp_compl = NULL;
5249 }
5250
5251 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
5252 /* don't enqueue a work right now. mark it as pending */
5253 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
5254 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
5255 disable_irq_nosync(wl->irq);
5256 pm_wakeup_event(wl->dev, 0);
5257 spin_unlock_irqrestore(&wl->wl_lock, flags);
5258 return IRQ_HANDLED;
5259 }
5260 spin_unlock_irqrestore(&wl->wl_lock, flags);
5261
5262 return IRQ_WAKE_THREAD;
5263 }
5264
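/*
 * Common probe path for wlcore based drivers: pull clocks, quirks and I/O
 * ops from the platform data, request the (optionally edge-triggered)
 * threaded IRQ, enable wakeup/WoWLAN when the platform keeps power in
 * suspend, then register with mac80211 and create the sysfs files.
 */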
5265 int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5266 {
5267 struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
5268 unsigned long irqflags;
5269 int ret;
5270
5271 if (!wl->ops || !wl->ptable) {
5272 ret = -EINVAL;
5273 goto out_free_hw;
5274 }
5275
5276 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
5277
5278 /* adjust some runtime configuration parameters */
5279 wlcore_adjust_conf(wl);
5280
5281 wl->irq = platform_get_irq(pdev, 0);
5282 wl->ref_clock = pdata->board_ref_clock;
5283 wl->tcxo_clock = pdata->board_tcxo_clock;
5284 wl->platform_quirks = pdata->platform_quirks;
5285 wl->set_power = pdata->set_power;
5286 wl->dev = &pdev->dev;
5287 wl->if_ops = pdata->ops;
5288
5289 platform_set_drvdata(pdev, wl);
5290
5291 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
5292 irqflags = IRQF_TRIGGER_RISING;
5293 else
5294 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
5295
5296 ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wl1271_irq,
5297 irqflags,
5298 pdev->name, wl);
5299 if (ret < 0) {
5300 wl1271_error("request_irq() failed: %d", ret);
5301 goto out_free_hw;
5302 }
5303
5304 ret = enable_irq_wake(wl->irq);
5305 if (!ret) {
5306 wl->irq_wake_enabled = true;
5307 device_init_wakeup(wl->dev, 1);
5308 if (pdata->pwr_in_suspend) {
5309 wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
5310 wl->hw->wiphy->wowlan.n_patterns =
5311 WL1271_MAX_RX_FILTERS;
5312 wl->hw->wiphy->wowlan.pattern_min_len = 1;
5313 wl->hw->wiphy->wowlan.pattern_max_len =
5314 WL1271_RX_FILTER_MAX_PATTERN_SIZE;
5315 }
5316 }
5317 disable_irq(wl->irq);
5318
5319 ret = wl1271_init_ieee80211(wl);
5320 if (ret)
5321 goto out_irq;
5322
5323 ret = wl1271_register_hw(wl);
5324 if (ret)
5325 goto out_irq;
5326
5327 /* Create sysfs file to control bt coex state */
5328 ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
5329 if (ret < 0) {
5330 wl1271_error("failed to create sysfs file bt_coex_state");
5331 goto out_irq;
5332 }
5333
5334 /* Create sysfs file to get HW PG version */
5335 ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
5336 if (ret < 0) {
5337 wl1271_error("failed to create sysfs file hw_pg_ver");
5338 goto out_bt_coex_state;
5339 }
5340
5341 /* Create sysfs file for the FW log */
5342 ret = device_create_bin_file(wl->dev, &fwlog_attr);
5343 if (ret < 0) {
5344 wl1271_error("failed to create sysfs file fwlog");
5345 goto out_hw_pg_ver;
5346 }
5347
5348 goto out;
5349
5350 out_hw_pg_ver:
5351 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5352
5353 out_bt_coex_state:
5354 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5355
5356 out_irq:
5357 free_irq(wl->irq, wl);
5358
5359 out_free_hw:
5360 wlcore_free_hw(wl);
5361
5362 out:
5363 return ret;
5364 }
5365 EXPORT_SYMBOL_GPL(wlcore_probe);
5366
5367 int __devexit wlcore_remove(struct platform_device *pdev)
5368 {
5369 struct wl1271 *wl = platform_get_drvdata(pdev);
5370
5371 if (wl->irq_wake_enabled) {
5372 device_init_wakeup(wl->dev, 0);
5373 disable_irq_wake(wl->irq);
5374 }
5375 wl1271_unregister_hw(wl);
5376 free_irq(wl->irq, wl);
5377 wlcore_free_hw(wl);
5378
5379 return 0;
5380 }
5381 EXPORT_SYMBOL_GPL(wlcore_remove);
5382
5383 u32 wl12xx_debug_level = DEBUG_NONE;
5384 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
5385 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
5386 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
5387
5388 module_param_named(fwlog, fwlog_param, charp, 0);
5389 MODULE_PARM_DESC(fwlog,
5390 "FW logger options: continuous, ondemand, dbgpins or disable");
5391
5392 module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR);
5393 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
5394
5395 module_param(no_recovery, bool, S_IRUSR | S_IWUSR);
5396 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
5397
5398 MODULE_LICENSE("GPL");
5399 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
5400 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");