wlcore: support peer MIMO rates
[deliverable/linux.git] drivers/net/wireless/ti/wlcore/main.c
1
2 /*
3 * This file is part of wl1271
4 *
5 * Copyright (C) 2008-2010 Nokia Corporation
6 *
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21 * 02110-1301 USA
22 *
23 */
24
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
37
38 #include "wlcore.h"
39 #include "debug.h"
40 #include "wl12xx_80211.h"
41 #include "io.h"
42 #include "event.h"
43 #include "tx.h"
44 #include "rx.h"
45 #include "ps.h"
46 #include "init.h"
47 #include "debugfs.h"
48 #include "cmd.h"
49 #include "boot.h"
50 #include "testmode.h"
51 #include "scan.h"
52 #include "hw_ops.h"
53
54 #define WL1271_BOOT_RETRIES 3
55
58 static char *fwlog_param;
59 static bool bug_on_recovery;
60 static bool no_recovery;
61
62 static void __wl1271_op_remove_interface(struct wl1271 *wl,
63 struct ieee80211_vif *vif,
64 bool reset_tx_queues);
65 static void wl1271_op_stop(struct ieee80211_hw *hw);
66 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
67
68 static int wl12xx_set_authorized(struct wl1271 *wl,
69 struct wl12xx_vif *wlvif)
70 {
71 int ret;
72
73 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
74 return -EINVAL;
75
76 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
77 return 0;
78
79 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
80 return 0;
81
82 ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid);
83 if (ret < 0)
84 return ret;
85
86 wl12xx_croc(wl, wlvif->role_id);
87
88 wl1271_info("Association completed.");
89 return 0;
90 }
91
92 static int wl1271_reg_notify(struct wiphy *wiphy,
93 struct regulatory_request *request)
94 {
95 struct ieee80211_supported_band *band;
96 struct ieee80211_channel *ch;
97 int i;
98
99 band = wiphy->bands[IEEE80211_BAND_5GHZ];
100 for (i = 0; i < band->n_channels; i++) {
101 ch = &band->channels[i];
102 if (ch->flags & IEEE80211_CHAN_DISABLED)
103 continue;
104
105 if (ch->flags & IEEE80211_CHAN_RADAR)
106 ch->flags |= IEEE80211_CHAN_NO_IBSS |
107 IEEE80211_CHAN_PASSIVE_SCAN;
108
109 }
110
111 return 0;
112 }
113
114 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
115 bool enable)
116 {
117 int ret = 0;
118
119 /* we should hold wl->mutex */
120 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
121 if (ret < 0)
122 goto out;
123
124 if (enable)
125 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
126 else
127 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
128 out:
129 return ret;
130 }
131
132 /*
133  * this function is called when the rx_streaming interval
134  * has been changed or rx_streaming should be disabled
135 */
136 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
137 {
138 int ret = 0;
139 int period = wl->conf.rx_streaming.interval;
140
141 /* don't reconfigure if rx_streaming is disabled */
142 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
143 goto out;
144
145 /* reconfigure/disable according to new streaming_period */
146 if (period &&
147 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
148 (wl->conf.rx_streaming.always ||
149 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
150 ret = wl1271_set_rx_streaming(wl, wlvif, true);
151 else {
152 ret = wl1271_set_rx_streaming(wl, wlvif, false);
153 /* don't cancel_work_sync since we might deadlock */
154 del_timer_sync(&wlvif->rx_streaming_timer);
155 }
156 out:
157 return ret;
158 }
159
160 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
161 {
162 int ret;
163 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
164 rx_streaming_enable_work);
165 struct wl1271 *wl = wlvif->wl;
166
167 mutex_lock(&wl->mutex);
168
169 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
170 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
171 (!wl->conf.rx_streaming.always &&
172 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
173 goto out;
174
175 if (!wl->conf.rx_streaming.interval)
176 goto out;
177
178 ret = wl1271_ps_elp_wakeup(wl);
179 if (ret < 0)
180 goto out;
181
182 ret = wl1271_set_rx_streaming(wl, wlvif, true);
183 if (ret < 0)
184 goto out_sleep;
185
186 /* stop it after some time of inactivity */
187 mod_timer(&wlvif->rx_streaming_timer,
188 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
189
190 out_sleep:
191 wl1271_ps_elp_sleep(wl);
192 out:
193 mutex_unlock(&wl->mutex);
194 }
195
196 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
197 {
198 int ret;
199 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
200 rx_streaming_disable_work);
201 struct wl1271 *wl = wlvif->wl;
202
203 mutex_lock(&wl->mutex);
204
205 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
206 goto out;
207
208 ret = wl1271_ps_elp_wakeup(wl);
209 if (ret < 0)
210 goto out;
211
212 ret = wl1271_set_rx_streaming(wl, wlvif, false);
213 if (ret)
214 goto out_sleep;
215
216 out_sleep:
217 wl1271_ps_elp_sleep(wl);
218 out:
219 mutex_unlock(&wl->mutex);
220 }
221
222 static void wl1271_rx_streaming_timer(unsigned long data)
223 {
224 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
225 struct wl1271 *wl = wlvif->wl;
226 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
227 }
228
229 /* wl->mutex must be taken */
230 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
231 {
232 /* if the watchdog is not armed, don't do anything */
233 if (wl->tx_allocated_blocks == 0)
234 return;
235
236 cancel_delayed_work(&wl->tx_watchdog_work);
237 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
238 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
239 }
240
241 static void wl12xx_tx_watchdog_work(struct work_struct *work)
242 {
243 struct delayed_work *dwork;
244 struct wl1271 *wl;
245
246 dwork = container_of(work, struct delayed_work, work);
247 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
248
249 mutex_lock(&wl->mutex);
250
251 if (unlikely(wl->state == WL1271_STATE_OFF))
252 goto out;
253
254 /* Tx went out in the meantime - everything is ok */
255 if (unlikely(wl->tx_allocated_blocks == 0))
256 goto out;
257
258 /*
259 * if a ROC is in progress, we might not have any Tx for a long
260 * time (e.g. pending Tx on the non-ROC channels)
261 */
262 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
263 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
264 wl->conf.tx.tx_watchdog_timeout);
265 wl12xx_rearm_tx_watchdog_locked(wl);
266 goto out;
267 }
268
269 /*
270 * if a scan is in progress, we might not have any Tx for a long
271 * time
272 */
273 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
274 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
275 wl->conf.tx.tx_watchdog_timeout);
276 wl12xx_rearm_tx_watchdog_locked(wl);
277 goto out;
278 }
279
280 /*
281 * AP might cache a frame for a long time for a sleeping station,
282 * so rearm the timer if there's an AP interface with stations. If
284  * Tx is genuinely stuck we will hopefully discover it when all
284 * stations are removed due to inactivity.
285 */
286 if (wl->active_sta_count) {
287 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
288 " %d stations",
289 wl->conf.tx.tx_watchdog_timeout,
290 wl->active_sta_count);
291 wl12xx_rearm_tx_watchdog_locked(wl);
292 goto out;
293 }
294
295 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
296 wl->conf.tx.tx_watchdog_timeout);
297 wl12xx_queue_recovery_work(wl);
298
299 out:
300 mutex_unlock(&wl->mutex);
301 }
302
303 static void wlcore_adjust_conf(struct wl1271 *wl)
304 {
305 /* Adjust settings according to optional module parameters */
306 if (fwlog_param) {
307 if (!strcmp(fwlog_param, "continuous")) {
308 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
309 } else if (!strcmp(fwlog_param, "ondemand")) {
310 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
311 } else if (!strcmp(fwlog_param, "dbgpins")) {
312 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
313 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
314 } else if (!strcmp(fwlog_param, "disable")) {
315 wl->conf.fwlog.mem_blocks = 0;
316 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
317 } else {
318 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
319 }
320 }
321 }
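/*
 * Usage sketch (assuming fwlog_param is wired up as a module parameter,
 * e.g. module_param_named(fwlog, fwlog_param, ...) as in mainline wlcore):
 * loading with fwlog=continuous selects WL12XX_FWLOG_CONTINUOUS,
 * fwlog=dbgpins additionally routes the log to the debug pins, and
 * fwlog=disable turns the FW logger off by zeroing its memory blocks.
 */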
322
323 static int wl1271_plt_init(struct wl1271 *wl)
324 {
325 int ret;
326
327 ret = wl->ops->hw_init(wl);
328 if (ret < 0)
329 return ret;
330
331 ret = wl1271_acx_init_mem_config(wl);
332 if (ret < 0)
333 return ret;
334
335 ret = wl12xx_acx_mem_cfg(wl);
336 if (ret < 0)
337 goto out_free_memmap;
338
339 /* Enable data path */
340 ret = wl1271_cmd_data_path(wl, 1);
341 if (ret < 0)
342 goto out_free_memmap;
343
344 	/* Configure for CAM power saving (i.e. always active) */
345 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
346 if (ret < 0)
347 goto out_free_memmap;
348
349 /* configure PM */
350 ret = wl1271_acx_pm_config(wl);
351 if (ret < 0)
352 goto out_free_memmap;
353
354 return 0;
355
356 out_free_memmap:
357 kfree(wl->target_mem_map);
358 wl->target_mem_map = NULL;
359
360 return ret;
361 }
362
363 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
364 struct wl12xx_vif *wlvif,
365 u8 hlid, u8 tx_pkts)
366 {
367 bool fw_ps, single_sta;
368
369 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
370 single_sta = (wl->active_sta_count == 1);
371
372 /*
373 	 * Wake up from high level PS if the STA is asleep with too few
374 * packets in FW or if the STA is awake.
375 */
376 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
377 wl12xx_ps_link_end(wl, wlvif, hlid);
378
379 /*
380 * Start high-level PS if the STA is asleep with enough blocks in FW.
381 * Make an exception if this is the only connected station. In this
382 * case FW-memory congestion is not a problem.
383 */
384 else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
385 wl12xx_ps_link_start(wl, wlvif, hlid, true);
386 }
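/*
 * Illustration of the policy above: WL1271_PS_STA_MAX_PACKETS is a small
 * driver-defined threshold, so a station the FW reports as asleep with at
 * least that many packets pending gets high-level PS started, unless it is
 * the only connected station; once the backlog drains below the threshold,
 * or the station wakes up, high-level PS is ended again.
 */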
387
388 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
389 struct wl12xx_vif *wlvif,
390 struct wl_fw_status *status)
391 {
392 struct wl1271_link *lnk;
393 u32 cur_fw_ps_map;
394 u8 hlid, cnt;
395
396 /* TODO: also use link_fast_bitmap here */
397
398 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
399 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
400 wl1271_debug(DEBUG_PSM,
401 "link ps prev 0x%x cur 0x%x changed 0x%x",
402 wl->ap_fw_ps_map, cur_fw_ps_map,
403 wl->ap_fw_ps_map ^ cur_fw_ps_map);
404
405 wl->ap_fw_ps_map = cur_fw_ps_map;
406 }
407
408 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
409 lnk = &wl->links[hlid];
410 cnt = status->counters.tx_lnk_free_pkts[hlid] -
411 lnk->prev_freed_pkts;
412
413 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[hlid];
414 lnk->allocated_pkts -= cnt;
415
416 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
417 lnk->allocated_pkts);
418 }
419 }
420
421 static void wl12xx_fw_status(struct wl1271 *wl,
422 struct wl_fw_status *status)
423 {
424 struct wl12xx_vif *wlvif;
425 struct timespec ts;
426 u32 old_tx_blk_count = wl->tx_blocks_available;
427 int avail, freed_blocks;
428 int i;
429 size_t status_len;
430
431 status_len = sizeof(*status) + wl->fw_status_priv_len;
432
433 wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status,
434 status_len, false);
435
436 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
437 "drv_rx_counter = %d, tx_results_counter = %d)",
438 status->intr,
439 status->fw_rx_counter,
440 status->drv_rx_counter,
441 status->tx_results_counter);
442
443 for (i = 0; i < NUM_TX_QUEUES; i++) {
444 /* prevent wrap-around in freed-packets counter */
445 wl->tx_allocated_pkts[i] -=
446 (status->counters.tx_released_pkts[i] -
447 wl->tx_pkts_freed[i]) & 0xff;
448
449 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
450 }
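	/*
	 * Example of the masked subtraction above (values are made up): the
	 * per-queue FW counter is effectively 8 bits wide, so if the driver
	 * last saw tx_released_pkts = 250 and the FW now reports 4, then
	 * (4 - 250) & 0xff = 10 packets were freed since the previous read.
	 */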
451
452 /* prevent wrap-around in total blocks counter */
453 if (likely(wl->tx_blocks_freed <=
454 le32_to_cpu(status->total_released_blks)))
455 freed_blocks = le32_to_cpu(status->total_released_blks) -
456 wl->tx_blocks_freed;
457 else
458 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
459 le32_to_cpu(status->total_released_blks);
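	/*
	 * Example of the wrap case above (values are made up): if the driver
	 * last saw total_released_blks = 0xfffffff0 and the FW now reports
	 * 0x00000010, the 32-bit counter wrapped, so
	 * freed_blocks = 0x100000000 - 0xfffffff0 + 0x10 = 0x20.
	 */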
460
461 wl->tx_blocks_freed = le32_to_cpu(status->total_released_blks);
462
463 wl->tx_allocated_blocks -= freed_blocks;
464
465 /*
466 * If the FW freed some blocks:
467 * If we still have allocated blocks - re-arm the timer, Tx is
468 * not stuck. Otherwise, cancel the timer (no Tx currently).
469 */
470 if (freed_blocks) {
471 if (wl->tx_allocated_blocks)
472 wl12xx_rearm_tx_watchdog_locked(wl);
473 else
474 cancel_delayed_work(&wl->tx_watchdog_work);
475 }
476
477 avail = le32_to_cpu(status->tx_total) - wl->tx_allocated_blocks;
478
479 /*
480 * The FW might change the total number of TX memblocks before
481 * we get a notification about blocks being released. Thus, the
482 * available blocks calculation might yield a temporary result
483 * which is lower than the actual available blocks. Keeping in
484 * mind that only blocks that were allocated can be moved from
485 * TX to RX, tx_blocks_available should never decrease here.
486 */
487 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
488 avail);
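	/*
	 * Sketch of the scenario described above (numbers are made up): if
	 * tx_total temporarily drops from 100 to 90 while 25 blocks are still
	 * allocated, avail becomes 65 even though tx_blocks_available was 75;
	 * the max() keeps 75 until the release notification catches up.
	 */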
489
490 /* if more blocks are available now, tx work can be scheduled */
491 if (wl->tx_blocks_available > old_tx_blk_count)
492 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
493
494 /* for AP update num of allocated TX blocks per link and ps status */
495 wl12xx_for_each_wlvif_ap(wl, wlvif) {
496 wl12xx_irq_update_links_status(wl, wlvif, status);
497 }
498
499 /* update the host-chipset time offset */
500 getnstimeofday(&ts);
501 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
502 (s64)le32_to_cpu(status->fw_localtime);
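	/*
	 * Note on the arithmetic above: the >> 10 divides nanoseconds by 1024,
	 * a cheap approximation of a ns-to-microsecond conversion. fw_localtime
	 * is presumably in the same ~microsecond units, so time_offset can be
	 * subtracted from host timestamps later in the Tx path.
	 */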
503 }
504
505 static void wl1271_flush_deferred_work(struct wl1271 *wl)
506 {
507 struct sk_buff *skb;
508
509 /* Pass all received frames to the network stack */
510 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
511 ieee80211_rx_ni(wl->hw, skb);
512
513 /* Return sent skbs to the network stack */
514 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
515 ieee80211_tx_status_ni(wl->hw, skb);
516 }
517
518 static void wl1271_netstack_work(struct work_struct *work)
519 {
520 struct wl1271 *wl =
521 container_of(work, struct wl1271, netstack_work);
522
523 do {
524 wl1271_flush_deferred_work(wl);
525 } while (skb_queue_len(&wl->deferred_rx_queue));
526 }
527
528 #define WL1271_IRQ_MAX_LOOPS 256
529
530 static irqreturn_t wl1271_irq(int irq, void *cookie)
531 {
532 int ret;
533 u32 intr;
534 int loopcount = WL1271_IRQ_MAX_LOOPS;
535 struct wl1271 *wl = (struct wl1271 *)cookie;
536 bool done = false;
537 unsigned int defer_count;
538 unsigned long flags;
539
540 /* TX might be handled here, avoid redundant work */
541 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
542 cancel_work_sync(&wl->tx_work);
543
544 /*
545 	 * If an edge-triggered interrupt must be used, we cannot iterate
546 * more than once without introducing race conditions with the hardirq.
547 */
548 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
549 loopcount = 1;
550
551 mutex_lock(&wl->mutex);
552
553 wl1271_debug(DEBUG_IRQ, "IRQ work");
554
555 if (unlikely(wl->state == WL1271_STATE_OFF))
556 goto out;
557
558 ret = wl1271_ps_elp_wakeup(wl);
559 if (ret < 0)
560 goto out;
561
562 while (!done && loopcount--) {
563 /*
564 * In order to avoid a race with the hardirq, clear the flag
565 * before acknowledging the chip. Since the mutex is held,
566 * wl1271_ps_elp_wakeup cannot be called concurrently.
567 */
568 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
569 smp_mb__after_clear_bit();
570
571 wl12xx_fw_status(wl, wl->fw_status);
572
573 wlcore_hw_tx_immediate_compl(wl);
574
575 intr = le32_to_cpu(wl->fw_status->intr);
576 intr &= WL1271_INTR_MASK;
577 if (!intr) {
578 done = true;
579 continue;
580 }
581
582 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
583 wl1271_error("watchdog interrupt received! "
584 "starting recovery.");
585 wl12xx_queue_recovery_work(wl);
586
587 /* restarting the chip. ignore any other interrupt. */
588 goto out;
589 }
590
591 if (likely(intr & WL1271_ACX_INTR_DATA)) {
592 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
593
594 wl12xx_rx(wl, wl->fw_status);
595
596 /* Check if any tx blocks were freed */
597 spin_lock_irqsave(&wl->wl_lock, flags);
598 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
599 wl1271_tx_total_queue_count(wl) > 0) {
600 spin_unlock_irqrestore(&wl->wl_lock, flags);
601 /*
602 * In order to avoid starvation of the TX path,
603 * call the work function directly.
604 */
605 wl1271_tx_work_locked(wl);
606 } else {
607 spin_unlock_irqrestore(&wl->wl_lock, flags);
608 }
609
610 /* check for tx results */
611 wlcore_hw_tx_delayed_compl(wl);
612
613 /* Make sure the deferred queues don't get too long */
614 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
615 skb_queue_len(&wl->deferred_rx_queue);
616 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
617 wl1271_flush_deferred_work(wl);
618 }
619
620 if (intr & WL1271_ACX_INTR_EVENT_A) {
621 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
622 wl1271_event_handle(wl, 0);
623 }
624
625 if (intr & WL1271_ACX_INTR_EVENT_B) {
626 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
627 wl1271_event_handle(wl, 1);
628 }
629
630 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
631 wl1271_debug(DEBUG_IRQ,
632 "WL1271_ACX_INTR_INIT_COMPLETE");
633
634 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
635 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
636 }
637
638 wl1271_ps_elp_sleep(wl);
639
640 out:
641 spin_lock_irqsave(&wl->wl_lock, flags);
642 /* In case TX was not handled here, queue TX work */
643 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
644 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
645 wl1271_tx_total_queue_count(wl) > 0)
646 ieee80211_queue_work(wl->hw, &wl->tx_work);
647 spin_unlock_irqrestore(&wl->wl_lock, flags);
648
649 mutex_unlock(&wl->mutex);
650
651 return IRQ_HANDLED;
652 }
653
654 struct vif_counter_data {
655 u8 counter;
656
657 struct ieee80211_vif *cur_vif;
658 bool cur_vif_running;
659 };
660
661 static void wl12xx_vif_count_iter(void *data, u8 *mac,
662 struct ieee80211_vif *vif)
663 {
664 struct vif_counter_data *counter = data;
665
666 counter->counter++;
667 if (counter->cur_vif == vif)
668 counter->cur_vif_running = true;
669 }
670
671 /* caller must not hold wl->mutex, as it might deadlock */
672 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
673 struct ieee80211_vif *cur_vif,
674 struct vif_counter_data *data)
675 {
676 memset(data, 0, sizeof(*data));
677 data->cur_vif = cur_vif;
678
679 ieee80211_iterate_active_interfaces(hw,
680 wl12xx_vif_count_iter, data);
681 }
682
683 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
684 {
685 const struct firmware *fw;
686 const char *fw_name;
687 enum wl12xx_fw_type fw_type;
688 int ret;
689
690 if (plt) {
691 fw_type = WL12XX_FW_TYPE_PLT;
692 fw_name = wl->plt_fw_name;
693 } else {
694 /*
695 * we can't call wl12xx_get_vif_count() here because
696 * wl->mutex is taken, so use the cached last_vif_count value
697 */
698 if (wl->last_vif_count > 1) {
699 fw_type = WL12XX_FW_TYPE_MULTI;
700 fw_name = wl->mr_fw_name;
701 } else {
702 fw_type = WL12XX_FW_TYPE_NORMAL;
703 fw_name = wl->sr_fw_name;
704 }
705 }
706
707 if (wl->fw_type == fw_type)
708 return 0;
709
710 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
711
712 ret = request_firmware(&fw, fw_name, wl->dev);
713
714 if (ret < 0) {
715 wl1271_error("could not get firmware %s: %d", fw_name, ret);
716 return ret;
717 }
718
719 if (fw->size % 4) {
720 wl1271_error("firmware size is not multiple of 32 bits: %zu",
721 fw->size);
722 ret = -EILSEQ;
723 goto out;
724 }
725
726 vfree(wl->fw);
727 wl->fw_type = WL12XX_FW_TYPE_NONE;
728 wl->fw_len = fw->size;
729 wl->fw = vmalloc(wl->fw_len);
730
731 if (!wl->fw) {
732 wl1271_error("could not allocate memory for the firmware");
733 ret = -ENOMEM;
734 goto out;
735 }
736
737 memcpy(wl->fw, fw->data, wl->fw_len);
738 ret = 0;
739 wl->fw_type = fw_type;
740 out:
741 release_firmware(fw);
742
743 return ret;
744 }
745
746 static int wl1271_fetch_nvs(struct wl1271 *wl)
747 {
748 const struct firmware *fw;
749 int ret;
750
751 ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
752
753 if (ret < 0) {
754 wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME,
755 ret);
756 return ret;
757 }
758
759 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
760
761 if (!wl->nvs) {
762 wl1271_error("could not allocate memory for the nvs file");
763 ret = -ENOMEM;
764 goto out;
765 }
766
767 wl->nvs_len = fw->size;
768
769 out:
770 release_firmware(fw);
771
772 return ret;
773 }
774
775 void wl12xx_queue_recovery_work(struct wl1271 *wl)
776 {
777 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
778 ieee80211_queue_work(wl->hw, &wl->recovery_work);
779 }
780
781 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
782 {
783 size_t len = 0;
784
785 	/* The FW log is a length-value list, find where the log ends */
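	/*
	 * Assumed layout, inferred from the loop below rather than a FW spec:
	 * each entry is a one-byte length N followed by N bytes of log data,
	 * and a zero length terminates the list. For example the bytes
	 *   03 'a' 'b' 'c' 05 'h' 'e' 'l' 'l' 'o' 00
	 * would yield len = 4 + 6 = 10 bytes copied below.
	 */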
786 while (len < maxlen) {
787 if (memblock[len] == 0)
788 break;
789 if (len + memblock[len] + 1 > maxlen)
790 break;
791 len += memblock[len] + 1;
792 }
793
794 /* Make sure we have enough room */
795 len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
796
797 /* Fill the FW log file, consumed by the sysfs fwlog entry */
798 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
799 wl->fwlog_size += len;
800
801 return len;
802 }
803
804 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
805 {
806 u32 addr;
807 u32 first_addr;
808 u8 *block;
809
810 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
811 (wl->conf.fwlog.mode != WL12XX_FWLOG_ON_DEMAND) ||
812 (wl->conf.fwlog.mem_blocks == 0))
813 return;
814
815 wl1271_info("Reading FW panic log");
816
817 block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
818 if (!block)
819 return;
820
821 /*
822 * Make sure the chip is awake and the logger isn't active.
823 	 * This might fail if the firmware has hung.
824 */
825 if (!wl1271_ps_elp_wakeup(wl))
826 wl12xx_cmd_stop_fwlog(wl);
827
828 /* Read the first memory block address */
829 wl12xx_fw_status(wl, wl->fw_status);
830 first_addr = le32_to_cpu(wl->fw_status->log_start_addr);
831 if (!first_addr)
832 goto out;
833
834 /* Traverse the memory blocks linked list */
835 addr = first_addr;
836 do {
837 memset(block, 0, WL12XX_HW_BLOCK_SIZE);
838 wl1271_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
839 false);
840
841 /*
842 * Memory blocks are linked to one another. The first 4 bytes
843 * of each memory block hold the hardware address of the next
844 * one. The last memory block points to the first one.
845 */
846 addr = le32_to_cpup((__le32 *)block);
847 if (!wl12xx_copy_fwlog(wl, block + sizeof(addr),
848 WL12XX_HW_BLOCK_SIZE - sizeof(addr)))
849 break;
850 } while (addr && (addr != first_addr));
851
852 wake_up_interruptible(&wl->fwlog_waitq);
853
854 out:
855 kfree(block);
856 }
857
858 static void wl1271_recovery_work(struct work_struct *work)
859 {
860 struct wl1271 *wl =
861 container_of(work, struct wl1271, recovery_work);
862 struct wl12xx_vif *wlvif;
863 struct ieee80211_vif *vif;
864
865 mutex_lock(&wl->mutex);
866
867 if (wl->state != WL1271_STATE_ON || wl->plt)
868 goto out_unlock;
869
870 /* Avoid a recursive recovery */
871 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
872
873 wl12xx_read_fwlog_panic(wl);
874
875 wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x",
876 wl->chip.fw_ver_str,
877 wlcore_read_reg(wl, REG_PC_ON_RECOVERY));
878
879 BUG_ON(bug_on_recovery &&
880 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
881
882 if (no_recovery) {
883 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
884 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
885 goto out_unlock;
886 }
887
888 BUG_ON(bug_on_recovery);
889
890 /*
891 * Advance security sequence number to overcome potential progress
892 	 * in the firmware during recovery. This doesn't hurt if the network is
893 * not encrypted.
894 */
895 wl12xx_for_each_wlvif(wl, wlvif) {
896 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
897 test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
898 wlvif->tx_security_seq +=
899 WL1271_TX_SQN_POST_RECOVERY_PADDING;
900 }
901
902 /* Prevent spurious TX during FW restart */
903 ieee80211_stop_queues(wl->hw);
904
905 if (wl->sched_scanning) {
906 ieee80211_sched_scan_stopped(wl->hw);
907 wl->sched_scanning = false;
908 }
909
910 /* reboot the chipset */
911 while (!list_empty(&wl->wlvif_list)) {
912 wlvif = list_first_entry(&wl->wlvif_list,
913 struct wl12xx_vif, list);
914 vif = wl12xx_wlvif_to_vif(wlvif);
915 __wl1271_op_remove_interface(wl, vif, false);
916 }
917 mutex_unlock(&wl->mutex);
918 wl1271_op_stop(wl->hw);
919
920 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
921
922 ieee80211_restart_hw(wl->hw);
923
924 /*
925 	 * It's safe to enable TX now - the queues are stopped after a request
926 * to restart the HW.
927 */
928 ieee80211_wake_queues(wl->hw);
929 return;
930 out_unlock:
931 mutex_unlock(&wl->mutex);
932 }
933
934 static void wl1271_fw_wakeup(struct wl1271 *wl)
935 {
936 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
937 }
938
939 static int wl1271_setup(struct wl1271 *wl)
940 {
941 wl->fw_status = kmalloc(sizeof(*wl->fw_status), GFP_KERNEL);
942 if (!wl->fw_status)
943 return -ENOMEM;
944
945 wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
946 if (!wl->tx_res_if) {
947 kfree(wl->fw_status);
948 return -ENOMEM;
949 }
950
951 return 0;
952 }
953
954 static int wl12xx_set_power_on(struct wl1271 *wl)
955 {
956 int ret;
957
958 msleep(WL1271_PRE_POWER_ON_SLEEP);
959 ret = wl1271_power_on(wl);
960 if (ret < 0)
961 goto out;
962 msleep(WL1271_POWER_ON_SLEEP);
963 wl1271_io_reset(wl);
964 wl1271_io_init(wl);
965
966 wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
967
968 /* ELP module wake up */
969 wl1271_fw_wakeup(wl);
970
971 out:
972 return ret;
973 }
974
975 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
976 {
977 int ret = 0;
978
979 ret = wl12xx_set_power_on(wl);
980 if (ret < 0)
981 goto out;
982
983 /*
984 * For wl127x based devices we could use the default block
985 * size (512 bytes), but due to a bug in the sdio driver, we
986 * need to set it explicitly after the chip is powered on. To
987 * simplify the code and since the performance impact is
988 * negligible, we use the same block size for all different
989 * chip types.
990 */
991 if (wl1271_set_block_size(wl))
992 wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
993
994 ret = wl->ops->identify_chip(wl);
995 if (ret < 0)
996 goto out;
997
998 /* TODO: make sure the lower driver has set things up correctly */
999
1000 ret = wl1271_setup(wl);
1001 if (ret < 0)
1002 goto out;
1003
1004 ret = wl12xx_fetch_firmware(wl, plt);
1005 if (ret < 0)
1006 goto out;
1007
1008 /* No NVS from netlink, try to get it from the filesystem */
1009 if (wl->nvs == NULL) {
1010 ret = wl1271_fetch_nvs(wl);
1011 if (ret < 0)
1012 goto out;
1013 }
1014
1015 out:
1016 return ret;
1017 }
1018
1019 int wl1271_plt_start(struct wl1271 *wl)
1020 {
1021 int retries = WL1271_BOOT_RETRIES;
1022 struct wiphy *wiphy = wl->hw->wiphy;
1023 int ret;
1024
1025 mutex_lock(&wl->mutex);
1026
1027 wl1271_notice("power up");
1028
1029 if (wl->state != WL1271_STATE_OFF) {
1030 wl1271_error("cannot go into PLT state because not "
1031 "in off state: %d", wl->state);
1032 ret = -EBUSY;
1033 goto out;
1034 }
1035
1036 while (retries) {
1037 retries--;
1038 ret = wl12xx_chip_wakeup(wl, true);
1039 if (ret < 0)
1040 goto power_off;
1041
1042 ret = wl->ops->boot(wl);
1043 if (ret < 0)
1044 goto power_off;
1045
1046 ret = wl1271_plt_init(wl);
1047 if (ret < 0)
1048 goto irq_disable;
1049
1050 wl->plt = true;
1051 wl->state = WL1271_STATE_ON;
1052 wl1271_notice("firmware booted in PLT mode (%s)",
1053 wl->chip.fw_ver_str);
1054
1055 /* update hw/fw version info in wiphy struct */
1056 wiphy->hw_version = wl->chip.id;
1057 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1058 sizeof(wiphy->fw_version));
1059
1060 goto out;
1061
1062 irq_disable:
1063 mutex_unlock(&wl->mutex);
1064 /* Unlocking the mutex in the middle of handling is
1065 inherently unsafe. In this case we deem it safe to do,
1066 because we need to let any possibly pending IRQ out of
1067 the system (and while we are WL1271_STATE_OFF the IRQ
1068 work function will not do anything.) Also, any other
1069 possible concurrent operations will fail due to the
1070 current state, hence the wl1271 struct should be safe. */
1071 wlcore_disable_interrupts(wl);
1072 wl1271_flush_deferred_work(wl);
1073 cancel_work_sync(&wl->netstack_work);
1074 mutex_lock(&wl->mutex);
1075 power_off:
1076 wl1271_power_off(wl);
1077 }
1078
1079 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1080 WL1271_BOOT_RETRIES);
1081 out:
1082 mutex_unlock(&wl->mutex);
1083
1084 return ret;
1085 }
1086
1087 int wl1271_plt_stop(struct wl1271 *wl)
1088 {
1089 int ret = 0;
1090
1091 wl1271_notice("power down");
1092
1093 /*
1094 * Interrupts must be disabled before setting the state to OFF.
1095 * Otherwise, the interrupt handler might be called and exit without
1096 * reading the interrupt status.
1097 */
1098 wlcore_disable_interrupts(wl);
1099 mutex_lock(&wl->mutex);
1100 if (!wl->plt) {
1101 mutex_unlock(&wl->mutex);
1102
1103 /*
1104 * This will not necessarily enable interrupts as interrupts
1105 * may have been disabled when op_stop was called. It will,
1106 * however, balance the above call to disable_interrupts().
1107 */
1108 wlcore_enable_interrupts(wl);
1109
1110 wl1271_error("cannot power down because not in PLT "
1111 "state: %d", wl->state);
1112 ret = -EBUSY;
1113 goto out;
1114 }
1115
1116 mutex_unlock(&wl->mutex);
1117
1118 wl1271_flush_deferred_work(wl);
1119 cancel_work_sync(&wl->netstack_work);
1120 cancel_work_sync(&wl->recovery_work);
1121 cancel_delayed_work_sync(&wl->elp_work);
1122 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1123 cancel_delayed_work_sync(&wl->connection_loss_work);
1124
1125 mutex_lock(&wl->mutex);
1126 wl1271_power_off(wl);
1127 wl->flags = 0;
1128 wl->state = WL1271_STATE_OFF;
1129 wl->plt = false;
1130 wl->rx_counter = 0;
1131 mutex_unlock(&wl->mutex);
1132
1133 out:
1134 return ret;
1135 }
1136
1137 static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1138 {
1139 struct wl1271 *wl = hw->priv;
1140 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1141 struct ieee80211_vif *vif = info->control.vif;
1142 struct wl12xx_vif *wlvif = NULL;
1143 unsigned long flags;
1144 int q, mapping;
1145 u8 hlid;
1146
1147 if (vif)
1148 wlvif = wl12xx_vif_to_data(vif);
1149
1150 mapping = skb_get_queue_mapping(skb);
1151 q = wl1271_tx_get_queue(mapping);
1152
1153 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
1154
1155 spin_lock_irqsave(&wl->wl_lock, flags);
1156
1157 /* queue the packet */
1158 if (hlid == WL12XX_INVALID_LINK_ID ||
1159 (wlvif && !test_bit(hlid, wlvif->links_map))) {
1160 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1161 ieee80211_free_txskb(hw, skb);
1162 goto out;
1163 }
1164
1165 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1166 hlid, q, skb->len);
1167 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1168
1169 wl->tx_queue_count[q]++;
1170
1171 /*
1172 	 * The workqueue is slow to process the tx_queue and we need to stop
1173 * the queue here, otherwise the queue will get too long.
1174 */
1175 if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
1176 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1177 ieee80211_stop_queue(wl->hw, mapping);
1178 set_bit(q, &wl->stopped_queues_map);
1179 }
1180
1181 /*
1182 * The chip specific setup must run before the first TX packet -
1183 * before that, the tx_work will not be initialized!
1184 */
1185
1186 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1187 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1188 ieee80211_queue_work(wl->hw, &wl->tx_work);
1189
1190 out:
1191 spin_unlock_irqrestore(&wl->wl_lock, flags);
1192 }
1193
1194 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1195 {
1196 unsigned long flags;
1197 int q;
1198
1199 /* no need to queue a new dummy packet if one is already pending */
1200 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1201 return 0;
1202
1203 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1204
1205 spin_lock_irqsave(&wl->wl_lock, flags);
1206 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1207 wl->tx_queue_count[q]++;
1208 spin_unlock_irqrestore(&wl->wl_lock, flags);
1209
1210 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1211 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1212 wl1271_tx_work_locked(wl);
1213
1214 /*
1215 * If the FW TX is busy, TX work will be scheduled by the threaded
1216 * interrupt handler function
1217 */
1218 return 0;
1219 }
1220
1221 /*
1222 * The size of the dummy packet should be at least 1400 bytes. However, in
1223  * order to minimize the number of bus transactions, aligning it to 512-byte
1224  * boundaries could be beneficial, performance-wise.
1225 */
1226 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
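/*
 * With the values above, ALIGN(1400, 512) rounds up to the next multiple of
 * 512, so TOTAL_TX_DUMMY_PACKET_SIZE is 1536 bytes; the allocation below
 * carves the HW descriptor and the 802.11 header out of that same budget.
 */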
1227
1228 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1229 {
1230 struct sk_buff *skb;
1231 struct ieee80211_hdr_3addr *hdr;
1232 unsigned int dummy_packet_size;
1233
1234 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1235 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1236
1237 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1238 if (!skb) {
1239 wl1271_warning("Failed to allocate a dummy packet skb");
1240 return NULL;
1241 }
1242
1243 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1244
1245 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1246 memset(hdr, 0, sizeof(*hdr));
1247 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1248 IEEE80211_STYPE_NULLFUNC |
1249 IEEE80211_FCTL_TODS);
1250
1251 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1252
1253 /* Dummy packets require the TID to be management */
1254 skb->priority = WL1271_TID_MGMT;
1255
1256 /* Initialize all fields that might be used */
1257 skb_set_queue_mapping(skb, 0);
1258 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1259
1260 return skb;
1261 }
1262
1263
1264 #ifdef CONFIG_PM
1265 static int
1266 wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
1267 {
1268 int num_fields = 0, in_field = 0, fields_size = 0;
1269 int i, pattern_len = 0;
1270
1271 if (!p->mask) {
1272 wl1271_warning("No mask in WoWLAN pattern");
1273 return -EINVAL;
1274 }
1275
1276 /*
1277 * The pattern is broken up into segments of bytes at different offsets
1278 * that need to be checked by the FW filter. Each segment is called
1279 * a field in the FW API. We verify that the total number of fields
1280 	 * required for this pattern won't exceed the FW limit (8), and that
1281 	 * the total fields buffer won't exceed the FW limit either.
1282 	 * Note that if a pattern crosses the Ethernet/IP header
1283 	 * boundary, a new field is required.
1284 */
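	/*
	 * Worked example (illustrative, assuming the usual 14-byte Ethernet
	 * header for WL1271_RX_FILTER_ETH_HEADER_SIZE): mask bits set at
	 * pattern offsets 12..15 form one contiguous run, but because the run
	 * crosses the Ethernet/IP boundary it counts as two fields, one for
	 * offsets 12..13 in the Ethernet header and one for 14..15 in the IP
	 * header.
	 */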
1285 for (i = 0; i < p->pattern_len; i++) {
1286 if (test_bit(i, (unsigned long *)p->mask)) {
1287 if (!in_field) {
1288 in_field = 1;
1289 pattern_len = 1;
1290 } else {
1291 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1292 num_fields++;
1293 fields_size += pattern_len +
1294 RX_FILTER_FIELD_OVERHEAD;
1295 pattern_len = 1;
1296 } else
1297 pattern_len++;
1298 }
1299 } else {
1300 if (in_field) {
1301 in_field = 0;
1302 fields_size += pattern_len +
1303 RX_FILTER_FIELD_OVERHEAD;
1304 num_fields++;
1305 }
1306 }
1307 }
1308
1309 if (in_field) {
1310 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1311 num_fields++;
1312 }
1313
1314 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1315 wl1271_warning("RX Filter too complex. Too many segments");
1316 return -EINVAL;
1317 }
1318
1319 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1320 wl1271_warning("RX filter pattern is too big");
1321 return -E2BIG;
1322 }
1323
1324 return 0;
1325 }
1326
1327 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1328 {
1329 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1330 }
1331
1332 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1333 {
1334 int i;
1335
1336 if (filter == NULL)
1337 return;
1338
1339 for (i = 0; i < filter->num_fields; i++)
1340 kfree(filter->fields[i].pattern);
1341
1342 kfree(filter);
1343 }
1344
1345 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1346 u16 offset, u8 flags,
1347 u8 *pattern, u8 len)
1348 {
1349 struct wl12xx_rx_filter_field *field;
1350
1351 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1352 wl1271_warning("Max fields per RX filter. can't alloc another");
1353 return -EINVAL;
1354 }
1355
1356 field = &filter->fields[filter->num_fields];
1357
1358 field->pattern = kzalloc(len, GFP_KERNEL);
1359 if (!field->pattern) {
1360 wl1271_warning("Failed to allocate RX filter pattern");
1361 return -ENOMEM;
1362 }
1363
1364 filter->num_fields++;
1365
1366 field->offset = cpu_to_le16(offset);
1367 field->flags = flags;
1368 field->len = len;
1369 memcpy(field->pattern, pattern, len);
1370
1371 return 0;
1372 }
1373
1374 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1375 {
1376 int i, fields_size = 0;
1377
1378 for (i = 0; i < filter->num_fields; i++)
1379 fields_size += filter->fields[i].len +
1380 sizeof(struct wl12xx_rx_filter_field) -
1381 sizeof(u8 *);
1382
1383 return fields_size;
1384 }
1385
1386 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1387 u8 *buf)
1388 {
1389 int i;
1390 struct wl12xx_rx_filter_field *field;
1391
1392 for (i = 0; i < filter->num_fields; i++) {
1393 field = (struct wl12xx_rx_filter_field *)buf;
1394
1395 field->offset = filter->fields[i].offset;
1396 field->flags = filter->fields[i].flags;
1397 field->len = filter->fields[i].len;
1398
1399 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1400 buf += sizeof(struct wl12xx_rx_filter_field) -
1401 sizeof(u8 *) + field->len;
1402 }
1403 }
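/*
 * Rough picture of the buffer produced above (member order is illustrative):
 * for each field the fixed part of struct wl12xx_rx_filter_field is copied,
 * minus the trailing pattern pointer, immediately followed by the pattern
 * bytes, so entries are packed back to back:
 *
 *   | offset | flags | len | pattern[len] | offset | flags | len | ...
 *
 * which is what wl1271_rx_filter_get_fields_size() accounts for.
 */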
1404
1405 /*
1406 * Allocates an RX filter returned through f
1407 * which needs to be freed using rx_filter_free()
1408 */
1409 static int wl1271_convert_wowlan_pattern_to_rx_filter(
1410 struct cfg80211_wowlan_trig_pkt_pattern *p,
1411 struct wl12xx_rx_filter **f)
1412 {
1413 int i, j, ret = 0;
1414 struct wl12xx_rx_filter *filter;
1415 u16 offset;
1416 u8 flags, len;
1417
1418 filter = wl1271_rx_filter_alloc();
1419 if (!filter) {
1420 wl1271_warning("Failed to alloc rx filter");
1421 ret = -ENOMEM;
1422 goto err;
1423 }
1424
1425 i = 0;
1426 while (i < p->pattern_len) {
1427 if (!test_bit(i, (unsigned long *)p->mask)) {
1428 i++;
1429 continue;
1430 }
1431
1432 for (j = i; j < p->pattern_len; j++) {
1433 if (!test_bit(j, (unsigned long *)p->mask))
1434 break;
1435
1436 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1437 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1438 break;
1439 }
1440
1441 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1442 offset = i;
1443 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1444 } else {
1445 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1446 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1447 }
1448
1449 len = j - i;
1450
1451 ret = wl1271_rx_filter_alloc_field(filter,
1452 offset,
1453 flags,
1454 &p->pattern[i], len);
1455 if (ret)
1456 goto err;
1457
1458 i = j;
1459 }
1460
1461 filter->action = FILTER_SIGNAL;
1462
1463 *f = filter;
1464 return 0;
1465
1466 err:
1467 wl1271_rx_filter_free(filter);
1468 *f = NULL;
1469
1470 return ret;
1471 }
1472
1473 static int wl1271_configure_wowlan(struct wl1271 *wl,
1474 struct cfg80211_wowlan *wow)
1475 {
1476 int i, ret;
1477
1478 if (!wow || wow->any || !wow->n_patterns) {
1479 wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1480 wl1271_rx_filter_clear_all(wl);
1481 return 0;
1482 }
1483
1484 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1485 return -EINVAL;
1486
1487 /* Validate all incoming patterns before clearing current FW state */
1488 for (i = 0; i < wow->n_patterns; i++) {
1489 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1490 if (ret) {
1491 wl1271_warning("Bad wowlan pattern %d", i);
1492 return ret;
1493 }
1494 }
1495
1496 wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1497 wl1271_rx_filter_clear_all(wl);
1498
1499 /* Translate WoWLAN patterns into filters */
1500 for (i = 0; i < wow->n_patterns; i++) {
1501 struct cfg80211_wowlan_trig_pkt_pattern *p;
1502 struct wl12xx_rx_filter *filter = NULL;
1503
1504 p = &wow->patterns[i];
1505
1506 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1507 if (ret) {
1508 wl1271_warning("Failed to create an RX filter from "
1509 "wowlan pattern %d", i);
1510 goto out;
1511 }
1512
1513 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1514
1515 wl1271_rx_filter_free(filter);
1516 if (ret)
1517 goto out;
1518 }
1519
1520 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1521
1522 out:
1523 return ret;
1524 }
1525
1526 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1527 struct wl12xx_vif *wlvif,
1528 struct cfg80211_wowlan *wow)
1529 {
1530 int ret = 0;
1531
1532 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1533 goto out;
1534
1535 ret = wl1271_ps_elp_wakeup(wl);
1536 if (ret < 0)
1537 goto out;
1538
1539 wl1271_configure_wowlan(wl, wow);
1540 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1541 wl->conf.conn.suspend_wake_up_event,
1542 wl->conf.conn.suspend_listen_interval);
1543
1544 if (ret < 0)
1545 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1546
1547 wl1271_ps_elp_sleep(wl);
1548
1549 out:
1550 return ret;
1551
1552 }
1553
1554 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1555 struct wl12xx_vif *wlvif)
1556 {
1557 int ret = 0;
1558
1559 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1560 goto out;
1561
1562 ret = wl1271_ps_elp_wakeup(wl);
1563 if (ret < 0)
1564 goto out;
1565
1566 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1567
1568 wl1271_ps_elp_sleep(wl);
1569 out:
1570 return ret;
1571
1572 }
1573
1574 static int wl1271_configure_suspend(struct wl1271 *wl,
1575 struct wl12xx_vif *wlvif,
1576 struct cfg80211_wowlan *wow)
1577 {
1578 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1579 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1580 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1581 return wl1271_configure_suspend_ap(wl, wlvif);
1582 return 0;
1583 }
1584
1585 static void wl1271_configure_resume(struct wl1271 *wl,
1586 struct wl12xx_vif *wlvif)
1587 {
1588 int ret = 0;
1589 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1590 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1591
1592 if ((!is_ap) && (!is_sta))
1593 return;
1594
1595 ret = wl1271_ps_elp_wakeup(wl);
1596 if (ret < 0)
1597 return;
1598
1599 if (is_sta) {
1600 wl1271_configure_wowlan(wl, NULL);
1601
1602 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1603 wl->conf.conn.wake_up_event,
1604 wl->conf.conn.listen_interval);
1605
1606 if (ret < 0)
1607 wl1271_error("resume: wake up conditions failed: %d",
1608 ret);
1609
1610 } else if (is_ap) {
1611 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1612 }
1613
1614 wl1271_ps_elp_sleep(wl);
1615 }
1616
1617 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1618 struct cfg80211_wowlan *wow)
1619 {
1620 struct wl1271 *wl = hw->priv;
1621 struct wl12xx_vif *wlvif;
1622 int ret;
1623
1624 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1625 WARN_ON(!wow);
1626
1627 wl1271_tx_flush(wl);
1628
1629 mutex_lock(&wl->mutex);
1630 wl->wow_enabled = true;
1631 wl12xx_for_each_wlvif(wl, wlvif) {
1632 ret = wl1271_configure_suspend(wl, wlvif, wow);
1633 if (ret < 0) {
1634 mutex_unlock(&wl->mutex);
1635 wl1271_warning("couldn't prepare device to suspend");
1636 return ret;
1637 }
1638 }
1639 mutex_unlock(&wl->mutex);
1640 /* flush any remaining work */
1641 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1642
1643 /*
1644 * disable and re-enable interrupts in order to flush
1645 * the threaded_irq
1646 */
1647 wlcore_disable_interrupts(wl);
1648
1649 /*
1650 * set suspended flag to avoid triggering a new threaded_irq
1651 * work. no need for spinlock as interrupts are disabled.
1652 */
1653 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1654
1655 wlcore_enable_interrupts(wl);
1656 flush_work(&wl->tx_work);
1657 flush_delayed_work(&wl->elp_work);
1658
1659 return 0;
1660 }
1661
1662 static int wl1271_op_resume(struct ieee80211_hw *hw)
1663 {
1664 struct wl1271 *wl = hw->priv;
1665 struct wl12xx_vif *wlvif;
1666 unsigned long flags;
1667 bool run_irq_work = false;
1668
1669 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1670 wl->wow_enabled);
1671 WARN_ON(!wl->wow_enabled);
1672
1673 /*
1674 * re-enable irq_work enqueuing, and call irq_work directly if
1675 * there is a pending work.
1676 */
1677 spin_lock_irqsave(&wl->wl_lock, flags);
1678 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1679 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1680 run_irq_work = true;
1681 spin_unlock_irqrestore(&wl->wl_lock, flags);
1682
1683 if (run_irq_work) {
1684 wl1271_debug(DEBUG_MAC80211,
1685 "run postponed irq_work directly");
1686 wl1271_irq(0, wl);
1687 wlcore_enable_interrupts(wl);
1688 }
1689
1690 mutex_lock(&wl->mutex);
1691 wl12xx_for_each_wlvif(wl, wlvif) {
1692 wl1271_configure_resume(wl, wlvif);
1693 }
1694 wl->wow_enabled = false;
1695 mutex_unlock(&wl->mutex);
1696
1697 return 0;
1698 }
1699 #endif
1700
1701 static int wl1271_op_start(struct ieee80211_hw *hw)
1702 {
1703 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1704
1705 /*
1706 * We have to delay the booting of the hardware because
1707 * we need to know the local MAC address before downloading and
1708 * initializing the firmware. The MAC address cannot be changed
1709 * after boot, and without the proper MAC address, the firmware
1710 * will not function properly.
1711 *
1712 * The MAC address is first known when the corresponding interface
1713 * is added. That is where we will initialize the hardware.
1714 */
1715
1716 return 0;
1717 }
1718
1719 static void wl1271_op_stop(struct ieee80211_hw *hw)
1720 {
1721 struct wl1271 *wl = hw->priv;
1722 int i;
1723
1724 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1725
1726 /*
1727 * Interrupts must be disabled before setting the state to OFF.
1728 * Otherwise, the interrupt handler might be called and exit without
1729 * reading the interrupt status.
1730 */
1731 wlcore_disable_interrupts(wl);
1732 mutex_lock(&wl->mutex);
1733 if (wl->state == WL1271_STATE_OFF) {
1734 mutex_unlock(&wl->mutex);
1735
1736 /*
1737 * This will not necessarily enable interrupts as interrupts
1738 * may have been disabled when op_stop was called. It will,
1739 * however, balance the above call to disable_interrupts().
1740 */
1741 wlcore_enable_interrupts(wl);
1742 return;
1743 }
1744
1745 /*
1746 * this must be before the cancel_work calls below, so that the work
1747 * functions don't perform further work.
1748 */
1749 wl->state = WL1271_STATE_OFF;
1750 mutex_unlock(&wl->mutex);
1751
1752 wl1271_flush_deferred_work(wl);
1753 cancel_delayed_work_sync(&wl->scan_complete_work);
1754 cancel_work_sync(&wl->netstack_work);
1755 cancel_work_sync(&wl->tx_work);
1756 cancel_delayed_work_sync(&wl->elp_work);
1757 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1758 cancel_delayed_work_sync(&wl->connection_loss_work);
1759
1760 /* let's notify MAC80211 about the remaining pending TX frames */
1761 wl12xx_tx_reset(wl, true);
1762 mutex_lock(&wl->mutex);
1763
1764 wl1271_power_off(wl);
1765
1766 wl->band = IEEE80211_BAND_2GHZ;
1767
1768 wl->rx_counter = 0;
1769 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1770 wl->channel_type = NL80211_CHAN_NO_HT;
1771 wl->tx_blocks_available = 0;
1772 wl->tx_allocated_blocks = 0;
1773 wl->tx_results_count = 0;
1774 wl->tx_packets_count = 0;
1775 wl->time_offset = 0;
1776 wl->ap_fw_ps_map = 0;
1777 wl->ap_ps_map = 0;
1778 wl->sched_scanning = false;
1779 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1780 memset(wl->links_map, 0, sizeof(wl->links_map));
1781 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1782 wl->active_sta_count = 0;
1783
1784 /* The system link is always allocated */
1785 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1786
1787 /*
1788 * this is performed after the cancel_work calls and the associated
1789 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1790 * get executed before all these vars have been reset.
1791 */
1792 wl->flags = 0;
1793
1794 wl->tx_blocks_freed = 0;
1795
1796 for (i = 0; i < NUM_TX_QUEUES; i++) {
1797 wl->tx_pkts_freed[i] = 0;
1798 wl->tx_allocated_pkts[i] = 0;
1799 }
1800
1801 wl1271_debugfs_reset(wl);
1802
1803 kfree(wl->fw_status);
1804 wl->fw_status = NULL;
1805 kfree(wl->tx_res_if);
1806 wl->tx_res_if = NULL;
1807 kfree(wl->target_mem_map);
1808 wl->target_mem_map = NULL;
1809
1810 mutex_unlock(&wl->mutex);
1811 }
1812
1813 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
1814 {
1815 u8 policy = find_first_zero_bit(wl->rate_policies_map,
1816 WL12XX_MAX_RATE_POLICIES);
1817 if (policy >= WL12XX_MAX_RATE_POLICIES)
1818 return -EBUSY;
1819
1820 __set_bit(policy, wl->rate_policies_map);
1821 *idx = policy;
1822 return 0;
1823 }
1824
1825 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
1826 {
1827 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
1828 return;
1829
1830 __clear_bit(*idx, wl->rate_policies_map);
1831 *idx = WL12XX_MAX_RATE_POLICIES;
1832 }
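/*
 * Typical pairing of the two helpers above (sketch): each vif allocates its
 * rate-policy indices out of wl->rate_policies_map at init time (see
 * wl12xx_init_vif_data() below) and is expected to release them again with
 * wl12xx_free_rate_policy(), which also resets the index to the invalid
 * value WL12XX_MAX_RATE_POLICIES.
 */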
1833
1834 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1835 {
1836 switch (wlvif->bss_type) {
1837 case BSS_TYPE_AP_BSS:
1838 if (wlvif->p2p)
1839 return WL1271_ROLE_P2P_GO;
1840 else
1841 return WL1271_ROLE_AP;
1842
1843 case BSS_TYPE_STA_BSS:
1844 if (wlvif->p2p)
1845 return WL1271_ROLE_P2P_CL;
1846 else
1847 return WL1271_ROLE_STA;
1848
1849 case BSS_TYPE_IBSS:
1850 return WL1271_ROLE_IBSS;
1851
1852 default:
1853 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
1854 }
1855 return WL12XX_INVALID_ROLE_TYPE;
1856 }
1857
1858 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
1859 {
1860 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
1861 int i;
1862
1863 /* clear everything but the persistent data */
1864 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
1865
1866 switch (ieee80211_vif_type_p2p(vif)) {
1867 case NL80211_IFTYPE_P2P_CLIENT:
1868 wlvif->p2p = 1;
1869 /* fall-through */
1870 case NL80211_IFTYPE_STATION:
1871 wlvif->bss_type = BSS_TYPE_STA_BSS;
1872 break;
1873 case NL80211_IFTYPE_ADHOC:
1874 wlvif->bss_type = BSS_TYPE_IBSS;
1875 break;
1876 case NL80211_IFTYPE_P2P_GO:
1877 wlvif->p2p = 1;
1878 /* fall-through */
1879 case NL80211_IFTYPE_AP:
1880 wlvif->bss_type = BSS_TYPE_AP_BSS;
1881 break;
1882 default:
1883 wlvif->bss_type = MAX_BSS_TYPE;
1884 return -EOPNOTSUPP;
1885 }
1886
1887 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
1888 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
1889 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
1890
1891 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
1892 wlvif->bss_type == BSS_TYPE_IBSS) {
1893 /* init sta/ibss data */
1894 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
1895 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
1896 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
1897 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
1898 } else {
1899 /* init ap data */
1900 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
1901 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
1902 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
1903 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
1904 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
1905 wl12xx_allocate_rate_policy(wl,
1906 &wlvif->ap.ucast_rate_idx[i]);
1907 }
1908
1909 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
1910 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
1911 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
1912 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
1913 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
1914 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
1915
1916 /*
1917 * mac80211 configures some values globally, while we treat them
1918 * per-interface. thus, on init, we have to copy them from wl
1919 */
1920 wlvif->band = wl->band;
1921 wlvif->channel = wl->channel;
1922 wlvif->power_level = wl->power_level;
1923 wlvif->channel_type = wl->channel_type;
1924
1925 INIT_WORK(&wlvif->rx_streaming_enable_work,
1926 wl1271_rx_streaming_enable_work);
1927 INIT_WORK(&wlvif->rx_streaming_disable_work,
1928 wl1271_rx_streaming_disable_work);
1929 INIT_LIST_HEAD(&wlvif->list);
1930
1931 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
1932 (unsigned long) wlvif);
1933 return 0;
1934 }
1935
1936 static bool wl12xx_init_fw(struct wl1271 *wl)
1937 {
1938 int retries = WL1271_BOOT_RETRIES;
1939 bool booted = false;
1940 struct wiphy *wiphy = wl->hw->wiphy;
1941 int ret;
1942
1943 while (retries) {
1944 retries--;
1945 ret = wl12xx_chip_wakeup(wl, false);
1946 if (ret < 0)
1947 goto power_off;
1948
1949 ret = wl->ops->boot(wl);
1950 if (ret < 0)
1951 goto power_off;
1952
1953 ret = wl1271_hw_init(wl);
1954 if (ret < 0)
1955 goto irq_disable;
1956
1957 booted = true;
1958 break;
1959
1960 irq_disable:
1961 mutex_unlock(&wl->mutex);
1962 /* Unlocking the mutex in the middle of handling is
1963 inherently unsafe. In this case we deem it safe to do,
1964 because we need to let any possibly pending IRQ out of
1965 the system (and while we are WL1271_STATE_OFF the IRQ
1966 work function will not do anything.) Also, any other
1967 possible concurrent operations will fail due to the
1968 current state, hence the wl1271 struct should be safe. */
1969 wlcore_disable_interrupts(wl);
1970 wl1271_flush_deferred_work(wl);
1971 cancel_work_sync(&wl->netstack_work);
1972 mutex_lock(&wl->mutex);
1973 power_off:
1974 wl1271_power_off(wl);
1975 }
1976
1977 if (!booted) {
1978 wl1271_error("firmware boot failed despite %d retries",
1979 WL1271_BOOT_RETRIES);
1980 goto out;
1981 }
1982
1983 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
1984
1985 /* update hw/fw version info in wiphy struct */
1986 wiphy->hw_version = wl->chip.id;
1987 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1988 sizeof(wiphy->fw_version));
1989
1990 /*
1991 * Now we know if 11a is supported (info from the NVS), so disable
1992 * 11a channels if not supported
1993 */
1994 if (!wl->enable_11a)
1995 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
1996
1997 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
1998 wl->enable_11a ? "" : "not ");
1999
2000 wl->state = WL1271_STATE_ON;
2001 out:
2002 return booted;
2003 }
2004
2005 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2006 {
2007 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2008 }
2009
2010 /*
2011 * Check whether a fw switch (i.e. moving from one loaded
2012 * fw to another) is needed. This function is also responsible
2013 * for updating wl->last_vif_count, so it must be called before
2014 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2015 * will be used).
2016 */
2017 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2018 struct vif_counter_data vif_counter_data,
2019 bool add)
2020 {
2021 enum wl12xx_fw_type current_fw = wl->fw_type;
2022 u8 vif_count = vif_counter_data.counter;
2023
2024 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2025 return false;
2026
2027 /* increase the vif count if this is a new vif */
2028 if (add && !vif_counter_data.cur_vif_running)
2029 vif_count++;
2030
2031 wl->last_vif_count = vif_count;
2032
2033 /* no need for fw change if the device is OFF */
2034 if (wl->state == WL1271_STATE_OFF)
2035 return false;
2036
2037 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2038 return true;
2039 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2040 return true;
2041
2042 return false;
2043 }
2044
2045 /*
2046 * Enter "forced psm". Make sure the sta is in psm against the ap,
2047 * to make the fw switch a bit more resilient against disconnection.
2048 */
2049 static void wl12xx_force_active_psm(struct wl1271 *wl)
2050 {
2051 struct wl12xx_vif *wlvif;
2052
2053 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2054 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2055 }
2056 }
2057
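/*
 * mac80211 add_interface callback: initializes the per-vif data, boots
 * the firmware if the chip is still off, and enables the device and
 * main roles. If a single-role/multi-role firmware switch is needed,
 * an intended fw recovery is triggered instead.
 */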
2058 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2059 struct ieee80211_vif *vif)
2060 {
2061 struct wl1271 *wl = hw->priv;
2062 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2063 struct vif_counter_data vif_count;
2064 int ret = 0;
2065 u8 role_type;
2066 bool booted = false;
2067
2068 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2069 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2070
2071 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2072 ieee80211_vif_type_p2p(vif), vif->addr);
2073
2074 wl12xx_get_vif_count(hw, vif, &vif_count);
2075
2076 mutex_lock(&wl->mutex);
2077 ret = wl1271_ps_elp_wakeup(wl);
2078 if (ret < 0)
2079 goto out_unlock;
2080
2081 /*
2082 * In some rare corner-case HW recovery scenarios it's possible to
2083 * get here before __wl1271_op_remove_interface is complete, so
2084 * opt out if that is the case.
2085 */
2086 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2087 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2088 ret = -EBUSY;
2089 goto out;
2090 }
2091
2092
2093 ret = wl12xx_init_vif_data(wl, vif);
2094 if (ret < 0)
2095 goto out;
2096
2097 wlvif->wl = wl;
2098 role_type = wl12xx_get_role_type(wl, wlvif);
2099 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2100 ret = -EINVAL;
2101 goto out;
2102 }
2103
2104 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2105 wl12xx_force_active_psm(wl);
2106 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2107 mutex_unlock(&wl->mutex);
2108 wl1271_recovery_work(&wl->recovery_work);
2109 return 0;
2110 }
2111
2112 /*
2113 * TODO: once the nvs issue is solved, move this block
2114 * to start(), and make sure the driver is ON here.
2115 */
2116 if (wl->state == WL1271_STATE_OFF) {
2117 /*
2118 * we still need this in order to configure the fw
2119 * while uploading the nvs
2120 */
2121 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2122
2123 booted = wl12xx_init_fw(wl);
2124 if (!booted) {
2125 ret = -EINVAL;
2126 goto out;
2127 }
2128 }
2129
2130 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2131 wlvif->bss_type == BSS_TYPE_IBSS) {
2132 /*
2133 * The device role is a special role used for
2134 * rx and tx frames prior to association (as
2135 * the STA role can get packets only from
2136 * its associated bssid)
2137 */
2138 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2139 WL1271_ROLE_DEVICE,
2140 &wlvif->dev_role_id);
2141 if (ret < 0)
2142 goto out;
2143 }
2144
2145 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2146 role_type, &wlvif->role_id);
2147 if (ret < 0)
2148 goto out;
2149
2150 ret = wl1271_init_vif_specific(wl, vif);
2151 if (ret < 0)
2152 goto out;
2153
2154 list_add(&wlvif->list, &wl->wlvif_list);
2155 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2156
2157 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2158 wl->ap_count++;
2159 else
2160 wl->sta_count++;
2161 out:
2162 wl1271_ps_elp_sleep(wl);
2163 out_unlock:
2164 mutex_unlock(&wl->mutex);
2165
2166 return ret;
2167 }
2168
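/*
 * Tear down a vif: complete any scan it owns, disable its roles
 * (unless a hardware recovery is in progress), free its rate policies
 * and link ids, and sync the rx-streaming timer/works with the mutex
 * temporarily released.
 */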
2169 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2170 struct ieee80211_vif *vif,
2171 bool reset_tx_queues)
2172 {
2173 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2174 int i, ret;
2175
2176 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2177
2178 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2179 return;
2180
2181 /* because of hardware recovery, we may get here twice */
2182 if (wl->state != WL1271_STATE_ON)
2183 return;
2184
2185 wl1271_info("down");
2186
2187 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2188 wl->scan_vif == vif) {
2189 /*
2190 * Rearm the tx watchdog just before idling scan. This
2191 * prevents just-finished scans from triggering the watchdog
2192 */
2193 wl12xx_rearm_tx_watchdog_locked(wl);
2194
2195 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2196 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2197 wl->scan_vif = NULL;
2198 wl->scan.req = NULL;
2199 ieee80211_scan_completed(wl->hw, true);
2200 }
2201
2202 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2203 /* disable active roles */
2204 ret = wl1271_ps_elp_wakeup(wl);
2205 if (ret < 0)
2206 goto deinit;
2207
2208 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2209 wlvif->bss_type == BSS_TYPE_IBSS) {
2210 if (wl12xx_dev_role_started(wlvif))
2211 wl12xx_stop_dev(wl, wlvif);
2212
2213 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2214 if (ret < 0)
2215 goto deinit;
2216 }
2217
2218 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2219 if (ret < 0)
2220 goto deinit;
2221
2222 wl1271_ps_elp_sleep(wl);
2223 }
2224 deinit:
2225 /* clear all hlids (except system_hlid) */
2226 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2227
2228 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2229 wlvif->bss_type == BSS_TYPE_IBSS) {
2230 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2231 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2232 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2233 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2234 } else {
2235 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2236 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2237 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2238 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2239 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2240 wl12xx_free_rate_policy(wl,
2241 &wlvif->ap.ucast_rate_idx[i]);
2242 wl1271_free_ap_keys(wl, wlvif);
2243 }
2244
2245 dev_kfree_skb(wlvif->probereq);
2246 wlvif->probereq = NULL;
2247 wl12xx_tx_reset_wlvif(wl, wlvif);
2248 if (wl->last_wlvif == wlvif)
2249 wl->last_wlvif = NULL;
2250 list_del(&wlvif->list);
2251 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2252 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2253 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2254
2255 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2256 wl->ap_count--;
2257 else
2258 wl->sta_count--;
2259
2260 mutex_unlock(&wl->mutex);
2261
2262 del_timer_sync(&wlvif->rx_streaming_timer);
2263 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2264 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2265
2266 mutex_lock(&wl->mutex);
2267 }
2268
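/*
 * mac80211 remove_interface callback: removes the vif and queues an
 * intended fw recovery if a firmware switch is now required.
 */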
2269 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2270 struct ieee80211_vif *vif)
2271 {
2272 struct wl1271 *wl = hw->priv;
2273 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2274 struct wl12xx_vif *iter;
2275 struct vif_counter_data vif_count;
2276 bool cancel_recovery = true;
2277
2278 wl12xx_get_vif_count(hw, vif, &vif_count);
2279 mutex_lock(&wl->mutex);
2280
2281 if (wl->state == WL1271_STATE_OFF ||
2282 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2283 goto out;
2284
2285 /*
2286 * wl->vif can be null here if someone shuts down the interface
2287 * just when hardware recovery has been started.
2288 */
2289 wl12xx_for_each_wlvif(wl, iter) {
2290 if (iter != wlvif)
2291 continue;
2292
2293 __wl1271_op_remove_interface(wl, vif, true);
2294 break;
2295 }
2296 WARN_ON(iter != wlvif);
2297 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2298 wl12xx_force_active_psm(wl);
2299 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2300 wl12xx_queue_recovery_work(wl);
2301 cancel_recovery = false;
2302 }
2303 out:
2304 mutex_unlock(&wl->mutex);
2305 if (cancel_recovery)
2306 cancel_work_sync(&wl->recovery_work);
2307 }
2308
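/*
 * Change the interface type by removing and re-adding the vif, with
 * WL1271_FLAG_VIF_CHANGE_IN_PROGRESS set so no fw switch is attempted
 * in between.
 */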
2309 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2310 struct ieee80211_vif *vif,
2311 enum nl80211_iftype new_type, bool p2p)
2312 {
2313 struct wl1271 *wl = hw->priv;
2314 int ret;
2315
2316 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2317 wl1271_op_remove_interface(hw, vif);
2318
2319 vif->type = new_type;
2320 vif->p2p = p2p;
2321 ret = wl1271_op_add_interface(hw, vif);
2322
2323 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2324 return ret;
2325 }
2326
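/*
 * Start the STA or IBSS role and, when associated, restore the
 * keep-alive configuration that the JOIN command resets.
 */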
2327 static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2328 bool set_assoc)
2329 {
2330 int ret;
2331 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2332
2333 /*
2334 * One of the side effects of the JOIN command is that it clears
2335 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2336 * to a WPA/WPA2 access point will therefore kill the data-path.
2337 * Currently the only valid scenario for JOIN during association
2338 * is on roaming, in which case we will also be given new keys.
2339 * Keep the below message for now, unless it starts bothering
2340 * users who really like to roam a lot :)
2341 */
2342 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2343 wl1271_info("JOIN while associated.");
2344
2345 /* clear encryption type */
2346 wlvif->encryption_type = KEY_NONE;
2347
2348 if (set_assoc)
2349 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2350
2351 if (is_ibss)
2352 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2353 else
2354 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2355 if (ret < 0)
2356 goto out;
2357
2358 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2359 goto out;
2360
2361 /*
2362 * The join command disables the keep-alive mode, shuts down its process,
2363 * and also clears the template config, so we need to reset it all after
2364 * the join. The acx_aid starts the keep-alive process, and the order
2365 * of the commands below is relevant.
2366 */
2367 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2368 if (ret < 0)
2369 goto out;
2370
2371 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2372 if (ret < 0)
2373 goto out;
2374
2375 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2376 if (ret < 0)
2377 goto out;
2378
2379 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2380 CMD_TEMPL_KLV_IDX_NULL_DATA,
2381 ACX_KEEP_ALIVE_TPL_VALID);
2382 if (ret < 0)
2383 goto out;
2384
2385 out:
2386 return ret;
2387 }
2388
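/*
 * Stop any pending channel switch, stop the STA role and reset the
 * TX security counters.
 */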
2389 static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2390 {
2391 int ret;
2392
2393 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2394 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2395
2396 wl12xx_cmd_stop_channel_switch(wl);
2397 ieee80211_chswitch_done(vif, false);
2398 }
2399
2400 /* to stop listening to a channel, we disconnect */
2401 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
2402 if (ret < 0)
2403 goto out;
2404
2405 /* reset TX security counters on a clean disconnect */
2406 wlvif->tx_security_last_seq_lsb = 0;
2407 wlvif->tx_security_seq = 0;
2408
2409 out:
2410 return ret;
2411 }
2412
2413 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2414 {
2415 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2416 wlvif->rate_set = wlvif->basic_rate_set;
2417 }
2418
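/*
 * Handle idle transitions for a STA vif: on idle, stop the device
 * role, fall back to the minimal rate policy and invalidate the
 * keep-alive template; on leaving idle, stop any sched scan and start
 * the device role again.
 */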
2419 static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2420 bool idle)
2421 {
2422 int ret;
2423 bool cur_idle = !test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
2424
2425 if (idle == cur_idle)
2426 return 0;
2427
2428 if (idle) {
2429 /* no need to croc if we weren't busy (e.g. during boot) */
2430 if (wl12xx_dev_role_started(wlvif)) {
2431 ret = wl12xx_stop_dev(wl, wlvif);
2432 if (ret < 0)
2433 goto out;
2434 }
2435 wlvif->rate_set =
2436 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2437 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2438 if (ret < 0)
2439 goto out;
2440 ret = wl1271_acx_keep_alive_config(
2441 wl, wlvif, CMD_TEMPL_KLV_IDX_NULL_DATA,
2442 ACX_KEEP_ALIVE_TPL_INVALID);
2443 if (ret < 0)
2444 goto out;
2445 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
2446 } else {
2447 /* The current firmware only supports sched_scan in idle */
2448 if (wl->sched_scanning) {
2449 wl1271_scan_sched_scan_stop(wl);
2450 ieee80211_sched_scan_stopped(wl->hw);
2451 }
2452
2453 ret = wl12xx_start_dev(wl, wlvif);
2454 if (ret < 0)
2455 goto out;
2456 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
2457 }
2458
2459 out:
2460 return ret;
2461 }
2462
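/*
 * Apply hw config changes to a single vif: update the channel/band
 * (restarting the device role ROC when needed), enter or leave
 * powersave, and update the tx power level.
 */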
2463 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2464 struct ieee80211_conf *conf, u32 changed)
2465 {
2466 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2467 int channel, ret;
2468
2469 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
2470
2471 /* if the channel changes while joined, join again */
2472 if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
2473 ((wlvif->band != conf->channel->band) ||
2474 (wlvif->channel != channel) ||
2475 (wlvif->channel_type != conf->channel_type))) {
2476 /* send all pending packets */
2477 wl1271_tx_work_locked(wl);
2478 wlvif->band = conf->channel->band;
2479 wlvif->channel = channel;
2480 wlvif->channel_type = conf->channel_type;
2481
2482 if (!is_ap) {
2483 /*
2484 * FIXME: the mac80211 should really provide a fixed
2485 * rate to use here. For now, just use the smallest
2486 * possible rate for the band as a fixed rate for
2487 * association frames and other control messages.
2488 */
2489 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2490 wl1271_set_band_rate(wl, wlvif);
2491
2492 wlvif->basic_rate =
2493 wl1271_tx_min_rate_get(wl,
2494 wlvif->basic_rate_set);
2495 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2496 if (ret < 0)
2497 wl1271_warning("rate policy for channel "
2498 "failed %d", ret);
2499
2500 /*
2501 * Change the ROC channel, but only if we are
2502 * not idle; otherwise, CROC will be called
2503 * anyway.
2504 */
2505 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED,
2506 &wlvif->flags) &&
2507 wl12xx_dev_role_started(wlvif) &&
2508 !(conf->flags & IEEE80211_CONF_IDLE)) {
2509 ret = wl12xx_stop_dev(wl, wlvif);
2510 if (ret < 0)
2511 return ret;
2512
2513 ret = wl12xx_start_dev(wl, wlvif);
2514 if (ret < 0)
2515 return ret;
2516 }
2517 }
2518 }
2519
2520 if ((changed & IEEE80211_CONF_CHANGE_PS) && !is_ap) {
2521
2522 if ((conf->flags & IEEE80211_CONF_PS) &&
2523 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
2524 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
2525
2526 int ps_mode;
2527 char *ps_mode_str;
2528
2529 if (wl->conf.conn.forced_ps) {
2530 ps_mode = STATION_POWER_SAVE_MODE;
2531 ps_mode_str = "forced";
2532 } else {
2533 ps_mode = STATION_AUTO_PS_MODE;
2534 ps_mode_str = "auto";
2535 }
2536
2537 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
2538
2539 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
2540
2541 if (ret < 0)
2542 wl1271_warning("enter %s ps failed %d",
2543 ps_mode_str, ret);
2544
2545 } else if (!(conf->flags & IEEE80211_CONF_PS) &&
2546 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
2547
2548 wl1271_debug(DEBUG_PSM, "auto ps disabled");
2549
2550 ret = wl1271_ps_set_mode(wl, wlvif,
2551 STATION_ACTIVE_MODE);
2552 if (ret < 0)
2553 wl1271_warning("exit auto ps failed %d", ret);
2554 }
2555 }
2556
2557 if (conf->power_level != wlvif->power_level) {
2558 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2559 if (ret < 0)
2560 return ret;
2561
2562 wlvif->power_level = conf->power_level;
2563 }
2564
2565 return 0;
2566 }
2567
2568 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2569 {
2570 struct wl1271 *wl = hw->priv;
2571 struct wl12xx_vif *wlvif;
2572 struct ieee80211_conf *conf = &hw->conf;
2573 int channel, ret = 0;
2574
2575 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
2576
2577 wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
2578 " changed 0x%x",
2579 channel,
2580 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
2581 conf->power_level,
2582 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
2583 changed);
2584
2585 /*
2586 * mac80211 will go to idle nearly immediately after transmitting some
2587 * frames, such as the deauth. To make sure those frames reach the air,
2588 * wait here until the TX queue is fully flushed.
2589 */
2590 if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
2591 (conf->flags & IEEE80211_CONF_IDLE))
2592 wl1271_tx_flush(wl);
2593
2594 mutex_lock(&wl->mutex);
2595
2596 /* we support configuring the channel and band even while off */
2597 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2598 wl->band = conf->channel->band;
2599 wl->channel = channel;
2600 wl->channel_type = conf->channel_type;
2601 }
2602
2603 if (changed & IEEE80211_CONF_CHANGE_POWER)
2604 wl->power_level = conf->power_level;
2605
2606 if (unlikely(wl->state == WL1271_STATE_OFF))
2607 goto out;
2608
2609 ret = wl1271_ps_elp_wakeup(wl);
2610 if (ret < 0)
2611 goto out;
2612
2613 /* configure each interface */
2614 wl12xx_for_each_wlvif(wl, wlvif) {
2615 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
2616 if (ret < 0)
2617 goto out_sleep;
2618 }
2619
2620 out_sleep:
2621 wl1271_ps_elp_sleep(wl);
2622
2623 out:
2624 mutex_unlock(&wl->mutex);
2625
2626 return ret;
2627 }
2628
2629 struct wl1271_filter_params {
2630 bool enabled;
2631 int mc_list_length;
2632 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
2633 };
2634
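/*
 * Build the multicast filter parameters for configure_filter();
 * filtering is disabled when the list exceeds
 * ACX_MC_ADDRESS_GROUP_MAX entries.
 */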
2635 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
2636 struct netdev_hw_addr_list *mc_list)
2637 {
2638 struct wl1271_filter_params *fp;
2639 struct netdev_hw_addr *ha;
2640 struct wl1271 *wl = hw->priv;
2641
2642 if (unlikely(wl->state == WL1271_STATE_OFF))
2643 return 0;
2644
2645 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
2646 if (!fp) {
2647 wl1271_error("Out of memory setting filters.");
2648 return 0;
2649 }
2650
2651 /* update multicast filtering parameters */
2652 fp->mc_list_length = 0;
2653 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
2654 fp->enabled = false;
2655 } else {
2656 fp->enabled = true;
2657 netdev_hw_addr_list_for_each(ha, mc_list) {
2658 memcpy(fp->mc_list[fp->mc_list_length],
2659 ha->addr, ETH_ALEN);
2660 fp->mc_list_length++;
2661 }
2662 }
2663
2664 return (u64)(unsigned long)fp;
2665 }
2666
2667 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
2668 FIF_ALLMULTI | \
2669 FIF_FCSFAIL | \
2670 FIF_BCN_PRBRESP_PROMISC | \
2671 FIF_CONTROL | \
2672 FIF_OTHER_BSS)
2673
2674 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
2675 unsigned int changed,
2676 unsigned int *total, u64 multicast)
2677 {
2678 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
2679 struct wl1271 *wl = hw->priv;
2680 struct wl12xx_vif *wlvif;
2681
2682 int ret;
2683
2684 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
2685 " total %x", changed, *total);
2686
2687 mutex_lock(&wl->mutex);
2688
2689 *total &= WL1271_SUPPORTED_FILTERS;
2690 changed &= WL1271_SUPPORTED_FILTERS;
2691
2692 if (unlikely(wl->state == WL1271_STATE_OFF))
2693 goto out;
2694
2695 ret = wl1271_ps_elp_wakeup(wl);
2696 if (ret < 0)
2697 goto out;
2698
2699 wl12xx_for_each_wlvif(wl, wlvif) {
2700 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
2701 if (*total & FIF_ALLMULTI)
2702 ret = wl1271_acx_group_address_tbl(wl, wlvif,
2703 false,
2704 NULL, 0);
2705 else if (fp)
2706 ret = wl1271_acx_group_address_tbl(wl, wlvif,
2707 fp->enabled,
2708 fp->mc_list,
2709 fp->mc_list_length);
2710 if (ret < 0)
2711 goto out_sleep;
2712 }
2713 }
2714
2715 /*
2716 * The fw doesn't provide an API to configure the filters. Instead,
2717 * the filter configuration is based on the active roles / ROC
2718 * state.
2719 */
2720
2721 out_sleep:
2722 wl1271_ps_elp_sleep(wl);
2723
2724 out:
2725 mutex_unlock(&wl->mutex);
2726 kfree(fp);
2727 }
2728
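/*
 * Record an AP key so it can be programmed into the firmware once the
 * AP role has actually been started (see wl1271_ap_init_hwenc()).
 */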
2729 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2730 u8 id, u8 key_type, u8 key_size,
2731 const u8 *key, u8 hlid, u32 tx_seq_32,
2732 u16 tx_seq_16)
2733 {
2734 struct wl1271_ap_key *ap_key;
2735 int i;
2736
2737 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
2738
2739 if (key_size > MAX_KEY_SIZE)
2740 return -EINVAL;
2741
2742 /*
2743 * Find next free entry in ap_keys. Also check we are not replacing
2744 * an existing key.
2745 */
2746 for (i = 0; i < MAX_NUM_KEYS; i++) {
2747 if (wlvif->ap.recorded_keys[i] == NULL)
2748 break;
2749
2750 if (wlvif->ap.recorded_keys[i]->id == id) {
2751 wl1271_warning("trying to record key replacement");
2752 return -EINVAL;
2753 }
2754 }
2755
2756 if (i == MAX_NUM_KEYS)
2757 return -EBUSY;
2758
2759 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
2760 if (!ap_key)
2761 return -ENOMEM;
2762
2763 ap_key->id = id;
2764 ap_key->key_type = key_type;
2765 ap_key->key_size = key_size;
2766 memcpy(ap_key->key, key, key_size);
2767 ap_key->hlid = hlid;
2768 ap_key->tx_seq_32 = tx_seq_32;
2769 ap_key->tx_seq_16 = tx_seq_16;
2770
2771 wlvif->ap.recorded_keys[i] = ap_key;
2772 return 0;
2773 }
2774
2775 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2776 {
2777 int i;
2778
2779 for (i = 0; i < MAX_NUM_KEYS; i++) {
2780 kfree(wlvif->ap.recorded_keys[i]);
2781 wlvif->ap.recorded_keys[i] = NULL;
2782 }
2783 }
2784
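/*
 * Push all recorded AP keys to the firmware and, if a WEP key was
 * among them, set the default WEP key for the broadcast link.
 */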
2785 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2786 {
2787 int i, ret = 0;
2788 struct wl1271_ap_key *key;
2789 bool wep_key_added = false;
2790
2791 for (i = 0; i < MAX_NUM_KEYS; i++) {
2792 u8 hlid;
2793 if (wlvif->ap.recorded_keys[i] == NULL)
2794 break;
2795
2796 key = wlvif->ap.recorded_keys[i];
2797 hlid = key->hlid;
2798 if (hlid == WL12XX_INVALID_LINK_ID)
2799 hlid = wlvif->ap.bcast_hlid;
2800
2801 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
2802 key->id, key->key_type,
2803 key->key_size, key->key,
2804 hlid, key->tx_seq_32,
2805 key->tx_seq_16);
2806 if (ret < 0)
2807 goto out;
2808
2809 if (key->key_type == KEY_WEP)
2810 wep_key_added = true;
2811 }
2812
2813 if (wep_key_added) {
2814 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
2815 wlvif->ap.bcast_hlid);
2816 if (ret < 0)
2817 goto out;
2818 }
2819
2820 out:
2821 wl1271_free_ap_keys(wl, wlvif);
2822 return ret;
2823 }
2824
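/*
 * Program a key into the firmware, using the AP (per-hlid) or STA
 * (per-address) key command depending on the bss type.
 */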
2825 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2826 u16 action, u8 id, u8 key_type,
2827 u8 key_size, const u8 *key, u32 tx_seq_32,
2828 u16 tx_seq_16, struct ieee80211_sta *sta)
2829 {
2830 int ret;
2831 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2832
2833 /*
2834 * A role set to GEM cipher requires different Tx settings (namely
2835 * spare blocks). Note when we are in this mode so the HW can adjust.
2836 */
2837 if (key_type == KEY_GEM) {
2838 if (action == KEY_ADD_OR_REPLACE)
2839 wlvif->is_gem = true;
2840 else if (action == KEY_REMOVE)
2841 wlvif->is_gem = false;
2842 }
2843
2844 if (is_ap) {
2845 struct wl1271_station *wl_sta;
2846 u8 hlid;
2847
2848 if (sta) {
2849 wl_sta = (struct wl1271_station *)sta->drv_priv;
2850 hlid = wl_sta->hlid;
2851 } else {
2852 hlid = wlvif->ap.bcast_hlid;
2853 }
2854
2855 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
2856 /*
2857 * We do not support removing keys after AP shutdown.
2858 * Pretend we do to make mac80211 happy.
2859 */
2860 if (action != KEY_ADD_OR_REPLACE)
2861 return 0;
2862
2863 ret = wl1271_record_ap_key(wl, wlvif, id,
2864 key_type, key_size,
2865 key, hlid, tx_seq_32,
2866 tx_seq_16);
2867 } else {
2868 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
2869 id, key_type, key_size,
2870 key, hlid, tx_seq_32,
2871 tx_seq_16);
2872 }
2873
2874 if (ret < 0)
2875 return ret;
2876 } else {
2877 const u8 *addr;
2878 static const u8 bcast_addr[ETH_ALEN] = {
2879 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2880 };
2881
2882 addr = sta ? sta->addr : bcast_addr;
2883
2884 if (is_zero_ether_addr(addr)) {
2885 /* We don't support TX-only encryption */
2886 return -EOPNOTSUPP;
2887 }
2888
2889 /* The wl1271 does not allow removing unicast keys - they
2890 will be cleared automatically on the next CMD_JOIN. Ignore the
2891 request silently, as we don't want mac80211 to emit
2892 an error message. */
2893 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
2894 return 0;
2895
2896 /* don't remove key if hlid was already deleted */
2897 if (action == KEY_REMOVE &&
2898 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
2899 return 0;
2900
2901 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
2902 id, key_type, key_size,
2903 key, addr, tx_seq_32,
2904 tx_seq_16);
2905 if (ret < 0)
2906 return ret;
2907
2908 /* the default WEP key needs to be configured at least once */
2909 if (key_type == KEY_WEP) {
2910 ret = wl12xx_cmd_set_default_wep_key(wl,
2911 wlvif->default_key,
2912 wlvif->sta.hlid);
2913 if (ret < 0)
2914 return ret;
2915 }
2916 }
2917
2918 return 0;
2919 }
2920
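/*
 * mac80211 set_key callback: map the cipher suite to a fw key type,
 * carry the current TX security sequence for TKIP/AES/GEM, and add or
 * remove the key.
 */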
2921 static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2922 struct ieee80211_vif *vif,
2923 struct ieee80211_sta *sta,
2924 struct ieee80211_key_conf *key_conf)
2925 {
2926 struct wl1271 *wl = hw->priv;
2927 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2928 int ret;
2929 u32 tx_seq_32 = 0;
2930 u16 tx_seq_16 = 0;
2931 u8 key_type;
2932
2933 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
2934
2935 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
2936 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
2937 key_conf->cipher, key_conf->keyidx,
2938 key_conf->keylen, key_conf->flags);
2939 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
2940
2941 mutex_lock(&wl->mutex);
2942
2943 if (unlikely(wl->state == WL1271_STATE_OFF)) {
2944 ret = -EAGAIN;
2945 goto out_unlock;
2946 }
2947
2948 ret = wl1271_ps_elp_wakeup(wl);
2949 if (ret < 0)
2950 goto out_unlock;
2951
2952 switch (key_conf->cipher) {
2953 case WLAN_CIPHER_SUITE_WEP40:
2954 case WLAN_CIPHER_SUITE_WEP104:
2955 key_type = KEY_WEP;
2956
2957 key_conf->hw_key_idx = key_conf->keyidx;
2958 break;
2959 case WLAN_CIPHER_SUITE_TKIP:
2960 key_type = KEY_TKIP;
2961
2962 key_conf->hw_key_idx = key_conf->keyidx;
2963 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
2964 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
2965 break;
2966 case WLAN_CIPHER_SUITE_CCMP:
2967 key_type = KEY_AES;
2968
2969 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2970 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
2971 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
2972 break;
2973 case WL1271_CIPHER_SUITE_GEM:
2974 key_type = KEY_GEM;
2975 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
2976 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
2977 break;
2978 default:
2979 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
2980
2981 ret = -EOPNOTSUPP;
2982 goto out_sleep;
2983 }
2984
2985 switch (cmd) {
2986 case SET_KEY:
2987 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
2988 key_conf->keyidx, key_type,
2989 key_conf->keylen, key_conf->key,
2990 tx_seq_32, tx_seq_16, sta);
2991 if (ret < 0) {
2992 wl1271_error("Could not add or replace key");
2993 goto out_sleep;
2994 }
2995
2996 /*
2997 * Reconfigure the ARP response if the unicast (or common)
2998 * encryption key type has changed.
2999 */
3000 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3001 (sta || key_type == KEY_WEP) &&
3002 wlvif->encryption_type != key_type) {
3003 wlvif->encryption_type = key_type;
3004 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3005 if (ret < 0) {
3006 wl1271_warning("build arp rsp failed: %d", ret);
3007 goto out_sleep;
3008 }
3009 }
3010 break;
3011
3012 case DISABLE_KEY:
3013 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3014 key_conf->keyidx, key_type,
3015 key_conf->keylen, key_conf->key,
3016 0, 0, sta);
3017 if (ret < 0) {
3018 wl1271_error("Could not remove key");
3019 goto out_sleep;
3020 }
3021 break;
3022
3023 default:
3024 wl1271_error("Unsupported key cmd 0x%x", cmd);
3025 ret = -EOPNOTSUPP;
3026 break;
3027 }
3028
3029 out_sleep:
3030 wl1271_ps_elp_sleep(wl);
3031
3032 out_unlock:
3033 mutex_unlock(&wl->mutex);
3034
3035 return ret;
3036 }
3037
3038 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3039 struct ieee80211_vif *vif,
3040 struct cfg80211_scan_request *req)
3041 {
3042 struct wl1271 *wl = hw->priv;
3043 int ret;
3044 u8 *ssid = NULL;
3045 size_t len = 0;
3046
3047 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3048
3049 if (req->n_ssids) {
3050 ssid = req->ssids[0].ssid;
3051 len = req->ssids[0].ssid_len;
3052 }
3053
3054 mutex_lock(&wl->mutex);
3055
3056 if (wl->state == WL1271_STATE_OFF) {
3057 /*
3058 * We cannot return -EBUSY here because cfg80211 would then expect
3059 * a call to ieee80211_scan_completed, and in this case
3060 * there won't be one.
3061 */
3062 ret = -EAGAIN;
3063 goto out;
3064 }
3065
3066 ret = wl1271_ps_elp_wakeup(wl);
3067 if (ret < 0)
3068 goto out;
3069
3070 /* fail if there is any role in ROC */
3071 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3072 /* don't allow scanning right now */
3073 ret = -EBUSY;
3074 goto out_sleep;
3075 }
3076
3077 ret = wl1271_scan(hw->priv, vif, ssid, len, req);
3078 out_sleep:
3079 wl1271_ps_elp_sleep(wl);
3080 out:
3081 mutex_unlock(&wl->mutex);
3082
3083 return ret;
3084 }
3085
3086 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3087 struct ieee80211_vif *vif)
3088 {
3089 struct wl1271 *wl = hw->priv;
3090 int ret;
3091
3092 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3093
3094 mutex_lock(&wl->mutex);
3095
3096 if (wl->state == WL1271_STATE_OFF)
3097 goto out;
3098
3099 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3100 goto out;
3101
3102 ret = wl1271_ps_elp_wakeup(wl);
3103 if (ret < 0)
3104 goto out;
3105
3106 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3107 ret = wl1271_scan_stop(wl);
3108 if (ret < 0)
3109 goto out_sleep;
3110 }
3111
3112 /*
3113 * Rearm the tx watchdog just before idling scan. This
3114 * prevents just-finished scans from triggering the watchdog
3115 */
3116 wl12xx_rearm_tx_watchdog_locked(wl);
3117
3118 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3119 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3120 wl->scan_vif = NULL;
3121 wl->scan.req = NULL;
3122 ieee80211_scan_completed(wl->hw, true);
3123
3124 out_sleep:
3125 wl1271_ps_elp_sleep(wl);
3126 out:
3127 mutex_unlock(&wl->mutex);
3128
3129 cancel_delayed_work_sync(&wl->scan_complete_work);
3130 }
3131
3132 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3133 struct ieee80211_vif *vif,
3134 struct cfg80211_sched_scan_request *req,
3135 struct ieee80211_sched_scan_ies *ies)
3136 {
3137 struct wl1271 *wl = hw->priv;
3138 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3139 int ret;
3140
3141 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3142
3143 mutex_lock(&wl->mutex);
3144
3145 if (wl->state == WL1271_STATE_OFF) {
3146 ret = -EAGAIN;
3147 goto out;
3148 }
3149
3150 ret = wl1271_ps_elp_wakeup(wl);
3151 if (ret < 0)
3152 goto out;
3153
3154 ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
3155 if (ret < 0)
3156 goto out_sleep;
3157
3158 ret = wl1271_scan_sched_scan_start(wl, wlvif);
3159 if (ret < 0)
3160 goto out_sleep;
3161
3162 wl->sched_scanning = true;
3163
3164 out_sleep:
3165 wl1271_ps_elp_sleep(wl);
3166 out:
3167 mutex_unlock(&wl->mutex);
3168 return ret;
3169 }
3170
3171 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3172 struct ieee80211_vif *vif)
3173 {
3174 struct wl1271 *wl = hw->priv;
3175 int ret;
3176
3177 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3178
3179 mutex_lock(&wl->mutex);
3180
3181 if (wl->state == WL1271_STATE_OFF)
3182 goto out;
3183
3184 ret = wl1271_ps_elp_wakeup(wl);
3185 if (ret < 0)
3186 goto out;
3187
3188 wl1271_scan_sched_scan_stop(wl);
3189
3190 wl1271_ps_elp_sleep(wl);
3191 out:
3192 mutex_unlock(&wl->mutex);
3193 }
3194
3195 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3196 {
3197 struct wl1271 *wl = hw->priv;
3198 int ret = 0;
3199
3200 mutex_lock(&wl->mutex);
3201
3202 if (unlikely(wl->state == WL1271_STATE_OFF)) {
3203 ret = -EAGAIN;
3204 goto out;
3205 }
3206
3207 ret = wl1271_ps_elp_wakeup(wl);
3208 if (ret < 0)
3209 goto out;
3210
3211 ret = wl1271_acx_frag_threshold(wl, value);
3212 if (ret < 0)
3213 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3214
3215 wl1271_ps_elp_sleep(wl);
3216
3217 out:
3218 mutex_unlock(&wl->mutex);
3219
3220 return ret;
3221 }
3222
3223 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3224 {
3225 struct wl1271 *wl = hw->priv;
3226 struct wl12xx_vif *wlvif;
3227 int ret = 0;
3228
3229 mutex_lock(&wl->mutex);
3230
3231 if (unlikely(wl->state == WL1271_STATE_OFF)) {
3232 ret = -EAGAIN;
3233 goto out;
3234 }
3235
3236 ret = wl1271_ps_elp_wakeup(wl);
3237 if (ret < 0)
3238 goto out;
3239
3240 wl12xx_for_each_wlvif(wl, wlvif) {
3241 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3242 if (ret < 0)
3243 wl1271_warning("set rts threshold failed: %d", ret);
3244 }
3245 wl1271_ps_elp_sleep(wl);
3246
3247 out:
3248 mutex_unlock(&wl->mutex);
3249
3250 return ret;
3251 }
3252
3253 static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb,
3254 int offset)
3255 {
3256 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3257 u8 ssid_len;
3258 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
3259 skb->len - offset);
3260
3261 if (!ptr) {
3262 wl1271_error("No SSID in IEs!");
3263 return -ENOENT;
3264 }
3265
3266 ssid_len = ptr[1];
3267 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
3268 wl1271_error("SSID is too long!");
3269 return -EINVAL;
3270 }
3271
3272 wlvif->ssid_len = ssid_len;
3273 memcpy(wlvif->ssid, ptr+2, ssid_len);
3274 return 0;
3275 }
3276
3277 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3278 {
3279 int len;
3280 const u8 *next, *end = skb->data + skb->len;
3281 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3282 skb->len - ieoffset);
3283 if (!ie)
3284 return;
3285 len = ie[1] + 2;
3286 next = ie + len;
3287 memmove(ie, next, end - next);
3288 skb_trim(skb, skb->len - len);
3289 }
3290
3291 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3292 unsigned int oui, u8 oui_type,
3293 int ieoffset)
3294 {
3295 int len;
3296 const u8 *next, *end = skb->data + skb->len;
3297 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3298 skb->data + ieoffset,
3299 skb->len - ieoffset);
3300 if (!ie)
3301 return;
3302 len = ie[1] + 2;
3303 next = ie + len;
3304 memmove(ie, next, end - next);
3305 skb_trim(skb, skb->len - len);
3306 }
3307
3308 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3309 struct ieee80211_vif *vif)
3310 {
3311 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3312 struct sk_buff *skb;
3313 int ret;
3314
3315 skb = ieee80211_proberesp_get(wl->hw, vif);
3316 if (!skb)
3317 return -EOPNOTSUPP;
3318
3319 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3320 CMD_TEMPL_AP_PROBE_RESPONSE,
3321 skb->data,
3322 skb->len, 0,
3323 rates);
3324
3325 dev_kfree_skb(skb);
3326 return ret;
3327 }
3328
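/*
 * Set the AP probe-response template from the given data; if the vif
 * SSID is not set, insert the SSID from bss_conf into the template
 * first.
 */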
3329 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3330 struct ieee80211_vif *vif,
3331 u8 *probe_rsp_data,
3332 size_t probe_rsp_len,
3333 u32 rates)
3334 {
3335 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3336 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3337 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3338 int ssid_ie_offset, ie_offset, templ_len;
3339 const u8 *ptr;
3340
3341 /* no need to change probe response if the SSID is set correctly */
3342 if (wlvif->ssid_len > 0)
3343 return wl1271_cmd_template_set(wl, wlvif->role_id,
3344 CMD_TEMPL_AP_PROBE_RESPONSE,
3345 probe_rsp_data,
3346 probe_rsp_len, 0,
3347 rates);
3348
3349 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3350 wl1271_error("probe_rsp template too big");
3351 return -EINVAL;
3352 }
3353
3354 /* start searching from IE offset */
3355 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3356
3357 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3358 probe_rsp_len - ie_offset);
3359 if (!ptr) {
3360 wl1271_error("No SSID in beacon!");
3361 return -EINVAL;
3362 }
3363
3364 ssid_ie_offset = ptr - probe_rsp_data;
3365 ptr += (ptr[1] + 2);
3366
3367 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3368
3369 /* insert SSID from bss_conf */
3370 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3371 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3372 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3373 bss_conf->ssid, bss_conf->ssid_len);
3374 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3375
3376 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3377 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3378 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3379
3380 return wl1271_cmd_template_set(wl, wlvif->role_id,
3381 CMD_TEMPL_AP_PROBE_RESPONSE,
3382 probe_rsp_templ,
3383 templ_len, 0,
3384 rates);
3385 }
3386
3387 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3388 struct ieee80211_vif *vif,
3389 struct ieee80211_bss_conf *bss_conf,
3390 u32 changed)
3391 {
3392 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3393 int ret = 0;
3394
3395 if (changed & BSS_CHANGED_ERP_SLOT) {
3396 if (bss_conf->use_short_slot)
3397 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3398 else
3399 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3400 if (ret < 0) {
3401 wl1271_warning("Set slot time failed %d", ret);
3402 goto out;
3403 }
3404 }
3405
3406 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3407 if (bss_conf->use_short_preamble)
3408 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3409 else
3410 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3411 }
3412
3413 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3414 if (bss_conf->use_cts_prot)
3415 ret = wl1271_acx_cts_protect(wl, wlvif,
3416 CTSPROTECT_ENABLE);
3417 else
3418 ret = wl1271_acx_cts_protect(wl, wlvif,
3419 CTSPROTECT_DISABLE);
3420 if (ret < 0) {
3421 wl1271_warning("Set ctsprotect failed %d", ret);
3422 goto out;
3423 }
3424 }
3425
3426 out:
3427 return ret;
3428 }
3429
3430 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3431 struct ieee80211_vif *vif,
3432 struct ieee80211_bss_conf *bss_conf,
3433 u32 changed)
3434 {
3435 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3436 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3437 int ret = 0;
3438
3439 if ((changed & BSS_CHANGED_BEACON_INT)) {
3440 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
3441 bss_conf->beacon_int);
3442
3443 wlvif->beacon_int = bss_conf->beacon_int;
3444 }
3445
3446 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3447 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3448 if (!wl1271_ap_set_probe_resp_tmpl(wl, rate, vif)) {
3449 wl1271_debug(DEBUG_AP, "probe response updated");
3450 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3451 }
3452 }
3453
3454 if ((changed & BSS_CHANGED_BEACON)) {
3455 struct ieee80211_hdr *hdr;
3456 u32 min_rate;
3457 int ieoffset = offsetof(struct ieee80211_mgmt,
3458 u.beacon.variable);
3459 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3460 u16 tmpl_id;
3461
3462 if (!beacon) {
3463 ret = -EINVAL;
3464 goto out;
3465 }
3466
3467 wl1271_debug(DEBUG_MASTER, "beacon updated");
3468
3469 ret = wl1271_ssid_set(vif, beacon, ieoffset);
3470 if (ret < 0) {
3471 dev_kfree_skb(beacon);
3472 goto out;
3473 }
3474 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3475 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3476 CMD_TEMPL_BEACON;
3477 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3478 beacon->data,
3479 beacon->len, 0,
3480 min_rate);
3481 if (ret < 0) {
3482 dev_kfree_skb(beacon);
3483 goto out;
3484 }
3485
3486 /*
3487 * In case a probe-resp template was already set explicitly
3488 * by usermode, don't use the beacon data.
3489 */
3490 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3491 goto end_bcn;
3492
3493 /* remove TIM ie from probe response */
3494 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3495
3496 /*
3497 * Remove the p2p ie from the probe response.
3498 * The fw responds to probe requests that don't include
3499 * the p2p ie. Probe requests with a p2p ie will be passed up,
3500 * and will be responded to by the supplicant (the spec
3501 * forbids including the p2p ie when responding to probe
3502 * requests that didn't include it).
3503 */
3504 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3505 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
3506
3507 hdr = (struct ieee80211_hdr *) beacon->data;
3508 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3509 IEEE80211_STYPE_PROBE_RESP);
3510 if (is_ap)
3511 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3512 beacon->data,
3513 beacon->len,
3514 min_rate);
3515 else
3516 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3517 CMD_TEMPL_PROBE_RESPONSE,
3518 beacon->data,
3519 beacon->len, 0,
3520 min_rate);
3521 end_bcn:
3522 dev_kfree_skb(beacon);
3523 if (ret < 0)
3524 goto out;
3525 }
3526
3527 out:
3528 if (ret != 0)
3529 wl1271_error("beacon info change failed: %d", ret);
3530 return ret;
3531 }
3532
3533 /* AP mode changes */
3534 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3535 struct ieee80211_vif *vif,
3536 struct ieee80211_bss_conf *bss_conf,
3537 u32 changed)
3538 {
3539 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3540 int ret = 0;
3541
3542 if ((changed & BSS_CHANGED_BASIC_RATES)) {
3543 u32 rates = bss_conf->basic_rates;
3544
3545 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
3546 wlvif->band);
3547 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
3548 wlvif->basic_rate_set);
3549
3550 ret = wl1271_init_ap_rates(wl, wlvif);
3551 if (ret < 0) {
3552 wl1271_error("AP rate policy change failed %d", ret);
3553 goto out;
3554 }
3555
3556 ret = wl1271_ap_init_templates(wl, vif);
3557 if (ret < 0)
3558 goto out;
3559 }
3560
3561 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
3562 if (ret < 0)
3563 goto out;
3564
3565 if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
3566 if (bss_conf->enable_beacon) {
3567 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3568 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
3569 if (ret < 0)
3570 goto out;
3571
3572 ret = wl1271_ap_init_hwenc(wl, wlvif);
3573 if (ret < 0)
3574 goto out;
3575
3576 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3577 wl1271_debug(DEBUG_AP, "started AP");
3578 }
3579 } else {
3580 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3581 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
3582 if (ret < 0)
3583 goto out;
3584
3585 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3586 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
3587 &wlvif->flags);
3588 wl1271_debug(DEBUG_AP, "stopped AP");
3589 }
3590 }
3591 }
3592
3593 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
3594 if (ret < 0)
3595 goto out;
3596
3597 /* Handle HT information change */
3598 if ((changed & BSS_CHANGED_HT) &&
3599 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
3600 ret = wl1271_acx_set_ht_information(wl, wlvif,
3601 bss_conf->ht_operation_mode);
3602 if (ret < 0) {
3603 wl1271_warning("Set ht information failed %d", ret);
3604 goto out;
3605 }
3606 }
3607
3608 out:
3609 return;
3610 }
3611
3612 /* STA/IBSS mode changes */
3613 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3614 struct ieee80211_vif *vif,
3615 struct ieee80211_bss_conf *bss_conf,
3616 u32 changed)
3617 {
3618 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3619 bool do_join = false, set_assoc = false;
3620 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
3621 bool ibss_joined = false;
3622 u32 sta_rate_set = 0;
3623 int ret;
3624 struct ieee80211_sta *sta;
3625 bool sta_exists = false;
3626 struct ieee80211_sta_ht_cap sta_ht_cap;
3627
3628 if (is_ibss) {
3629 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
3630 changed);
3631 if (ret < 0)
3632 goto out;
3633 }
3634
3635 if (changed & BSS_CHANGED_IBSS) {
3636 if (bss_conf->ibss_joined) {
3637 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
3638 ibss_joined = true;
3639 } else {
3640 if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED,
3641 &wlvif->flags))
3642 wl1271_unjoin(wl, wlvif);
3643 }
3644 }
3645
3646 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
3647 do_join = true;
3648
3649 /* Need to update the SSID (for filtering etc) */
3650 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
3651 do_join = true;
3652
3653 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
3654 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
3655 bss_conf->enable_beacon ? "enabled" : "disabled");
3656
3657 do_join = true;
3658 }
3659
3660 if (changed & BSS_CHANGED_IDLE && !is_ibss) {
3661 ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
3662 if (ret < 0)
3663 wl1271_warning("idle mode change failed %d", ret);
3664 }
3665
3666 if ((changed & BSS_CHANGED_CQM)) {
3667 bool enable = false;
3668 if (bss_conf->cqm_rssi_thold)
3669 enable = true;
3670 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
3671 bss_conf->cqm_rssi_thold,
3672 bss_conf->cqm_rssi_hyst);
3673 if (ret < 0)
3674 goto out;
3675 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
3676 }
3677
3678 if (changed & BSS_CHANGED_BSSID)
3679 if (!is_zero_ether_addr(bss_conf->bssid)) {
3680 ret = wl12xx_cmd_build_null_data(wl, wlvif);
3681 if (ret < 0)
3682 goto out;
3683
3684 ret = wl1271_build_qos_null_data(wl, vif);
3685 if (ret < 0)
3686 goto out;
3687 }
3688
3689 if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
3690 rcu_read_lock();
3691 sta = ieee80211_find_sta(vif, bss_conf->bssid);
3692 if (!sta)
3693 goto sta_not_found;
3694
3695 /* save the supp_rates of the ap */
3696 sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
3697 if (sta->ht_cap.ht_supported)
3698 sta_rate_set |=
3699 (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) |
3700 (sta->ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET);
3701 sta_ht_cap = sta->ht_cap;
3702 sta_exists = true;
3703
3704 sta_not_found:
3705 rcu_read_unlock();
3706 }
3707
3708 if ((changed & BSS_CHANGED_ASSOC)) {
3709 if (bss_conf->assoc) {
3710 u32 rates;
3711 int ieoffset;
3712 wlvif->aid = bss_conf->aid;
3713 wlvif->channel_type = bss_conf->channel_type;
3714 wlvif->beacon_int = bss_conf->beacon_int;
3715 do_join = true;
3716 set_assoc = true;
3717
3718 /* Cancel connection_loss_work */
3719 cancel_delayed_work_sync(&wl->connection_loss_work);
3720
3721 /*
3722 * use basic rates from AP, and determine lowest rate
3723 * to use with control frames.
3724 */
3725 rates = bss_conf->basic_rates;
3726 wlvif->basic_rate_set =
3727 wl1271_tx_enabled_rates_get(wl, rates,
3728 wlvif->band);
3729 wlvif->basic_rate =
3730 wl1271_tx_min_rate_get(wl,
3731 wlvif->basic_rate_set);
3732 if (sta_rate_set)
3733 wlvif->rate_set =
3734 wl1271_tx_enabled_rates_get(wl,
3735 sta_rate_set,
3736 wlvif->band);
3737 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3738 if (ret < 0)
3739 goto out;
3740
3741 /*
3742 * With wl1271, we don't need to update the
3743 * beacon_int and dtim_period, because the firmware
3744 * updates them by itself when the first beacon is
3745 * received after a join.
3746 */
3747 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
3748 if (ret < 0)
3749 goto out;
3750
3751 /*
3752 * Get a template for hardware connection maintenance
3753 */
3754 dev_kfree_skb(wlvif->probereq);
3755 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
3756 wlvif,
3757 NULL);
3758 ieoffset = offsetof(struct ieee80211_mgmt,
3759 u.probe_req.variable);
3760 wl1271_ssid_set(vif, wlvif->probereq, ieoffset);
3761
3762 /* enable the connection monitoring feature */
3763 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
3764 if (ret < 0)
3765 goto out;
3766 } else {
3767 /* use defaults when not associated */
3768 bool was_assoc =
3769 !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED,
3770 &wlvif->flags);
3771 bool was_ifup =
3772 !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT,
3773 &wlvif->flags);
3774 wlvif->aid = 0;
3775
3776 /* free probe-request template */
3777 dev_kfree_skb(wlvif->probereq);
3778 wlvif->probereq = NULL;
3779
3780 /* revert back to minimum rates for the current band */
3781 wl1271_set_band_rate(wl, wlvif);
3782 wlvif->basic_rate =
3783 wl1271_tx_min_rate_get(wl,
3784 wlvif->basic_rate_set);
3785 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3786 if (ret < 0)
3787 goto out;
3788
3789 /* disable connection monitor features */
3790 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3791
3792 /* Disable the keep-alive feature */
3793 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3794 if (ret < 0)
3795 goto out;
3796
3797 /* restore the bssid filter and go to dummy bssid */
3798 if (was_assoc) {
3799 /*
3800 * We might have to disable roc if there was
3801 * no IF_OPER_UP notification.
3802 */
3803 if (!was_ifup) {
3804 ret = wl12xx_croc(wl, wlvif->role_id);
3805 if (ret < 0)
3806 goto out;
3807 }
3808 /*
3809 * (We also need to disable roc in case of
3810 * roaming on the same channel, until we
3811 * have a better flow...)
3812 */
3813 if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
3814 ret = wl12xx_croc(wl,
3815 wlvif->dev_role_id);
3816 if (ret < 0)
3817 goto out;
3818 }
3819
3820 wl1271_unjoin(wl, wlvif);
3821 if (!bss_conf->idle)
3822 wl12xx_start_dev(wl, wlvif);
3823 }
3824 }
3825 }
3826
3827 if (changed & BSS_CHANGED_IBSS) {
3828 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
3829 bss_conf->ibss_joined);
3830
3831 if (bss_conf->ibss_joined) {
3832 u32 rates = bss_conf->basic_rates;
3833 wlvif->basic_rate_set =
3834 wl1271_tx_enabled_rates_get(wl, rates,
3835 wlvif->band);
3836 wlvif->basic_rate =
3837 wl1271_tx_min_rate_get(wl,
3838 wlvif->basic_rate_set);
3839
3840 /* by default, use 11b + OFDM rates */
3841 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
3842 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3843 if (ret < 0)
3844 goto out;
3845 }
3846 }
3847
3848 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
3849 if (ret < 0)
3850 goto out;
3851
3852 if (do_join) {
3853 ret = wl1271_join(wl, wlvif, set_assoc);
3854 if (ret < 0) {
3855 wl1271_warning("cmd join failed %d", ret);
3856 goto out;
3857 }
3858
3859 /* ROC until connected (after EAPOL exchange) */
3860 if (!is_ibss) {
3861 ret = wl12xx_roc(wl, wlvif, wlvif->role_id);
3862 if (ret < 0)
3863 goto out;
3864
3865 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
3866 wl12xx_set_authorized(wl, wlvif);
3867 }
3868 /*
3869 * stop device role if started (we might already be in
3870 * STA/IBSS role).
3871 */
3872 if (wl12xx_dev_role_started(wlvif)) {
3873 ret = wl12xx_stop_dev(wl, wlvif);
3874 if (ret < 0)
3875 goto out;
3876 }
3877 }
3878
3879 /* Handle new association with HT. Do this after join. */
3880 if (sta_exists) {
3881 if ((changed & BSS_CHANGED_HT) &&
3882 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
3883 ret = wl1271_acx_set_ht_capabilities(wl,
3884 &sta_ht_cap,
3885 true,
3886 wlvif->sta.hlid);
3887 if (ret < 0) {
3888 wl1271_warning("Set ht cap true failed %d",
3889 ret);
3890 goto out;
3891 }
3892 }
3893 /* handle new association without HT and disassociation */
3894 else if (changed & BSS_CHANGED_ASSOC) {
3895 ret = wl1271_acx_set_ht_capabilities(wl,
3896 &sta_ht_cap,
3897 false,
3898 wlvif->sta.hlid);
3899 if (ret < 0) {
3900 wl1271_warning("Set ht cap false failed %d",
3901 ret);
3902 goto out;
3903 }
3904 }
3905 }
3906
3907 /* Handle HT information change. Done after join. */
3908 if ((changed & BSS_CHANGED_HT) &&
3909 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
3910 ret = wl1271_acx_set_ht_information(wl, wlvif,
3911 bss_conf->ht_operation_mode);
3912 if (ret < 0) {
3913 wl1271_warning("Set ht information failed %d", ret);
3914 goto out;
3915 }
3916 }
3917
3918 /* Handle arp filtering. Done after join. */
3919 if ((changed & BSS_CHANGED_ARP_FILTER) ||
3920 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
3921 __be32 addr = bss_conf->arp_addr_list[0];
3922 wlvif->sta.qos = bss_conf->qos;
3923 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
3924
3925 if (bss_conf->arp_addr_cnt == 1 &&
3926 bss_conf->arp_filter_enabled) {
3927 wlvif->ip_addr = addr;
3928 /*
3929 * The template should have been configured only upon
3930 * association. However, it seems that the correct IP
3931 * isn't being set (when sending), so we have to
3932 * reconfigure the template upon every IP change.
3933 */
3934 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3935 if (ret < 0) {
3936 wl1271_warning("build arp rsp failed: %d", ret);
3937 goto out;
3938 }
3939
3940 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
3941 (ACX_ARP_FILTER_ARP_FILTERING |
3942 ACX_ARP_FILTER_AUTO_ARP),
3943 addr);
3944 } else {
3945 wlvif->ip_addr = 0;
3946 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
3947 }
3948
3949 if (ret < 0)
3950 goto out;
3951 }
3952
3953 out:
3954 return;
3955 }
3956
3957 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
3958 struct ieee80211_vif *vif,
3959 struct ieee80211_bss_conf *bss_conf,
3960 u32 changed)
3961 {
3962 struct wl1271 *wl = hw->priv;
3963 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3964 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3965 int ret;
3966
3967 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
3968 (int)changed);
3969
3970 mutex_lock(&wl->mutex);
3971
3972 if (unlikely(wl->state == WL1271_STATE_OFF))
3973 goto out;
3974
3975 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
3976 goto out;
3977
3978 ret = wl1271_ps_elp_wakeup(wl);
3979 if (ret < 0)
3980 goto out;
3981
3982 if (is_ap)
3983 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
3984 else
3985 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
3986
3987 wl1271_ps_elp_sleep(wl);
3988
3989 out:
3990 mutex_unlock(&wl->mutex);
3991 }
3992
3993 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
3994 struct ieee80211_vif *vif, u16 queue,
3995 const struct ieee80211_tx_queue_params *params)
3996 {
3997 struct wl1271 *wl = hw->priv;
3998 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3999 u8 ps_scheme;
4000 int ret = 0;
4001
4002 mutex_lock(&wl->mutex);
4003
4004 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4005
4006 if (params->uapsd)
4007 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4008 else
4009 ps_scheme = CONF_PS_SCHEME_LEGACY;
4010
4011 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4012 goto out;
4013
4014 ret = wl1271_ps_elp_wakeup(wl);
4015 if (ret < 0)
4016 goto out;
4017
4018 /*
4019 * The txop is configured by mac80211 in units of 32us;
4020 * the fw expects microseconds, hence the shift by 5 below.
4021 */
4022 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4023 params->cw_min, params->cw_max,
4024 params->aifs, params->txop << 5);
4025 if (ret < 0)
4026 goto out_sleep;
4027
4028 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4029 CONF_CHANNEL_TYPE_EDCF,
4030 wl1271_tx_get_queue(queue),
4031 ps_scheme, CONF_ACK_POLICY_LEGACY,
4032 0, 0);
4033
4034 out_sleep:
4035 wl1271_ps_elp_sleep(wl);
4036
4037 out:
4038 mutex_unlock(&wl->mutex);
4039
4040 return ret;
4041 }
4042
4043 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4044 struct ieee80211_vif *vif)
4045 {
4046
4047 struct wl1271 *wl = hw->priv;
4048 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4049 u64 mactime = ULLONG_MAX;
4050 int ret;
4051
4052 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4053
4054 mutex_lock(&wl->mutex);
4055
4056 if (unlikely(wl->state == WL1271_STATE_OFF))
4057 goto out;
4058
4059 ret = wl1271_ps_elp_wakeup(wl);
4060 if (ret < 0)
4061 goto out;
4062
4063 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4064 if (ret < 0)
4065 goto out_sleep;
4066
4067 out_sleep:
4068 wl1271_ps_elp_sleep(wl);
4069
4070 out:
4071 mutex_unlock(&wl->mutex);
4072 return mactime;
4073 }
4074
4075 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4076 struct survey_info *survey)
4077 {
4078 struct wl1271 *wl = hw->priv;
4079 struct ieee80211_conf *conf = &hw->conf;
4080
4081 if (idx != 0)
4082 return -ENOENT;
4083
4084 survey->channel = conf->channel;
4085 survey->filled = SURVEY_INFO_NOISE_DBM;
4086 survey->noise = wl->noise;
4087
4088 return 0;
4089 }
4090
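/* Allocate a link (hlid) for a station being added in AP mode. */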
4091 static int wl1271_allocate_sta(struct wl1271 *wl,
4092 struct wl12xx_vif *wlvif,
4093 struct ieee80211_sta *sta)
4094 {
4095 struct wl1271_station *wl_sta;
4096 int ret;
4097
4098
4099 if (wl->active_sta_count >= AP_MAX_STATIONS) {
4100 wl1271_warning("could not allocate HLID - too many stations");
4101 return -EBUSY;
4102 }
4103
4104 wl_sta = (struct wl1271_station *)sta->drv_priv;
4105 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4106 if (ret < 0) {
4107 wl1271_warning("could not allocate HLID - too many links");
4108 return -EBUSY;
4109 }
4110
4111 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4112 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4113 wl->active_sta_count++;
4114 return 0;
4115 }
4116
4117 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4118 {
4119 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4120 return;
4121
4122 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4123 memset(wl->links[hlid].addr, 0, ETH_ALEN);
4124 wl->links[hlid].ba_bitmap = 0;
4125 __clear_bit(hlid, &wl->ap_ps_map);
4126 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4127 wl12xx_free_link(wl, wlvif, &hlid);
4128 wl->active_sta_count--;
4129
4130 /*
4131 * rearm the tx watchdog when the last STA is freed - give the FW a
4132 * chance to return STA-buffered packets before complaining.
4133 */
4134 if (wl->active_sta_count == 0)
4135 wl12xx_rearm_tx_watchdog_locked(wl);
4136 }
4137
4138 static int wl12xx_sta_add(struct wl1271 *wl,
4139 struct wl12xx_vif *wlvif,
4140 struct ieee80211_sta *sta)
4141 {
4142 struct wl1271_station *wl_sta;
4143 int ret = 0;
4144 u8 hlid;
4145
4146 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4147
4148 ret = wl1271_allocate_sta(wl, wlvif, sta);
4149 if (ret < 0)
4150 return ret;
4151
4152 wl_sta = (struct wl1271_station *)sta->drv_priv;
4153 hlid = wl_sta->hlid;
4154
4155 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4156 if (ret < 0)
4157 wl1271_free_sta(wl, wlvif, hlid);
4158
4159 return ret;
4160 }
4161
4162 static int wl12xx_sta_remove(struct wl1271 *wl,
4163 struct wl12xx_vif *wlvif,
4164 struct ieee80211_sta *sta)
4165 {
4166 struct wl1271_station *wl_sta;
4167 int ret = 0, id;
4168
4169 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4170
4171 wl_sta = (struct wl1271_station *)sta->drv_priv;
4172 id = wl_sta->hlid;
4173 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4174 return -EINVAL;
4175
4176 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4177 if (ret < 0)
4178 return ret;
4179
4180 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
4181 return ret;
4182 }
4183
4184 static int wl12xx_update_sta_state(struct wl1271 *wl,
4185 struct wl12xx_vif *wlvif,
4186 struct ieee80211_sta *sta,
4187 enum ieee80211_sta_state old_state,
4188 enum ieee80211_sta_state new_state)
4189 {
4190 struct wl1271_station *wl_sta;
4191 u8 hlid;
4192 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4193 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4194 int ret;
4195
4196 wl_sta = (struct wl1271_station *)sta->drv_priv;
4197 hlid = wl_sta->hlid;
4198
4199 /* Add station (AP mode) */
4200 if (is_ap &&
4201 old_state == IEEE80211_STA_NOTEXIST &&
4202 new_state == IEEE80211_STA_NONE)
4203 return wl12xx_sta_add(wl, wlvif, sta);
4204
4205 /* Remove station (AP mode) */
4206 if (is_ap &&
4207 old_state == IEEE80211_STA_NONE &&
4208 new_state == IEEE80211_STA_NOTEXIST) {
4209 /* must not fail */
4210 wl12xx_sta_remove(wl, wlvif, sta);
4211 return 0;
4212 }
4213
4214 /* Authorize station (AP mode) */
4215 if (is_ap &&
4216 new_state == IEEE80211_STA_AUTHORIZED) {
4217 ret = wl12xx_cmd_set_peer_state(wl, hlid);
4218 if (ret < 0)
4219 return ret;
4220
4221 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4222 hlid);
4223 return ret;
4224 }
4225
4226 /* Authorize station */
4227 if (is_sta &&
4228 new_state == IEEE80211_STA_AUTHORIZED) {
4229 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4230 return wl12xx_set_authorized(wl, wlvif);
4231 }
4232
4233 if (is_sta &&
4234 old_state == IEEE80211_STA_AUTHORIZED &&
4235 new_state == IEEE80211_STA_ASSOC) {
4236 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4237 return 0;
4238 }
4239
4240 return 0;
4241 }
4242
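/*
 * Editor's summary (derived from wl12xx_update_sta_state() above, names
 * as in the code; not an exhaustive list of mac80211 transitions):
 *
 *	AP:  NOTEXIST -> NONE		wl12xx_sta_add()    (allocate HLID)
 *	AP:  NONE -> NOTEXIST		wl12xx_sta_remove() (must not fail)
 *	AP:  any -> AUTHORIZED		set peer state + push HT capabilities
 *	STA: any -> AUTHORIZED		mark the vif authorized
 *	STA: AUTHORIZED -> ASSOC	clear the authorized flag
 */
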
4243 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4244 struct ieee80211_vif *vif,
4245 struct ieee80211_sta *sta,
4246 enum ieee80211_sta_state old_state,
4247 enum ieee80211_sta_state new_state)
4248 {
4249 struct wl1271 *wl = hw->priv;
4250 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4251 int ret;
4252
4253 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4254 sta->aid, old_state, new_state);
4255
4256 mutex_lock(&wl->mutex);
4257
4258 if (unlikely(wl->state == WL1271_STATE_OFF)) {
4259 ret = -EBUSY;
4260 goto out;
4261 }
4262
4263 ret = wl1271_ps_elp_wakeup(wl);
4264 if (ret < 0)
4265 goto out;
4266
4267 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
4268
4269 wl1271_ps_elp_sleep(wl);
4270 out:
4271 mutex_unlock(&wl->mutex);
4272 if (new_state < old_state)
4273 return 0;
4274 return ret;
4275 }
4276
4277 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4278 struct ieee80211_vif *vif,
4279 enum ieee80211_ampdu_mlme_action action,
4280 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4281 u8 buf_size)
4282 {
4283 struct wl1271 *wl = hw->priv;
4284 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4285 int ret;
4286 u8 hlid, *ba_bitmap;
4287
4288 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
4289 tid);
4290
4291 /* sanity check - the fields in FW are only 8bits wide */
4292 if (WARN_ON(tid > 0xFF))
4293 return -ENOTSUPP;
4294
4295 mutex_lock(&wl->mutex);
4296
4297 if (unlikely(wl->state == WL1271_STATE_OFF)) {
4298 ret = -EAGAIN;
4299 goto out;
4300 }
4301
4302 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4303 hlid = wlvif->sta.hlid;
4304 ba_bitmap = &wlvif->sta.ba_rx_bitmap;
4305 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
4306 struct wl1271_station *wl_sta;
4307
4308 wl_sta = (struct wl1271_station *)sta->drv_priv;
4309 hlid = wl_sta->hlid;
4310 ba_bitmap = &wl->links[hlid].ba_bitmap;
4311 } else {
4312 ret = -EINVAL;
4313 goto out;
4314 }
4315
4316 ret = wl1271_ps_elp_wakeup(wl);
4317 if (ret < 0)
4318 goto out;
4319
4320 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
4321 tid, action);
4322
4323 switch (action) {
4324 case IEEE80211_AMPDU_RX_START:
4325 if (!wlvif->ba_support || !wlvif->ba_allowed) {
4326 ret = -ENOTSUPP;
4327 break;
4328 }
4329
4330 if (wl->ba_rx_session_count >= RX_BA_MAX_SESSIONS) {
4331 ret = -EBUSY;
4332 wl1271_error("exceeded max RX BA sessions");
4333 break;
4334 }
4335
4336 if (*ba_bitmap & BIT(tid)) {
4337 ret = -EINVAL;
4338 wl1271_error("cannot enable RX BA session on active "
4339 "tid: %d", tid);
4340 break;
4341 }
4342
4343 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
4344 hlid);
4345 if (!ret) {
4346 *ba_bitmap |= BIT(tid);
4347 wl->ba_rx_session_count++;
4348 }
4349 break;
4350
4351 case IEEE80211_AMPDU_RX_STOP:
4352 if (!(*ba_bitmap & BIT(tid))) {
4353 ret = -EINVAL;
4354 wl1271_error("no active RX BA session on tid: %d",
4355 tid);
4356 break;
4357 }
4358
4359 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
4360 hlid);
4361 if (!ret) {
4362 *ba_bitmap &= ~BIT(tid);
4363 wl->ba_rx_session_count--;
4364 }
4365 break;
4366
4367 /*
4368 * The BA initiator session is managed in FW independently.
4369 * Falling through here on purpose for all TX AMPDU actions.
4370 */
4371 case IEEE80211_AMPDU_TX_START:
4372 case IEEE80211_AMPDU_TX_STOP:
4373 case IEEE80211_AMPDU_TX_OPERATIONAL:
4374 ret = -EINVAL;
4375 break;
4376
4377 default:
4378 wl1271_error("Incorrect ampdu action id=%x\n", action);
4379 ret = -EINVAL;
4380 }
4381
4382 wl1271_ps_elp_sleep(wl);
4383
4384 out:
4385 mutex_unlock(&wl->mutex);
4386
4387 return ret;
4388 }
4389
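/*
 * Editor's sketch (hypothetical helpers, not in the original source): in
 * wl1271_op_ampdu_action() the RX BA state is tracked as one bit per TID
 * in a small bitmap - per-link in AP mode, per-vif in STA mode.
 */
static inline bool wlcore_example_ba_active(u8 ba_bitmap, u16 tid)
{
        /* is an RX BA session already established on this TID? */
        return ba_bitmap & BIT(tid);
}

static inline u8 wlcore_example_ba_update(u8 ba_bitmap, u16 tid, bool enable)
{
        /* mirror of the *ba_bitmap |= / &= ~BIT(tid) updates above */
        return enable ? (ba_bitmap | BIT(tid)) : (ba_bitmap & ~BIT(tid));
}
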
4390 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
4391 struct ieee80211_vif *vif,
4392 const struct cfg80211_bitrate_mask *mask)
4393 {
4394 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4395 struct wl1271 *wl = hw->priv;
4396 int i, ret = 0;
4397
4398 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
4399 mask->control[NL80211_BAND_2GHZ].legacy,
4400 mask->control[NL80211_BAND_5GHZ].legacy);
4401
4402 mutex_lock(&wl->mutex);
4403
4404 for (i = 0; i < IEEE80211_NUM_BANDS; i++)
4405 wlvif->bitrate_masks[i] =
4406 wl1271_tx_enabled_rates_get(wl,
4407 mask->control[i].legacy,
4408 i);
4409
4410 if (unlikely(wl->state == WL1271_STATE_OFF))
4411 goto out;
4412
4413 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4414 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
4415
4416 ret = wl1271_ps_elp_wakeup(wl);
4417 if (ret < 0)
4418 goto out;
4419
4420 wl1271_set_band_rate(wl, wlvif);
4421 wlvif->basic_rate =
4422 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4423 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4424
4425 wl1271_ps_elp_sleep(wl);
4426 }
4427 out:
4428 mutex_unlock(&wl->mutex);
4429
4430 return ret;
4431 }
4432
4433 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
4434 struct ieee80211_channel_switch *ch_switch)
4435 {
4436 struct wl1271 *wl = hw->priv;
4437 struct wl12xx_vif *wlvif;
4438 int ret;
4439
4440 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
4441
4442 wl1271_tx_flush(wl);
4443
4444 mutex_lock(&wl->mutex);
4445
4446 if (unlikely(wl->state == WL1271_STATE_OFF)) {
4447 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4448 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4449 ieee80211_chswitch_done(vif, false);
4450 }
4451 goto out;
4452 }
4453
4454 ret = wl1271_ps_elp_wakeup(wl);
4455 if (ret < 0)
4456 goto out;
4457
4458 /* TODO: change mac80211 to pass vif as param */
4459 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4460 ret = wl12xx_cmd_channel_switch(wl, wlvif, ch_switch);
4461
4462 if (!ret)
4463 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
4464 }
4465
4466 wl1271_ps_elp_sleep(wl);
4467
4468 out:
4469 mutex_unlock(&wl->mutex);
4470 }
4471
4472 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
4473 {
4474 struct wl1271 *wl = hw->priv;
4475 bool ret = false;
4476
4477 mutex_lock(&wl->mutex);
4478
4479 if (unlikely(wl->state == WL1271_STATE_OFF))
4480 goto out;
4481
4482 /* packets are considered pending if in the TX queue or the FW */
4483 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
4484 out:
4485 mutex_unlock(&wl->mutex);
4486
4487 return ret;
4488 }
4489
4490 /* can't be const, mac80211 writes to this */
4491 static struct ieee80211_rate wl1271_rates[] = {
4492 { .bitrate = 10,
4493 .hw_value = CONF_HW_BIT_RATE_1MBPS,
4494 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
4495 { .bitrate = 20,
4496 .hw_value = CONF_HW_BIT_RATE_2MBPS,
4497 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
4498 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
4499 { .bitrate = 55,
4500 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
4501 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
4502 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
4503 { .bitrate = 110,
4504 .hw_value = CONF_HW_BIT_RATE_11MBPS,
4505 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
4506 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
4507 { .bitrate = 60,
4508 .hw_value = CONF_HW_BIT_RATE_6MBPS,
4509 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
4510 { .bitrate = 90,
4511 .hw_value = CONF_HW_BIT_RATE_9MBPS,
4512 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
4513 { .bitrate = 120,
4514 .hw_value = CONF_HW_BIT_RATE_12MBPS,
4515 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
4516 { .bitrate = 180,
4517 .hw_value = CONF_HW_BIT_RATE_18MBPS,
4518 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
4519 { .bitrate = 240,
4520 .hw_value = CONF_HW_BIT_RATE_24MBPS,
4521 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
4522 { .bitrate = 360,
4523 .hw_value = CONF_HW_BIT_RATE_36MBPS,
4524 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
4525 { .bitrate = 480,
4526 .hw_value = CONF_HW_BIT_RATE_48MBPS,
4527 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
4528 { .bitrate = 540,
4529 .hw_value = CONF_HW_BIT_RATE_54MBPS,
4530 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
4531 };
4532
4533 /* can't be const, mac80211 writes to this */
4534 static struct ieee80211_channel wl1271_channels[] = {
4535 { .hw_value = 1, .center_freq = 2412, .max_power = 25 },
4536 { .hw_value = 2, .center_freq = 2417, .max_power = 25 },
4537 { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
4538 { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
4539 { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
4540 { .hw_value = 6, .center_freq = 2437, .max_power = 25 },
4541 { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
4542 { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
4543 { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
4544 { .hw_value = 10, .center_freq = 2457, .max_power = 25 },
4545 { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
4546 { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
4547 { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
4548 { .hw_value = 14, .center_freq = 2484, .max_power = 25 },
4549 };
4550
4551 /* can't be const, mac80211 writes to this */
4552 static struct ieee80211_supported_band wl1271_band_2ghz = {
4553 .channels = wl1271_channels,
4554 .n_channels = ARRAY_SIZE(wl1271_channels),
4555 .bitrates = wl1271_rates,
4556 .n_bitrates = ARRAY_SIZE(wl1271_rates),
4557 };
4558
4559 /* 5 GHz data rates for WL1273 */
4560 static struct ieee80211_rate wl1271_rates_5ghz[] = {
4561 { .bitrate = 60,
4562 .hw_value = CONF_HW_BIT_RATE_6MBPS,
4563 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
4564 { .bitrate = 90,
4565 .hw_value = CONF_HW_BIT_RATE_9MBPS,
4566 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
4567 { .bitrate = 120,
4568 .hw_value = CONF_HW_BIT_RATE_12MBPS,
4569 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
4570 { .bitrate = 180,
4571 .hw_value = CONF_HW_BIT_RATE_18MBPS,
4572 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
4573 { .bitrate = 240,
4574 .hw_value = CONF_HW_BIT_RATE_24MBPS,
4575 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
4576 { .bitrate = 360,
4577 .hw_value = CONF_HW_BIT_RATE_36MBPS,
4578 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
4579 { .bitrate = 480,
4580 .hw_value = CONF_HW_BIT_RATE_48MBPS,
4581 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
4582 { .bitrate = 540,
4583 .hw_value = CONF_HW_BIT_RATE_54MBPS,
4584 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
4585 };
4586
4587 /* 5 GHz band channels for WL1273 */
4588 static struct ieee80211_channel wl1271_channels_5ghz[] = {
4589 { .hw_value = 7, .center_freq = 5035, .max_power = 25 },
4590 { .hw_value = 8, .center_freq = 5040, .max_power = 25 },
4591 { .hw_value = 9, .center_freq = 5045, .max_power = 25 },
4592 { .hw_value = 11, .center_freq = 5055, .max_power = 25 },
4593 { .hw_value = 12, .center_freq = 5060, .max_power = 25 },
4594 { .hw_value = 16, .center_freq = 5080, .max_power = 25 },
4595 { .hw_value = 34, .center_freq = 5170, .max_power = 25 },
4596 { .hw_value = 36, .center_freq = 5180, .max_power = 25 },
4597 { .hw_value = 38, .center_freq = 5190, .max_power = 25 },
4598 { .hw_value = 40, .center_freq = 5200, .max_power = 25 },
4599 { .hw_value = 42, .center_freq = 5210, .max_power = 25 },
4600 { .hw_value = 44, .center_freq = 5220, .max_power = 25 },
4601 { .hw_value = 46, .center_freq = 5230, .max_power = 25 },
4602 { .hw_value = 48, .center_freq = 5240, .max_power = 25 },
4603 { .hw_value = 52, .center_freq = 5260, .max_power = 25 },
4604 { .hw_value = 56, .center_freq = 5280, .max_power = 25 },
4605 { .hw_value = 60, .center_freq = 5300, .max_power = 25 },
4606 { .hw_value = 64, .center_freq = 5320, .max_power = 25 },
4607 { .hw_value = 100, .center_freq = 5500, .max_power = 25 },
4608 { .hw_value = 104, .center_freq = 5520, .max_power = 25 },
4609 { .hw_value = 108, .center_freq = 5540, .max_power = 25 },
4610 { .hw_value = 112, .center_freq = 5560, .max_power = 25 },
4611 { .hw_value = 116, .center_freq = 5580, .max_power = 25 },
4612 { .hw_value = 120, .center_freq = 5600, .max_power = 25 },
4613 { .hw_value = 124, .center_freq = 5620, .max_power = 25 },
4614 { .hw_value = 128, .center_freq = 5640, .max_power = 25 },
4615 { .hw_value = 132, .center_freq = 5660, .max_power = 25 },
4616 { .hw_value = 136, .center_freq = 5680, .max_power = 25 },
4617 { .hw_value = 140, .center_freq = 5700, .max_power = 25 },
4618 { .hw_value = 149, .center_freq = 5745, .max_power = 25 },
4619 { .hw_value = 153, .center_freq = 5765, .max_power = 25 },
4620 { .hw_value = 157, .center_freq = 5785, .max_power = 25 },
4621 { .hw_value = 161, .center_freq = 5805, .max_power = 25 },
4622 { .hw_value = 165, .center_freq = 5825, .max_power = 25 },
4623 };
4624
4625 static struct ieee80211_supported_band wl1271_band_5ghz = {
4626 .channels = wl1271_channels_5ghz,
4627 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
4628 .bitrates = wl1271_rates_5ghz,
4629 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
4630 };
4631
4632 static const struct ieee80211_ops wl1271_ops = {
4633 .start = wl1271_op_start,
4634 .stop = wl1271_op_stop,
4635 .add_interface = wl1271_op_add_interface,
4636 .remove_interface = wl1271_op_remove_interface,
4637 .change_interface = wl12xx_op_change_interface,
4638 #ifdef CONFIG_PM
4639 .suspend = wl1271_op_suspend,
4640 .resume = wl1271_op_resume,
4641 #endif
4642 .config = wl1271_op_config,
4643 .prepare_multicast = wl1271_op_prepare_multicast,
4644 .configure_filter = wl1271_op_configure_filter,
4645 .tx = wl1271_op_tx,
4646 .set_key = wl1271_op_set_key,
4647 .hw_scan = wl1271_op_hw_scan,
4648 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
4649 .sched_scan_start = wl1271_op_sched_scan_start,
4650 .sched_scan_stop = wl1271_op_sched_scan_stop,
4651 .bss_info_changed = wl1271_op_bss_info_changed,
4652 .set_frag_threshold = wl1271_op_set_frag_threshold,
4653 .set_rts_threshold = wl1271_op_set_rts_threshold,
4654 .conf_tx = wl1271_op_conf_tx,
4655 .get_tsf = wl1271_op_get_tsf,
4656 .get_survey = wl1271_op_get_survey,
4657 .sta_state = wl12xx_op_sta_state,
4658 .ampdu_action = wl1271_op_ampdu_action,
4659 .tx_frames_pending = wl1271_tx_frames_pending,
4660 .set_bitrate_mask = wl12xx_set_bitrate_mask,
4661 .channel_switch = wl12xx_op_channel_switch,
4662 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
4663 };
4664
4665
4666 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
4667 {
4668 u8 idx;
4669
4670 BUG_ON(band >= 2);
4671
4672 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
4673 wl1271_error("Illegal RX rate from HW: %d", rate);
4674 return 0;
4675 }
4676
4677 idx = wl->band_rate_to_idx[band][rate];
4678 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
4679 wl1271_error("Unsupported RX rate from HW: %d", rate);
4680 return 0;
4681 }
4682
4683 return idx;
4684 }
4685
4686 static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
4687 struct device_attribute *attr,
4688 char *buf)
4689 {
4690 struct wl1271 *wl = dev_get_drvdata(dev);
4691 ssize_t len;
4692
4693 len = PAGE_SIZE;
4694
4695 mutex_lock(&wl->mutex);
4696 len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
4697 wl->sg_enabled);
4698 mutex_unlock(&wl->mutex);
4699
4700 return len;
4701
4702 }
4703
4704 static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
4705 struct device_attribute *attr,
4706 const char *buf, size_t count)
4707 {
4708 struct wl1271 *wl = dev_get_drvdata(dev);
4709 unsigned long res;
4710 int ret;
4711
4712 ret = kstrtoul(buf, 10, &res);
4713 if (ret < 0) {
4714 wl1271_warning("incorrect value written to bt_coex_mode");
4715 return count;
4716 }
4717
4718 mutex_lock(&wl->mutex);
4719
4720 res = !!res;
4721
4722 if (res == wl->sg_enabled)
4723 goto out;
4724
4725 wl->sg_enabled = res;
4726
4727 if (wl->state == WL1271_STATE_OFF)
4728 goto out;
4729
4730 ret = wl1271_ps_elp_wakeup(wl);
4731 if (ret < 0)
4732 goto out;
4733
4734 wl1271_acx_sg_enable(wl, wl->sg_enabled);
4735 wl1271_ps_elp_sleep(wl);
4736
4737 out:
4738 mutex_unlock(&wl->mutex);
4739 return count;
4740 }
4741
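/*
 * Editor's usage note (illustrative; the exact sysfs path depends on the
 * platform device name): BT coexistence can be toggled at runtime with
 *
 *	echo 1 > /sys/devices/platform/.../bt_coex_state
 *
 * and read back with cat on the same attribute (0 - off, 1 - on).
 */
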
4742 static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
4743 wl1271_sysfs_show_bt_coex_state,
4744 wl1271_sysfs_store_bt_coex_state);
4745
4746 static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
4747 struct device_attribute *attr,
4748 char *buf)
4749 {
4750 struct wl1271 *wl = dev_get_drvdata(dev);
4751 ssize_t len;
4752
4753 len = PAGE_SIZE;
4754
4755 mutex_lock(&wl->mutex);
4756 if (wl->hw_pg_ver >= 0)
4757 len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
4758 else
4759 len = snprintf(buf, len, "n/a\n");
4760 mutex_unlock(&wl->mutex);
4761
4762 return len;
4763 }
4764
4765 static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
4766 wl1271_sysfs_show_hw_pg_ver, NULL);
4767
4768 static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
4769 struct bin_attribute *bin_attr,
4770 char *buffer, loff_t pos, size_t count)
4771 {
4772 struct device *dev = container_of(kobj, struct device, kobj);
4773 struct wl1271 *wl = dev_get_drvdata(dev);
4774 ssize_t len;
4775 int ret;
4776
4777 ret = mutex_lock_interruptible(&wl->mutex);
4778 if (ret < 0)
4779 return -ERESTARTSYS;
4780
4781 /* Let only one thread read the log at a time, blocking others */
4782 while (wl->fwlog_size == 0) {
4783 DEFINE_WAIT(wait);
4784
4785 prepare_to_wait_exclusive(&wl->fwlog_waitq,
4786 &wait,
4787 TASK_INTERRUPTIBLE);
4788
4789 if (wl->fwlog_size != 0) {
4790 finish_wait(&wl->fwlog_waitq, &wait);
4791 break;
4792 }
4793
4794 mutex_unlock(&wl->mutex);
4795
4796 schedule();
4797 finish_wait(&wl->fwlog_waitq, &wait);
4798
4799 if (signal_pending(current))
4800 return -ERESTARTSYS;
4801
4802 ret = mutex_lock_interruptible(&wl->mutex);
4803 if (ret < 0)
4804 return -ERESTARTSYS;
4805 }
4806
4807 /* Check if the fwlog is still valid */
4808 if (wl->fwlog_size < 0) {
4809 mutex_unlock(&wl->mutex);
4810 return 0;
4811 }
4812
4813 /* Seeking is not supported - old logs are not kept. Disregard pos. */
4814 len = min(count, (size_t)wl->fwlog_size);
4815 wl->fwlog_size -= len;
4816 memcpy(buffer, wl->fwlog, len);
4817
4818 /* Make room for new messages */
4819 memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
4820
4821 mutex_unlock(&wl->mutex);
4822
4823 return len;
4824 }
4825
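/*
 * Editor's usage note (illustrative): reads on the fwlog attribute block
 * until the FW produces data and consume what they return (old messages
 * are not kept), so the log is typically drained continuously, e.g.
 *
 *	cat /sys/devices/platform/.../fwlog > wl12xx-fwlog.bin
 *
 * where the exact device path depends on the platform.
 */
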
4826 static struct bin_attribute fwlog_attr = {
4827 .attr = {.name = "fwlog", .mode = S_IRUSR},
4828 .read = wl1271_sysfs_read_fwlog,
4829 };
4830
4831 static void wl1271_connection_loss_work(struct work_struct *work)
4832 {
4833 struct delayed_work *dwork;
4834 struct wl1271 *wl;
4835 struct ieee80211_vif *vif;
4836 struct wl12xx_vif *wlvif;
4837
4838 dwork = container_of(work, struct delayed_work, work);
4839 wl = container_of(dwork, struct wl1271, connection_loss_work);
4840
4841 wl1271_info("Connection loss work.");
4842
4843 mutex_lock(&wl->mutex);
4844
4845 if (unlikely(wl->state == WL1271_STATE_OFF))
4846 goto out;
4847
4848 /* Call mac80211 connection loss */
4849 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4850 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
4851 goto out;
4852 vif = wl12xx_wlvif_to_vif(wlvif);
4853 ieee80211_connection_loss(vif);
4854 }
4855 out:
4856 mutex_unlock(&wl->mutex);
4857 }
4858
4859 static void wl12xx_derive_mac_addresses(struct wl1271 *wl,
4860 u32 oui, u32 nic, int n)
4861 {
4862 int i;
4863
4864 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x, n %d",
4865 oui, nic, n);
4866
4867 if (nic + n - 1 > 0xffffff)
4868 wl1271_warning("NIC part of the MAC address wraps around!");
4869
4870 for (i = 0; i < n; i++) {
4871 wl->addresses[i].addr[0] = (u8)(oui >> 16);
4872 wl->addresses[i].addr[1] = (u8)(oui >> 8);
4873 wl->addresses[i].addr[2] = (u8) oui;
4874 wl->addresses[i].addr[3] = (u8)(nic >> 16);
4875 wl->addresses[i].addr[4] = (u8)(nic >> 8);
4876 wl->addresses[i].addr[5] = (u8) nic;
4877 nic++;
4878 }
4879
4880 wl->hw->wiphy->n_addresses = n;
4881 wl->hw->wiphy->addresses = wl->addresses;
4882 }
4883
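/*
 * Editor's worked example (hypothetical values, not from the source): with
 * oui == 0x080028, nic == 0x000001 and n == 2,
 * wl12xx_derive_mac_addresses() above fills in
 *
 *	addresses[0] = 08:00:28:00:00:01
 *	addresses[1] = 08:00:28:00:00:02
 *
 * i.e. consecutive NIC parts under the same OUI, one address per virtual
 * interface advertised to mac80211.
 */
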
4884 static int wl12xx_get_hw_info(struct wl1271 *wl)
4885 {
4886 int ret;
4887
4888 ret = wl12xx_set_power_on(wl);
4889 if (ret < 0)
4890 goto out;
4891
4892 wl->chip.id = wlcore_read_reg(wl, REG_CHIP_ID_B);
4893
4894 wl->fuse_oui_addr = 0;
4895 wl->fuse_nic_addr = 0;
4896
4897 wl->hw_pg_ver = wl->ops->get_pg_ver(wl);
4898
4899 if (wl->ops->get_mac)
4900 wl->ops->get_mac(wl);
4901
4902 wl1271_power_off(wl);
4903 out:
4904 return ret;
4905 }
4906
4907 static int wl1271_register_hw(struct wl1271 *wl)
4908 {
4909 int ret;
4910 u32 oui_addr = 0, nic_addr = 0;
4911
4912 if (wl->mac80211_registered)
4913 return 0;
4914
4915 ret = wl12xx_get_hw_info(wl);
4916 if (ret < 0) {
4917 wl1271_error("couldn't get hw info");
4918 goto out;
4919 }
4920
4921 ret = wl1271_fetch_nvs(wl);
4922 if (ret == 0) {
4923 /* NOTE: The wl->nvs->nvs element must be first; to
4924 * simplify the casting, we assume it is at the
4925 * beginning of the wl->nvs structure.
4926 */
4927 u8 *nvs_ptr = (u8 *)wl->nvs;
4928
4929 oui_addr =
4930 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
4931 nic_addr =
4932 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
4933 }
4934
4935 /* if the MAC address is zeroed in the NVS, derive it from the fuse */
4936 if (oui_addr == 0 && nic_addr == 0) {
4937 oui_addr = wl->fuse_oui_addr;
4938 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
4939 nic_addr = wl->fuse_nic_addr + 1;
4940 }
4941
4942 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr, 2);
4943
4944 ret = ieee80211_register_hw(wl->hw);
4945 if (ret < 0) {
4946 wl1271_error("unable to register mac80211 hw: %d", ret);
4947 goto out;
4948 }
4949
4950 wl->mac80211_registered = true;
4951
4952 wl1271_debugfs_init(wl);
4953
4954 wl1271_notice("loaded");
4955
4956 out:
4957 return ret;
4958 }
4959
4960 static void wl1271_unregister_hw(struct wl1271 *wl)
4961 {
4962 if (wl->plt)
4963 wl1271_plt_stop(wl);
4964
4965 ieee80211_unregister_hw(wl->hw);
4966 wl->mac80211_registered = false;
4967
4968 }
4969
4970 static int wl1271_init_ieee80211(struct wl1271 *wl)
4971 {
4972 static const u32 cipher_suites[] = {
4973 WLAN_CIPHER_SUITE_WEP40,
4974 WLAN_CIPHER_SUITE_WEP104,
4975 WLAN_CIPHER_SUITE_TKIP,
4976 WLAN_CIPHER_SUITE_CCMP,
4977 WL1271_CIPHER_SUITE_GEM,
4978 };
4979
4980 /* The tx descriptor buffer and the TKIP space. */
4981 wl->hw->extra_tx_headroom = WL1271_EXTRA_SPACE_TKIP +
4982 sizeof(struct wl1271_tx_hw_descr);
4983
4984 /* unit us */
4985 /* FIXME: find a proper value */
4986 wl->hw->channel_change_time = 10000;
4987 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
4988
4989 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
4990 IEEE80211_HW_SUPPORTS_PS |
4991 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
4992 IEEE80211_HW_SUPPORTS_UAPSD |
4993 IEEE80211_HW_HAS_RATE_CONTROL |
4994 IEEE80211_HW_CONNECTION_MONITOR |
4995 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
4996 IEEE80211_HW_SPECTRUM_MGMT |
4997 IEEE80211_HW_AP_LINK_PS |
4998 IEEE80211_HW_AMPDU_AGGREGATION |
4999 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5000 IEEE80211_HW_SCAN_WHILE_IDLE;
5001
5002 wl->hw->wiphy->cipher_suites = cipher_suites;
5003 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5004
5005 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5006 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5007 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
5008 wl->hw->wiphy->max_scan_ssids = 1;
5009 wl->hw->wiphy->max_sched_scan_ssids = 16;
5010 wl->hw->wiphy->max_match_sets = 16;
5011 /*
5012 * Maximum length of elements in scanning probe request templates
5013 * should be the maximum length possible for a template, without
5014 * the IEEE80211 header of the template
5015 */
5016 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5017 sizeof(struct ieee80211_header);
5018
5019 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5020 sizeof(struct ieee80211_header);
5021
5022 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5023 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
5024
5025 /* make sure all our channels fit in the scanned_ch bitmask */
5026 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5027 ARRAY_SIZE(wl1271_channels_5ghz) >
5028 WL1271_MAX_CHANNELS);
5029 /*
5030 * We keep local copies of the band structs because we need to
5031 * modify them on a per-device basis.
5032 */
5033 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5034 sizeof(wl1271_band_2ghz));
5035 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap, &wl->ht_cap,
5036 sizeof(wl->ht_cap));
5037 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5038 sizeof(wl1271_band_5ghz));
5039 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap, &wl->ht_cap,
5040 sizeof(wl->ht_cap));
5041
5042 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5043 &wl->bands[IEEE80211_BAND_2GHZ];
5044 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5045 &wl->bands[IEEE80211_BAND_5GHZ];
5046
5047 wl->hw->queues = 4;
5048 wl->hw->max_rates = 1;
5049
5050 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5051
5052 /* the FW answers probe-requests in AP-mode */
5053 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5054 wl->hw->wiphy->probe_resp_offload =
5055 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5056 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5057 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5058
5059 SET_IEEE80211_DEV(wl->hw, wl->dev);
5060
5061 wl->hw->sta_data_size = sizeof(struct wl1271_station);
5062 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5063
5064 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
5065
5066 return 0;
5067 }
5068
5069 #define WL1271_DEFAULT_CHANNEL 0
5070
5071 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
5072 {
5073 struct ieee80211_hw *hw;
5074 struct wl1271 *wl;
5075 int i, j, ret;
5076 unsigned int order;
5077
5078 BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
5079
5080 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5081 if (!hw) {
5082 wl1271_error("could not alloc ieee80211_hw");
5083 ret = -ENOMEM;
5084 goto err_hw_alloc;
5085 }
5086
5087 wl = hw->priv;
5088 memset(wl, 0, sizeof(*wl));
5089
5090 wl->priv = kzalloc(priv_size, GFP_KERNEL);
5091 if (!wl->priv) {
5092 wl1271_error("could not alloc wl priv");
5093 ret = -ENOMEM;
5094 goto err_priv_alloc;
5095 }
5096
5097 INIT_LIST_HEAD(&wl->wlvif_list);
5098
5099 wl->hw = hw;
5100
5101 for (i = 0; i < NUM_TX_QUEUES; i++)
5102 for (j = 0; j < WL12XX_MAX_LINKS; j++)
5103 skb_queue_head_init(&wl->links[j].tx_queue[i]);
5104
5105 skb_queue_head_init(&wl->deferred_rx_queue);
5106 skb_queue_head_init(&wl->deferred_tx_queue);
5107
5108 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5109 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5110 INIT_WORK(&wl->tx_work, wl1271_tx_work);
5111 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5112 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5113 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
5114 INIT_DELAYED_WORK(&wl->connection_loss_work,
5115 wl1271_connection_loss_work);
5116
5117 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5118 if (!wl->freezable_wq) {
5119 ret = -ENOMEM;
5120 goto err_hw;
5121 }
5122
5123 wl->channel = WL1271_DEFAULT_CHANNEL;
5124 wl->rx_counter = 0;
5125 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5126 wl->band = IEEE80211_BAND_2GHZ;
5127 wl->channel_type = NL80211_CHAN_NO_HT;
5128 wl->flags = 0;
5129 wl->sg_enabled = true;
5130 wl->hw_pg_ver = -1;
5131 wl->ap_ps_map = 0;
5132 wl->ap_fw_ps_map = 0;
5133 wl->quirks = 0;
5134 wl->platform_quirks = 0;
5135 wl->sched_scanning = false;
5136 wl->system_hlid = WL12XX_SYSTEM_HLID;
5137 wl->active_sta_count = 0;
5138 wl->fwlog_size = 0;
5139 init_waitqueue_head(&wl->fwlog_waitq);
5140
5141 /* The system link is always allocated */
5142 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5143
5144 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5145 for (i = 0; i < wl->num_tx_desc; i++)
5146 wl->tx_frames[i] = NULL;
5147
5148 spin_lock_init(&wl->wl_lock);
5149
5150 wl->state = WL1271_STATE_OFF;
5151 wl->fw_type = WL12XX_FW_TYPE_NONE;
5152 mutex_init(&wl->mutex);
5153
5154 order = get_order(WL1271_AGGR_BUFFER_SIZE);
5155 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5156 if (!wl->aggr_buf) {
5157 ret = -ENOMEM;
5158 goto err_wq;
5159 }
5160
5161 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5162 if (!wl->dummy_packet) {
5163 ret = -ENOMEM;
5164 goto err_aggr;
5165 }
5166
5167 /* Allocate one page for the FW log */
5168 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5169 if (!wl->fwlog) {
5170 ret = -ENOMEM;
5171 goto err_dummy_packet;
5172 }
5173
5174 wl->mbox = kmalloc(sizeof(*wl->mbox), GFP_KERNEL | GFP_DMA);
5175 if (!wl->mbox) {
5176 ret = -ENOMEM;
5177 goto err_fwlog;
5178 }
5179
5180 return hw;
5181
5182 err_fwlog:
5183 free_page((unsigned long)wl->fwlog);
5184
5185 err_dummy_packet:
5186 dev_kfree_skb(wl->dummy_packet);
5187
5188 err_aggr:
5189 free_pages((unsigned long)wl->aggr_buf, order);
5190
5191 err_wq:
5192 destroy_workqueue(wl->freezable_wq);
5193
5194 err_hw:
5195 wl1271_debugfs_exit(wl);
5196 kfree(wl->priv);
5197
5198 err_priv_alloc:
5199 ieee80211_free_hw(hw);
5200
5201 err_hw_alloc:
5202
5203 return ERR_PTR(ret);
5204 }
5205 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
5206
5207 int wlcore_free_hw(struct wl1271 *wl)
5208 {
5209 /* Unblock any fwlog readers */
5210 mutex_lock(&wl->mutex);
5211 wl->fwlog_size = -1;
5212 wake_up_interruptible_all(&wl->fwlog_waitq);
5213 mutex_unlock(&wl->mutex);
5214
5215 device_remove_bin_file(wl->dev, &fwlog_attr);
5216
5217 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5218
5219 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5220 free_page((unsigned long)wl->fwlog);
5221 dev_kfree_skb(wl->dummy_packet);
5222 free_pages((unsigned long)wl->aggr_buf,
5223 get_order(WL1271_AGGR_BUFFER_SIZE));
5224
5225 wl1271_debugfs_exit(wl);
5226
5227 vfree(wl->fw);
5228 wl->fw = NULL;
5229 wl->fw_type = WL12XX_FW_TYPE_NONE;
5230 kfree(wl->nvs);
5231 wl->nvs = NULL;
5232
5233 kfree(wl->fw_status);
5234 kfree(wl->tx_res_if);
5235 destroy_workqueue(wl->freezable_wq);
5236
5237 kfree(wl->priv);
5238 ieee80211_free_hw(wl->hw);
5239
5240 return 0;
5241 }
5242 EXPORT_SYMBOL_GPL(wlcore_free_hw);
5243
5244 static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
5245 {
5246 struct wl1271 *wl = cookie;
5247 unsigned long flags;
5248
5249 wl1271_debug(DEBUG_IRQ, "IRQ");
5250
5251 /* complete a pending ELP wakeup, if any */
5252 spin_lock_irqsave(&wl->wl_lock, flags);
5253 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
5254 if (wl->elp_compl) {
5255 complete(wl->elp_compl);
5256 wl->elp_compl = NULL;
5257 }
5258
5259 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
5260 /* don't enqueue a work right now. mark it as pending */
5261 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
5262 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
5263 disable_irq_nosync(wl->irq);
5264 pm_wakeup_event(wl->dev, 0);
5265 spin_unlock_irqrestore(&wl->wl_lock, flags);
5266 return IRQ_HANDLED;
5267 }
5268 spin_unlock_irqrestore(&wl->wl_lock, flags);
5269
5270 return IRQ_WAKE_THREAD;
5271 }
5272
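/*
 * Editor's note (sketch of the IRQ flow, based on the
 * request_threaded_irq() call in wlcore_probe() below): wl12xx_hardirq()
 * runs in hard-IRQ context and only completes a pending ELP wakeup or, when
 * suspended, marks the work as pending and arms a wakeup event; everything
 * else is deferred to the threaded handler (wl1271_irq) by returning
 * IRQ_WAKE_THREAD.
 */
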
5273 int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5274 {
5275 struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
5276 unsigned long irqflags;
5277 int ret;
5278
5279 if (!wl->ops || !wl->ptable) {
5280 ret = -EINVAL;
5281 goto out_free_hw;
5282 }
5283
5284 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
5285
5286 /* adjust some runtime configuration parameters */
5287 wlcore_adjust_conf(wl);
5288
5289 wl->irq = platform_get_irq(pdev, 0);
5290 wl->ref_clock = pdata->board_ref_clock;
5291 wl->tcxo_clock = pdata->board_tcxo_clock;
5292 wl->platform_quirks = pdata->platform_quirks;
5293 wl->set_power = pdata->set_power;
5294 wl->dev = &pdev->dev;
5295 wl->if_ops = pdata->ops;
5296
5297 platform_set_drvdata(pdev, wl);
5298
5299 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
5300 irqflags = IRQF_TRIGGER_RISING;
5301 else
5302 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
5303
5304 ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wl1271_irq,
5305 irqflags,
5306 pdev->name, wl);
5307 if (ret < 0) {
5308 wl1271_error("request_irq() failed: %d", ret);
5309 goto out_free_hw;
5310 }
5311
5312 ret = enable_irq_wake(wl->irq);
5313 if (!ret) {
5314 wl->irq_wake_enabled = true;
5315 device_init_wakeup(wl->dev, 1);
5316 if (pdata->pwr_in_suspend) {
5317 wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
5318 wl->hw->wiphy->wowlan.n_patterns =
5319 WL1271_MAX_RX_FILTERS;
5320 wl->hw->wiphy->wowlan.pattern_min_len = 1;
5321 wl->hw->wiphy->wowlan.pattern_max_len =
5322 WL1271_RX_FILTER_MAX_PATTERN_SIZE;
5323 }
5324 }
5325 disable_irq(wl->irq);
5326
5327 ret = wl1271_init_ieee80211(wl);
5328 if (ret)
5329 goto out_irq;
5330
5331 ret = wl1271_register_hw(wl);
5332 if (ret)
5333 goto out_irq;
5334
5335 /* Create sysfs file to control bt coex state */
5336 ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
5337 if (ret < 0) {
5338 wl1271_error("failed to create sysfs file bt_coex_state");
5339 goto out_irq;
5340 }
5341
5342 /* Create sysfs file to get HW PG version */
5343 ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
5344 if (ret < 0) {
5345 wl1271_error("failed to create sysfs file hw_pg_ver");
5346 goto out_bt_coex_state;
5347 }
5348
5349 /* Create sysfs file for the FW log */
5350 ret = device_create_bin_file(wl->dev, &fwlog_attr);
5351 if (ret < 0) {
5352 wl1271_error("failed to create sysfs file fwlog");
5353 goto out_hw_pg_ver;
5354 }
5355
5356 goto out;
5357
5358 out_hw_pg_ver:
5359 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5360
5361 out_bt_coex_state:
5362 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5363
5364 out_irq:
5365 free_irq(wl->irq, wl);
5366
5367 out_free_hw:
5368 wlcore_free_hw(wl);
5369
5370 out:
5371 return ret;
5372 }
5373 EXPORT_SYMBOL_GPL(wlcore_probe);
5374
5375 int __devexit wlcore_remove(struct platform_device *pdev)
5376 {
5377 struct wl1271 *wl = platform_get_drvdata(pdev);
5378
5379 if (wl->irq_wake_enabled) {
5380 device_init_wakeup(wl->dev, 0);
5381 disable_irq_wake(wl->irq);
5382 }
5383 wl1271_unregister_hw(wl);
5384 free_irq(wl->irq, wl);
5385 wlcore_free_hw(wl);
5386
5387 return 0;
5388 }
5389 EXPORT_SYMBOL_GPL(wlcore_remove);
5390
5391 u32 wl12xx_debug_level = DEBUG_NONE;
5392 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
5393 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
5394 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
5395
5396 module_param_named(fwlog, fwlog_param, charp, 0);
5397 MODULE_PARM_DESC(fwlog,
5398 "FW logger options: continuous, ondemand, dbgpins or disable");
5399
5400 module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR);
5401 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
5402
5403 module_param(no_recovery, bool, S_IRUSR | S_IWUSR);
5404 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
5405
5406 MODULE_LICENSE("GPL");
5407 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
5408 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");