netdev: Add netdev->select_queue() method.
[deliverable/linux.git] net/mac80211/main.c
1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11 #include <net/mac80211.h>
12 #include <net/ieee80211_radiotap.h>
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/netdevice.h>
16 #include <linux/types.h>
17 #include <linux/slab.h>
18 #include <linux/skbuff.h>
19 #include <linux/etherdevice.h>
20 #include <linux/if_arp.h>
21 #include <linux/wireless.h>
22 #include <linux/rtnetlink.h>
23 #include <linux/bitmap.h>
24 #include <net/net_namespace.h>
25 #include <net/cfg80211.h>
26
27 #include "ieee80211_i.h"
28 #include "rate.h"
29 #include "mesh.h"
30 #include "wep.h"
31 #include "wme.h"
32 #include "aes_ccm.h"
33 #include "led.h"
34 #include "cfg.h"
35 #include "debugfs.h"
36 #include "debugfs_netdev.h"
37
38 /*
39 * For seeing transmitted packets on monitor interfaces
40 * we have a radiotap header too.
41 */
42 struct ieee80211_tx_status_rtap_hdr {
43 struct ieee80211_radiotap_header hdr;
44 __le16 tx_flags;
45 u8 data_retries;
46 } __attribute__ ((packed));
47
48 /* common interface routines */
49
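/* Report the transmitter address (addr2, at offset 10 in the 802.11 header)
 * as the hardware source address for frames seen on the master device. */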
50 static int header_parse_80211(const struct sk_buff *skb, unsigned char *haddr)
51 {
52 memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */
53 return ETH_ALEN;
54 }
55
56 /* must be called under mdev tx lock */
57 static void ieee80211_configure_filter(struct ieee80211_local *local)
58 {
59 unsigned int changed_flags;
60 unsigned int new_flags = 0;
61
62 if (atomic_read(&local->iff_promiscs))
63 new_flags |= FIF_PROMISC_IN_BSS;
64
65 if (atomic_read(&local->iff_allmultis))
66 new_flags |= FIF_ALLMULTI;
67
68 if (local->monitors)
69 new_flags |= FIF_BCN_PRBRESP_PROMISC;
70
71 if (local->fif_fcsfail)
72 new_flags |= FIF_FCSFAIL;
73
74 if (local->fif_plcpfail)
75 new_flags |= FIF_PLCPFAIL;
76
77 if (local->fif_control)
78 new_flags |= FIF_CONTROL;
79
80 if (local->fif_other_bss)
81 new_flags |= FIF_OTHER_BSS;
82
83 changed_flags = local->filter_flags ^ new_flags;
84
85 /* be a bit nasty: pass in a bogus bit that no FIF_* flag uses; a driver that handles the flags correctly must not return it */
86 new_flags |= (1<<31);
87
88 local->ops->configure_filter(local_to_hw(local),
89 changed_flags, &new_flags,
90 local->mdev->mc_count,
91 local->mdev->mc_list);
92
93 WARN_ON(new_flags & (1<<31));
94
95 local->filter_flags = new_flags & ~(1<<31);
96 }
97
98 /* master interface */
99
100 static int ieee80211_master_open(struct net_device *dev)
101 {
102 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
103 struct ieee80211_sub_if_data *sdata;
104 int res = -EOPNOTSUPP;
105
106 /* we hold the RTNL here so can safely walk the list */
107 list_for_each_entry(sdata, &local->interfaces, list) {
108 if (netif_running(sdata->dev)) {
109 res = 0;
110 break;
111 }
112 }
113
114 if (res)
115 return res;
116
117 netif_start_queue(local->mdev);
118
119 return 0;
120 }
121
122 static int ieee80211_master_stop(struct net_device *dev)
123 {
124 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
125 struct ieee80211_sub_if_data *sdata;
126
127 /* we hold the RTNL here so can safely walk the list */
128 list_for_each_entry(sdata, &local->interfaces, list)
129 if (netif_running(sdata->dev))
130 dev_close(sdata->dev);
131
132 return 0;
133 }
134
135 static void ieee80211_master_set_multicast_list(struct net_device *dev)
136 {
137 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
138
139 ieee80211_configure_filter(local);
140 }
141
142 /* regular interfaces */
143
144 static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
145 {
146 int meshhdrlen;
147 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
148
149 meshhdrlen = (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) ? 5 : 0;
150
151 /* FIX: what would be proper limits for MTU?
152 * This interface uses 802.3 frames. */
153 if (new_mtu < 256 ||
154 new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) {
155 return -EINVAL;
156 }
157
158 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
159 printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu);
160 #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
161 dev->mtu = new_mtu;
162 return 0;
163 }
164
165 static inline int identical_mac_addr_allowed(int type1, int type2)
166 {
167 return (type1 == IEEE80211_IF_TYPE_MNTR ||
168 type2 == IEEE80211_IF_TYPE_MNTR ||
169 (type1 == IEEE80211_IF_TYPE_AP &&
170 type2 == IEEE80211_IF_TYPE_WDS) ||
171 (type1 == IEEE80211_IF_TYPE_WDS &&
172 (type2 == IEEE80211_IF_TYPE_WDS ||
173 type2 == IEEE80211_IF_TYPE_AP)) ||
174 (type1 == IEEE80211_IF_TYPE_AP &&
175 type2 == IEEE80211_IF_TYPE_VLAN) ||
176 (type1 == IEEE80211_IF_TYPE_VLAN &&
177 (type2 == IEEE80211_IF_TYPE_AP ||
178 type2 == IEEE80211_IF_TYPE_VLAN)));
179 }
180
181 static int ieee80211_open(struct net_device *dev)
182 {
183 struct ieee80211_sub_if_data *sdata, *nsdata;
184 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
185 struct sta_info *sta;
186 struct ieee80211_if_init_conf conf;
187 u32 changed = 0;
188 int res;
189 bool need_hw_reconfig = 0;
190
191 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
192
193 /* we hold the RTNL here so can safely walk the list */
194 list_for_each_entry(nsdata, &local->interfaces, list) {
195 struct net_device *ndev = nsdata->dev;
196
197 if (ndev != dev && netif_running(ndev)) {
198 /*
199 * Allow only a single IBSS interface to be up at any
200 * time. This is restricted because beacon distribution
201 * cannot work properly if both are in the same IBSS.
202 *
203 * To remove this restriction we'd have to disallow them
204 * from setting the same SSID on different IBSS interfaces
205 * belonging to the same hardware. Then, however, we're
206 * faced with having to adopt two different TSF timers...
207 */
208 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
209 nsdata->vif.type == IEEE80211_IF_TYPE_IBSS)
210 return -EBUSY;
211
212 /*
213 * The remaining checks are only performed for interfaces
214 * with the same MAC address.
215 */
216 if (compare_ether_addr(dev->dev_addr, ndev->dev_addr))
217 continue;
218
219 /*
220 * check whether it may have the same address
221 */
222 if (!identical_mac_addr_allowed(sdata->vif.type,
223 nsdata->vif.type))
224 return -ENOTUNIQ;
225
226 /*
227 * can only add VLANs to enabled APs
228 */
229 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN &&
230 nsdata->vif.type == IEEE80211_IF_TYPE_AP)
231 sdata->bss = &nsdata->u.ap;
232 }
233 }
234
235 switch (sdata->vif.type) {
236 case IEEE80211_IF_TYPE_WDS:
237 if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
238 return -ENOLINK;
239 break;
240 case IEEE80211_IF_TYPE_VLAN:
241 if (!sdata->bss)
242 return -ENOLINK;
243 list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
244 break;
245 case IEEE80211_IF_TYPE_AP:
246 sdata->bss = &sdata->u.ap;
247 break;
248 case IEEE80211_IF_TYPE_STA:
249 case IEEE80211_IF_TYPE_MNTR:
250 case IEEE80211_IF_TYPE_IBSS:
251 case IEEE80211_IF_TYPE_MESH_POINT:
252 /* no special treatment */
253 break;
254 case IEEE80211_IF_TYPE_INVALID:
255 /* cannot happen */
256 WARN_ON(1);
257 break;
258 }
259
260 if (local->open_count == 0) {
261 res = 0;
262 if (local->ops->start)
263 res = local->ops->start(local_to_hw(local));
264 if (res)
265 goto err_del_bss;
266 need_hw_reconfig = 1;
267 ieee80211_led_radio(local, local->hw.conf.radio_enabled);
268 }
269
270 switch (sdata->vif.type) {
271 case IEEE80211_IF_TYPE_VLAN:
272 /* no need to tell driver */
273 break;
274 case IEEE80211_IF_TYPE_MNTR:
275 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
276 local->cooked_mntrs++;
277 break;
278 }
279
280 /* must be before the call to ieee80211_configure_filter */
281 local->monitors++;
282 if (local->monitors == 1)
283 local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP;
284
285 if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
286 local->fif_fcsfail++;
287 if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
288 local->fif_plcpfail++;
289 if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
290 local->fif_control++;
291 if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
292 local->fif_other_bss++;
293
294 netif_addr_lock_bh(local->mdev);
295 ieee80211_configure_filter(local);
296 netif_addr_unlock_bh(local->mdev);
297 break;
298 case IEEE80211_IF_TYPE_STA:
299 case IEEE80211_IF_TYPE_IBSS:
300 sdata->u.sta.flags &= ~IEEE80211_STA_PREV_BSSID_SET;
301 /* fall through */
302 default:
303 conf.vif = &sdata->vif;
304 conf.type = sdata->vif.type;
305 conf.mac_addr = dev->dev_addr;
306 res = local->ops->add_interface(local_to_hw(local), &conf);
307 if (res)
308 goto err_stop;
309
310 if (ieee80211_vif_is_mesh(&sdata->vif))
311 ieee80211_start_mesh(sdata->dev);
312 changed |= ieee80211_reset_erp_info(dev);
313 ieee80211_bss_info_change_notify(sdata, changed);
314 ieee80211_enable_keys(sdata);
315
316 if (sdata->vif.type == IEEE80211_IF_TYPE_STA &&
317 !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
318 netif_carrier_off(dev);
319 else
320 netif_carrier_on(dev);
321 }
322
323 if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) {
324 /* Create STA entry for the WDS peer */
325 sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
326 GFP_KERNEL);
327 if (!sta) {
328 res = -ENOMEM;
329 goto err_del_interface;
330 }
331
332 /* no locking required since STA is not live yet */
333 sta->flags |= WLAN_STA_AUTHORIZED;
334
335 res = sta_info_insert(sta);
336 if (res) {
337 /* STA has been freed */
338 goto err_del_interface;
339 }
340 }
341
342 if (local->open_count == 0) {
343 res = dev_open(local->mdev);
344 WARN_ON(res);
345 if (res)
346 goto err_del_interface;
347 tasklet_enable(&local->tx_pending_tasklet);
348 tasklet_enable(&local->tasklet);
349 }
350
351 /*
352 * set_multicast_list will be invoked by the networking core
353 * which will check whether any increments here were done in
354 * error and sync them down to the hardware as filter flags.
355 */
356 if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
357 atomic_inc(&local->iff_allmultis);
358
359 if (sdata->flags & IEEE80211_SDATA_PROMISC)
360 atomic_inc(&local->iff_promiscs);
361
362 local->open_count++;
363 if (need_hw_reconfig)
364 ieee80211_hw_config(local);
365
366 /*
367 * ieee80211_sta_work is disabled while network interface
368 * is down. Therefore, some configuration changes may not
369 * yet be effective. Trigger execution of ieee80211_sta_work
370 * to fix this.
371 */
372 if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
373 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
374 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
375 queue_work(local->hw.workqueue, &ifsta->work);
376 }
377
378 netif_start_queue(dev);
379
380 return 0;
381 err_del_interface:
382 local->ops->remove_interface(local_to_hw(local), &conf);
383 err_stop:
384 if (!local->open_count && local->ops->stop)
385 local->ops->stop(local_to_hw(local));
386 err_del_bss:
387 sdata->bss = NULL;
388 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN)
389 list_del(&sdata->u.vlan.list);
390 return res;
391 }
392
393 static int ieee80211_stop(struct net_device *dev)
394 {
395 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
396 struct ieee80211_local *local = sdata->local;
397 struct ieee80211_if_init_conf conf;
398 struct sta_info *sta;
399
400 /*
401 * Stop TX on this interface first.
402 */
403 netif_stop_queue(dev);
404
405 /*
406 * Now delete all active aggregation sessions.
407 */
408 rcu_read_lock();
409
410 list_for_each_entry_rcu(sta, &local->sta_list, list) {
411 if (sta->sdata == sdata)
412 ieee80211_sta_tear_down_BA_sessions(dev, sta->addr);
413 }
414
415 rcu_read_unlock();
416
417 /*
418 * Remove all stations associated with this interface.
419 *
420 * This must be done before calling ops->remove_interface()
421 * because otherwise we can later invoke ops->sta_notify()
422 * whenever the STAs are removed, and that invalidates driver
423 * assumptions about always getting a vif pointer that is valid
424 * (because if we remove a STA after ops->remove_interface()
425 * the driver will have removed the vif info already!)
426 *
427 * We could relax this and only unlink the stations from the
428 * hash table and list but keep them on a per-sdata list that
429 * will be inserted back again when the interface is brought
430 * up again, but I don't currently see a use case for that,
431 * except with WDS which gets a STA entry created when it is
432 * brought up.
433 */
434 sta_info_flush(local, sdata);
435
436 /*
437 * Don't count this interface for promisc/allmulti while it
438 * is down. dev_mc_unsync() will invoke set_multicast_list
439 * on the master interface which will sync these down to the
440 * hardware as filter flags.
441 */
442 if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
443 atomic_dec(&local->iff_allmultis);
444
445 if (sdata->flags & IEEE80211_SDATA_PROMISC)
446 atomic_dec(&local->iff_promiscs);
447
448 dev_mc_unsync(local->mdev, dev);
449
450 /* APs need special treatment */
451 if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
452 struct ieee80211_sub_if_data *vlan, *tmp;
453 struct beacon_data *old_beacon = sdata->u.ap.beacon;
454
455 /* remove beacon */
456 rcu_assign_pointer(sdata->u.ap.beacon, NULL);
457 synchronize_rcu();
458 kfree(old_beacon);
459
460 /* down all dependent devices, that is VLANs */
461 list_for_each_entry_safe(vlan, tmp, &sdata->u.ap.vlans,
462 u.vlan.list)
463 dev_close(vlan->dev);
464 WARN_ON(!list_empty(&sdata->u.ap.vlans));
465 }
466
467 local->open_count--;
468
469 switch (sdata->vif.type) {
470 case IEEE80211_IF_TYPE_VLAN:
471 list_del(&sdata->u.vlan.list);
472 /* no need to tell driver */
473 break;
474 case IEEE80211_IF_TYPE_MNTR:
475 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
476 local->cooked_mntrs--;
477 break;
478 }
479
480 local->monitors--;
481 if (local->monitors == 0)
482 local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP;
483
484 if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
485 local->fif_fcsfail--;
486 if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
487 local->fif_plcpfail--;
488 if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
489 local->fif_control--;
490 if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
491 local->fif_other_bss--;
492
493 netif_addr_lock_bh(local->mdev);
494 ieee80211_configure_filter(local);
495 netif_addr_unlock_bh(local->mdev);
496 break;
497 case IEEE80211_IF_TYPE_MESH_POINT:
498 case IEEE80211_IF_TYPE_STA:
499 case IEEE80211_IF_TYPE_IBSS:
500 sdata->u.sta.state = IEEE80211_DISABLED;
501 memset(sdata->u.sta.bssid, 0, ETH_ALEN);
502 del_timer_sync(&sdata->u.sta.timer);
503 /*
504 * When we get here, the interface is marked down.
505 * Call synchronize_rcu() to wait for the RX path
506 * should it be using the interface and enqueuing
507 * frames at this very time on another CPU.
508 */
509 synchronize_rcu();
510 skb_queue_purge(&sdata->u.sta.skb_queue);
511
512 if (local->scan_dev == sdata->dev) {
513 if (!local->ops->hw_scan) {
514 local->sta_sw_scanning = 0;
515 cancel_delayed_work(&local->scan_work);
516 } else
517 local->sta_hw_scanning = 0;
518 }
519
520 sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED;
521 kfree(sdata->u.sta.extra_ie);
522 sdata->u.sta.extra_ie = NULL;
523 sdata->u.sta.extra_ie_len = 0;
524 /* fall through */
525 default:
526 conf.vif = &sdata->vif;
527 conf.type = sdata->vif.type;
528 conf.mac_addr = dev->dev_addr;
529 /* disable all keys for as long as this netdev is down */
530 ieee80211_disable_keys(sdata);
531 local->ops->remove_interface(local_to_hw(local), &conf);
532 }
533
534 sdata->bss = NULL;
535
536 if (local->open_count == 0) {
537 if (netif_running(local->mdev))
538 dev_close(local->mdev);
539
540 if (local->ops->stop)
541 local->ops->stop(local_to_hw(local));
542
543 ieee80211_led_radio(local, 0);
544
545 flush_workqueue(local->hw.workqueue);
546
547 tasklet_disable(&local->tx_pending_tasklet);
548 tasklet_disable(&local->tasklet);
549 }
550
551 return 0;
552 }
553
554 int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
555 {
556 struct ieee80211_local *local = hw_to_local(hw);
557 struct netdev_queue *txq;
558 struct sta_info *sta;
559 struct ieee80211_sub_if_data *sdata;
560 u16 start_seq_num = 0;
561 u8 *state;
562 int ret;
563 DECLARE_MAC_BUF(mac);
564
565 if (tid >= STA_TID_NUM)
566 return -EINVAL;
567
568 #ifdef CONFIG_MAC80211_HT_DEBUG
569 printk(KERN_DEBUG "Open BA session requested for %s tid %u\n",
570 print_mac(mac, ra), tid);
571 #endif /* CONFIG_MAC80211_HT_DEBUG */
572
573 rcu_read_lock();
574
575 sta = sta_info_get(local, ra);
576 if (!sta) {
577 #ifdef CONFIG_MAC80211_HT_DEBUG
578 printk(KERN_DEBUG "Could not find the station\n");
579 #endif
580 ret = -ENOENT;
581 goto exit;
582 }
583
584 spin_lock_bh(&sta->lock);
585
586 /* we have tried too many times, receiver does not want A-MPDU */
587 if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
588 ret = -EBUSY;
589 goto err_unlock_sta;
590 }
591
592 state = &sta->ampdu_mlme.tid_state_tx[tid];
593 /* check if the TID is not in aggregation flow already */
594 if (*state != HT_AGG_STATE_IDLE) {
595 #ifdef CONFIG_MAC80211_HT_DEBUG
596 printk(KERN_DEBUG "BA request denied - session is not "
597 "idle on tid %u\n", tid);
598 #endif /* CONFIG_MAC80211_HT_DEBUG */
599 ret = -EAGAIN;
600 goto err_unlock_sta;
601 }
602
603 /* prepare A-MPDU MLME for Tx aggregation */
604 sta->ampdu_mlme.tid_tx[tid] =
605 kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
606 if (!sta->ampdu_mlme.tid_tx[tid]) {
607 #ifdef CONFIG_MAC80211_HT_DEBUG
608 if (net_ratelimit())
609 printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
610 tid);
611 #endif
612 ret = -ENOMEM;
613 goto err_unlock_sta;
614 }
615 /* Tx timer */
616 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
617 sta_addba_resp_timer_expired;
618 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data =
619 (unsigned long)&sta->timer_to_tid[tid];
620 init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
621
622 /* ensure that TX flow won't interrupt us
623 * until the end of the call to the requeue function */
624 txq = netdev_get_tx_queue(local->mdev, 0);
625 spin_lock_bh(&txq->lock);
626
627 /* create a new queue for this aggregation */
628 ret = ieee80211_ht_agg_queue_add(local, sta, tid);
629
630 /* in case no queue is available for aggregation,
631 * don't switch to aggregation */
632 if (ret) {
633 #ifdef CONFIG_MAC80211_HT_DEBUG
634 printk(KERN_DEBUG "BA request denied - queue unavailable for"
635 " tid %d\n", tid);
636 #endif /* CONFIG_MAC80211_HT_DEBUG */
637 goto err_unlock_queue;
638 }
639 sdata = sta->sdata;
640
641 /* Ok, the Addba frame hasn't been sent yet, but if the driver calls the
642 * callback right away, it must see that the flow has begun */
643 *state |= HT_ADDBA_REQUESTED_MSK;
644
645 if (local->ops->ampdu_action)
646 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START,
647 ra, tid, &start_seq_num);
648
649 if (ret) {
650 /* No need to requeue the packets in the agg queue, since we
651 * held the tx lock: no packet could be enqueued to the newly
652 * allocated queue */
653 ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
654 #ifdef CONFIG_MAC80211_HT_DEBUG
655 printk(KERN_DEBUG "BA request denied - HW unavailable for"
656 " tid %d\n", tid);
657 #endif /* CONFIG_MAC80211_HT_DEBUG */
658 *state = HT_AGG_STATE_IDLE;
659 goto err_unlock_queue;
660 }
661
662 /* Will put all the packets in the new SW queue */
663 ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
664 spin_unlock_bh(&txq->lock);
665 spin_unlock_bh(&sta->lock);
666
667 /* send an addBA request */
668 sta->ampdu_mlme.dialog_token_allocator++;
669 sta->ampdu_mlme.tid_tx[tid]->dialog_token =
670 sta->ampdu_mlme.dialog_token_allocator;
671 sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
672
673
674 ieee80211_send_addba_request(sta->sdata->dev, ra, tid,
675 sta->ampdu_mlme.tid_tx[tid]->dialog_token,
676 sta->ampdu_mlme.tid_tx[tid]->ssn,
677 0x40, 5000);
678 /* activate the timer for the recipient's addBA response */
679 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
680 jiffies + ADDBA_RESP_INTERVAL;
681 add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
682 #ifdef CONFIG_MAC80211_HT_DEBUG
683 printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
684 #endif
685 goto exit;
686
687 err_unlock_queue:
688 kfree(sta->ampdu_mlme.tid_tx[tid]);
689 sta->ampdu_mlme.tid_tx[tid] = NULL;
690 spin_unlock_bh(&txq->lock);
691 ret = -EBUSY;
692 err_unlock_sta:
693 spin_unlock_bh(&sta->lock);
694 exit:
695 rcu_read_unlock();
696 return ret;
697 }
698 EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
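
/*
 * Illustrative sketch (not part of this file): one way a low-level driver
 * might use the TX BA API exported above. All "mywifi_*" names are
 * hypothetical, and the point at which a real driver calls
 * ieee80211_start_tx_ba_cb_irqsafe() depends on when its hardware has the
 * aggregation queue ready (often from the IEEE80211_AMPDU_TX_START
 * ampdu_action or a completion interrupt).
 */
#if 0
/* e.g. called from the driver's rate scaling when a TID looks busy enough */
static void mywifi_try_tx_agg(struct ieee80211_hw *hw, u8 *ra, u16 tid)
{
	int ret = ieee80211_start_tx_ba_session(hw, ra, tid);

	if (ret)	/* TID busy, unknown STA, or too many ADDBA retries */
		return;
	/* on success mac80211 has sent the ADDBA request and armed the
	 * addba_resp_timer for the recipient's response */
}

/* e.g. called once the device reports its aggregation context is set up */
static void mywifi_tx_agg_ready(struct ieee80211_hw *hw, const u8 *ra, u16 tid)
{
	/* safe from IRQ context: defers the real work to the tasklet */
	ieee80211_start_tx_ba_cb_irqsafe(hw, ra, tid);
}
#endif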
699
700 int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
701 u8 *ra, u16 tid,
702 enum ieee80211_back_parties initiator)
703 {
704 struct ieee80211_local *local = hw_to_local(hw);
705 struct sta_info *sta;
706 u8 *state;
707 int ret = 0;
708 DECLARE_MAC_BUF(mac);
709
710 if (tid >= STA_TID_NUM)
711 return -EINVAL;
712
713 rcu_read_lock();
714 sta = sta_info_get(local, ra);
715 if (!sta) {
716 rcu_read_unlock();
717 return -ENOENT;
718 }
719
720 /* check if the TID is in aggregation */
721 state = &sta->ampdu_mlme.tid_state_tx[tid];
722 spin_lock_bh(&sta->lock);
723
724 if (*state != HT_AGG_STATE_OPERATIONAL) {
725 ret = -ENOENT;
726 goto stop_BA_exit;
727 }
728
729 #ifdef CONFIG_MAC80211_HT_DEBUG
730 printk(KERN_DEBUG "Tx BA session stop requested for %s tid %u\n",
731 print_mac(mac, ra), tid);
732 #endif /* CONFIG_MAC80211_HT_DEBUG */
733
734 ieee80211_stop_queue(hw, sta->tid_to_tx_q[tid]);
735
736 *state = HT_AGG_STATE_REQ_STOP_BA_MSK |
737 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
738
739 if (local->ops->ampdu_action)
740 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_STOP,
741 ra, tid, NULL);
742
743 /* in case the HW denied going back to legacy, keep the session operational */
744 if (ret) {
745 WARN_ON(ret != -EBUSY);
746 *state = HT_AGG_STATE_OPERATIONAL;
747 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
748 goto stop_BA_exit;
749 }
750
751 stop_BA_exit:
752 spin_unlock_bh(&sta->lock);
753 rcu_read_unlock();
754 return ret;
755 }
756 EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
757
758 void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
759 {
760 struct ieee80211_local *local = hw_to_local(hw);
761 struct sta_info *sta;
762 u8 *state;
763 DECLARE_MAC_BUF(mac);
764
765 if (tid >= STA_TID_NUM) {
766 #ifdef CONFIG_MAC80211_HT_DEBUG
767 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
768 tid, STA_TID_NUM);
769 #endif
770 return;
771 }
772
773 rcu_read_lock();
774 sta = sta_info_get(local, ra);
775 if (!sta) {
776 rcu_read_unlock();
777 #ifdef CONFIG_MAC80211_HT_DEBUG
778 printk(KERN_DEBUG "Could not find station: %s\n",
779 print_mac(mac, ra));
780 #endif
781 return;
782 }
783
784 state = &sta->ampdu_mlme.tid_state_tx[tid];
785 spin_lock_bh(&sta->lock);
786
787 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
788 #ifdef CONFIG_MAC80211_HT_DEBUG
789 printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
790 *state);
791 #endif
792 spin_unlock_bh(&sta->lock);
793 rcu_read_unlock();
794 return;
795 }
796
797 WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK);
798
799 *state |= HT_ADDBA_DRV_READY_MSK;
800
801 if (*state == HT_AGG_STATE_OPERATIONAL) {
802 #ifdef CONFIG_MAC80211_HT_DEBUG
803 printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid);
804 #endif
805 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
806 }
807 spin_unlock_bh(&sta->lock);
808 rcu_read_unlock();
809 }
810 EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
811
812 void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
813 {
814 struct ieee80211_local *local = hw_to_local(hw);
815 struct netdev_queue *txq;
816 struct sta_info *sta;
817 u8 *state;
818 int agg_queue;
819 DECLARE_MAC_BUF(mac);
820
821 if (tid >= STA_TID_NUM) {
822 #ifdef CONFIG_MAC80211_HT_DEBUG
823 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
824 tid, STA_TID_NUM);
825 #endif
826 return;
827 }
828
829 #ifdef CONFIG_MAC80211_HT_DEBUG
830 printk(KERN_DEBUG "Stopping Tx BA session for %s tid %d\n",
831 print_mac(mac, ra), tid);
832 #endif /* CONFIG_MAC80211_HT_DEBUG */
833
834 rcu_read_lock();
835 sta = sta_info_get(local, ra);
836 if (!sta) {
837 #ifdef CONFIG_MAC80211_HT_DEBUG
838 printk(KERN_DEBUG "Could not find station: %s\n",
839 print_mac(mac, ra));
840 #endif
841 rcu_read_unlock();
842 return;
843 }
844 state = &sta->ampdu_mlme.tid_state_tx[tid];
845
846 /* NOTE: no need to use sta->lock in this state check, as
847 * ieee80211_stop_tx_ba_session will let only
848 * one stop call pass through per sta/tid */
849 if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
850 #ifdef CONFIG_MAC80211_HT_DEBUG
851 printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
852 #endif
853 rcu_read_unlock();
854 return;
855 }
856
857 if (*state & HT_AGG_STATE_INITIATOR_MSK)
858 ieee80211_send_delba(sta->sdata->dev, ra, tid,
859 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
860
861 agg_queue = sta->tid_to_tx_q[tid];
862
863 /* avoid ordering issues: we are the only one that can modify
864 * the content of the qdiscs */
865 txq = netdev_get_tx_queue(local->mdev, 0);
866 spin_lock_bh(&txq->lock);
867 /* remove the queue for this aggregation */
868 ieee80211_ht_agg_queue_remove(local, sta, tid, 1);
869 spin_unlock_bh(&txq->lock);
870
871 /* we just requeued all the frames that were in the removed
872 * queue, and since we might miss a softirq we do netif_schedule_queue.
873 * ieee80211_wake_queue is not used here as this queue is not
874 * necessarily stopped */
875 netif_schedule_queue(txq);
876 spin_lock_bh(&sta->lock);
877 *state = HT_AGG_STATE_IDLE;
878 sta->ampdu_mlme.addba_req_num[tid] = 0;
879 kfree(sta->ampdu_mlme.tid_tx[tid]);
880 sta->ampdu_mlme.tid_tx[tid] = NULL;
881 spin_unlock_bh(&sta->lock);
882
883 rcu_read_unlock();
884 }
885 EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
886
887 void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
888 const u8 *ra, u16 tid)
889 {
890 struct ieee80211_local *local = hw_to_local(hw);
891 struct ieee80211_ra_tid *ra_tid;
892 struct sk_buff *skb = dev_alloc_skb(0);
893
894 if (unlikely(!skb)) {
895 #ifdef CONFIG_MAC80211_HT_DEBUG
896 if (net_ratelimit())
897 printk(KERN_WARNING "%s: Not enough memory, "
898 "dropping start BA session\n", wiphy_name(local->hw.wiphy));
899 #endif
900 return;
901 }
902 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
903 memcpy(&ra_tid->ra, ra, ETH_ALEN);
904 ra_tid->tid = tid;
905
906 skb->pkt_type = IEEE80211_ADDBA_MSG;
907 skb_queue_tail(&local->skb_queue, skb);
908 tasklet_schedule(&local->tasklet);
909 }
910 EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
911
912 void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
913 const u8 *ra, u16 tid)
914 {
915 struct ieee80211_local *local = hw_to_local(hw);
916 struct ieee80211_ra_tid *ra_tid;
917 struct sk_buff *skb = dev_alloc_skb(0);
918
919 if (unlikely(!skb)) {
920 #ifdef CONFIG_MAC80211_HT_DEBUG
921 if (net_ratelimit())
922 printk(KERN_WARNING "%s: Not enough memory, "
923 "dropping stop BA session\n", wiphy_name(local->hw.wiphy));
924 #endif
925 return;
926 }
927 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
928 memcpy(&ra_tid->ra, ra, ETH_ALEN);
929 ra_tid->tid = tid;
930
931 skb->pkt_type = IEEE80211_DELBA_MSG;
932 skb_queue_tail(&local->skb_queue, skb);
933 tasklet_schedule(&local->tasklet);
934 }
935 EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
936
937 static void ieee80211_set_multicast_list(struct net_device *dev)
938 {
939 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
940 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
941 int allmulti, promisc, sdata_allmulti, sdata_promisc;
942
943 allmulti = !!(dev->flags & IFF_ALLMULTI);
944 promisc = !!(dev->flags & IFF_PROMISC);
945 sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI);
946 sdata_promisc = !!(sdata->flags & IEEE80211_SDATA_PROMISC);
947
948 if (allmulti != sdata_allmulti) {
949 if (dev->flags & IFF_ALLMULTI)
950 atomic_inc(&local->iff_allmultis);
951 else
952 atomic_dec(&local->iff_allmultis);
953 sdata->flags ^= IEEE80211_SDATA_ALLMULTI;
954 }
955
956 if (promisc != sdata_promisc) {
957 if (dev->flags & IFF_PROMISC)
958 atomic_inc(&local->iff_promiscs);
959 else
960 atomic_dec(&local->iff_promiscs);
961 sdata->flags ^= IEEE80211_SDATA_PROMISC;
962 }
963
964 dev_mc_sync(local->mdev, dev);
965 }
966
967 static const struct header_ops ieee80211_header_ops = {
968 .create = eth_header,
969 .parse = header_parse_80211,
970 .rebuild = eth_rebuild_header,
971 .cache = eth_header_cache,
972 .cache_update = eth_header_cache_update,
973 };
974
975 void ieee80211_if_setup(struct net_device *dev)
976 {
977 ether_setup(dev);
978 dev->hard_start_xmit = ieee80211_subif_start_xmit;
979 dev->wireless_handlers = &ieee80211_iw_handler_def;
980 dev->set_multicast_list = ieee80211_set_multicast_list;
981 dev->change_mtu = ieee80211_change_mtu;
982 dev->open = ieee80211_open;
983 dev->stop = ieee80211_stop;
984 dev->destructor = free_netdev;
985 }
986
987 /* everything else */
988
989 int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed)
990 {
991 struct ieee80211_local *local = sdata->local;
992 struct ieee80211_if_conf conf;
993
994 if (WARN_ON(!netif_running(sdata->dev)))
995 return 0;
996
997 if (!local->ops->config_interface)
998 return 0;
999
1000 memset(&conf, 0, sizeof(conf));
1001 conf.changed = changed;
1002
1003 if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
1004 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
1005 conf.bssid = sdata->u.sta.bssid;
1006 conf.ssid = sdata->u.sta.ssid;
1007 conf.ssid_len = sdata->u.sta.ssid_len;
1008 } else if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
1009 conf.bssid = sdata->dev->dev_addr;
1010 conf.ssid = sdata->u.ap.ssid;
1011 conf.ssid_len = sdata->u.ap.ssid_len;
1012 } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
1013 u8 zero[ETH_ALEN] = { 0 };
1014 conf.bssid = zero;
1015 conf.ssid = zero;
1016 conf.ssid_len = 0;
1017 } else {
1018 WARN_ON(1);
1019 return -EINVAL;
1020 }
1021
1022 if (WARN_ON(!conf.bssid && (changed & IEEE80211_IFCC_BSSID)))
1023 return -EINVAL;
1024
1025 if (WARN_ON(!conf.ssid && (changed & IEEE80211_IFCC_SSID)))
1026 return -EINVAL;
1027
1028 return local->ops->config_interface(local_to_hw(local),
1029 &sdata->vif, &conf);
1030 }
1031
1032 int ieee80211_hw_config(struct ieee80211_local *local)
1033 {
1034 struct ieee80211_channel *chan;
1035 int ret = 0;
1036
1037 if (local->sta_sw_scanning)
1038 chan = local->scan_channel;
1039 else
1040 chan = local->oper_channel;
1041
1042 local->hw.conf.channel = chan;
1043
1044 if (!local->hw.conf.power_level)
1045 local->hw.conf.power_level = chan->max_power;
1046 else
1047 local->hw.conf.power_level = min(chan->max_power,
1048 local->hw.conf.power_level);
1049
1050 local->hw.conf.max_antenna_gain = chan->max_antenna_gain;
1051
1052 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1053 printk(KERN_DEBUG "%s: HW CONFIG: freq=%d\n",
1054 wiphy_name(local->hw.wiphy), chan->center_freq);
1055 #endif
1056
1057 if (local->open_count)
1058 ret = local->ops->config(local_to_hw(local), &local->hw.conf);
1059
1060 return ret;
1061 }
1062
1063 /**
1064 * ieee80211_handle_ht should be used only after the legacy configuration
1065 * (namely the band) has been determined, as HT configuration depends upon
1066 * the hardware's HT abilities for a _specific_ band.
1067 */
1068 u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht,
1069 struct ieee80211_ht_info *req_ht_cap,
1070 struct ieee80211_ht_bss_info *req_bss_cap)
1071 {
1072 struct ieee80211_conf *conf = &local->hw.conf;
1073 struct ieee80211_supported_band *sband;
1074 struct ieee80211_ht_info ht_conf;
1075 struct ieee80211_ht_bss_info ht_bss_conf;
1076 u32 changed = 0;
1077 int i;
1078 u8 max_tx_streams = IEEE80211_HT_CAP_MAX_STREAMS;
1079 u8 tx_mcs_set_cap;
1080
1081 sband = local->hw.wiphy->bands[conf->channel->band];
1082
1083 memset(&ht_conf, 0, sizeof(struct ieee80211_ht_info));
1084 memset(&ht_bss_conf, 0, sizeof(struct ieee80211_ht_bss_info));
1085
1086 /* HT is not supported */
1087 if (!sband->ht_info.ht_supported) {
1088 conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
1089 goto out;
1090 }
1091
1092 /* disable HT */
1093 if (!enable_ht) {
1094 if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE)
1095 changed |= BSS_CHANGED_HT;
1096 conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
1097 conf->ht_conf.ht_supported = 0;
1098 goto out;
1099 }
1100
1101
1102 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE))
1103 changed |= BSS_CHANGED_HT;
1104
1105 conf->flags |= IEEE80211_CONF_SUPPORT_HT_MODE;
1106 ht_conf.ht_supported = 1;
1107
1108 ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap;
1109 ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS);
1110 ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS;
1111 ht_bss_conf.primary_channel = req_bss_cap->primary_channel;
1112 ht_bss_conf.bss_cap = req_bss_cap->bss_cap;
1113 ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode;
1114
1115 ht_conf.ampdu_factor = req_ht_cap->ampdu_factor;
1116 ht_conf.ampdu_density = req_ht_cap->ampdu_density;
1117
1118 /* Bits 96-100 */
1119 tx_mcs_set_cap = sband->ht_info.supp_mcs_set[12];
1120
1121 /* configure supported Tx MCS according to the requested MCS
1122 * (based in most cases on the Rx capabilities of the peer) and our own
1123 * Tx MCS capabilities (as defined by the low-level driver's HW
1124 * Tx capabilities) */
1125 if (!(tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_DEFINED))
1126 goto check_changed;
1127
1128 /* Counting from 0, therefore + 1 */
1129 if (tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_RX_DIFF)
1130 max_tx_streams = ((tx_mcs_set_cap &
1131 IEEE80211_HT_CAP_MCS_TX_STREAMS) >> 2) + 1;
1132
1133 for (i = 0; i < max_tx_streams; i++)
1134 ht_conf.supp_mcs_set[i] =
1135 sband->ht_info.supp_mcs_set[i] &
1136 req_ht_cap->supp_mcs_set[i];
1137
1138 if (tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_UEQM)
1139 for (i = IEEE80211_SUPP_MCS_SET_UEQM;
1140 i < IEEE80211_SUPP_MCS_SET_LEN; i++)
1141 ht_conf.supp_mcs_set[i] =
1142 sband->ht_info.supp_mcs_set[i] &
1143 req_ht_cap->supp_mcs_set[i];
1144
1145 check_changed:
1146 /* if the BSS configuration changed, store the new one */
1147 if (memcmp(&conf->ht_conf, &ht_conf, sizeof(ht_conf)) ||
1148 memcmp(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf))) {
1149 changed |= BSS_CHANGED_HT;
1150 memcpy(&conf->ht_conf, &ht_conf, sizeof(ht_conf));
1151 memcpy(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf));
1152 }
1153 out:
1154 return changed;
1155 }
1156
1157 void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
1158 u32 changed)
1159 {
1160 struct ieee80211_local *local = sdata->local;
1161
1162 if (!changed)
1163 return;
1164
1165 if (local->ops->bss_info_changed)
1166 local->ops->bss_info_changed(local_to_hw(local),
1167 &sdata->vif,
1168 &sdata->bss_conf,
1169 changed);
1170 }
1171
1172 u32 ieee80211_reset_erp_info(struct net_device *dev)
1173 {
1174 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1175
1176 sdata->bss_conf.use_cts_prot = 0;
1177 sdata->bss_conf.use_short_preamble = 0;
1178 return BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE;
1179 }
1180
1181 void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
1182 struct sk_buff *skb)
1183 {
1184 struct ieee80211_local *local = hw_to_local(hw);
1185 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1186 int tmp;
1187
1188 skb->dev = local->mdev;
1189 skb->pkt_type = IEEE80211_TX_STATUS_MSG;
1190 skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ?
1191 &local->skb_queue : &local->skb_queue_unreliable, skb);
1192 tmp = skb_queue_len(&local->skb_queue) +
1193 skb_queue_len(&local->skb_queue_unreliable);
1194 while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT &&
1195 (skb = skb_dequeue(&local->skb_queue_unreliable))) {
1196 dev_kfree_skb_irq(skb);
1197 tmp--;
1198 I802_DEBUG_INC(local->tx_status_drop);
1199 }
1200 tasklet_schedule(&local->tasklet);
1201 }
1202 EXPORT_SYMBOL(ieee80211_tx_status_irqsafe);
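
/*
 * Illustrative sketch (not part of this file): how a driver's TX-completion
 * interrupt might hand a frame back through the irqsafe entry point above.
 * The "mywifi_*" name and the acked/retries parameters are hypothetical;
 * the skb must be the one mac80211 handed to ops->tx, with its tx_info
 * updated in place.
 */
#if 0
static void mywifi_tx_done_irq(struct ieee80211_hw *hw, struct sk_buff *skb,
			       bool acked, u8 retries)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	if (acked)
		info->flags |= IEEE80211_TX_STAT_ACK;
	info->status.retry_count = retries;

	/* queues the skb for the tasklet; nothing is processed in IRQ context */
	ieee80211_tx_status_irqsafe(hw, skb);
}
#endif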
1203
1204 static void ieee80211_tasklet_handler(unsigned long data)
1205 {
1206 struct ieee80211_local *local = (struct ieee80211_local *) data;
1207 struct sk_buff *skb;
1208 struct ieee80211_rx_status rx_status;
1209 struct ieee80211_ra_tid *ra_tid;
1210
1211 while ((skb = skb_dequeue(&local->skb_queue)) ||
1212 (skb = skb_dequeue(&local->skb_queue_unreliable))) {
1213 switch (skb->pkt_type) {
1214 case IEEE80211_RX_MSG:
1215 /* status is in skb->cb */
1216 memcpy(&rx_status, skb->cb, sizeof(rx_status));
1217 /* Clear skb->pkt_type in order to not confuse kernel
1218 * netstack. */
1219 skb->pkt_type = 0;
1220 __ieee80211_rx(local_to_hw(local), skb, &rx_status);
1221 break;
1222 case IEEE80211_TX_STATUS_MSG:
1223 skb->pkt_type = 0;
1224 ieee80211_tx_status(local_to_hw(local), skb);
1225 break;
1226 case IEEE80211_DELBA_MSG:
1227 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
1228 ieee80211_stop_tx_ba_cb(local_to_hw(local),
1229 ra_tid->ra, ra_tid->tid);
1230 dev_kfree_skb(skb);
1231 break;
1232 case IEEE80211_ADDBA_MSG:
1233 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
1234 ieee80211_start_tx_ba_cb(local_to_hw(local),
1235 ra_tid->ra, ra_tid->tid);
1236 dev_kfree_skb(skb);
1237 break;
1238 default:
1239 WARN_ON(1);
1240 dev_kfree_skb(skb);
1241 break;
1242 }
1243 }
1244 }
1245
1246 /* Remove added headers (e.g., QoS control), encryption header/MIC, etc. to
1247 * make a prepared TX frame (one that has been given to the hw) look like a
1248 * brand-new IEEE 802.11 frame that is ready to go through TX processing again.
1249 * Also, tx_packet_data in cb is restored from tx_control. */
1250 static void ieee80211_remove_tx_extra(struct ieee80211_local *local,
1251 struct ieee80211_key *key,
1252 struct sk_buff *skb)
1253 {
1254 int hdrlen, iv_len, mic_len;
1255 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1256
1257 info->flags &= IEEE80211_TX_CTL_REQ_TX_STATUS |
1258 IEEE80211_TX_CTL_DO_NOT_ENCRYPT |
1259 IEEE80211_TX_CTL_REQUEUE |
1260 IEEE80211_TX_CTL_EAPOL_FRAME;
1261
1262 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1263
1264 if (!key)
1265 goto no_key;
1266
1267 switch (key->conf.alg) {
1268 case ALG_WEP:
1269 iv_len = WEP_IV_LEN;
1270 mic_len = WEP_ICV_LEN;
1271 break;
1272 case ALG_TKIP:
1273 iv_len = TKIP_IV_LEN;
1274 mic_len = TKIP_ICV_LEN;
1275 break;
1276 case ALG_CCMP:
1277 iv_len = CCMP_HDR_LEN;
1278 mic_len = CCMP_MIC_LEN;
1279 break;
1280 default:
1281 goto no_key;
1282 }
1283
1284 if (skb->len >= mic_len &&
1285 !(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
1286 skb_trim(skb, skb->len - mic_len);
1287 if (skb->len >= iv_len && skb->len > hdrlen) {
1288 memmove(skb->data + iv_len, skb->data, hdrlen);
1289 skb_pull(skb, iv_len);
1290 }
1291
1292 no_key:
1293 {
1294 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1295 u16 fc = le16_to_cpu(hdr->frame_control);
1296 if ((fc & 0x8C) == 0x88) /* QoS Control Field */ {
1297 fc &= ~IEEE80211_STYPE_QOS_DATA;
1298 hdr->frame_control = cpu_to_le16(fc);
1299 memmove(skb->data + 2, skb->data, hdrlen - 2);
1300 skb_pull(skb, 2);
1301 }
1302 }
1303 }
1304
1305 static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
1306 struct sta_info *sta,
1307 struct sk_buff *skb)
1308 {
1309 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1310
1311 sta->tx_filtered_count++;
1312
1313 /*
1314 * Clear the TX filter mask for this STA when sending the next
1315 * packet. If the STA went to power save mode, this will happen
1316 * when it wakes up for the next time.
1317 */
1318 set_sta_flags(sta, WLAN_STA_CLEAR_PS_FILT);
1319
1320 /*
1321 * This code races in the following way:
1322 *
1323 * (1) STA sends frame indicating it will go to sleep and does so
1324 * (2) hardware/firmware adds STA to filter list, passes frame up
1325 * (3) hardware/firmware processes TX fifo and suppresses a frame
1326 * (4) we get TX status before having processed the frame and
1327 * knowing that the STA has gone to sleep.
1328 *
1329 * This is actually quite unlikely even when both those events are
1330 * processed from interrupts coming in quickly after one another or
1331 * even at the same time because we queue both TX status events and
1332 * RX frames to be processed by a tasklet and process them in the
1333 * same order that they were received or TX status last. Hence, there
1334 * is no race as long as the frame RX is processed before the next TX
1335 * status, which drivers can ensure, see below.
1336 *
1337 * Note that this can only happen if the hardware or firmware can
1338 * actually add STAs to the filter list, if this is done by the
1339 * driver in response to set_tim() (which will only reduce the race
1340 * this whole filtering tries to solve, not completely solve it)
1341 * this situation cannot happen.
1342 *
1343 * To completely solve this race drivers need to make sure that they
1344 * (a) don't mix the irq-safe/not irq-safe TX status/RX processing
1345 * functions and
1346 * (b) always process RX events before TX status events if ordering
1347 * can be unknown, for example with different interrupt status
1348 * bits.
1349 */
1350 if (test_sta_flags(sta, WLAN_STA_PS) &&
1351 skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
1352 ieee80211_remove_tx_extra(local, sta->key, skb);
1353 skb_queue_tail(&sta->tx_filtered, skb);
1354 return;
1355 }
1356
1357 if (!test_sta_flags(sta, WLAN_STA_PS) &&
1358 !(info->flags & IEEE80211_TX_CTL_REQUEUE)) {
1359 /* Software retry the packet once */
1360 info->flags |= IEEE80211_TX_CTL_REQUEUE;
1361 ieee80211_remove_tx_extra(local, sta->key, skb);
1362 dev_queue_xmit(skb);
1363 return;
1364 }
1365
1366 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1367 if (net_ratelimit())
1368 printk(KERN_DEBUG "%s: dropped TX filtered frame, "
1369 "queue_len=%d PS=%d @%lu\n",
1370 wiphy_name(local->hw.wiphy),
1371 skb_queue_len(&sta->tx_filtered),
1372 !!test_sta_flags(sta, WLAN_STA_PS), jiffies);
1373 #endif
1374 dev_kfree_skb(skb);
1375 }
1376
1377 void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
1378 {
1379 struct sk_buff *skb2;
1380 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1381 struct ieee80211_local *local = hw_to_local(hw);
1382 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1383 u16 frag, type;
1384 __le16 fc;
1385 struct ieee80211_tx_status_rtap_hdr *rthdr;
1386 struct ieee80211_sub_if_data *sdata;
1387 struct net_device *prev_dev = NULL;
1388 struct sta_info *sta;
1389
1390 rcu_read_lock();
1391
1392 if (info->status.excessive_retries) {
1393 sta = sta_info_get(local, hdr->addr1);
1394 if (sta) {
1395 if (test_sta_flags(sta, WLAN_STA_PS)) {
1396 /*
1397 * The STA is in power save mode, so assume
1398 * that this TX packet failed because of that.
1399 */
1400 ieee80211_handle_filtered_frame(local, sta, skb);
1401 rcu_read_unlock();
1402 return;
1403 }
1404 }
1405 }
1406
1407 fc = hdr->frame_control;
1408
1409 if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
1410 (ieee80211_is_data_qos(fc))) {
1411 u16 tid, ssn;
1412 u8 *qc;
1413 sta = sta_info_get(local, hdr->addr1);
1414 if (sta) {
1415 qc = ieee80211_get_qos_ctl(hdr);
1416 tid = qc[0] & 0xf;
1417 ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10)
1418 & IEEE80211_SCTL_SEQ);
1419 ieee80211_send_bar(sta->sdata->dev, hdr->addr1,
1420 tid, ssn);
1421 }
1422 }
1423
1424 if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
1425 sta = sta_info_get(local, hdr->addr1);
1426 if (sta) {
1427 ieee80211_handle_filtered_frame(local, sta, skb);
1428 rcu_read_unlock();
1429 return;
1430 }
1431 } else
1432 rate_control_tx_status(local->mdev, skb);
1433
1434 rcu_read_unlock();
1435
1436 ieee80211_led_tx(local, 0);
1437
1438 /* SNMP counters
1439 * Fragments are passed to low-level drivers as separate skbs, so these
1440 * are actually fragments, not frames. Update frame counters only for
1441 * the first fragment of the frame. */
1442
1443 frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
1444 type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
1445
1446 if (info->flags & IEEE80211_TX_STAT_ACK) {
1447 if (frag == 0) {
1448 local->dot11TransmittedFrameCount++;
1449 if (is_multicast_ether_addr(hdr->addr1))
1450 local->dot11MulticastTransmittedFrameCount++;
1451 if (info->status.retry_count > 0)
1452 local->dot11RetryCount++;
1453 if (info->status.retry_count > 1)
1454 local->dot11MultipleRetryCount++;
1455 }
1456
1457 /* This counter shall be incremented for an acknowledged MPDU
1458 * with an individual address in the address 1 field or an MPDU
1459 * with a multicast address in the address 1 field of type Data
1460 * or Management. */
1461 if (!is_multicast_ether_addr(hdr->addr1) ||
1462 type == IEEE80211_FTYPE_DATA ||
1463 type == IEEE80211_FTYPE_MGMT)
1464 local->dot11TransmittedFragmentCount++;
1465 } else {
1466 if (frag == 0)
1467 local->dot11FailedCount++;
1468 }
1469
1470 /* this was a transmitted frame, but now we want to reuse it */
1471 skb_orphan(skb);
1472
1473 /*
1474 * This is a bit racy but we can avoid a lot of work
1475 * with this test...
1476 */
1477 if (!local->monitors && !local->cooked_mntrs) {
1478 dev_kfree_skb(skb);
1479 return;
1480 }
1481
1482 /* send frame to monitor interfaces now */
1483
1484 if (skb_headroom(skb) < sizeof(*rthdr)) {
1485 printk(KERN_ERR "ieee80211_tx_status: headroom too small\n");
1486 dev_kfree_skb(skb);
1487 return;
1488 }
1489
1490 rthdr = (struct ieee80211_tx_status_rtap_hdr *)
1491 skb_push(skb, sizeof(*rthdr));
1492
1493 memset(rthdr, 0, sizeof(*rthdr));
1494 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
1495 rthdr->hdr.it_present =
1496 cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
1497 (1 << IEEE80211_RADIOTAP_DATA_RETRIES));
1498
1499 if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
1500 !is_multicast_ether_addr(hdr->addr1))
1501 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
1502
1503 if ((info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) &&
1504 (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT))
1505 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
1506 else if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
1507 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
1508
1509 rthdr->data_retries = info->status.retry_count;
1510
1511 /* XXX: is this sufficient for BPF? */
1512 skb_set_mac_header(skb, 0);
1513 skb->ip_summed = CHECKSUM_UNNECESSARY;
1514 skb->pkt_type = PACKET_OTHERHOST;
1515 skb->protocol = htons(ETH_P_802_2);
1516 memset(skb->cb, 0, sizeof(skb->cb));
1517
1518 rcu_read_lock();
1519 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
1520 if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR) {
1521 if (!netif_running(sdata->dev))
1522 continue;
1523
1524 if (prev_dev) {
1525 skb2 = skb_clone(skb, GFP_ATOMIC);
1526 if (skb2) {
1527 skb2->dev = prev_dev;
1528 netif_rx(skb2);
1529 }
1530 }
1531
1532 prev_dev = sdata->dev;
1533 }
1534 }
1535 if (prev_dev) {
1536 skb->dev = prev_dev;
1537 netif_rx(skb);
1538 skb = NULL;
1539 }
1540 rcu_read_unlock();
1541 dev_kfree_skb(skb);
1542 }
1543 EXPORT_SYMBOL(ieee80211_tx_status);
1544
1545 struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
1546 const struct ieee80211_ops *ops)
1547 {
1548 struct ieee80211_local *local;
1549 int priv_size;
1550 struct wiphy *wiphy;
1551
1552 /* Ensure 32-byte alignment of our private data and hw private data.
1553 * We use the wiphy priv data for both our ieee80211_local and for
1554 * the driver's private data
1555 *
1556 * In memory it'll be like this:
1557 *
1558 * +-------------------------+
1559 * | struct wiphy |
1560 * +-------------------------+
1561 * | struct ieee80211_local |
1562 * +-------------------------+
1563 * | driver's private data |
1564 * +-------------------------+
1565 *
1566 */
1567 priv_size = ((sizeof(struct ieee80211_local) +
1568 NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST) +
1569 priv_data_len;
1570
1571 wiphy = wiphy_new(&mac80211_config_ops, priv_size);
1572
1573 if (!wiphy)
1574 return NULL;
1575
1576 wiphy->privid = mac80211_wiphy_privid;
1577
1578 local = wiphy_priv(wiphy);
1579 local->hw.wiphy = wiphy;
1580
1581 local->hw.priv = (char *)local +
1582 ((sizeof(struct ieee80211_local) +
1583 NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
1584
1585 BUG_ON(!ops->tx);
1586 BUG_ON(!ops->start);
1587 BUG_ON(!ops->stop);
1588 BUG_ON(!ops->config);
1589 BUG_ON(!ops->add_interface);
1590 BUG_ON(!ops->remove_interface);
1591 BUG_ON(!ops->configure_filter);
1592 local->ops = ops;
1593
1594 local->hw.queues = 1; /* default */
1595
1596 local->bridge_packets = 1;
1597
1598 local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
1599 local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
1600 local->short_retry_limit = 7;
1601 local->long_retry_limit = 4;
1602 local->hw.conf.radio_enabled = 1;
1603
1604 INIT_LIST_HEAD(&local->interfaces);
1605
1606 spin_lock_init(&local->key_lock);
1607
1608 INIT_DELAYED_WORK(&local->scan_work, ieee80211_sta_scan_work);
1609
1610 sta_info_init(local);
1611
1612 tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
1613 (unsigned long)local);
1614 tasklet_disable(&local->tx_pending_tasklet);
1615
1616 tasklet_init(&local->tasklet,
1617 ieee80211_tasklet_handler,
1618 (unsigned long) local);
1619 tasklet_disable(&local->tasklet);
1620
1621 skb_queue_head_init(&local->skb_queue);
1622 skb_queue_head_init(&local->skb_queue_unreliable);
1623
1624 return local_to_hw(local);
1625 }
1626 EXPORT_SYMBOL(ieee80211_alloc_hw);
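
/*
 * Illustrative sketch (not part of this file): the usual driver-side pairing
 * of ieee80211_alloc_hw() above with ieee80211_register_hw() below. The
 * "mywifi_*" names and struct mywifi_priv are hypothetical placeholders for
 * a driver's own probe path and private state.
 */
#if 0
static int mywifi_probe(struct device *dev, const struct ieee80211_ops *ops)
{
	struct ieee80211_hw *hw;
	struct mywifi_priv *priv;
	int err;

	hw = ieee80211_alloc_hw(sizeof(*priv), ops);
	if (!hw)
		return -ENOMEM;

	priv = hw->priv;		/* aligned driver area, see diagram above */
	SET_IEEE80211_DEV(hw, dev);	/* parent device for the wiphy */

	/* ... fill in hw->wiphy->bands[], hw->flags, queues, perm_addr ... */

	err = ieee80211_register_hw(hw);
	if (err) {
		ieee80211_free_hw(hw);
		return err;
	}
	return 0;
}
#endif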
1627
1628 int ieee80211_register_hw(struct ieee80211_hw *hw)
1629 {
1630 struct ieee80211_local *local = hw_to_local(hw);
1631 const char *name;
1632 int result;
1633 enum ieee80211_band band;
1634 struct net_device *mdev;
1635 struct wireless_dev *mwdev;
1636
1637 /*
1638 * generic code guarantees at least one band,
1639 * set this very early because much code assumes
1640 * that hw.conf.channel is assigned
1641 */
1642 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1643 struct ieee80211_supported_band *sband;
1644
1645 sband = local->hw.wiphy->bands[band];
1646 if (sband) {
1647 /* init channel we're on */
1648 local->hw.conf.channel =
1649 local->oper_channel =
1650 local->scan_channel = &sband->channels[0];
1651 break;
1652 }
1653 }
1654
1655 result = wiphy_register(local->hw.wiphy);
1656 if (result < 0)
1657 return result;
1658
1659 /*
1660 * We use the number of queues for feature tests (QoS, HT) internally
1661 * so restrict them appropriately.
1662 */
1663 #ifdef CONFIG_MAC80211_QOS
1664 if (hw->queues > IEEE80211_MAX_QUEUES)
1665 hw->queues = IEEE80211_MAX_QUEUES;
1666 if (hw->ampdu_queues > IEEE80211_MAX_AMPDU_QUEUES)
1667 hw->ampdu_queues = IEEE80211_MAX_AMPDU_QUEUES;
1668 if (hw->queues < 4)
1669 hw->ampdu_queues = 0;
1670 #else
1671 hw->queues = 1;
1672 hw->ampdu_queues = 0;
1673 #endif
1674
1675 mdev = alloc_netdev_mq(sizeof(struct wireless_dev),
1676 "wmaster%d", ether_setup,
1677 ieee80211_num_queues(hw));
1678 if (!mdev)
1679 goto fail_mdev_alloc;
1680
1681 mwdev = netdev_priv(mdev);
1682 mdev->ieee80211_ptr = mwdev;
1683 mwdev->wiphy = local->hw.wiphy;
1684
1685 local->mdev = mdev;
1686
1687 ieee80211_rx_bss_list_init(local);
1688
1689 mdev->hard_start_xmit = ieee80211_master_start_xmit;
1690 mdev->open = ieee80211_master_open;
1691 mdev->stop = ieee80211_master_stop;
1692 mdev->type = ARPHRD_IEEE80211;
1693 mdev->header_ops = &ieee80211_header_ops;
1694 mdev->set_multicast_list = ieee80211_master_set_multicast_list;
1695
1696 name = wiphy_dev(local->hw.wiphy)->driver->name;
1697 local->hw.workqueue = create_freezeable_workqueue(name);
1698 if (!local->hw.workqueue) {
1699 result = -ENOMEM;
1700 goto fail_workqueue;
1701 }
1702
1703 /*
1704 * The hardware needs headroom for sending the frame,
1705 * and we need some headroom for passing the frame to monitor
1706 * interfaces, but never both at the same time.
1707 */
1708 local->tx_headroom = max_t(unsigned int, local->hw.extra_tx_headroom,
1709 sizeof(struct ieee80211_tx_status_rtap_hdr));
1710
1711 debugfs_hw_add(local);
1712
1713 if (local->hw.conf.beacon_int < 10)
1714 local->hw.conf.beacon_int = 100;
1715
1716 local->wstats_flags |= local->hw.flags & (IEEE80211_HW_SIGNAL_UNSPEC |
1717 IEEE80211_HW_SIGNAL_DB |
1718 IEEE80211_HW_SIGNAL_DBM) ?
1719 IW_QUAL_QUAL_UPDATED : IW_QUAL_QUAL_INVALID;
1720 local->wstats_flags |= local->hw.flags & IEEE80211_HW_NOISE_DBM ?
1721 IW_QUAL_NOISE_UPDATED : IW_QUAL_NOISE_INVALID;
1722 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
1723 local->wstats_flags |= IW_QUAL_DBM;
1724
1725 result = sta_info_start(local);
1726 if (result < 0)
1727 goto fail_sta_info;
1728
1729 rtnl_lock();
1730 result = dev_alloc_name(local->mdev, local->mdev->name);
1731 if (result < 0)
1732 goto fail_dev;
1733
1734 memcpy(local->mdev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
1735 SET_NETDEV_DEV(local->mdev, wiphy_dev(local->hw.wiphy));
1736
1737 result = register_netdevice(local->mdev);
1738 if (result < 0)
1739 goto fail_dev;
1740
1741 result = ieee80211_init_rate_ctrl_alg(local,
1742 hw->rate_control_algorithm);
1743 if (result < 0) {
1744 printk(KERN_DEBUG "%s: Failed to initialize rate control "
1745 "algorithm\n", wiphy_name(local->hw.wiphy));
1746 goto fail_rate;
1747 }
1748
1749 result = ieee80211_wep_init(local);
1750
1751 if (result < 0) {
1752 printk(KERN_DEBUG "%s: Failed to initialize wep\n",
1753 wiphy_name(local->hw.wiphy));
1754 goto fail_wep;
1755 }
1756
1757 ieee80211_install_qdisc(local->mdev);
1758
1759 /* add one default STA interface */
1760 result = ieee80211_if_add(local, "wlan%d", NULL,
1761 IEEE80211_IF_TYPE_STA, NULL);
1762 if (result)
1763 printk(KERN_WARNING "%s: Failed to add default virtual iface\n",
1764 wiphy_name(local->hw.wiphy));
1765
1766 rtnl_unlock();
1767
1768 ieee80211_led_init(local);
1769
1770 return 0;
1771
1772 fail_wep:
1773 rate_control_deinitialize(local);
1774 fail_rate:
1775 unregister_netdevice(local->mdev);
1776 local->mdev = NULL;
1777 fail_dev:
1778 rtnl_unlock();
1779 sta_info_stop(local);
1780 fail_sta_info:
1781 debugfs_hw_del(local);
1782 destroy_workqueue(local->hw.workqueue);
1783 fail_workqueue:
1784 if (local->mdev)
1785 free_netdev(local->mdev);
1786 fail_mdev_alloc:
1787 wiphy_unregister(local->hw.wiphy);
1788 return result;
1789 }
1790 EXPORT_SYMBOL(ieee80211_register_hw);
1791
1792 void ieee80211_unregister_hw(struct ieee80211_hw *hw)
1793 {
1794 struct ieee80211_local *local = hw_to_local(hw);
1795
1796 tasklet_kill(&local->tx_pending_tasklet);
1797 tasklet_kill(&local->tasklet);
1798
1799 rtnl_lock();
1800
1801 /*
1802 * At this point, interface list manipulations are fine
1803 * because the driver cannot be handing us frames any
1804 * more and the tasklet is killed.
1805 */
1806
1807 /* First, we remove all virtual interfaces. */
1808 ieee80211_remove_interfaces(local);
1809
1810 /* then, finally, remove the master interface */
1811 unregister_netdevice(local->mdev);
1812
1813 rtnl_unlock();
1814
1815 ieee80211_rx_bss_list_deinit(local);
1816 ieee80211_clear_tx_pending(local);
1817 sta_info_stop(local);
1818 rate_control_deinitialize(local);
1819 debugfs_hw_del(local);
1820
1821 if (skb_queue_len(&local->skb_queue)
1822 || skb_queue_len(&local->skb_queue_unreliable))
1823 printk(KERN_WARNING "%s: skb_queue not empty\n",
1824 wiphy_name(local->hw.wiphy));
1825 skb_queue_purge(&local->skb_queue);
1826 skb_queue_purge(&local->skb_queue_unreliable);
1827
1828 destroy_workqueue(local->hw.workqueue);
1829 wiphy_unregister(local->hw.wiphy);
1830 ieee80211_wep_free(local);
1831 ieee80211_led_exit(local);
1832 free_netdev(local->mdev);
1833 }
1834 EXPORT_SYMBOL(ieee80211_unregister_hw);
1835
1836 void ieee80211_free_hw(struct ieee80211_hw *hw)
1837 {
1838 struct ieee80211_local *local = hw_to_local(hw);
1839
1840 wiphy_free(local->hw.wiphy);
1841 }
1842 EXPORT_SYMBOL(ieee80211_free_hw);
1843
1844 static int __init ieee80211_init(void)
1845 {
1846 struct sk_buff *skb;
1847 int ret;
1848
1849 BUILD_BUG_ON(sizeof(struct ieee80211_tx_info) > sizeof(skb->cb));
1850 BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, driver_data) +
1851 IEEE80211_TX_INFO_DRIVER_DATA_SIZE > sizeof(skb->cb));
1852
1853 ret = rc80211_pid_init();
1854 if (ret)
1855 goto out;
1856
1857 ret = ieee80211_wme_register();
1858 if (ret) {
1859 printk(KERN_DEBUG "ieee80211_init: failed to "
1860 "initialize WME (err=%d)\n", ret);
1861 goto out_cleanup_pid;
1862 }
1863
1864 ieee80211_debugfs_netdev_init();
1865
1866 return 0;
1867
1868 out_cleanup_pid:
1869 rc80211_pid_exit();
1870 out:
1871 return ret;
1872 }
1873
1874 static void __exit ieee80211_exit(void)
1875 {
1876 rc80211_pid_exit();
1877
1878 /*
1879 * For key todo, it'll be empty by now but the work
1880 * might still be scheduled.
1881 */
1882 flush_scheduled_work();
1883
1884 if (mesh_allocated)
1885 ieee80211s_stop();
1886
1887 ieee80211_wme_unregister();
1888 ieee80211_debugfs_netdev_exit();
1889 }
1890
1891
1892 subsys_initcall(ieee80211_init);
1893 module_exit(ieee80211_exit);
1894
1895 MODULE_DESCRIPTION("IEEE 802.11 subsystem");
1896 MODULE_LICENSE("GPL");