1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/kernel.h>
14 #include <linux/skbuff.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/rcupdate.h>
18 #include <net/mac80211.h>
19 #include <net/ieee80211_radiotap.h>
20
21 #include "ieee80211_i.h"
22 #include "led.h"
23 #include "mesh.h"
24 #include "wep.h"
25 #include "wpa.h"
26 #include "tkip.h"
27 #include "wme.h"
28
29 static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
30 struct tid_ampdu_rx *tid_agg_rx,
31 struct sk_buff *skb,
32 u16 mpdu_seq_num,
33 int bar_req);
34 /*
35 * monitor mode reception
36 *
37 * This function cleans up the SKB, i.e. it removes all the stuff
38 * only useful for monitoring.
39 */
40 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
41 struct sk_buff *skb,
42 int rtap_len)
43 {
44 skb_pull(skb, rtap_len);
45
46 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
47 if (likely(skb->len > FCS_LEN))
48 skb_trim(skb, skb->len - FCS_LEN);
49 else {
50 /* driver bug */
51 WARN_ON(1);
52 dev_kfree_skb(skb);
53 skb = NULL;
54 }
55 }
56
57 return skb;
58 }
59
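/*
 * Decide whether ieee80211_rx_monitor() should drop this frame entirely:
 * frames with a bad FCS or PLCP checksum, frames too short to hold a
 * minimal header plus any FCS/radiotap data, and control frames other
 * than PS-Poll and BlockAck request are not worth keeping.
 */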
60 static inline int should_drop_frame(struct ieee80211_rx_status *status,
61 struct sk_buff *skb,
62 int present_fcs_len,
63 int radiotap_len)
64 {
65 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
66
67 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
68 return 1;
69 if (unlikely(skb->len < 16 + present_fcs_len + radiotap_len))
70 return 1;
71 if (ieee80211_is_ctl(hdr->frame_control) &&
72 !ieee80211_is_pspoll(hdr->frame_control) &&
73 !ieee80211_is_back_req(hdr->frame_control))
74 return 1;
75 return 0;
76 }
77
78 static int
79 ieee80211_rx_radiotap_len(struct ieee80211_local *local,
80 struct ieee80211_rx_status *status)
81 {
82 int len;
83
84 /* always present fields */
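/* FLAGS (1) + RATE (1) + CHANNEL (2 + 2) + ANTENNA (1) + RX_FLAGS (2) */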
85 len = sizeof(struct ieee80211_radiotap_header) + 9;
86
87 if (status->flag & RX_FLAG_TSFT)
88 len += 8;
89 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
90 len += 1;
91 if (local->hw.flags & IEEE80211_HW_NOISE_DBM)
92 len += 1;
93
94 if (len & 1) /* padding for RX_FLAGS if necessary */
95 len++;
96
97 /* make sure radiotap starts at a naturally aligned address */
98 if (len % 8)
99 len = roundup(len, 8);
100
101 return len;
102 }
103
104 /*
105 * ieee80211_add_rx_radiotap_header - add radiotap header
106 *
107 * add a radiotap header containing all the fields which the hardware provided.
108 */
109 static void
110 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
111 struct sk_buff *skb,
112 struct ieee80211_rx_status *status,
113 struct ieee80211_rate *rate,
114 int rtap_len)
115 {
116 struct ieee80211_radiotap_header *rthdr;
117 unsigned char *pos;
118
119 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
120 memset(rthdr, 0, rtap_len);
121
122 /* radiotap header, set always present flags */
123 rthdr->it_present =
124 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
125 (1 << IEEE80211_RADIOTAP_CHANNEL) |
126 (1 << IEEE80211_RADIOTAP_ANTENNA) |
127 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
128 rthdr->it_len = cpu_to_le16(rtap_len);
129
130 pos = (unsigned char *)(rthdr+1);
131
132 /* the order of the following fields is important */
133
134 /* IEEE80211_RADIOTAP_TSFT */
135 if (status->flag & RX_FLAG_TSFT) {
136 *(__le64 *)pos = cpu_to_le64(status->mactime);
137 rthdr->it_present |=
138 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
139 pos += 8;
140 }
141
142 /* IEEE80211_RADIOTAP_FLAGS */
143 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
144 *pos |= IEEE80211_RADIOTAP_F_FCS;
145 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
146 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
147 if (status->flag & RX_FLAG_SHORTPRE)
148 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
149 pos++;
150
151 /* IEEE80211_RADIOTAP_RATE */
152 if (status->flag & RX_FLAG_HT) {
153 /*
154 * TODO: add following information into radiotap header once
155 * suitable fields are defined for it:
156 * - MCS index (status->rate_idx)
157 * - HT40 (status->flag & RX_FLAG_40MHZ)
158 * - short-GI (status->flag & RX_FLAG_SHORT_GI)
159 */
160 *pos = 0;
161 } else {
162 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
163 *pos = rate->bitrate / 5;
164 }
165 pos++;
166
167 /* IEEE80211_RADIOTAP_CHANNEL */
168 *(__le16 *)pos = cpu_to_le16(status->freq);
169 pos += 2;
170 if (status->band == IEEE80211_BAND_5GHZ)
171 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
172 IEEE80211_CHAN_5GHZ);
173 else if (rate->flags & IEEE80211_RATE_ERP_G)
174 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
175 IEEE80211_CHAN_2GHZ);
176 else
177 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_CCK |
178 IEEE80211_CHAN_2GHZ);
179 pos += 2;
180
181 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
182 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
183 *pos = status->signal;
184 rthdr->it_present |=
185 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
186 pos++;
187 }
188
189 /* IEEE80211_RADIOTAP_DBM_ANTNOISE */
190 if (local->hw.flags & IEEE80211_HW_NOISE_DBM) {
191 *pos = status->noise;
192 rthdr->it_present |=
193 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
194 pos++;
195 }
196
197 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
198
199 /* IEEE80211_RADIOTAP_ANTENNA */
200 *pos = status->antenna;
201 pos++;
202
203 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
204
205 /* IEEE80211_RADIOTAP_RX_FLAGS */
206 /* ensure 2 byte alignment for the 2 byte field as required */
207 if ((pos - (unsigned char *)rthdr) & 1)
208 pos++;
209 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
210 *(__le16 *)pos |= cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADPLCP);
211 pos += 2;
212 }
213
214 /*
215 * This function copies a received frame to all monitor interfaces and
216 * returns a cleaned-up SKB that no longer includes the FCS nor the
217 * radiotap header the driver might have added.
218 */
219 static struct sk_buff *
220 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
221 struct ieee80211_rx_status *status,
222 struct ieee80211_rate *rate)
223 {
224 struct ieee80211_sub_if_data *sdata;
225 int needed_headroom = 0;
226 struct sk_buff *skb, *skb2;
227 struct net_device *prev_dev = NULL;
228 int present_fcs_len = 0;
229 int rtap_len = 0;
230
231 /*
232 * First, we may need to make a copy of the skb because
233 * (1) we need to modify it for radiotap (if not present), and
234 * (2) the other RX handlers will modify the skb we got.
235 *
236 * We don't need to, of course, if we aren't going to return
237 * the SKB because it has a bad FCS/PLCP checksum.
238 */
239 if (status->flag & RX_FLAG_RADIOTAP)
240 rtap_len = ieee80211_get_radiotap_len(origskb->data);
241 else
242 /* room for the radiotap header based on driver features */
243 needed_headroom = ieee80211_rx_radiotap_len(local, status);
244
245 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
246 present_fcs_len = FCS_LEN;
247
248 if (!local->monitors) {
249 if (should_drop_frame(status, origskb, present_fcs_len,
250 rtap_len)) {
251 dev_kfree_skb(origskb);
252 return NULL;
253 }
254
255 return remove_monitor_info(local, origskb, rtap_len);
256 }
257
258 if (should_drop_frame(status, origskb, present_fcs_len, rtap_len)) {
259 /* only need to expand headroom if necessary */
260 skb = origskb;
261 origskb = NULL;
262
263 /*
264 * This shouldn't trigger often because most devices have an
265 * RX header they pull before we get here, and that should
266 * be big enough for our radiotap information. We should
267 * probably export the length to drivers so that we can have
268 * them allocate enough headroom to start with.
269 */
270 if (skb_headroom(skb) < needed_headroom &&
271 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
272 dev_kfree_skb(skb);
273 return NULL;
274 }
275 } else {
276 /*
277 * Need to make a copy and possibly remove radiotap header
278 * and FCS from the original.
279 */
280 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
281
282 origskb = remove_monitor_info(local, origskb, rtap_len);
283
284 if (!skb)
285 return origskb;
286 }
287
288 /* if necessary, prepend radiotap information */
289 if (!(status->flag & RX_FLAG_RADIOTAP))
290 ieee80211_add_rx_radiotap_header(local, skb, status, rate,
291 needed_headroom);
292
293 skb_reset_mac_header(skb);
294 skb->ip_summed = CHECKSUM_UNNECESSARY;
295 skb->pkt_type = PACKET_OTHERHOST;
296 skb->protocol = htons(ETH_P_802_2);
297
298 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
299 if (!netif_running(sdata->dev))
300 continue;
301
302 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
303 continue;
304
305 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
306 continue;
307
308 if (prev_dev) {
309 skb2 = skb_clone(skb, GFP_ATOMIC);
310 if (skb2) {
311 skb2->dev = prev_dev;
312 netif_rx(skb2);
313 }
314 }
315
316 prev_dev = sdata->dev;
317 sdata->dev->stats.rx_packets++;
318 sdata->dev->stats.rx_bytes += skb->len;
319 }
320
321 if (prev_dev) {
322 skb->dev = prev_dev;
323 netif_rx(skb);
324 } else
325 dev_kfree_skb(skb);
326
327 return origskb;
328 }
329
330
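/*
 * Pull the TID out of the QoS control field (if present), note whether the
 * frame is an A-MSDU, and select the RX queue; frames without a QoS control
 * field are mapped onto the last RX data queue.
 */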
331 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
332 {
333 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
334 int tid;
335
336 /* does the frame have a qos control field? */
337 if (ieee80211_is_data_qos(hdr->frame_control)) {
338 u8 *qc = ieee80211_get_qos_ctl(hdr);
339 /* frame has qos control */
340 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
341 if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
342 rx->flags |= IEEE80211_RX_AMSDU;
343 else
344 rx->flags &= ~IEEE80211_RX_AMSDU;
345 } else {
346 /*
347 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
348 *
349 * Sequence numbers for management frames, QoS data
350 * frames with a broadcast/multicast address in the
351 * Address 1 field, and all non-QoS data frames sent
352 * by QoS STAs are assigned using an additional single
353 * modulo-4096 counter, [...]
354 *
355 * We also use that counter for non-QoS STAs.
356 */
357 tid = NUM_RX_DATA_QUEUES - 1;
358 }
359
360 rx->queue = tid;
361 /* Set skb->priority to the 802.1d tag if the highest-order bit of the TID
362 * is not set. For now, set skb->priority to 0 for other cases. */
363 rx->skb->priority = (tid > 7) ? 0 : tid;
364 }
365
366 /**
367 * DOC: Packet alignment
368 *
369 * Drivers always need to pass packets that are aligned to two-byte boundaries
370 * to the stack.
371 *
372 * Additionally, drivers should, if possible, align the payload data in a way that
373 * guarantees that the contained IP header is aligned to a four-byte
374 * boundary. In the case of regular frames, this simply means aligning the
375 * payload to a four-byte boundary (because either the IP header is directly
376 * contained, or IV/RFC1042 headers that have a length divisible by four are
377 * in front of it).
378 *
379 * With A-MSDU frames, however, the payload data address must be 2 modulo
380 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
381 * push the IP header further back to a multiple of four again. Thankfully, the
382 * specs were sane enough this time around to require padding each A-MSDU
383 * subframe to a length that is a multiple of four.
384 *
385 * Padding like the kind Atheros hardware inserts between the 802.11 header
386 * and the payload is not supported; in that case the driver is required to
387 * move the 802.11 header so that it sits directly in front of the payload.
388 */
389 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
390 {
391 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
392 int hdrlen;
393
394 #ifndef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
395 return;
396 #endif
397
398 if (WARN_ONCE((unsigned long)rx->skb->data & 1,
399 "unaligned packet at 0x%p\n", rx->skb->data))
400 return;
401
402 if (!ieee80211_is_data_present(hdr->frame_control))
403 return;
404
405 hdrlen = ieee80211_hdrlen(hdr->frame_control);
406 if (rx->flags & IEEE80211_RX_AMSDU)
407 hdrlen += ETH_HLEN;
408 WARN_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3,
409 "unaligned IP payload at 0x%p\n", rx->skb->data + hdrlen);
410 }
411
412
413 /* rx handlers */
414
415 static ieee80211_rx_result debug_noinline
416 ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
417 {
418 struct ieee80211_local *local = rx->local;
419 struct sk_buff *skb = rx->skb;
420
421 if (unlikely(local->hw_scanning))
422 return ieee80211_scan_rx(rx->sdata, skb, rx->status);
423
424 if (unlikely(local->sw_scanning)) {
425 /* drop all the other packets during a software scan anyway */
426 if (ieee80211_scan_rx(rx->sdata, skb, rx->status)
427 != RX_QUEUED)
428 dev_kfree_skb(skb);
429 return RX_QUEUED;
430 }
431
432 if (unlikely(rx->flags & IEEE80211_RX_IN_SCAN)) {
433 /* scanning finished during invoking of handlers */
434 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
435 return RX_DROP_UNUSABLE;
436 }
437
438 return RX_CONTINUE;
439 }
440
441
442 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
443 {
444 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
445
446 if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
447 return 0;
448
449 return ieee80211_is_robust_mgmt_frame(hdr);
450 }
451
452
453 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
454 {
455 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
456
457 if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
458 return 0;
459
460 return ieee80211_is_robust_mgmt_frame(hdr);
461 }
462
463
464 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
465 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
466 {
467 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
468 struct ieee80211_mmie *mmie;
469
470 if (skb->len < 24 + sizeof(*mmie) ||
471 !is_multicast_ether_addr(hdr->da))
472 return -1;
473
474 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
475 return -1; /* not a robust management frame */
476
477 mmie = (struct ieee80211_mmie *)
478 (skb->data + skb->len - sizeof(*mmie));
479 if (mmie->element_id != WLAN_EID_MMIE ||
480 mmie->length != sizeof(*mmie) - 2)
481 return -1;
482
483 return le16_to_cpu(mmie->key_id);
484 }
485
486
487 static ieee80211_rx_result
488 ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
489 {
490 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
491 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
492
493 if (ieee80211_is_data(hdr->frame_control)) {
494 if (!ieee80211_has_a4(hdr->frame_control))
495 return RX_DROP_MONITOR;
496 if (memcmp(hdr->addr4, rx->dev->dev_addr, ETH_ALEN) == 0)
497 return RX_DROP_MONITOR;
498 }
499
500 /* If there is not an established peer link and this is not a peer link
501 * establishment frame, beacon or probe, drop the frame.
502 */
503
504 if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) {
505 struct ieee80211_mgmt *mgmt;
506
507 if (!ieee80211_is_mgmt(hdr->frame_control))
508 return RX_DROP_MONITOR;
509
510 if (ieee80211_is_action(hdr->frame_control)) {
511 mgmt = (struct ieee80211_mgmt *)hdr;
512 if (mgmt->u.action.category != PLINK_CATEGORY)
513 return RX_DROP_MONITOR;
514 return RX_CONTINUE;
515 }
516
517 if (ieee80211_is_probe_req(hdr->frame_control) ||
518 ieee80211_is_probe_resp(hdr->frame_control) ||
519 ieee80211_is_beacon(hdr->frame_control))
520 return RX_CONTINUE;
521
522 return RX_DROP_MONITOR;
523
524 }
525
526 #define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))
527
528 if (ieee80211_is_data(hdr->frame_control) &&
529 is_multicast_ether_addr(hdr->addr1) &&
530 mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->sdata))
531 return RX_DROP_MONITOR;
532 #undef msh_h_get
533
534 return RX_CONTINUE;
535 }
536
537
538 static ieee80211_rx_result debug_noinline
539 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
540 {
541 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
542
543 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
544 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
545 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
546 rx->sta->last_seq_ctrl[rx->queue] ==
547 hdr->seq_ctrl)) {
548 if (rx->flags & IEEE80211_RX_RA_MATCH) {
549 rx->local->dot11FrameDuplicateCount++;
550 rx->sta->num_duplicates++;
551 }
552 return RX_DROP_MONITOR;
553 } else
554 rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl;
555 }
556
557 if (unlikely(rx->skb->len < 16)) {
558 I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
559 return RX_DROP_MONITOR;
560 }
561
562 /* Drop disallowed frame classes based on STA auth/assoc state;
563 * IEEE 802.11, Chap 5.5.
564 *
565 * mac80211 filters only based on association state, i.e. it drops
566 * Class 3 frames from not associated stations. hostapd sends
567 * deauth/disassoc frames when needed. In addition, hostapd is
568 * responsible for filtering on both auth and assoc states.
569 */
570
571 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
572 return ieee80211_rx_mesh_check(rx);
573
574 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
575 ieee80211_is_pspoll(hdr->frame_control)) &&
576 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
577 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
578 if ((!ieee80211_has_fromds(hdr->frame_control) &&
579 !ieee80211_has_tods(hdr->frame_control) &&
580 ieee80211_is_data(hdr->frame_control)) ||
581 !(rx->flags & IEEE80211_RX_RA_MATCH)) {
582 /* Drop IBSS frames and frames for other hosts
583 * silently. */
584 return RX_DROP_MONITOR;
585 }
586
587 return RX_DROP_MONITOR;
588 }
589
590 return RX_CONTINUE;
591 }
592
593
594 static ieee80211_rx_result debug_noinline
595 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
596 {
597 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
598 int keyidx;
599 int hdrlen;
600 ieee80211_rx_result result = RX_DROP_UNUSABLE;
601 struct ieee80211_key *stakey = NULL;
602 int mmie_keyidx = -1;
603
604 /*
605 * Key selection 101
606 *
607 * There are four types of keys:
608 * - GTK (group keys)
609 * - IGTK (group keys for management frames)
610 * - PTK (pairwise keys)
611 * - STK (station-to-station pairwise keys)
612 *
613 * When selecting a key, we have to distinguish between multicast
614 * (including broadcast) and unicast frames, the latter can only
615 * use PTKs and STKs while the former always use GTKs and IGTKs.
616 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
617 * unicast frames can also use key indices like GTKs. Hence, if we
618 * don't have a PTK/STK we check the key index for a WEP key.
619 *
620 * Note that in a regular BSS, multicast frames are sent by the
621 * AP only; associated stations unicast the frame to the AP first,
622 * which then multicasts it on their behalf.
623 *
624 * There is also a slight problem in IBSS mode: GTKs are negotiated
625 * with each station, which is something we don't currently handle.
626 * The spec seems to expect that one negotiates the same key with
627 * every station but there's no such requirement; VLANs could be
628 * possible.
629 */
630
631 if (!ieee80211_has_protected(hdr->frame_control)) {
632 if (!ieee80211_is_mgmt(hdr->frame_control) ||
633 rx->sta == NULL || !test_sta_flags(rx->sta, WLAN_STA_MFP))
634 return RX_CONTINUE;
635 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
636 if (mmie_keyidx < 0)
637 return RX_CONTINUE;
638 }
639
640 /*
641 * No point in finding a key and decrypting if the frame is neither
642 * addressed to us nor a multicast frame.
643 */
644 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
645 return RX_CONTINUE;
646
647 if (rx->sta)
648 stakey = rcu_dereference(rx->sta->key);
649
650 if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
651 rx->key = stakey;
652 } else if (mmie_keyidx >= 0) {
653 /* Broadcast/multicast robust management frame / BIP */
654 if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
655 (rx->status->flag & RX_FLAG_IV_STRIPPED))
656 return RX_CONTINUE;
657
658 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
659 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
660 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
661 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
662 } else {
663 /*
664 * The device doesn't give us the IV so we won't be
665 * able to look up the key. That's ok though, we
666 * don't need to decrypt the frame, we just won't
667 * be able to keep statistics accurate.
668 * Except for key threshold notifications, should
669 * we somehow allow the driver to tell us which key
670 * the hardware used if this flag is set?
671 */
672 if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
673 (rx->status->flag & RX_FLAG_IV_STRIPPED))
674 return RX_CONTINUE;
675
676 hdrlen = ieee80211_hdrlen(hdr->frame_control);
677
678 if (rx->skb->len < 8 + hdrlen)
679 return RX_DROP_UNUSABLE; /* TODO: count this? */
680
681 /*
682 * no need to call ieee80211_wep_get_keyidx,
683 * it verifies a bunch of things we've done already
684 */
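/* the key index sits in the two most significant bits of the
 * fourth IV byte for WEP, TKIP and CCMP alike */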
685 keyidx = rx->skb->data[hdrlen + 3] >> 6;
686
687 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
688
689 /*
690 * RSNA-protected unicast frames should always be sent with
691 * pairwise or station-to-station keys, but for WEP we allow
692 * using a key index as well.
693 */
694 if (rx->key && rx->key->conf.alg != ALG_WEP &&
695 !is_multicast_ether_addr(hdr->addr1))
696 rx->key = NULL;
697 }
698
699 if (rx->key) {
700 rx->key->tx_rx_count++;
701 /* TODO: add threshold stuff again */
702 } else {
703 return RX_DROP_MONITOR;
704 }
705
706 /* Check for weak IVs if possible */
707 if (rx->sta && rx->key->conf.alg == ALG_WEP &&
708 ieee80211_is_data(hdr->frame_control) &&
709 (!(rx->status->flag & RX_FLAG_IV_STRIPPED) ||
710 !(rx->status->flag & RX_FLAG_DECRYPTED)) &&
711 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
712 rx->sta->wep_weak_iv_count++;
713
714 switch (rx->key->conf.alg) {
715 case ALG_WEP:
716 result = ieee80211_crypto_wep_decrypt(rx);
717 break;
718 case ALG_TKIP:
719 result = ieee80211_crypto_tkip_decrypt(rx);
720 break;
721 case ALG_CCMP:
722 result = ieee80211_crypto_ccmp_decrypt(rx);
723 break;
724 case ALG_AES_CMAC:
725 result = ieee80211_crypto_aes_cmac_decrypt(rx);
726 break;
727 }
728
729 /* either the frame has been decrypted or will be dropped */
730 rx->status->flag |= RX_FLAG_DECRYPTED;
731
732 return result;
733 }
734
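/*
 * While we are PS-polling the AP, keep requesting frames as long as the AP
 * sets the MoreData bit on delivered data frames; stop polling once a frame
 * arrives with MoreData cleared.
 */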
735 static ieee80211_rx_result debug_noinline
736 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
737 {
738 struct ieee80211_local *local;
739 struct ieee80211_hdr *hdr;
740 struct sk_buff *skb;
741
742 local = rx->local;
743 skb = rx->skb;
744 hdr = (struct ieee80211_hdr *) skb->data;
745
746 if (!local->pspolling)
747 return RX_CONTINUE;
748
749 if (!ieee80211_has_fromds(hdr->frame_control))
750 /* this is not from AP */
751 return RX_CONTINUE;
752
753 if (!ieee80211_is_data(hdr->frame_control))
754 return RX_CONTINUE;
755
756 if (!ieee80211_has_moredata(hdr->frame_control)) {
757 /* AP has no more frames buffered for us */
758 local->pspolling = false;
759 return RX_CONTINUE;
760 }
761
762 /* more data bit is set, let's request a new frame from the AP */
763 ieee80211_send_pspoll(local, rx->sdata);
764
765 return RX_CONTINUE;
766 }
767
768 static void ap_sta_ps_start(struct sta_info *sta)
769 {
770 struct ieee80211_sub_if_data *sdata = sta->sdata;
771 struct ieee80211_local *local = sdata->local;
772
773 atomic_inc(&sdata->bss->num_sta_ps);
774 set_and_clear_sta_flags(sta, WLAN_STA_PS, WLAN_STA_PSPOLL);
775 if (local->ops->sta_notify)
776 local->ops->sta_notify(local_to_hw(local), &sdata->vif,
777 STA_NOTIFY_SLEEP, &sta->sta);
778 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
779 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
780 sdata->dev->name, sta->sta.addr, sta->sta.aid);
781 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
782 }
783
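/*
 * The station left power save mode: update state, notify the driver and
 * push all frames that were buffered or filtered while it slept back into
 * the TX path. Returns the number of frames sent.
 */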
784 static int ap_sta_ps_end(struct sta_info *sta)
785 {
786 struct ieee80211_sub_if_data *sdata = sta->sdata;
787 struct ieee80211_local *local = sdata->local;
788 struct sk_buff *skb;
789 int sent = 0;
790
791 atomic_dec(&sdata->bss->num_sta_ps);
792
793 clear_sta_flags(sta, WLAN_STA_PS | WLAN_STA_PSPOLL);
794 if (local->ops->sta_notify)
795 local->ops->sta_notify(local_to_hw(local), &sdata->vif,
796 STA_NOTIFY_AWAKE, &sta->sta);
797
798 if (!skb_queue_empty(&sta->ps_tx_buf))
799 sta_info_clear_tim_bit(sta);
800
801 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
802 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
803 sdata->dev->name, sta->sta.addr, sta->sta.aid);
804 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
805
806 /* Send all buffered frames to the station */
807 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) {
808 sent++;
809 skb->requeue = 1;
810 dev_queue_xmit(skb);
811 }
812 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
813 local->total_ps_buffered--;
814 sent++;
815 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
816 printk(KERN_DEBUG "%s: STA %pM aid %d send PS frame "
817 "since STA not sleeping anymore\n", sdata->dev->name,
818 sta->sta.addr, sta->sta.aid);
819 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
820 skb->requeue = 1;
821 dev_queue_xmit(skb);
822 }
823
824 return sent;
825 }
826
827 static ieee80211_rx_result debug_noinline
828 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
829 {
830 struct sta_info *sta = rx->sta;
831 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
832
833 if (!sta)
834 return RX_CONTINUE;
835
836 /* Update last_rx only for IBSS packets which are for the current
837 * BSSID to avoid keeping the current IBSS network alive in cases where
838 * other STAs are using a different BSSID. */
839 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
840 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
841 NL80211_IFTYPE_ADHOC);
842 if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0)
843 sta->last_rx = jiffies;
844 } else
845 if (!is_multicast_ether_addr(hdr->addr1) ||
846 rx->sdata->vif.type == NL80211_IFTYPE_STATION) {
847 /* Update last_rx only for unicast frames in order to prevent
848 * the Probe Request frames (the only broadcast frames from a
849 * STA in infrastructure mode) from keeping a connection alive.
850 * Mesh beacons will update last_rx when they are found to
851 * match the current local configuration when processed.
852 */
853 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
854 ieee80211_is_beacon(hdr->frame_control)) {
855 rx->sdata->u.mgd.last_beacon = jiffies;
856 } else
857 sta->last_rx = jiffies;
858 }
859
860 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
861 return RX_CONTINUE;
862
863 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
864 ieee80211_sta_rx_notify(rx->sdata, hdr);
865
866 sta->rx_fragments++;
867 sta->rx_bytes += rx->skb->len;
868 sta->last_signal = rx->status->signal;
869 sta->last_qual = rx->status->qual;
870 sta->last_noise = rx->status->noise;
871
872 /*
873 * Change STA power saving mode only at the end of a frame
874 * exchange sequence.
875 */
876 if (!ieee80211_has_morefrags(hdr->frame_control) &&
877 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
878 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
879 if (test_sta_flags(sta, WLAN_STA_PS)) {
880 /*
881 * Ignore doze->wake transitions that are
882 * indicated by non-data frames, the standard
883 * is unclear here, but for example going to
884 * PS mode and then scanning would cause a
885 * doze->wake transition for the probe request,
886 * and that is clearly undesirable.
887 */
888 if (ieee80211_is_data(hdr->frame_control) &&
889 !ieee80211_has_pm(hdr->frame_control))
890 rx->sent_ps_buffered += ap_sta_ps_end(sta);
891 } else {
892 if (ieee80211_has_pm(hdr->frame_control))
893 ap_sta_ps_start(sta);
894 }
895 }
896
897 /* Drop data::nullfunc frames silently, since they are used only to
898 * control station power saving mode. */
899 if (ieee80211_is_nullfunc(hdr->frame_control)) {
900 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
901 /* Update counter and free packet here to avoid counting this
902 * as a dropped packet. */
903 sta->rx_packets++;
904 dev_kfree_skb(rx->skb);
905 return RX_QUEUED;
906 }
907
908 return RX_CONTINUE;
909 } /* ieee80211_rx_h_sta_process */
910
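/*
 * Grab the next fragment cache slot (round robin) for the first fragment
 * of a new frame, purging whatever old entry might still occupy it.
 */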
911 static inline struct ieee80211_fragment_entry *
912 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
913 unsigned int frag, unsigned int seq, int rx_queue,
914 struct sk_buff **skb)
915 {
916 struct ieee80211_fragment_entry *entry;
917 int idx;
918
919 idx = sdata->fragment_next;
920 entry = &sdata->fragments[sdata->fragment_next++];
921 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
922 sdata->fragment_next = 0;
923
924 if (!skb_queue_empty(&entry->skb_list)) {
925 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
926 struct ieee80211_hdr *hdr =
927 (struct ieee80211_hdr *) entry->skb_list.next->data;
928 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
929 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
930 "addr1=%pM addr2=%pM\n",
931 sdata->dev->name, idx,
932 jiffies - entry->first_frag_time, entry->seq,
933 entry->last_frag, hdr->addr1, hdr->addr2);
934 #endif
935 __skb_queue_purge(&entry->skb_list);
936 }
937
938 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
939 *skb = NULL;
940 entry->first_frag_time = jiffies;
941 entry->seq = seq;
942 entry->rx_queue = rx_queue;
943 entry->last_frag = frag;
944 entry->ccmp = 0;
945 entry->extra_len = 0;
946
947 return entry;
948 }
949
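/*
 * Find the pending fragment cache entry this fragment continues, matching
 * on sequence number, RX queue, expected fragment number, frame type and
 * addresses; entries older than two seconds are purged instead.
 */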
950 static inline struct ieee80211_fragment_entry *
951 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
952 unsigned int frag, unsigned int seq,
953 int rx_queue, struct ieee80211_hdr *hdr)
954 {
955 struct ieee80211_fragment_entry *entry;
956 int i, idx;
957
958 idx = sdata->fragment_next;
959 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
960 struct ieee80211_hdr *f_hdr;
961
962 idx--;
963 if (idx < 0)
964 idx = IEEE80211_FRAGMENT_MAX - 1;
965
966 entry = &sdata->fragments[idx];
967 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
968 entry->rx_queue != rx_queue ||
969 entry->last_frag + 1 != frag)
970 continue;
971
972 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
973
974 /*
975 * Check ftype and addresses are equal, else check next fragment
976 */
977 if (((hdr->frame_control ^ f_hdr->frame_control) &
978 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
979 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
980 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
981 continue;
982
983 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
984 __skb_queue_purge(&entry->skb_list);
985 continue;
986 }
987 return entry;
988 }
989
990 return NULL;
991 }
992
993 static ieee80211_rx_result debug_noinline
994 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
995 {
996 struct ieee80211_hdr *hdr;
997 u16 sc;
998 __le16 fc;
999 unsigned int frag, seq;
1000 struct ieee80211_fragment_entry *entry;
1001 struct sk_buff *skb;
1002
1003 hdr = (struct ieee80211_hdr *)rx->skb->data;
1004 fc = hdr->frame_control;
1005 sc = le16_to_cpu(hdr->seq_ctrl);
1006 frag = sc & IEEE80211_SCTL_FRAG;
1007
1008 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
1009 (rx->skb)->len < 24 ||
1010 is_multicast_ether_addr(hdr->addr1))) {
1011 /* not fragmented */
1012 goto out;
1013 }
1014 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1015
1016 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
1017
1018 if (frag == 0) {
1019 /* This is the first fragment of a new frame. */
1020 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
1021 rx->queue, &(rx->skb));
1022 if (rx->key && rx->key->conf.alg == ALG_CCMP &&
1023 ieee80211_has_protected(fc)) {
1024 /* Store CCMP PN so that we can verify that the next
1025 * fragment has a sequential PN value. */
1026 entry->ccmp = 1;
1027 memcpy(entry->last_pn,
1028 rx->key->u.ccmp.rx_pn[rx->queue],
1029 CCMP_PN_LEN);
1030 }
1031 return RX_QUEUED;
1032 }
1033
1034 /* This is a fragment for a frame that should already be pending in
1035 * the fragment cache. Add this fragment to the end of the pending entry.
1036 */
1037 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr);
1038 if (!entry) {
1039 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1040 return RX_DROP_MONITOR;
1041 }
1042
1043 /* Verify that MPDUs within one MSDU have sequential PN values.
1044 * (IEEE 802.11i, 8.3.3.4.5) */
1045 if (entry->ccmp) {
1046 int i;
1047 u8 pn[CCMP_PN_LEN], *rpn;
1048 if (!rx->key || rx->key->conf.alg != ALG_CCMP)
1049 return RX_DROP_UNUSABLE;
1050 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
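/* the PN is stored big-endian, so increment it starting from the
 * least significant byte and stop once there is no carry */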
1051 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
1052 pn[i]++;
1053 if (pn[i])
1054 break;
1055 }
1056 rpn = rx->key->u.ccmp.rx_pn[rx->queue];
1057 if (memcmp(pn, rpn, CCMP_PN_LEN))
1058 return RX_DROP_UNUSABLE;
1059 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
1060 }
1061
1062 skb_pull(rx->skb, ieee80211_hdrlen(fc));
1063 __skb_queue_tail(&entry->skb_list, rx->skb);
1064 entry->last_frag = frag;
1065 entry->extra_len += rx->skb->len;
1066 if (ieee80211_has_morefrags(fc)) {
1067 rx->skb = NULL;
1068 return RX_QUEUED;
1069 }
1070
1071 rx->skb = __skb_dequeue(&entry->skb_list);
1072 if (skb_tailroom(rx->skb) < entry->extra_len) {
1073 I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
1074 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
1075 GFP_ATOMIC))) {
1076 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1077 __skb_queue_purge(&entry->skb_list);
1078 return RX_DROP_UNUSABLE;
1079 }
1080 }
1081 while ((skb = __skb_dequeue(&entry->skb_list))) {
1082 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
1083 dev_kfree_skb(skb);
1084 }
1085
1086 /* Complete frame has been reassembled - process it now */
1087 rx->flags |= IEEE80211_RX_FRAGMENTED;
1088
1089 out:
1090 if (rx->sta)
1091 rx->sta->rx_packets++;
1092 if (is_multicast_ether_addr(hdr->addr1))
1093 rx->local->dot11MulticastReceivedFrameCount++;
1094 else
1095 ieee80211_led_rx(rx->local);
1096 return RX_CONTINUE;
1097 }
1098
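/*
 * Handle a PS-Poll from an associated station in AP/AP_VLAN mode: release
 * one buffered frame (filtered frames first) and use its MoreData bit to
 * tell the station whether anything else remains buffered for it.
 */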
1099 static ieee80211_rx_result debug_noinline
1100 ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1101 {
1102 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1103 struct sk_buff *skb;
1104 int no_pending_pkts;
1105 __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
1106
1107 if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
1108 !(rx->flags & IEEE80211_RX_RA_MATCH)))
1109 return RX_CONTINUE;
1110
1111 if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
1112 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1113 return RX_DROP_UNUSABLE;
1114
1115 skb = skb_dequeue(&rx->sta->tx_filtered);
1116 if (!skb) {
1117 skb = skb_dequeue(&rx->sta->ps_tx_buf);
1118 if (skb)
1119 rx->local->total_ps_buffered--;
1120 }
1121 no_pending_pkts = skb_queue_empty(&rx->sta->tx_filtered) &&
1122 skb_queue_empty(&rx->sta->ps_tx_buf);
1123
1124 if (skb) {
1125 struct ieee80211_hdr *hdr =
1126 (struct ieee80211_hdr *) skb->data;
1127
1128 /*
1129 * Tell TX path to send one frame even though the STA may
1130 * still remain in PS mode after this frame exchange.
1131 */
1132 set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
1133
1134 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1135 printk(KERN_DEBUG "STA %pM aid %d: PS Poll (entries after %d)\n",
1136 rx->sta->sta.addr, rx->sta->sta.aid,
1137 skb_queue_len(&rx->sta->ps_tx_buf));
1138 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1139
1140 /* Use MoreData flag to indicate whether there are more
1141 * buffered frames for this STA */
1142 if (no_pending_pkts)
1143 hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
1144 else
1145 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1146
1147 dev_queue_xmit(skb);
1148
1149 if (no_pending_pkts)
1150 sta_info_clear_tim_bit(rx->sta);
1151 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1152 } else if (!rx->sent_ps_buffered) {
1153 /*
1154 * FIXME: This can be the result of a race condition between
1155 * us expiring a frame and the station polling for it.
1156 * Should we send it a null-func frame indicating we
1157 * have nothing buffered for it?
1158 */
1159 printk(KERN_DEBUG "%s: STA %pM sent PS Poll even "
1160 "though there are no buffered frames for it\n",
1161 rx->dev->name, rx->sta->sta.addr);
1162 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1163 }
1164
1165 /* Free PS Poll skb here instead of returning RX_DROP that would
1166 * count as a dropped frame. */
1167 dev_kfree_skb(rx->skb);
1168
1169 return RX_QUEUED;
1170 }
1171
1172 static ieee80211_rx_result debug_noinline
1173 ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1174 {
1175 u8 *data = rx->skb->data;
1176 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
1177
1178 if (!ieee80211_is_data_qos(hdr->frame_control))
1179 return RX_CONTINUE;
1180
1181 /* remove the qos control field, update frame type and meta-data */
1182 memmove(data + IEEE80211_QOS_CTL_LEN, data,
1183 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
1184 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1185 /* change frame type to non QOS */
1186 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1187
1188 return RX_CONTINUE;
1189 }
1190
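/*
 * 802.1X port control: only accept frames from a station once it has been
 * marked authorized, i.e. the controlled port is open.
 */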
1191 static int
1192 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1193 {
1194 if (unlikely(!rx->sta ||
1195 !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
1196 return -EACCES;
1197
1198 return 0;
1199 }
1200
1201 static int
1202 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1203 {
1204 /*
1205 * Pass through unencrypted frames if the hardware has
1206 * decrypted them already.
1207 */
1208 if (rx->status->flag & RX_FLAG_DECRYPTED)
1209 return 0;
1210
1211 /* Drop unencrypted frames if key is set. */
1212 if (unlikely(!ieee80211_has_protected(fc) &&
1213 !ieee80211_is_nullfunc(fc) &&
1214 (!ieee80211_is_mgmt(fc) ||
1215 (ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1216 rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP))) &&
1217 (rx->key || rx->sdata->drop_unencrypted)))
1218 return -EACCES;
1219 /* BIP does not use Protected field, so need to check MMIE */
1220 if (unlikely(rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP) &&
1221 ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
1222 ieee80211_get_mmie_keyidx(rx->skb) < 0 &&
1223 (rx->key || rx->sdata->drop_unencrypted)))
1224 return -EACCES;
1225
1226 return 0;
1227 }
1228
1229 static int
1230 ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1231 {
1232 struct net_device *dev = rx->dev;
1233 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
1234 u16 hdrlen, ethertype;
1235 u8 *payload;
1236 u8 dst[ETH_ALEN];
1237 u8 src[ETH_ALEN] __aligned(2);
1238 struct sk_buff *skb = rx->skb;
1239 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1240
1241 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1242 return -1;
1243
1244 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1245
1246 /* convert IEEE 802.11 header + possible LLC headers into Ethernet
1247 * header
1248 * IEEE 802.11 address fields:
1249 * ToDS FromDS Addr1 Addr2 Addr3 Addr4
1250 * 0 0 DA SA BSSID n/a
1251 * 0 1 DA BSSID SA n/a
1252 * 1 0 BSSID SA DA n/a
1253 * 1 1 RA TA DA SA
1254 */
1255 memcpy(dst, ieee80211_get_DA(hdr), ETH_ALEN);
1256 memcpy(src, ieee80211_get_SA(hdr), ETH_ALEN);
1257
1258 switch (hdr->frame_control &
1259 cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
1260 case cpu_to_le16(IEEE80211_FCTL_TODS):
1261 if (unlikely(sdata->vif.type != NL80211_IFTYPE_AP &&
1262 sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1263 return -1;
1264 break;
1265 case cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
1266 if (unlikely(sdata->vif.type != NL80211_IFTYPE_WDS &&
1267 sdata->vif.type != NL80211_IFTYPE_MESH_POINT))
1268 return -1;
1269 if (ieee80211_vif_is_mesh(&sdata->vif)) {
1270 struct ieee80211s_hdr *meshdr = (struct ieee80211s_hdr *)
1271 (skb->data + hdrlen);
1272 hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
1273 if (meshdr->flags & MESH_FLAGS_AE_A5_A6) {
1274 memcpy(dst, meshdr->eaddr1, ETH_ALEN);
1275 memcpy(src, meshdr->eaddr2, ETH_ALEN);
1276 }
1277 }
1278 break;
1279 case cpu_to_le16(IEEE80211_FCTL_FROMDS):
1280 if (sdata->vif.type != NL80211_IFTYPE_STATION ||
1281 (is_multicast_ether_addr(dst) &&
1282 !compare_ether_addr(src, dev->dev_addr)))
1283 return -1;
1284 break;
1285 case cpu_to_le16(0):
1286 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
1287 return -1;
1288 break;
1289 }
1290
1291 if (unlikely(skb->len - hdrlen < 8))
1292 return -1;
1293
1294 payload = skb->data + hdrlen;
1295 ethertype = (payload[6] << 8) | payload[7];
1296
1297 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
1298 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1299 compare_ether_addr(payload, bridge_tunnel_header) == 0)) {
1300 /* remove RFC1042 or Bridge-Tunnel encapsulation and
1301 * replace EtherType */
1302 skb_pull(skb, hdrlen + 6);
1303 memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
1304 memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
1305 } else {
1306 struct ethhdr *ehdr;
1307 __be16 len;
1308
1309 skb_pull(skb, hdrlen);
1310 len = htons(skb->len);
1311 ehdr = (struct ethhdr *) skb_push(skb, sizeof(struct ethhdr));
1312 memcpy(ehdr->h_dest, dst, ETH_ALEN);
1313 memcpy(ehdr->h_source, src, ETH_ALEN);
1314 ehdr->h_proto = len;
1315 }
1316 return 0;
1317 }
1318
1319 /*
1320 * requires that rx->skb is a frame with ethernet header
1321 */
1322 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1323 {
1324 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1325 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1326 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1327
1328 /*
1329 * Allow EAPOL frames to us/the PAE group address regardless
1330 * of whether the frame was encrypted or not.
1331 */
1332 if (ehdr->h_proto == htons(ETH_P_PAE) &&
1333 (compare_ether_addr(ehdr->h_dest, rx->dev->dev_addr) == 0 ||
1334 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1335 return true;
1336
1337 if (ieee80211_802_1x_port_control(rx) ||
1338 ieee80211_drop_unencrypted(rx, fc))
1339 return false;
1340
1341 return true;
1342 }
1343
1344 /*
1345 * requires that rx->skb is a frame with ethernet header
1346 */
1347 static void
1348 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1349 {
1350 struct net_device *dev = rx->dev;
1351 struct ieee80211_local *local = rx->local;
1352 struct sk_buff *skb, *xmit_skb;
1353 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1354 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1355 struct sta_info *dsta;
1356
1357 skb = rx->skb;
1358 xmit_skb = NULL;
1359
1360 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
1361 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1362 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
1363 (rx->flags & IEEE80211_RX_RA_MATCH)) {
1364 if (is_multicast_ether_addr(ehdr->h_dest)) {
1365 /*
1366 * send multicast frames both to higher layers in
1367 * local net stack and back to the wireless medium
1368 */
1369 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1370 if (!xmit_skb && net_ratelimit())
1371 printk(KERN_DEBUG "%s: failed to clone "
1372 "multicast frame\n", dev->name);
1373 } else {
1374 dsta = sta_info_get(local, skb->data);
1375 if (dsta && dsta->sdata->dev == dev) {
1376 /*
1377 * The destination station is associated to
1378 * this AP (in this VLAN), so send the frame
1379 * directly to it and do not pass it to local
1380 * net stack.
1381 */
1382 xmit_skb = skb;
1383 skb = NULL;
1384 }
1385 }
1386 }
1387
1388 if (skb) {
1389 int align __maybe_unused;
1390
1391 #if defined(CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT) || !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
1392 /*
1393 * 'align' will only take the values 0 or 2 here
1394 * since all frames are required to be aligned
1395 * to 2-byte boundaries when being passed to
1396 * mac80211. That also explains the __skb_push()
1397 * below.
1398 */
1399 align = (unsigned long)skb->data & 3;
1400 if (align) {
1401 if (WARN_ON(skb_headroom(skb) < 3)) {
1402 dev_kfree_skb(skb);
1403 skb = NULL;
1404 } else {
1405 u8 *data = skb->data;
1406 size_t len = skb->len;
1407 u8 *new = __skb_push(skb, align);
1408 memmove(new, data, len);
1409 __skb_trim(skb, len);
1410 }
1411 }
1412 #endif
1413
1414 if (skb) {
1415 /* deliver to local stack */
1416 skb->protocol = eth_type_trans(skb, dev);
1417 memset(skb->cb, 0, sizeof(skb->cb));
1418 netif_rx(skb);
1419 }
1420 }
1421
1422 if (xmit_skb) {
1423 /* send to wireless media */
1424 xmit_skb->protocol = htons(ETH_P_802_3);
1425 skb_reset_network_header(xmit_skb);
1426 skb_reset_mac_header(xmit_skb);
1427 dev_queue_xmit(xmit_skb);
1428 }
1429 }
1430
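/*
 * Split an A-MSDU into its individual subframes, convert each one into an
 * 802.3 frame and deliver it; the last subframe reuses the original skb.
 */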
1431 static ieee80211_rx_result debug_noinline
1432 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1433 {
1434 struct net_device *dev = rx->dev;
1435 struct ieee80211_local *local = rx->local;
1436 u16 ethertype;
1437 u8 *payload;
1438 struct sk_buff *skb = rx->skb, *frame = NULL;
1439 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1440 __le16 fc = hdr->frame_control;
1441 const struct ethhdr *eth;
1442 int remaining, err;
1443 u8 dst[ETH_ALEN];
1444 u8 src[ETH_ALEN];
1445
1446 if (unlikely(!ieee80211_is_data(fc)))
1447 return RX_CONTINUE;
1448
1449 if (unlikely(!ieee80211_is_data_present(fc)))
1450 return RX_DROP_MONITOR;
1451
1452 if (!(rx->flags & IEEE80211_RX_AMSDU))
1453 return RX_CONTINUE;
1454
1455 err = ieee80211_data_to_8023(rx);
1456 if (unlikely(err))
1457 return RX_DROP_UNUSABLE;
1458
1459 skb->dev = dev;
1460
1461 dev->stats.rx_packets++;
1462 dev->stats.rx_bytes += skb->len;
1463
1464 /* skip the wrapping header */
1465 eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
1466 if (!eth)
1467 return RX_DROP_UNUSABLE;
1468
1469 while (skb != frame) {
1470 u8 padding;
1471 __be16 len = eth->h_proto;
1472 unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
1473
1474 remaining = skb->len;
1475 memcpy(dst, eth->h_dest, ETH_ALEN);
1476 memcpy(src, eth->h_source, ETH_ALEN);
1477
1478 padding = ((4 - subframe_len) & 0x3);
1479 /* the last MSDU has no padding */
1480 if (subframe_len > remaining)
1481 return RX_DROP_UNUSABLE;
1482
1483 skb_pull(skb, sizeof(struct ethhdr));
1484 /* if last subframe reuse skb */
1485 if (remaining <= subframe_len + padding)
1486 frame = skb;
1487 else {
1488 /*
1489 * Allocate and reserve two bytes more for payload
1490 * alignment since sizeof(struct ethhdr) is 14.
1491 */
1492 frame = dev_alloc_skb(
1493 ALIGN(local->hw.extra_tx_headroom, 4) +
1494 subframe_len + 2);
1495
1496 if (frame == NULL)
1497 return RX_DROP_UNUSABLE;
1498
1499 skb_reserve(frame,
1500 ALIGN(local->hw.extra_tx_headroom, 4) +
1501 sizeof(struct ethhdr) + 2);
1502 memcpy(skb_put(frame, ntohs(len)), skb->data,
1503 ntohs(len));
1504
1505 eth = (struct ethhdr *) skb_pull(skb, ntohs(len) +
1506 padding);
1507 if (!eth) {
1508 dev_kfree_skb(frame);
1509 return RX_DROP_UNUSABLE;
1510 }
1511 }
1512
1513 skb_reset_network_header(frame);
1514 frame->dev = dev;
1515 frame->priority = skb->priority;
1516 rx->skb = frame;
1517
1518 payload = frame->data;
1519 ethertype = (payload[6] << 8) | payload[7];
1520
1521 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
1522 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1523 compare_ether_addr(payload,
1524 bridge_tunnel_header) == 0)) {
1525 /* remove RFC1042 or Bridge-Tunnel
1526 * encapsulation and replace EtherType */
1527 skb_pull(frame, 6);
1528 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1529 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1530 } else {
1531 memcpy(skb_push(frame, sizeof(__be16)),
1532 &len, sizeof(__be16));
1533 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1534 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1535 }
1536
1537 if (!ieee80211_frame_allowed(rx, fc)) {
1538 if (skb == frame) /* last frame */
1539 return RX_DROP_UNUSABLE;
1540 dev_kfree_skb(frame);
1541 continue;
1542 }
1543
1544 ieee80211_deliver_skb(rx);
1545 }
1546
1547 return RX_QUEUED;
1548 }
1549
1550 #ifdef CONFIG_MAC80211_MESH
1551 static ieee80211_rx_result
1552 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1553 {
1554 struct ieee80211_hdr *hdr;
1555 struct ieee80211s_hdr *mesh_hdr;
1556 unsigned int hdrlen;
1557 struct sk_buff *skb = rx->skb, *fwd_skb;
1558
1559 hdr = (struct ieee80211_hdr *) skb->data;
1560 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1561 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1562
1563 if (!ieee80211_is_data(hdr->frame_control))
1564 return RX_CONTINUE;
1565
1566 if (!mesh_hdr->ttl)
1567 /* illegal frame */
1568 return RX_DROP_MONITOR;
1569
1570 if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6){
1571 struct ieee80211_sub_if_data *sdata;
1572 struct mesh_path *mppath;
1573
1574 sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1575 rcu_read_lock();
1576 mppath = mpp_path_lookup(mesh_hdr->eaddr2, sdata);
1577 if (!mppath) {
1578 mpp_path_add(mesh_hdr->eaddr2, hdr->addr4, sdata);
1579 } else {
1580 spin_lock_bh(&mppath->state_lock);
1581 mppath->exp_time = jiffies;
1582 if (compare_ether_addr(mppath->mpp, hdr->addr4) != 0)
1583 memcpy(mppath->mpp, hdr->addr4, ETH_ALEN);
1584 spin_unlock_bh(&mppath->state_lock);
1585 }
1586 rcu_read_unlock();
1587 }
1588
1589 if (compare_ether_addr(rx->dev->dev_addr, hdr->addr3) == 0)
1590 return RX_CONTINUE;
1591
1592 mesh_hdr->ttl--;
1593
1594 if (rx->flags & IEEE80211_RX_RA_MATCH) {
1595 if (!mesh_hdr->ttl)
1596 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
1597 dropped_frames_ttl);
1598 else {
1599 struct ieee80211_hdr *fwd_hdr;
1600 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1601
1602 if (!fwd_skb && net_ratelimit())
1603 printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
1604 rx->dev->name);
1605
1606 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1607 /*
1608 * Save TA to addr1 to send TA a path error if a
1609 * suitable next hop is not found
1610 */
1611 memcpy(fwd_hdr->addr1, fwd_hdr->addr2, ETH_ALEN);
1612 memcpy(fwd_hdr->addr2, rx->dev->dev_addr, ETH_ALEN);
1613 fwd_skb->dev = rx->local->mdev;
1614 fwd_skb->iif = rx->dev->ifindex;
1615 dev_queue_xmit(fwd_skb);
1616 }
1617 }
1618
1619 if (is_multicast_ether_addr(hdr->addr3) ||
1620 rx->dev->flags & IFF_PROMISC)
1621 return RX_CONTINUE;
1622 else
1623 return RX_DROP_MONITOR;
1624 }
1625 #endif
1626
1627 static ieee80211_rx_result debug_noinline
1628 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1629 {
1630 struct net_device *dev = rx->dev;
1631 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1632 __le16 fc = hdr->frame_control;
1633 int err;
1634
1635 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
1636 return RX_CONTINUE;
1637
1638 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1639 return RX_DROP_MONITOR;
1640
1641 err = ieee80211_data_to_8023(rx);
1642 if (unlikely(err))
1643 return RX_DROP_UNUSABLE;
1644
1645 if (!ieee80211_frame_allowed(rx, fc))
1646 return RX_DROP_MONITOR;
1647
1648 rx->skb->dev = dev;
1649
1650 dev->stats.rx_packets++;
1651 dev->stats.rx_bytes += rx->skb->len;
1652
1653 ieee80211_deliver_skb(rx);
1654
1655 return RX_QUEUED;
1656 }
1657
1658 static ieee80211_rx_result debug_noinline
1659 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
1660 {
1661 struct ieee80211_local *local = rx->local;
1662 struct ieee80211_hw *hw = &local->hw;
1663 struct sk_buff *skb = rx->skb;
1664 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
1665 struct tid_ampdu_rx *tid_agg_rx;
1666 u16 start_seq_num;
1667 u16 tid;
1668
1669 if (likely(!ieee80211_is_ctl(bar->frame_control)))
1670 return RX_CONTINUE;
1671
1672 if (ieee80211_is_back_req(bar->frame_control)) {
1673 if (!rx->sta)
1674 return RX_CONTINUE;
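/* the TID lives in the top four bits of the BAR control field,
 * the starting sequence number in bits 4-15 of the SSC */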
1675 tid = le16_to_cpu(bar->control) >> 12;
1676 if (rx->sta->ampdu_mlme.tid_state_rx[tid]
1677 != HT_AGG_STATE_OPERATIONAL)
1678 return RX_CONTINUE;
1679 tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
1680
1681 start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
1682
1683 /* reset session timer */
1684 if (tid_agg_rx->timeout)
1685 mod_timer(&tid_agg_rx->session_timer,
1686 TU_TO_EXP_TIME(tid_agg_rx->timeout));
1687
1688 /* manage reordering buffer according to the requested
1689 * sequence number */
1690 rcu_read_lock();
1691 ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL,
1692 start_seq_num, 1);
1693 rcu_read_unlock();
1694 return RX_DROP_UNUSABLE;
1695 }
1696
1697 return RX_CONTINUE;
1698 }
1699
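/*
 * Answer an SA Query request coming from our current AP by sending back an
 * SA Query response that carries the same transaction identifier.
 */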
1700 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
1701 struct ieee80211_mgmt *mgmt,
1702 size_t len)
1703 {
1704 struct ieee80211_local *local = sdata->local;
1705 struct sk_buff *skb;
1706 struct ieee80211_mgmt *resp;
1707
1708 if (compare_ether_addr(mgmt->da, sdata->dev->dev_addr) != 0) {
1709 /* Not addressed to our own unicast address */
1710 return;
1711 }
1712
1713 if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 ||
1714 compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) {
1715 /* Not from the current AP. */
1716 return;
1717 }
1718
1719 if (sdata->u.mgd.state == IEEE80211_STA_MLME_ASSOCIATE) {
1720 /* Association in progress; ignore SA Query */
1721 return;
1722 }
1723
1724 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
1725 /* Too short SA Query request frame */
1726 return;
1727 }
1728
1729 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
1730 if (skb == NULL)
1731 return;
1732
1733 skb_reserve(skb, local->hw.extra_tx_headroom);
1734 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
1735 memset(resp, 0, 24);
1736 memcpy(resp->da, mgmt->sa, ETH_ALEN);
1737 memcpy(resp->sa, sdata->dev->dev_addr, ETH_ALEN);
1738 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
1739 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1740 IEEE80211_STYPE_ACTION);
1741 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
1742 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
1743 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
1744 memcpy(resp->u.action.u.sa_query.trans_id,
1745 mgmt->u.action.u.sa_query.trans_id,
1746 WLAN_SA_QUERY_TR_ID_LEN);
1747
1748 ieee80211_tx_skb(sdata, skb, 1);
1749 }
1750
1751 static ieee80211_rx_result debug_noinline
1752 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1753 {
1754 struct ieee80211_local *local = rx->local;
1755 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1756 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1757 struct ieee80211_bss *bss;
1758 int len = rx->skb->len;
1759
1760 if (!ieee80211_is_action(mgmt->frame_control))
1761 return RX_CONTINUE;
1762
1763 if (!rx->sta)
1764 return RX_DROP_MONITOR;
1765
1766 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1767 return RX_DROP_MONITOR;
1768
1769 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control))
1770 return RX_DROP_MONITOR;
1771
1772 /* all categories we currently handle have action_code */
1773 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
1774 return RX_DROP_MONITOR;
1775
1776 switch (mgmt->u.action.category) {
1777 case WLAN_CATEGORY_BACK:
1778 /*
1779 * The aggregation code is not prepared to handle
1780 * anything but STA/AP due to the BSSID handling;
1781 * IBSS could work in the code but isn't supported
1782 * by drivers or the standard.
1783 */
1784 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
1785 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
1786 sdata->vif.type != NL80211_IFTYPE_AP)
1787 return RX_DROP_MONITOR;
1788
1789 switch (mgmt->u.action.u.addba_req.action_code) {
1790 case WLAN_ACTION_ADDBA_REQ:
1791 if (len < (IEEE80211_MIN_ACTION_SIZE +
1792 sizeof(mgmt->u.action.u.addba_req)))
1793 return RX_DROP_MONITOR;
1794 ieee80211_process_addba_request(local, rx->sta, mgmt, len);
1795 break;
1796 case WLAN_ACTION_ADDBA_RESP:
1797 if (len < (IEEE80211_MIN_ACTION_SIZE +
1798 sizeof(mgmt->u.action.u.addba_resp)))
1799 return RX_DROP_MONITOR;
1800 ieee80211_process_addba_resp(local, rx->sta, mgmt, len);
1801 break;
1802 case WLAN_ACTION_DELBA:
1803 if (len < (IEEE80211_MIN_ACTION_SIZE +
1804 sizeof(mgmt->u.action.u.delba)))
1805 return RX_DROP_MONITOR;
1806 ieee80211_process_delba(sdata, rx->sta, mgmt, len);
1807 break;
1808 }
1809 break;
1810 case WLAN_CATEGORY_SPECTRUM_MGMT:
1811 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
1812 return RX_DROP_MONITOR;
1813
1814 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1815 return RX_DROP_MONITOR;
1816
1817 switch (mgmt->u.action.u.measurement.action_code) {
1818 case WLAN_ACTION_SPCT_MSR_REQ:
1819 if (len < (IEEE80211_MIN_ACTION_SIZE +
1820 sizeof(mgmt->u.action.u.measurement)))
1821 return RX_DROP_MONITOR;
1822 ieee80211_process_measurement_req(sdata, mgmt, len);
1823 break;
1824 case WLAN_ACTION_SPCT_CHL_SWITCH:
1825 if (len < (IEEE80211_MIN_ACTION_SIZE +
1826 sizeof(mgmt->u.action.u.chan_switch)))
1827 return RX_DROP_MONITOR;
1828
1829 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
1830 return RX_DROP_MONITOR;
1831
1832 bss = ieee80211_rx_bss_get(local, sdata->u.mgd.bssid,
1833 local->hw.conf.channel->center_freq,
1834 sdata->u.mgd.ssid,
1835 sdata->u.mgd.ssid_len);
1836 if (!bss)
1837 return RX_DROP_MONITOR;
1838
1839 ieee80211_process_chanswitch(sdata,
1840 &mgmt->u.action.u.chan_switch.sw_elem, bss);
1841 ieee80211_rx_bss_put(local, bss);
1842 break;
1843 }
1844 break;
1845 case WLAN_CATEGORY_SA_QUERY:
1846 if (len < (IEEE80211_MIN_ACTION_SIZE +
1847 sizeof(mgmt->u.action.u.sa_query)))
1848 return RX_DROP_MONITOR;
1849 switch (mgmt->u.action.u.sa_query.action) {
1850 case WLAN_ACTION_SA_QUERY_REQUEST:
1851 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1852 return RX_DROP_MONITOR;
1853 ieee80211_process_sa_query_req(sdata, mgmt, len);
1854 break;
1855 case WLAN_ACTION_SA_QUERY_RESPONSE:
1856 /*
1857 * SA Query response is currently only used in AP mode
1858 * and it is processed in user space.
1859 */
1860 return RX_CONTINUE;
1861 }
1862 break;
1863 default:
1864 return RX_CONTINUE;
1865 }
1866
1867 rx->sta->rx_packets++;
1868 dev_kfree_skb(rx->skb);
1869 return RX_QUEUED;
1870 }
1871
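/*
 * RX handler for other management frames: hands them to the MLME code for
 * the interface type (mesh, IBSS or managed STA); anything else is dropped
 * to monitor interfaces.
 */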
1872 static ieee80211_rx_result debug_noinline
1873 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
1874 {
1875 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1876 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1877
1878 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1879 return RX_DROP_MONITOR;
1880
1881 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control))
1882 return RX_DROP_MONITOR;
1883
1884 if (ieee80211_vif_is_mesh(&sdata->vif))
1885 return ieee80211_mesh_rx_mgmt(sdata, rx->skb, rx->status);
1886
1887 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
1888 return ieee80211_ibss_rx_mgmt(sdata, rx->skb, rx->status);
1889
1890 if (sdata->vif.type == NL80211_IFTYPE_STATION)
1891 return ieee80211_sta_rx_mgmt(sdata, rx->skb, rx->status);
1892
1893 return RX_DROP_MONITOR;
1894 }
1895
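/*
 * Handle a hardware-reported Michael MIC failure: sanity-check the report
 * before signalling the MIC failure event, and always consume the skb since
 * the frame itself is not processed any further.
 */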
1896 static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1897 struct ieee80211_hdr *hdr,
1898 struct ieee80211_rx_data *rx)
1899 {
1900 int keyidx;
1901 unsigned int hdrlen;
1902
1903 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1904 if (rx->skb->len >= hdrlen + 4)
1905 keyidx = rx->skb->data[hdrlen + 3] >> 6;
1906 else
1907 keyidx = -1;
1908
1909 if (!rx->sta) {
1910 /*
1911 * Some hardware seem to generate incorrect Michael MIC
1912 * reports; ignore them to avoid triggering countermeasures.
1913 */
1914 goto ignore;
1915 }
1916
1917 if (!ieee80211_has_protected(hdr->frame_control))
1918 goto ignore;
1919
1920 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) {
1921 /*
1922 * APs with pairwise keys should never receive Michael MIC
1923 * errors for non-zero keyidx because these are reserved for
1924 * group keys and only the AP is sending real multicast
1925 * frames in the BSS.
1926 */
1927 goto ignore;
1928 }
1929
1930 if (!ieee80211_is_data(hdr->frame_control) &&
1931 !ieee80211_is_auth(hdr->frame_control))
1932 goto ignore;
1933
1934 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr);
1935 ignore:
1936 dev_kfree_skb(rx->skb);
1937 rx->skb = NULL;
1938 }
1939
1940 /* TODO: use IEEE80211_RX_FRAGMENTED */
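/*
 * Deliver frames that the RX handlers did not consume to monitor interfaces
 * configured with MONITOR_FLAG_COOK_FRAMES, prepending a minimal radiotap
 * header; each frame is reported this way at most once.
 */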
1941 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx)
1942 {
1943 struct ieee80211_sub_if_data *sdata;
1944 struct ieee80211_local *local = rx->local;
1945 struct ieee80211_rtap_hdr {
1946 struct ieee80211_radiotap_header hdr;
1947 u8 flags;
1948 u8 rate;
1949 __le16 chan_freq;
1950 __le16 chan_flags;
1951 } __attribute__ ((packed)) *rthdr;
1952 struct sk_buff *skb = rx->skb, *skb2;
1953 struct net_device *prev_dev = NULL;
1954 struct ieee80211_rx_status *status = rx->status;
1955
1956 if (rx->flags & IEEE80211_RX_CMNTR_REPORTED)
1957 goto out_free_skb;
1958
1959 if (skb_headroom(skb) < sizeof(*rthdr) &&
1960 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
1961 goto out_free_skb;
1962
1963 rthdr = (void *)skb_push(skb, sizeof(*rthdr));
1964 memset(rthdr, 0, sizeof(*rthdr));
1965 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
1966 rthdr->hdr.it_present =
1967 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
1968 (1 << IEEE80211_RADIOTAP_RATE) |
1969 (1 << IEEE80211_RADIOTAP_CHANNEL));
1970
1971 rthdr->rate = rx->rate->bitrate / 5;
1972 rthdr->chan_freq = cpu_to_le16(status->freq);
1973
1974 if (status->band == IEEE80211_BAND_5GHZ)
1975 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
1976 IEEE80211_CHAN_5GHZ);
1977 else
1978 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
1979 IEEE80211_CHAN_2GHZ);
1980
1981 skb_set_mac_header(skb, 0);
1982 skb->ip_summed = CHECKSUM_UNNECESSARY;
1983 skb->pkt_type = PACKET_OTHERHOST;
1984 skb->protocol = htons(ETH_P_802_2);
1985
1986 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
1987 if (!netif_running(sdata->dev))
1988 continue;
1989
1990 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
1991 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
1992 continue;
1993
1994 if (prev_dev) {
1995 skb2 = skb_clone(skb, GFP_ATOMIC);
1996 if (skb2) {
1997 skb2->dev = prev_dev;
1998 netif_rx(skb2);
1999 }
2000 }
2001
2002 prev_dev = sdata->dev;
2003 sdata->dev->stats.rx_packets++;
2004 sdata->dev->stats.rx_bytes += skb->len;
2005 }
2006
2007 if (prev_dev) {
2008 skb->dev = prev_dev;
2009 netif_rx(skb);
2010 skb = NULL;
2011 } else
2012 goto out_free_skb;
2013
2014 rx->flags |= IEEE80211_RX_CMNTR_REPORTED;
2015 return;
2016
2017 out_free_skb:
2018 dev_kfree_skb(skb);
2019 }
2020
2021
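/*
 * Run one frame through the RX handler chain for a single interface.
 * Handlers return RX_CONTINUE to pass the frame on, RX_QUEUED when they
 * consumed it, or a drop code; dropped and unhandled frames are offered to
 * cooked monitor interfaces, unusable ones are freed.
 */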
2022 static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
2023 struct ieee80211_rx_data *rx,
2024 struct sk_buff *skb)
2025 {
2026 ieee80211_rx_result res = RX_DROP_MONITOR;
2027
2028 rx->skb = skb;
2029 rx->sdata = sdata;
2030 rx->dev = sdata->dev;
2031
2032 #define CALL_RXH(rxh) \
2033 do { \
2034 res = rxh(rx); \
2035 if (res != RX_CONTINUE) \
2036 goto rxh_done; \
2037 } while (0);
2038
2039 CALL_RXH(ieee80211_rx_h_passive_scan)
2040 CALL_RXH(ieee80211_rx_h_check)
2041 CALL_RXH(ieee80211_rx_h_decrypt)
2042 CALL_RXH(ieee80211_rx_h_check_more_data)
2043 CALL_RXH(ieee80211_rx_h_sta_process)
2044 CALL_RXH(ieee80211_rx_h_defragment)
2045 CALL_RXH(ieee80211_rx_h_ps_poll)
2046 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
2047 /* must be after MMIC verify so header is counted in MPDU mic */
2048 CALL_RXH(ieee80211_rx_h_remove_qos_control)
2049 CALL_RXH(ieee80211_rx_h_amsdu)
2050 #ifdef CONFIG_MAC80211_MESH
2051 if (ieee80211_vif_is_mesh(&sdata->vif))
2052 CALL_RXH(ieee80211_rx_h_mesh_fwding);
2053 #endif
2054 CALL_RXH(ieee80211_rx_h_data)
2055 CALL_RXH(ieee80211_rx_h_ctrl)
2056 CALL_RXH(ieee80211_rx_h_action)
2057 CALL_RXH(ieee80211_rx_h_mgmt)
2058
2059 #undef CALL_RXH
2060
2061 rxh_done:
2062 switch (res) {
2063 case RX_DROP_MONITOR:
2064 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
2065 if (rx->sta)
2066 rx->sta->rx_dropped++;
2067 /* fall through */
2068 case RX_CONTINUE:
2069 ieee80211_rx_cooked_monitor(rx);
2070 break;
2071 case RX_DROP_UNUSABLE:
2072 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
2073 if (rx->sta)
2074 rx->sta->rx_dropped++;
2075 dev_kfree_skb(rx->skb);
2076 break;
2077 case RX_QUEUED:
2078 I802_DEBUG_INC(sdata->local->rx_handlers_queued);
2079 break;
2080 }
2081 }
2082
2083 /* main receive path */
2084
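/*
 * Decide, per interface type, whether the frame should be handled by this
 * interface at all; returns 0 to skip it and clears IEEE80211_RX_RA_MATCH
 * for frames that are accepted only passively (e.g. while scanning or in
 * promiscuous mode).
 */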
2085 static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2086 struct ieee80211_rx_data *rx,
2087 struct ieee80211_hdr *hdr)
2088 {
2089 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, sdata->vif.type);
2090 int multicast = is_multicast_ether_addr(hdr->addr1);
2091
2092 switch (sdata->vif.type) {
2093 case NL80211_IFTYPE_STATION:
2094 if (!bssid)
2095 return 0;
2096 if (!ieee80211_bssid_match(bssid, sdata->u.mgd.bssid)) {
2097 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2098 return 0;
2099 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2100 } else if (!multicast &&
2101 compare_ether_addr(sdata->dev->dev_addr,
2102 hdr->addr1) != 0) {
2103 if (!(sdata->dev->flags & IFF_PROMISC))
2104 return 0;
2105 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2106 }
2107 break;
2108 case NL80211_IFTYPE_ADHOC:
2109 if (!bssid)
2110 return 0;
2111 if (ieee80211_is_beacon(hdr->frame_control)) {
2112 return 1;
2113 }
2114 else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
2115 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2116 return 0;
2117 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2118 } else if (!multicast &&
2119 compare_ether_addr(sdata->dev->dev_addr,
2120 hdr->addr1) != 0) {
2121 if (!(sdata->dev->flags & IFF_PROMISC))
2122 return 0;
2123 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2124 } else if (!rx->sta) {
2125 int rate_idx;
2126 if (rx->status->flag & RX_FLAG_HT)
2127 rate_idx = 0; /* TODO: HT rates */
2128 else
2129 rate_idx = rx->status->rate_idx;
2130 rx->sta = ieee80211_ibss_add_sta(sdata, bssid, hdr->addr2,
2131 BIT(rate_idx));
2132 }
2133 break;
2134 case NL80211_IFTYPE_MESH_POINT:
2135 if (!multicast &&
2136 compare_ether_addr(sdata->dev->dev_addr,
2137 hdr->addr1) != 0) {
2138 if (!(sdata->dev->flags & IFF_PROMISC))
2139 return 0;
2140
2141 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2142 }
2143 break;
2144 case NL80211_IFTYPE_AP_VLAN:
2145 case NL80211_IFTYPE_AP:
2146 if (!bssid) {
2147 if (compare_ether_addr(sdata->dev->dev_addr,
2148 hdr->addr1))
2149 return 0;
2150 } else if (!ieee80211_bssid_match(bssid,
2151 sdata->dev->dev_addr)) {
2152 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2153 return 0;
2154 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2155 }
2156 break;
2157 case NL80211_IFTYPE_WDS:
2158 if (bssid || !ieee80211_is_data(hdr->frame_control))
2159 return 0;
2160 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
2161 return 0;
2162 break;
2163 case NL80211_IFTYPE_MONITOR:
2164 /* take everything */
2165 break;
2166 case NL80211_IFTYPE_UNSPECIFIED:
2167 case __NL80211_IFTYPE_AFTER_LAST:
2168 /* should never get here */
2169 WARN_ON(1);
2170 break;
2171 }
2172
2173 return 1;
2174 }
2175
2176 /*
2177 * This is the actual Rx frames handler. As it belongs to the Rx path it
2178 * must be called with rcu_read_lock protection.
2179 */
2180 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2181 struct sk_buff *skb,
2182 struct ieee80211_rx_status *status,
2183 struct ieee80211_rate *rate)
2184 {
2185 struct ieee80211_local *local = hw_to_local(hw);
2186 struct ieee80211_sub_if_data *sdata;
2187 struct ieee80211_hdr *hdr;
2188 struct ieee80211_rx_data rx;
2189 int prepares;
2190 struct ieee80211_sub_if_data *prev = NULL;
2191 struct sk_buff *skb_new;
2192
2193 hdr = (struct ieee80211_hdr *)skb->data;
2194 memset(&rx, 0, sizeof(rx));
2195 rx.skb = skb;
2196 rx.local = local;
2197
2198 rx.status = status;
2199 rx.rate = rate;
2200
2201 if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control))
2202 local->dot11ReceivedFragmentCount++;
2203
2204 rx.sta = sta_info_get(local, hdr->addr2);
2205 if (rx.sta) {
2206 rx.sdata = rx.sta->sdata;
2207 rx.dev = rx.sta->sdata->dev;
2208 }
2209
2210 if ((status->flag & RX_FLAG_MMIC_ERROR)) {
2211 ieee80211_rx_michael_mic_report(local->mdev, hdr, &rx);
2212 return;
2213 }
2214
2215 if (unlikely(local->sw_scanning || local->hw_scanning))
2216 rx.flags |= IEEE80211_RX_IN_SCAN;
2217
2218 ieee80211_parse_qos(&rx);
2219 ieee80211_verify_alignment(&rx);
2220
2221 skb = rx.skb;
2222
2223 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2224 if (!netif_running(sdata->dev))
2225 continue;
2226
2227 if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
2228 continue;
2229
2230 rx.flags |= IEEE80211_RX_RA_MATCH;
2231 prepares = prepare_for_handlers(sdata, &rx, hdr);
2232
2233 if (!prepares)
2234 continue;
2235
2236 /*
2237 * frame is destined for this interface, but if it's not
2238 * also for the previous one we handle that after the
2239 * loop to avoid copying the SKB one time more than necessary
2240 */
2241
2242 if (!prev) {
2243 prev = sdata;
2244 continue;
2245 }
2246
2247 /*
2248 * frame was destined for the previous interface
2249 * so invoke RX handlers for it
2250 */
2251
2252 skb_new = skb_copy(skb, GFP_ATOMIC);
2253 if (!skb_new) {
2254 if (net_ratelimit())
2255 printk(KERN_DEBUG "%s: failed to copy "
2256 "multicast frame for %s\n",
2257 wiphy_name(local->hw.wiphy),
2258 prev->dev->name);
2259 continue;
2260 }
2261 ieee80211_invoke_rx_handlers(prev, &rx, skb_new);
2262 prev = sdata;
2263 }
2264 if (prev)
2265 ieee80211_invoke_rx_handlers(prev, &rx, skb);
2266 else
2267 dev_kfree_skb(skb);
2268 }
2269
2270 #define SEQ_MODULO 0x1000
2271 #define SEQ_MASK 0xfff
2272
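/*
 * 802.11 sequence numbers are 12 bits wide, so all of the arithmetic below
 * is done modulo SEQ_MODULO (4096); seq_less() compares within half of the
 * sequence number space to cope with wrap-around.
 */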
2273 static inline int seq_less(u16 sq1, u16 sq2)
2274 {
2275 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
2276 }
2277
2278 static inline u16 seq_inc(u16 sq)
2279 {
2280 return (sq + 1) & SEQ_MASK;
2281 }
2282
2283 static inline u16 seq_sub(u16 sq1, u16 sq2)
2284 {
2285 return (sq1 - sq2) & SEQ_MASK;
2286 }
2287
2288
2289 /*
2290 * As this function belongs to the Rx path it must be called with
2291 * the proper rcu_read_lock protection for its flow.
2292 */
2293 static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2294 struct tid_ampdu_rx *tid_agg_rx,
2295 struct sk_buff *skb,
2296 u16 mpdu_seq_num,
2297 int bar_req)
2298 {
2299 struct ieee80211_local *local = hw_to_local(hw);
2300 struct ieee80211_rx_status status;
2301 u16 head_seq_num, buf_size;
2302 int index;
2303 struct ieee80211_supported_band *sband;
2304 struct ieee80211_rate *rate;
2305
2306 buf_size = tid_agg_rx->buf_size;
2307 head_seq_num = tid_agg_rx->head_seq_num;
2308
2309 /* frame with an out-of-date sequence number */
2310 if (seq_less(mpdu_seq_num, head_seq_num)) {
2311 dev_kfree_skb(skb);
2312 return 1;
2313 }
2314
2315 /* if the frame sequence number exceeds our buffering window size or
2316 * a Block Ack Request arrived - release stored frames */
2317 if ((!seq_less(mpdu_seq_num, head_seq_num + buf_size)) || (bar_req)) {
2318 /* new head to the ordering buffer */
2319 if (bar_req)
2320 head_seq_num = mpdu_seq_num;
2321 else
2322 head_seq_num =
2323 seq_inc(seq_sub(mpdu_seq_num, buf_size));
2324 /* release stored frames up to new head to stack */
2325 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
2326 index = seq_sub(tid_agg_rx->head_seq_num,
2327 tid_agg_rx->ssn)
2328 % tid_agg_rx->buf_size;
2329
2330 if (tid_agg_rx->reorder_buf[index]) {
2331 /* release the reordered frames to stack */
2332 memcpy(&status,
2333 tid_agg_rx->reorder_buf[index]->cb,
2334 sizeof(status));
2335 sband = local->hw.wiphy->bands[status.band];
2336 if (status.flag & RX_FLAG_HT) {
2337 /* TODO: HT rates */
2338 rate = sband->bitrates;
2339 } else {
2340 rate = &sband->bitrates
2341 [status.rate_idx];
2342 }
2343 __ieee80211_rx_handle_packet(hw,
2344 tid_agg_rx->reorder_buf[index],
2345 &status, rate);
2346 tid_agg_rx->stored_mpdu_num--;
2347 tid_agg_rx->reorder_buf[index] = NULL;
2348 }
2349 tid_agg_rx->head_seq_num =
2350 seq_inc(tid_agg_rx->head_seq_num);
2351 }
2352 if (bar_req)
2353 return 1;
2354 }
2355
2356 /* the new frame is now always within the reordering buffer window */
2358 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn)
2359 % tid_agg_rx->buf_size;
2360 /* check if we already stored this frame */
2361 if (tid_agg_rx->reorder_buf[index]) {
2362 dev_kfree_skb(skb);
2363 return 1;
2364 }
2365
2366 /* if the arrived MPDU is in order and nothing else is stored,
2367 * release it immediately */
2368 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
2369 tid_agg_rx->stored_mpdu_num == 0) {
2370 tid_agg_rx->head_seq_num =
2371 seq_inc(tid_agg_rx->head_seq_num);
2372 return 0;
2373 }
2374
2375 /* put the frame in the reordering buffer */
2376 tid_agg_rx->reorder_buf[index] = skb;
2377 tid_agg_rx->stored_mpdu_num++;
2378 /* release consecutive buffered frames up to the next missing frame */
2379 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn)
2380 % tid_agg_rx->buf_size;
2381 while (tid_agg_rx->reorder_buf[index]) {
2382 /* release the reordered frame back to stack */
2383 memcpy(&status, tid_agg_rx->reorder_buf[index]->cb,
2384 sizeof(status));
2385 sband = local->hw.wiphy->bands[status.band];
2386 if (status.flag & RX_FLAG_HT)
2387 rate = sband->bitrates; /* TODO: HT rates */
2388 else
2389 rate = &sband->bitrates[status.rate_idx];
2390 __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index],
2391 &status, rate);
2392 tid_agg_rx->stored_mpdu_num--;
2393 tid_agg_rx->reorder_buf[index] = NULL;
2394 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
2395 index = seq_sub(tid_agg_rx->head_seq_num,
2396 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
2397 }
2398 return 1;
2399 }
2400
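/*
 * Feed a received frame into the RX A-MPDU reordering machinery.
 * Returns nonzero if the frame was taken care of here (buffered, released
 * through the reorder buffer or dropped) and zero if the caller should
 * continue with normal processing.
 */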
2401 static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2402 struct sk_buff *skb)
2403 {
2404 struct ieee80211_hw *hw = &local->hw;
2405 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2406 struct sta_info *sta;
2407 struct tid_ampdu_rx *tid_agg_rx;
2408 u16 sc;
2409 u16 mpdu_seq_num;
2410 u8 ret = 0;
2411 int tid;
2412
2413 sta = sta_info_get(local, hdr->addr2);
2414 if (!sta)
2415 return ret;
2416
2417 /* filter the QoS data rx stream according to
2418 * STA/TID and check if this STA/TID is on aggregation */
2419 if (!ieee80211_is_data_qos(hdr->frame_control))
2420 goto end_reorder;
2421
2422 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
2423
2424 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
2425 goto end_reorder;
2426
2427 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
2428
2429 /* qos null data frames are excluded */
2430 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
2431 goto end_reorder;
2432
2433 /* new un-ordered ampdu frame - process it */
2434
2435 /* reset session timer */
2436 if (tid_agg_rx->timeout)
2437 mod_timer(&tid_agg_rx->session_timer,
2438 TU_TO_EXP_TIME(tid_agg_rx->timeout));
2439
2440 /* if this mpdu is fragmented - terminate rx aggregation session */
2441 sc = le16_to_cpu(hdr->seq_ctrl);
2442 if (sc & IEEE80211_SCTL_FRAG) {
2443 ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
2444 tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
2445 ret = 1;
2446 goto end_reorder;
2447 }
2448
2449 /* feed the MPDU into the reordering buffer according to its sequence number */
2450 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
2451 ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb,
2452 mpdu_seq_num, 0);
2453 end_reorder:
2454 return ret;
2455 }
2456
2457 /*
2458 * This is the receive path handler. It is called by a low level driver when an
2459 * 802.11 MPDU is received from the hardware.
2460 */
2461 void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
2462 struct ieee80211_rx_status *status)
2463 {
2464 struct ieee80211_local *local = hw_to_local(hw);
2465 struct ieee80211_rate *rate = NULL;
2466 struct ieee80211_supported_band *sband;
2467
2468 if (status->band < 0 ||
2469 status->band >= IEEE80211_NUM_BANDS) {
2470 WARN_ON(1);
2471 return;
2472 }
2473
2474 sband = local->hw.wiphy->bands[status->band];
2475 if (!sband) {
2476 WARN_ON(1);
2477 return;
2478 }
2479
2480 if (status->flag & RX_FLAG_HT) {
2481 /* rate_idx is MCS index */
2482 if (WARN_ON(status->rate_idx < 0 ||
2483 status->rate_idx >= 76))
2484 return;
2485 /* HT rates are not in the table - use the highest legacy rate
2486 * for now since other parts of mac80211 may not yet be fully
2487 * MCS aware. */
2488 rate = &sband->bitrates[sband->n_bitrates - 1];
2489 } else {
2490 if (WARN_ON(status->rate_idx < 0 ||
2491 status->rate_idx >= sband->n_bitrates))
2492 return;
2493 rate = &sband->bitrates[status->rate_idx];
2494 }
2495
2496 /*
2497 * key references and virtual interfaces are protected using RCU
2498 * and this requires that we are in a read-side RCU section during
2499 * receive processing
2500 */
2501 rcu_read_lock();
2502
2503 /*
2504 * Frames with failed FCS/PLCP checksum are not returned,
2505 * all other frames are returned without radiotap header
2506 * if it was previously present.
2507 * Also, frames with less than 16 bytes are dropped.
2508 */
2509 skb = ieee80211_rx_monitor(local, skb, status, rate);
2510 if (!skb) {
2511 rcu_read_unlock();
2512 return;
2513 }
2514
2515 if (!ieee80211_rx_reorder_ampdu(local, skb))
2516 __ieee80211_rx_handle_packet(hw, skb, status, rate);
2517
2518 rcu_read_unlock();
2519 }
2520 EXPORT_SYMBOL(__ieee80211_rx);
2521
2522 /* This is a version of the rx handler that can be called from hard irq
2523 * context. Post the skb on the queue and schedule the tasklet */
2524 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb,
2525 struct ieee80211_rx_status *status)
2526 {
2527 struct ieee80211_local *local = hw_to_local(hw);
2528
2529 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
2530
2531 skb->dev = local->mdev;
2532 /* copy status into skb->cb for use by tasklet */
2533 memcpy(skb->cb, status, sizeof(*status));
2534 skb->pkt_type = IEEE80211_RX_MSG;
2535 skb_queue_tail(&local->skb_queue, skb);
2536 tasklet_schedule(&local->tasklet);
2537 }
2538 EXPORT_SYMBOL(ieee80211_rx_irqsafe);