net/mac80211/rx.c (deliverable/linux.git, commit d052f400482931b254188da53b7544f0d34b02e0)
1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/kernel.h>
14 #include <linux/skbuff.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/rcupdate.h>
18 #include <net/mac80211.h>
19 #include <net/ieee80211_radiotap.h>
20
21 #include "ieee80211_i.h"
22 #include "driver-ops.h"
23 #include "led.h"
24 #include "mesh.h"
25 #include "wep.h"
26 #include "wpa.h"
27 #include "tkip.h"
28 #include "wme.h"
29
30 static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
31 struct tid_ampdu_rx *tid_agg_rx,
32 struct sk_buff *skb,
33 struct ieee80211_rx_status *status,
34 u16 mpdu_seq_num,
35 int bar_req);
36 /*
37 * monitor mode reception
38 *
39 * This function cleans up the SKB, i.e. it removes all the stuff
40 * only useful for monitoring.
41 */
42 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
43 struct sk_buff *skb,
44 int rtap_len)
45 {
46 skb_pull(skb, rtap_len);
47
48 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
49 if (likely(skb->len > FCS_LEN))
50 skb_trim(skb, skb->len - FCS_LEN);
51 else {
52 /* driver bug */
53 WARN_ON(1);
54 dev_kfree_skb(skb);
55 skb = NULL;
56 }
57 }
58
59 return skb;
60 }
61
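/*
 * Returns true for frames that should not enter the normal RX path
 * (monitor interfaces still get to see them): bad FCS/PLCP, frames
 * that are too short, and control frames other than PS-Poll and
 * BlockAckReq.
 */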
62 static inline int should_drop_frame(struct ieee80211_rx_status *status,
63 struct sk_buff *skb,
64 int present_fcs_len,
65 int radiotap_len)
66 {
67 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
68
69 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
70 return 1;
71 if (unlikely(skb->len < 16 + present_fcs_len + radiotap_len))
72 return 1;
73 if (ieee80211_is_ctl(hdr->frame_control) &&
74 !ieee80211_is_pspoll(hdr->frame_control) &&
75 !ieee80211_is_back_req(hdr->frame_control))
76 return 1;
77 return 0;
78 }
79
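/*
 * Calculate the headroom needed for the radiotap header that
 * ieee80211_add_rx_radiotap_header() will construct for this frame,
 * based on the RX status and hardware capabilities.
 */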
80 static int
81 ieee80211_rx_radiotap_len(struct ieee80211_local *local,
82 struct ieee80211_rx_status *status)
83 {
84 int len;
85
86 /* always present fields: flags(1), rate(1), channel(2+2), antenna(1), rx flags(2) */
87 len = sizeof(struct ieee80211_radiotap_header) + 9;
88
89 if (status->flag & RX_FLAG_TSFT)
90 len += 8;
91 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
92 len += 1;
93 if (local->hw.flags & IEEE80211_HW_NOISE_DBM)
94 len += 1;
95
96 if (len & 1) /* padding for RX_FLAGS if necessary */
97 len++;
98
99 /* make sure radiotap starts at a naturally aligned address */
100 if (len % 8)
101 len = roundup(len, 8);
102
103 return len;
104 }
105
106 /*
107 * ieee80211_add_rx_radiotap_header - add radiotap header
108 *
109 * add a radiotap header containing all the fields which the hardware provided.
110 */
111 static void
112 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
113 struct sk_buff *skb,
114 struct ieee80211_rx_status *status,
115 struct ieee80211_rate *rate,
116 int rtap_len)
117 {
118 struct ieee80211_radiotap_header *rthdr;
119 unsigned char *pos;
120
121 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
122 memset(rthdr, 0, rtap_len);
123
124 /* radiotap header, set always present flags */
125 rthdr->it_present =
126 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
127 (1 << IEEE80211_RADIOTAP_CHANNEL) |
128 (1 << IEEE80211_RADIOTAP_ANTENNA) |
129 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
130 rthdr->it_len = cpu_to_le16(rtap_len);
131
132 pos = (unsigned char *)(rthdr+1);
133
134 /* the order of the following fields is important */
135
136 /* IEEE80211_RADIOTAP_TSFT */
137 if (status->flag & RX_FLAG_TSFT) {
138 *(__le64 *)pos = cpu_to_le64(status->mactime);
139 rthdr->it_present |=
140 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
141 pos += 8;
142 }
143
144 /* IEEE80211_RADIOTAP_FLAGS */
145 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
146 *pos |= IEEE80211_RADIOTAP_F_FCS;
147 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
148 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
149 if (status->flag & RX_FLAG_SHORTPRE)
150 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
151 pos++;
152
153 /* IEEE80211_RADIOTAP_RATE */
154 if (status->flag & RX_FLAG_HT) {
155 /*
156 * TODO: add following information into radiotap header once
157 * suitable fields are defined for it:
158 * - MCS index (status->rate_idx)
159 * - HT40 (status->flag & RX_FLAG_40MHZ)
160 * - short-GI (status->flag & RX_FLAG_SHORT_GI)
161 */
162 *pos = 0;
163 } else {
164 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
165 *pos = rate->bitrate / 5;
166 }
167 pos++;
168
169 /* IEEE80211_RADIOTAP_CHANNEL */
170 *(__le16 *)pos = cpu_to_le16(status->freq);
171 pos += 2;
172 if (status->band == IEEE80211_BAND_5GHZ)
173 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
174 IEEE80211_CHAN_5GHZ);
175 else if (rate->flags & IEEE80211_RATE_ERP_G)
176 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
177 IEEE80211_CHAN_2GHZ);
178 else
179 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_CCK |
180 IEEE80211_CHAN_2GHZ);
181 pos += 2;
182
183 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
184 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
185 *pos = status->signal;
186 rthdr->it_present |=
187 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
188 pos++;
189 }
190
191 /* IEEE80211_RADIOTAP_DBM_ANTNOISE */
192 if (local->hw.flags & IEEE80211_HW_NOISE_DBM) {
193 *pos = status->noise;
194 rthdr->it_present |=
195 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
196 pos++;
197 }
198
199 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
200
201 /* IEEE80211_RADIOTAP_ANTENNA */
202 *pos = status->antenna;
203 pos++;
204
205 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
206
207 /* IEEE80211_RADIOTAP_RX_FLAGS */
208 /* ensure 2 byte alignment for the 2 byte field as required */
209 if ((pos - (unsigned char *)rthdr) & 1)
210 pos++;
211 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
212 *(__le16 *)pos |= cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADPLCP);
213 pos += 2;
214 }
215
216 /*
217 * This function copies a received frame to all monitor interfaces and
218 * returns a cleaned-up SKB that no longer includes the FCS nor the
219 * radiotap header the driver might have added.
220 */
221 static struct sk_buff *
222 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
223 struct ieee80211_rx_status *status,
224 struct ieee80211_rate *rate)
225 {
226 struct ieee80211_sub_if_data *sdata;
227 int needed_headroom = 0;
228 struct sk_buff *skb, *skb2;
229 struct net_device *prev_dev = NULL;
230 int present_fcs_len = 0;
231 int rtap_len = 0;
232
233 /*
234 * First, we may need to make a copy of the skb because
235 * (1) we need to modify it for radiotap (if not present), and
236 * (2) the other RX handlers will modify the skb we got.
237 *
238 * We don't need to, of course, if we aren't going to return
239 * the SKB because it has a bad FCS/PLCP checksum.
240 */
241 if (status->flag & RX_FLAG_RADIOTAP)
242 rtap_len = ieee80211_get_radiotap_len(origskb->data);
243 else
244 /* room for the radiotap header based on driver features */
245 needed_headroom = ieee80211_rx_radiotap_len(local, status);
246
247 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
248 present_fcs_len = FCS_LEN;
249
250 if (!local->monitors) {
251 if (should_drop_frame(status, origskb, present_fcs_len,
252 rtap_len)) {
253 dev_kfree_skb(origskb);
254 return NULL;
255 }
256
257 return remove_monitor_info(local, origskb, rtap_len);
258 }
259
260 if (should_drop_frame(status, origskb, present_fcs_len, rtap_len)) {
261 /* only need to expand headroom if necessary */
262 skb = origskb;
263 origskb = NULL;
264
265 /*
266 * This shouldn't trigger often because most devices have an
267 * RX header they pull before we get here, and that should
268 * be big enough for our radiotap information. We should
269 * probably export the length to drivers so that we can have
270 * them allocate enough headroom to start with.
271 */
272 if (skb_headroom(skb) < needed_headroom &&
273 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
274 dev_kfree_skb(skb);
275 return NULL;
276 }
277 } else {
278 /*
279 * Need to make a copy and possibly remove radiotap header
280 * and FCS from the original.
281 */
282 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
283
284 origskb = remove_monitor_info(local, origskb, rtap_len);
285
286 if (!skb)
287 return origskb;
288 }
289
290 /* if necessary, prepend radiotap information */
291 if (!(status->flag & RX_FLAG_RADIOTAP))
292 ieee80211_add_rx_radiotap_header(local, skb, status, rate,
293 needed_headroom);
294
295 skb_reset_mac_header(skb);
296 skb->ip_summed = CHECKSUM_UNNECESSARY;
297 skb->pkt_type = PACKET_OTHERHOST;
298 skb->protocol = htons(ETH_P_802_2);
299
300 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
301 if (!netif_running(sdata->dev))
302 continue;
303
304 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
305 continue;
306
307 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
308 continue;
309
310 if (prev_dev) {
311 skb2 = skb_clone(skb, GFP_ATOMIC);
312 if (skb2) {
313 skb2->dev = prev_dev;
314 netif_rx(skb2);
315 }
316 }
317
318 prev_dev = sdata->dev;
319 sdata->dev->stats.rx_packets++;
320 sdata->dev->stats.rx_bytes += skb->len;
321 }
322
323 if (prev_dev) {
324 skb->dev = prev_dev;
325 netif_rx(skb);
326 } else
327 dev_kfree_skb(skb);
328
329 return origskb;
330 }
331
332
333 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
334 {
335 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
336 int tid;
337
338 /* does the frame have a qos control field? */
339 if (ieee80211_is_data_qos(hdr->frame_control)) {
340 u8 *qc = ieee80211_get_qos_ctl(hdr);
341 /* frame has qos control */
342 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
343 if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
344 rx->flags |= IEEE80211_RX_AMSDU;
345 else
346 rx->flags &= ~IEEE80211_RX_AMSDU;
347 } else {
348 /*
349 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
350 *
351 * Sequence numbers for management frames, QoS data
352 * frames with a broadcast/multicast address in the
353 * Address 1 field, and all non-QoS data frames sent
354 * by QoS STAs are assigned using an additional single
355 * modulo-4096 counter, [...]
356 *
357 * We also use that counter for non-QoS STAs.
358 */
359 tid = NUM_RX_DATA_QUEUES - 1;
360 }
361
362 rx->queue = tid;
363 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
364 * For now, set skb->priority to 0 for other cases. */
365 rx->skb->priority = (tid > 7) ? 0 : tid;
366 }
367
368 /**
369 * DOC: Packet alignment
370 *
371 * Drivers always need to pass packets that are aligned to two-byte boundaries
372 * to the stack.
373 *
374 * Additionally, drivers should, if possible, align the payload data in a way that
375 * guarantees that the contained IP header is aligned to a four-byte
376 * boundary. In the case of regular frames, this simply means aligning the
377 * payload to a four-byte boundary (because either the IP header is directly
378 * contained, or IV/RFC1042 headers that have a length divisible by four are
379 * in front of it).
380 *
381 * With A-MSDU frames, however, the payload data address must yield two modulo
382 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
383 * push the IP header further back to a multiple of four again. Thankfully, the
384 * specs were sane enough this time around to require padding each A-MSDU
385 * subframe to a length that is a multiple of four.
386 *
387 * Padding of the kind Atheros hardware adds, in between the 802.11 header and
388 * the payload, is not supported; the driver is required to move the 802.11
389 * header so that it is directly in front of the payload in that case.
390 */
391 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
392 {
393 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
394 int hdrlen;
395
396 #ifndef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
397 return;
398 #endif
399
400 if (WARN_ONCE((unsigned long)rx->skb->data & 1,
401 "unaligned packet at 0x%p\n", rx->skb->data))
402 return;
403
404 if (!ieee80211_is_data_present(hdr->frame_control))
405 return;
406
407 hdrlen = ieee80211_hdrlen(hdr->frame_control);
408 if (rx->flags & IEEE80211_RX_AMSDU)
409 hdrlen += ETH_HLEN;
410 WARN_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3,
411 "unaligned IP payload at 0x%p\n", rx->skb->data + hdrlen);
412 }
413
414
415 /* rx handlers */
416
417 static ieee80211_rx_result debug_noinline
418 ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
419 {
420 struct ieee80211_local *local = rx->local;
421 struct sk_buff *skb = rx->skb;
422
423 if (unlikely(local->hw_scanning))
424 return ieee80211_scan_rx(rx->sdata, skb, rx->status);
425
426 if (unlikely(local->sw_scanning)) {
427 /* drop all the other packets during a software scan anyway */
428 if (ieee80211_scan_rx(rx->sdata, skb, rx->status)
429 != RX_QUEUED)
430 dev_kfree_skb(skb);
431 return RX_QUEUED;
432 }
433
434 if (unlikely(rx->flags & IEEE80211_RX_IN_SCAN)) {
435 /* scanning finished during invoking of handlers */
436 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
437 return RX_DROP_UNUSABLE;
438 }
439
440 return RX_CONTINUE;
441 }
442
443
444 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
445 {
446 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
447
448 if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
449 return 0;
450
451 return ieee80211_is_robust_mgmt_frame(hdr);
452 }
453
454
455 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
456 {
457 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
458
459 if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
460 return 0;
461
462 return ieee80211_is_robust_mgmt_frame(hdr);
463 }
464
465
466 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
467 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
468 {
469 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
470 struct ieee80211_mmie *mmie;
471
472 if (skb->len < 24 + sizeof(*mmie) ||
473 !is_multicast_ether_addr(hdr->da))
474 return -1;
475
476 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
477 return -1; /* not a robust management frame */
478
479 mmie = (struct ieee80211_mmie *)
480 (skb->data + skb->len - sizeof(*mmie));
481 if (mmie->element_id != WLAN_EID_MMIE ||
482 mmie->length != sizeof(*mmie) - 2)
483 return -1;
484
485 return le16_to_cpu(mmie->key_id);
486 }
487
488
489 static ieee80211_rx_result
490 ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
491 {
492 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
493 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
494
495 if (ieee80211_is_data(hdr->frame_control)) {
496 if (!ieee80211_has_a4(hdr->frame_control))
497 return RX_DROP_MONITOR;
498 if (memcmp(hdr->addr4, rx->dev->dev_addr, ETH_ALEN) == 0)
499 return RX_DROP_MONITOR;
500 }
501
502 /* If there is not an established peer link and this is not a peer link
503 * establishment frame, beacon or probe, drop the frame.
504 */
505
506 if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) {
507 struct ieee80211_mgmt *mgmt;
508
509 if (!ieee80211_is_mgmt(hdr->frame_control))
510 return RX_DROP_MONITOR;
511
512 if (ieee80211_is_action(hdr->frame_control)) {
513 mgmt = (struct ieee80211_mgmt *)hdr;
514 if (mgmt->u.action.category != PLINK_CATEGORY)
515 return RX_DROP_MONITOR;
516 return RX_CONTINUE;
517 }
518
519 if (ieee80211_is_probe_req(hdr->frame_control) ||
520 ieee80211_is_probe_resp(hdr->frame_control) ||
521 ieee80211_is_beacon(hdr->frame_control))
522 return RX_CONTINUE;
523
524 return RX_DROP_MONITOR;
525
526 }
527
528 #define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))
529
530 if (ieee80211_is_data(hdr->frame_control) &&
531 is_multicast_ether_addr(hdr->addr1) &&
532 mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->sdata))
533 return RX_DROP_MONITOR;
534 #undef msh_h_get
535
536 return RX_CONTINUE;
537 }
538
539
540 static ieee80211_rx_result debug_noinline
541 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
542 {
543 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
544
545 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
546 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
547 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
548 rx->sta->last_seq_ctrl[rx->queue] ==
549 hdr->seq_ctrl)) {
550 if (rx->flags & IEEE80211_RX_RA_MATCH) {
551 rx->local->dot11FrameDuplicateCount++;
552 rx->sta->num_duplicates++;
553 }
554 return RX_DROP_MONITOR;
555 } else
556 rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl;
557 }
558
559 if (unlikely(rx->skb->len < 16)) {
560 I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
561 return RX_DROP_MONITOR;
562 }
563
564 /* Drop disallowed frame classes based on STA auth/assoc state;
565 * IEEE 802.11, Chap 5.5.
566 *
567 * mac80211 filters only based on association state, i.e. it drops
568 * Class 3 frames from not associated stations. hostapd sends
569 * deauth/disassoc frames when needed. In addition, hostapd is
570 * responsible for filtering on both auth and assoc states.
571 */
572
573 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
574 return ieee80211_rx_mesh_check(rx);
575
576 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
577 ieee80211_is_pspoll(hdr->frame_control)) &&
578 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
579 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
580 if ((!ieee80211_has_fromds(hdr->frame_control) &&
581 !ieee80211_has_tods(hdr->frame_control) &&
582 ieee80211_is_data(hdr->frame_control)) ||
583 !(rx->flags & IEEE80211_RX_RA_MATCH)) {
584 /* Drop IBSS frames and frames for other hosts
585 * silently. */
586 return RX_DROP_MONITOR;
587 }
588
589 return RX_DROP_MONITOR;
590 }
591
592 return RX_CONTINUE;
593 }
594
595
596 static ieee80211_rx_result debug_noinline
597 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
598 {
599 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
600 int keyidx;
601 int hdrlen;
602 ieee80211_rx_result result = RX_DROP_UNUSABLE;
603 struct ieee80211_key *stakey = NULL;
604 int mmie_keyidx = -1;
605
606 /*
607 * Key selection 101
608 *
609 * There are four types of keys:
610 * - GTK (group keys)
611 * - IGTK (group keys for management frames)
612 * - PTK (pairwise keys)
613 * - STK (station-to-station pairwise keys)
614 *
615 * When selecting a key, we have to distinguish between multicast
616 * (including broadcast) and unicast frames, the latter can only
617 * use PTKs and STKs while the former always use GTKs and IGTKs.
618 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
619 * unicast frames can also use key indices like GTKs. Hence, if we
620 * don't have a PTK/STK we check the key index for a WEP key.
621 *
622 * Note that in a regular BSS, multicast frames are sent by the
623 * AP only, associated stations unicast the frame to the AP first
624 * which then multicasts it on their behalf.
625 *
626 * There is also a slight problem in IBSS mode: GTKs are negotiated
627 * with each station, that is something we don't currently handle.
628 * The spec seems to expect that one negotiates the same key with
629 * every station but there's no such requirement; VLANs could be
630 * possible.
631 */
632
633 if (!ieee80211_has_protected(hdr->frame_control)) {
634 if (!ieee80211_is_mgmt(hdr->frame_control) ||
635 rx->sta == NULL || !test_sta_flags(rx->sta, WLAN_STA_MFP))
636 return RX_CONTINUE;
637 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
638 if (mmie_keyidx < 0)
639 return RX_CONTINUE;
640 }
641
642 /*
643 * No point in finding a key and decrypting if the frame is neither
644 * addressed to us nor a multicast frame.
645 */
646 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
647 return RX_CONTINUE;
648
649 if (rx->sta)
650 stakey = rcu_dereference(rx->sta->key);
651
652 if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
653 rx->key = stakey;
654 } else if (mmie_keyidx >= 0) {
655 /* Broadcast/multicast robust management frame / BIP */
656 if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
657 (rx->status->flag & RX_FLAG_IV_STRIPPED))
658 return RX_CONTINUE;
659
660 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
661 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
662 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
663 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
664 } else {
665 /*
666 * The device doesn't give us the IV so we won't be
667 * able to look up the key. That's ok though, we
668 * don't need to decrypt the frame, we just won't
669 * be able to keep statistics accurate.
670 * Except for key threshold notifications, should
671 * we somehow allow the driver to tell us which key
672 * the hardware used if this flag is set?
673 */
674 if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
675 (rx->status->flag & RX_FLAG_IV_STRIPPED))
676 return RX_CONTINUE;
677
678 hdrlen = ieee80211_hdrlen(hdr->frame_control);
679
680 if (rx->skb->len < 8 + hdrlen)
681 return RX_DROP_UNUSABLE; /* TODO: count this? */
682
683 /*
684 * no need to call ieee80211_wep_get_keyidx,
685 * it verifies a bunch of things we've done already
686 */
687 keyidx = rx->skb->data[hdrlen + 3] >> 6;
688
689 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
690
691 /*
692 * RSNA-protected unicast frames should always be sent with
693 * pairwise or station-to-station keys, but for WEP we allow
694 * using a key index as well.
695 */
696 if (rx->key && rx->key->conf.alg != ALG_WEP &&
697 !is_multicast_ether_addr(hdr->addr1))
698 rx->key = NULL;
699 }
700
701 if (rx->key) {
702 rx->key->tx_rx_count++;
703 /* TODO: add threshold stuff again */
704 } else {
705 return RX_DROP_MONITOR;
706 }
707
708 /* Check for weak IVs if possible */
709 if (rx->sta && rx->key->conf.alg == ALG_WEP &&
710 ieee80211_is_data(hdr->frame_control) &&
711 (!(rx->status->flag & RX_FLAG_IV_STRIPPED) ||
712 !(rx->status->flag & RX_FLAG_DECRYPTED)) &&
713 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
714 rx->sta->wep_weak_iv_count++;
715
716 switch (rx->key->conf.alg) {
717 case ALG_WEP:
718 result = ieee80211_crypto_wep_decrypt(rx);
719 break;
720 case ALG_TKIP:
721 result = ieee80211_crypto_tkip_decrypt(rx);
722 break;
723 case ALG_CCMP:
724 result = ieee80211_crypto_ccmp_decrypt(rx);
725 break;
726 case ALG_AES_CMAC:
727 result = ieee80211_crypto_aes_cmac_decrypt(rx);
728 break;
729 }
730
731 /* either the frame has been decrypted or will be dropped */
732 rx->status->flag |= RX_FLAG_DECRYPTED;
733
734 return result;
735 }
736
737 static ieee80211_rx_result debug_noinline
738 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
739 {
740 struct ieee80211_local *local;
741 struct ieee80211_hdr *hdr;
742 struct sk_buff *skb;
743
744 local = rx->local;
745 skb = rx->skb;
746 hdr = (struct ieee80211_hdr *) skb->data;
747
748 if (!local->pspolling)
749 return RX_CONTINUE;
750
751 if (!ieee80211_has_fromds(hdr->frame_control))
752 /* this is not from AP */
753 return RX_CONTINUE;
754
755 if (!ieee80211_is_data(hdr->frame_control))
756 return RX_CONTINUE;
757
758 if (!ieee80211_has_moredata(hdr->frame_control)) {
759 /* AP has no more frames buffered for us */
760 local->pspolling = false;
761 return RX_CONTINUE;
762 }
763
764 /* more data bit is set, let's request a new frame from the AP */
765 ieee80211_send_pspoll(local, rx->sdata);
766
767 return RX_CONTINUE;
768 }
769
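/* The station entered power save mode: account for it and tell the driver. */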
770 static void ap_sta_ps_start(struct sta_info *sta)
771 {
772 struct ieee80211_sub_if_data *sdata = sta->sdata;
773 struct ieee80211_local *local = sdata->local;
774
775 atomic_inc(&sdata->bss->num_sta_ps);
776 set_and_clear_sta_flags(sta, WLAN_STA_PS, WLAN_STA_PSPOLL);
777 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_SLEEP, &sta->sta);
778 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
779 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
780 sdata->dev->name, sta->sta.addr, sta->sta.aid);
781 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
782 }
783
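/*
 * The station left power save mode: tell the driver and release all
 * frames buffered for it; returns the number of frames sent.
 */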
784 static int ap_sta_ps_end(struct sta_info *sta)
785 {
786 struct ieee80211_sub_if_data *sdata = sta->sdata;
787 struct ieee80211_local *local = sdata->local;
788 struct sk_buff *skb;
789 int sent = 0;
790
791 atomic_dec(&sdata->bss->num_sta_ps);
792
793 clear_sta_flags(sta, WLAN_STA_PS | WLAN_STA_PSPOLL);
794 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_AWAKE, &sta->sta);
795
796 if (!skb_queue_empty(&sta->ps_tx_buf))
797 sta_info_clear_tim_bit(sta);
798
799 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
800 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
801 sdata->dev->name, sta->sta.addr, sta->sta.aid);
802 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
803
804 /* Send all buffered frames to the station */
805 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) {
806 sent++;
807 skb->requeue = 1;
808 dev_queue_xmit(skb);
809 }
810 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
811 local->total_ps_buffered--;
812 sent++;
813 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
814 printk(KERN_DEBUG "%s: STA %pM aid %d send PS frame "
815 "since STA not sleeping anymore\n", sdata->dev->name,
816 sta->sta.addr, sta->sta.aid);
817 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
818 skb->requeue = 1;
819 dev_queue_xmit(skb);
820 }
821
822 return sent;
823 }
824
825 static ieee80211_rx_result debug_noinline
826 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
827 {
828 struct sta_info *sta = rx->sta;
829 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
830
831 if (!sta)
832 return RX_CONTINUE;
833
834 /* Update last_rx only for IBSS packets which are for the current
835 * BSSID to avoid keeping the current IBSS network alive in cases where
836 * other STAs are using a different BSSID. */
837 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
838 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
839 NL80211_IFTYPE_ADHOC);
840 if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0)
841 sta->last_rx = jiffies;
842 } else
843 if (!is_multicast_ether_addr(hdr->addr1) ||
844 rx->sdata->vif.type == NL80211_IFTYPE_STATION) {
845 /* Update last_rx only for unicast frames in order to prevent
846 * the Probe Request frames (the only broadcast frames from a
847 * STA in infrastructure mode) from keeping a connection alive.
848 * Mesh beacons will update last_rx if they are found to
849 * match the current local configuration when processed.
850 */
851 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
852 ieee80211_is_beacon(hdr->frame_control)) {
853 rx->sdata->u.mgd.last_beacon = jiffies;
854 } else
855 sta->last_rx = jiffies;
856 }
857
858 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
859 return RX_CONTINUE;
860
861 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
862 ieee80211_sta_rx_notify(rx->sdata, hdr);
863
864 sta->rx_fragments++;
865 sta->rx_bytes += rx->skb->len;
866 sta->last_signal = rx->status->signal;
867 sta->last_qual = rx->status->qual;
868 sta->last_noise = rx->status->noise;
869
870 /*
871 * Change STA power saving mode only at the end of a frame
872 * exchange sequence.
873 */
874 if (!ieee80211_has_morefrags(hdr->frame_control) &&
875 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
876 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
877 if (test_sta_flags(sta, WLAN_STA_PS)) {
878 /*
879 * Ignore doze->wake transitions that are
880 * indicated by non-data frames, the standard
881 * is unclear here, but for example going to
882 * PS mode and then scanning would cause a
883 * doze->wake transition for the probe request,
884 * and that is clearly undesirable.
885 */
886 if (ieee80211_is_data(hdr->frame_control) &&
887 !ieee80211_has_pm(hdr->frame_control))
888 rx->sent_ps_buffered += ap_sta_ps_end(sta);
889 } else {
890 if (ieee80211_has_pm(hdr->frame_control))
891 ap_sta_ps_start(sta);
892 }
893 }
894
895 /* Drop data::nullfunc frames silently, since they are used only to
896 * control station power saving mode. */
897 if (ieee80211_is_nullfunc(hdr->frame_control)) {
898 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
899 /* Update counter and free packet here to avoid counting this
900 * as a dropped packet. */
901 sta->rx_packets++;
902 dev_kfree_skb(rx->skb);
903 return RX_QUEUED;
904 }
905
906 return RX_CONTINUE;
907 } /* ieee80211_rx_h_sta_process */
908
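/*
 * Take over the skb of a first fragment and store it in the next slot
 * of the interface's fragment cache, recycling (and purging) the oldest
 * entry if necessary.
 */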
909 static inline struct ieee80211_fragment_entry *
910 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
911 unsigned int frag, unsigned int seq, int rx_queue,
912 struct sk_buff **skb)
913 {
914 struct ieee80211_fragment_entry *entry;
915 int idx;
916
917 idx = sdata->fragment_next;
918 entry = &sdata->fragments[sdata->fragment_next++];
919 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
920 sdata->fragment_next = 0;
921
922 if (!skb_queue_empty(&entry->skb_list)) {
923 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
924 struct ieee80211_hdr *hdr =
925 (struct ieee80211_hdr *) entry->skb_list.next->data;
926 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
927 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
928 "addr1=%pM addr2=%pM\n",
929 sdata->dev->name, idx,
930 jiffies - entry->first_frag_time, entry->seq,
931 entry->last_frag, hdr->addr1, hdr->addr2);
932 #endif
933 __skb_queue_purge(&entry->skb_list);
934 }
935
936 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
937 *skb = NULL;
938 entry->first_frag_time = jiffies;
939 entry->seq = seq;
940 entry->rx_queue = rx_queue;
941 entry->last_frag = frag;
942 entry->ccmp = 0;
943 entry->extra_len = 0;
944
945 return entry;
946 }
947
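/*
 * Find the fragment cache entry that the given fragment continues:
 * same sequence number, RX queue, frame type and addresses, the
 * expected next fragment number, and not older than two seconds.
 */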
948 static inline struct ieee80211_fragment_entry *
949 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
950 unsigned int frag, unsigned int seq,
951 int rx_queue, struct ieee80211_hdr *hdr)
952 {
953 struct ieee80211_fragment_entry *entry;
954 int i, idx;
955
956 idx = sdata->fragment_next;
957 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
958 struct ieee80211_hdr *f_hdr;
959
960 idx--;
961 if (idx < 0)
962 idx = IEEE80211_FRAGMENT_MAX - 1;
963
964 entry = &sdata->fragments[idx];
965 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
966 entry->rx_queue != rx_queue ||
967 entry->last_frag + 1 != frag)
968 continue;
969
970 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
971
972 /*
973 * Check ftype and addresses are equal, else check next fragment
974 */
975 if (((hdr->frame_control ^ f_hdr->frame_control) &
976 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
977 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
978 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
979 continue;
980
981 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
982 __skb_queue_purge(&entry->skb_list);
983 continue;
984 }
985 return entry;
986 }
987
988 return NULL;
989 }
990
991 static ieee80211_rx_result debug_noinline
992 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
993 {
994 struct ieee80211_hdr *hdr;
995 u16 sc;
996 __le16 fc;
997 unsigned int frag, seq;
998 struct ieee80211_fragment_entry *entry;
999 struct sk_buff *skb;
1000
1001 hdr = (struct ieee80211_hdr *)rx->skb->data;
1002 fc = hdr->frame_control;
1003 sc = le16_to_cpu(hdr->seq_ctrl);
1004 frag = sc & IEEE80211_SCTL_FRAG;
1005
1006 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
1007 (rx->skb)->len < 24 ||
1008 is_multicast_ether_addr(hdr->addr1))) {
1009 /* not fragmented */
1010 goto out;
1011 }
1012 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1013
1014 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
1015
1016 if (frag == 0) {
1017 /* This is the first fragment of a new frame. */
1018 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
1019 rx->queue, &(rx->skb));
1020 if (rx->key && rx->key->conf.alg == ALG_CCMP &&
1021 ieee80211_has_protected(fc)) {
1022 /* Store CCMP PN so that we can verify that the next
1023 * fragment has a sequential PN value. */
1024 entry->ccmp = 1;
1025 memcpy(entry->last_pn,
1026 rx->key->u.ccmp.rx_pn[rx->queue],
1027 CCMP_PN_LEN);
1028 }
1029 return RX_QUEUED;
1030 }
1031
1032 /* This is a fragment for a frame that should already be pending in
1033 * fragment cache. Add this fragment to the end of the pending entry.
1034 */
1035 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr);
1036 if (!entry) {
1037 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1038 return RX_DROP_MONITOR;
1039 }
1040
1041 /* Verify that MPDUs within one MSDU have sequential PN values.
1042 * (IEEE 802.11i, 8.3.3.4.5) */
1043 if (entry->ccmp) {
1044 int i;
1045 u8 pn[CCMP_PN_LEN], *rpn;
1046 if (!rx->key || rx->key->conf.alg != ALG_CCMP)
1047 return RX_DROP_UNUSABLE;
1048 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
1049 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
1050 pn[i]++;
1051 if (pn[i])
1052 break;
1053 }
1054 rpn = rx->key->u.ccmp.rx_pn[rx->queue];
1055 if (memcmp(pn, rpn, CCMP_PN_LEN))
1056 return RX_DROP_UNUSABLE;
1057 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
1058 }
1059
1060 skb_pull(rx->skb, ieee80211_hdrlen(fc));
1061 __skb_queue_tail(&entry->skb_list, rx->skb);
1062 entry->last_frag = frag;
1063 entry->extra_len += rx->skb->len;
1064 if (ieee80211_has_morefrags(fc)) {
1065 rx->skb = NULL;
1066 return RX_QUEUED;
1067 }
1068
1069 rx->skb = __skb_dequeue(&entry->skb_list);
1070 if (skb_tailroom(rx->skb) < entry->extra_len) {
1071 I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
1072 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
1073 GFP_ATOMIC))) {
1074 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1075 __skb_queue_purge(&entry->skb_list);
1076 return RX_DROP_UNUSABLE;
1077 }
1078 }
1079 while ((skb = __skb_dequeue(&entry->skb_list))) {
1080 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
1081 dev_kfree_skb(skb);
1082 }
1083
1084 /* Complete frame has been reassembled - process it now */
1085 rx->flags |= IEEE80211_RX_FRAGMENTED;
1086
1087 out:
1088 if (rx->sta)
1089 rx->sta->rx_packets++;
1090 if (is_multicast_ether_addr(hdr->addr1))
1091 rx->local->dot11MulticastReceivedFrameCount++;
1092 else
1093 ieee80211_led_rx(rx->local);
1094 return RX_CONTINUE;
1095 }
1096
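/*
 * Handle a PS-Poll from a sleeping station by releasing one buffered
 * frame and updating the MoreData bit and TIM accordingly.
 */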
1097 static ieee80211_rx_result debug_noinline
1098 ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1099 {
1100 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1101 struct sk_buff *skb;
1102 int no_pending_pkts;
1103 __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
1104
1105 if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
1106 !(rx->flags & IEEE80211_RX_RA_MATCH)))
1107 return RX_CONTINUE;
1108
1109 if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
1110 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1111 return RX_DROP_UNUSABLE;
1112
1113 skb = skb_dequeue(&rx->sta->tx_filtered);
1114 if (!skb) {
1115 skb = skb_dequeue(&rx->sta->ps_tx_buf);
1116 if (skb)
1117 rx->local->total_ps_buffered--;
1118 }
1119 no_pending_pkts = skb_queue_empty(&rx->sta->tx_filtered) &&
1120 skb_queue_empty(&rx->sta->ps_tx_buf);
1121
1122 if (skb) {
1123 struct ieee80211_hdr *hdr =
1124 (struct ieee80211_hdr *) skb->data;
1125
1126 /*
1127 * Tell TX path to send one frame even though the STA may
1128 * still remain in PS mode after this frame exchange.
1129 */
1130 set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
1131
1132 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1133 printk(KERN_DEBUG "STA %pM aid %d: PS Poll (entries after %d)\n",
1134 rx->sta->sta.addr, rx->sta->sta.aid,
1135 skb_queue_len(&rx->sta->ps_tx_buf));
1136 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1137
1138 /* Use MoreData flag to indicate whether there are more
1139 * buffered frames for this STA */
1140 if (no_pending_pkts)
1141 hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
1142 else
1143 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1144
1145 dev_queue_xmit(skb);
1146
1147 if (no_pending_pkts)
1148 sta_info_clear_tim_bit(rx->sta);
1149 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1150 } else if (!rx->sent_ps_buffered) {
1151 /*
1152 * FIXME: This can be the result of a race condition between
1153 * us expiring a frame and the station polling for it.
1154 * Should we send it a null-func frame indicating we
1155 * have nothing buffered for it?
1156 */
1157 printk(KERN_DEBUG "%s: STA %pM sent PS Poll even "
1158 "though there are no buffered frames for it\n",
1159 rx->dev->name, rx->sta->sta.addr);
1160 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1161 }
1162
1163 /* Free PS Poll skb here instead of returning RX_DROP that would
1164 * count as a dropped frame. */
1165 dev_kfree_skb(rx->skb);
1166
1167 return RX_QUEUED;
1168 }
1169
1170 static ieee80211_rx_result debug_noinline
1171 ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1172 {
1173 u8 *data = rx->skb->data;
1174 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
1175
1176 if (!ieee80211_is_data_qos(hdr->frame_control))
1177 return RX_CONTINUE;
1178
1179 /* remove the qos control field, update frame type and meta-data */
1180 memmove(data + IEEE80211_QOS_CTL_LEN, data,
1181 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
1182 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1183 /* change frame type to non QOS */
1184 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1185
1186 return RX_CONTINUE;
1187 }
1188
1189 static int
1190 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1191 {
1192 if (unlikely(!rx->sta ||
1193 !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
1194 return -EACCES;
1195
1196 return 0;
1197 }
1198
1199 static int
1200 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1201 {
1202 /*
1203 * Pass through unencrypted frames if the hardware has
1204 * decrypted them already.
1205 */
1206 if (rx->status->flag & RX_FLAG_DECRYPTED)
1207 return 0;
1208
1209 /* Drop unencrypted frames if key is set. */
1210 if (unlikely(!ieee80211_has_protected(fc) &&
1211 !ieee80211_is_nullfunc(fc) &&
1212 (!ieee80211_is_mgmt(fc) ||
1213 (ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1214 rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP))) &&
1215 (rx->key || rx->sdata->drop_unencrypted)))
1216 return -EACCES;
1217 /* BIP does not use Protected field, so need to check MMIE */
1218 if (unlikely(rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP) &&
1219 ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
1220 ieee80211_get_mmie_keyidx(rx->skb) < 0 &&
1221 (rx->key || rx->sdata->drop_unencrypted)))
1222 return -EACCES;
1223
1224 return 0;
1225 }
1226
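/*
 * Convert an 802.11 data frame (header plus LLC/SNAP) into an 802.3
 * frame in place; returns 0 on success, -1 if the frame cannot be
 * converted for this interface type.
 */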
1227 static int
1228 ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1229 {
1230 struct net_device *dev = rx->dev;
1231 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
1232 u16 hdrlen, ethertype;
1233 u8 *payload;
1234 u8 dst[ETH_ALEN];
1235 u8 src[ETH_ALEN] __aligned(2);
1236 struct sk_buff *skb = rx->skb;
1237 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1238
1239 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1240 return -1;
1241
1242 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1243
1244 /* convert IEEE 802.11 header + possible LLC headers into Ethernet
1245 * header
1246 * IEEE 802.11 address fields:
1247 * ToDS FromDS Addr1 Addr2 Addr3 Addr4
1248 * 0 0 DA SA BSSID n/a
1249 * 0 1 DA BSSID SA n/a
1250 * 1 0 BSSID SA DA n/a
1251 * 1 1 RA TA DA SA
1252 */
1253 memcpy(dst, ieee80211_get_DA(hdr), ETH_ALEN);
1254 memcpy(src, ieee80211_get_SA(hdr), ETH_ALEN);
1255
1256 switch (hdr->frame_control &
1257 cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
1258 case cpu_to_le16(IEEE80211_FCTL_TODS):
1259 if (unlikely(sdata->vif.type != NL80211_IFTYPE_AP &&
1260 sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1261 return -1;
1262 break;
1263 case cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
1264 if (unlikely(sdata->vif.type != NL80211_IFTYPE_WDS &&
1265 sdata->vif.type != NL80211_IFTYPE_MESH_POINT))
1266 return -1;
1267 if (ieee80211_vif_is_mesh(&sdata->vif)) {
1268 struct ieee80211s_hdr *meshdr = (struct ieee80211s_hdr *)
1269 (skb->data + hdrlen);
1270 hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
1271 if (meshdr->flags & MESH_FLAGS_AE_A5_A6) {
1272 memcpy(dst, meshdr->eaddr1, ETH_ALEN);
1273 memcpy(src, meshdr->eaddr2, ETH_ALEN);
1274 }
1275 }
1276 break;
1277 case cpu_to_le16(IEEE80211_FCTL_FROMDS):
1278 if (sdata->vif.type != NL80211_IFTYPE_STATION ||
1279 (is_multicast_ether_addr(dst) &&
1280 !compare_ether_addr(src, dev->dev_addr)))
1281 return -1;
1282 break;
1283 case cpu_to_le16(0):
1284 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
1285 return -1;
1286 break;
1287 }
1288
1289 if (unlikely(skb->len - hdrlen < 8))
1290 return -1;
1291
1292 payload = skb->data + hdrlen;
1293 ethertype = (payload[6] << 8) | payload[7];
1294
1295 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
1296 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1297 compare_ether_addr(payload, bridge_tunnel_header) == 0)) {
1298 /* remove RFC1042 or Bridge-Tunnel encapsulation and
1299 * replace EtherType */
1300 skb_pull(skb, hdrlen + 6);
1301 memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
1302 memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
1303 } else {
1304 struct ethhdr *ehdr;
1305 __be16 len;
1306
1307 skb_pull(skb, hdrlen);
1308 len = htons(skb->len);
1309 ehdr = (struct ethhdr *) skb_push(skb, sizeof(struct ethhdr));
1310 memcpy(ehdr->h_dest, dst, ETH_ALEN);
1311 memcpy(ehdr->h_source, src, ETH_ALEN);
1312 ehdr->h_proto = len;
1313 }
1314 return 0;
1315 }
1316
1317 /*
1318 * requires that rx->skb is a frame with ethernet header
1319 */
1320 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1321 {
1322 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1323 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1324 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1325
1326 /*
1327 * Allow EAPOL frames to us/the PAE group address regardless
1328 * of whether the frame was encrypted or not.
1329 */
1330 if (ehdr->h_proto == htons(ETH_P_PAE) &&
1331 (compare_ether_addr(ehdr->h_dest, rx->dev->dev_addr) == 0 ||
1332 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1333 return true;
1334
1335 if (ieee80211_802_1x_port_control(rx) ||
1336 ieee80211_drop_unencrypted(rx, fc))
1337 return false;
1338
1339 return true;
1340 }
1341
1342 /*
1343 * requires that rx->skb is a frame with ethernet header
1344 */
1345 static void
1346 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1347 {
1348 struct net_device *dev = rx->dev;
1349 struct ieee80211_local *local = rx->local;
1350 struct sk_buff *skb, *xmit_skb;
1351 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1352 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1353 struct sta_info *dsta;
1354
1355 skb = rx->skb;
1356 xmit_skb = NULL;
1357
1358 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
1359 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1360 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
1361 (rx->flags & IEEE80211_RX_RA_MATCH)) {
1362 if (is_multicast_ether_addr(ehdr->h_dest)) {
1363 /*
1364 * send multicast frames both to higher layers in
1365 * local net stack and back to the wireless medium
1366 */
1367 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1368 if (!xmit_skb && net_ratelimit())
1369 printk(KERN_DEBUG "%s: failed to clone "
1370 "multicast frame\n", dev->name);
1371 } else {
1372 dsta = sta_info_get(local, skb->data);
1373 if (dsta && dsta->sdata->dev == dev) {
1374 /*
1375 * The destination station is associated to
1376 * this AP (in this VLAN), so send the frame
1377 * directly to it and do not pass it to local
1378 * net stack.
1379 */
1380 xmit_skb = skb;
1381 skb = NULL;
1382 }
1383 }
1384 }
1385
1386 if (skb) {
1387 int align __maybe_unused;
1388
1389 #if defined(CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT) || !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
1390 /*
1391 * 'align' will only take the values 0 or 2 here
1392 * since all frames are required to be aligned
1393 * to 2-byte boundaries when being passed to
1394 * mac80211. That also explains the __skb_push()
1395 * below.
1396 */
1397 align = (unsigned long)skb->data & 3;
1398 if (align) {
1399 if (WARN_ON(skb_headroom(skb) < 3)) {
1400 dev_kfree_skb(skb);
1401 skb = NULL;
1402 } else {
1403 u8 *data = skb->data;
1404 size_t len = skb->len;
1405 u8 *new = __skb_push(skb, align);
1406 memmove(new, data, len);
1407 __skb_trim(skb, len);
1408 }
1409 }
1410 #endif
1411
1412 if (skb) {
1413 /* deliver to local stack */
1414 skb->protocol = eth_type_trans(skb, dev);
1415 memset(skb->cb, 0, sizeof(skb->cb));
1416 netif_rx(skb);
1417 }
1418 }
1419
1420 if (xmit_skb) {
1421 /* send to wireless media */
1422 xmit_skb->protocol = htons(ETH_P_802_3);
1423 skb_reset_network_header(xmit_skb);
1424 skb_reset_mac_header(xmit_skb);
1425 dev_queue_xmit(xmit_skb);
1426 }
1427 }
1428
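/*
 * Split an A-MSDU into its individual subframes and deliver each one
 * that passes the frame_allowed checks.
 */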
1429 static ieee80211_rx_result debug_noinline
1430 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1431 {
1432 struct net_device *dev = rx->dev;
1433 struct ieee80211_local *local = rx->local;
1434 u16 ethertype;
1435 u8 *payload;
1436 struct sk_buff *skb = rx->skb, *frame = NULL;
1437 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1438 __le16 fc = hdr->frame_control;
1439 const struct ethhdr *eth;
1440 int remaining, err;
1441 u8 dst[ETH_ALEN];
1442 u8 src[ETH_ALEN];
1443
1444 if (unlikely(!ieee80211_is_data(fc)))
1445 return RX_CONTINUE;
1446
1447 if (unlikely(!ieee80211_is_data_present(fc)))
1448 return RX_DROP_MONITOR;
1449
1450 if (!(rx->flags & IEEE80211_RX_AMSDU))
1451 return RX_CONTINUE;
1452
1453 err = ieee80211_data_to_8023(rx);
1454 if (unlikely(err))
1455 return RX_DROP_UNUSABLE;
1456
1457 skb->dev = dev;
1458
1459 dev->stats.rx_packets++;
1460 dev->stats.rx_bytes += skb->len;
1461
1462 /* skip the wrapping header */
1463 eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
1464 if (!eth)
1465 return RX_DROP_UNUSABLE;
1466
1467 while (skb != frame) {
1468 u8 padding;
1469 __be16 len = eth->h_proto;
1470 unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
1471
1472 remaining = skb->len;
1473 memcpy(dst, eth->h_dest, ETH_ALEN);
1474 memcpy(src, eth->h_source, ETH_ALEN);
1475
1476 padding = ((4 - subframe_len) & 0x3);
1477 /* the last MSDU has no padding */
1478 if (subframe_len > remaining)
1479 return RX_DROP_UNUSABLE;
1480
1481 skb_pull(skb, sizeof(struct ethhdr));
1482 /* if this is the last subframe, reuse the skb */
1483 if (remaining <= subframe_len + padding)
1484 frame = skb;
1485 else {
1486 /*
1487 * Allocate and reserve two bytes more for payload
1488 * alignment since sizeof(struct ethhdr) is 14.
1489 */
1490 frame = dev_alloc_skb(
1491 ALIGN(local->hw.extra_tx_headroom, 4) +
1492 subframe_len + 2);
1493
1494 if (frame == NULL)
1495 return RX_DROP_UNUSABLE;
1496
1497 skb_reserve(frame,
1498 ALIGN(local->hw.extra_tx_headroom, 4) +
1499 sizeof(struct ethhdr) + 2);
1500 memcpy(skb_put(frame, ntohs(len)), skb->data,
1501 ntohs(len));
1502
1503 eth = (struct ethhdr *) skb_pull(skb, ntohs(len) +
1504 padding);
1505 if (!eth) {
1506 dev_kfree_skb(frame);
1507 return RX_DROP_UNUSABLE;
1508 }
1509 }
1510
1511 skb_reset_network_header(frame);
1512 frame->dev = dev;
1513 frame->priority = skb->priority;
1514 rx->skb = frame;
1515
1516 payload = frame->data;
1517 ethertype = (payload[6] << 8) | payload[7];
1518
1519 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
1520 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1521 compare_ether_addr(payload,
1522 bridge_tunnel_header) == 0)) {
1523 /* remove RFC1042 or Bridge-Tunnel
1524 * encapsulation and replace EtherType */
1525 skb_pull(frame, 6);
1526 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1527 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1528 } else {
1529 memcpy(skb_push(frame, sizeof(__be16)),
1530 &len, sizeof(__be16));
1531 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1532 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1533 }
1534
1535 if (!ieee80211_frame_allowed(rx, fc)) {
1536 if (skb == frame) /* last frame */
1537 return RX_DROP_UNUSABLE;
1538 dev_kfree_skb(frame);
1539 continue;
1540 }
1541
1542 ieee80211_deliver_skb(rx);
1543 }
1544
1545 return RX_QUEUED;
1546 }
1547
1548 #ifdef CONFIG_MAC80211_MESH
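/*
 * Forward mesh data frames that are not destined for this interface:
 * learn proxy paths from the address extension, decrement the TTL and
 * retransmit a copy of the frame.
 */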
1549 static ieee80211_rx_result
1550 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1551 {
1552 struct ieee80211_hdr *hdr;
1553 struct ieee80211s_hdr *mesh_hdr;
1554 unsigned int hdrlen;
1555 struct sk_buff *skb = rx->skb, *fwd_skb;
1556
1557 hdr = (struct ieee80211_hdr *) skb->data;
1558 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1559 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1560
1561 if (!ieee80211_is_data(hdr->frame_control))
1562 return RX_CONTINUE;
1563
1564 if (!mesh_hdr->ttl)
1565 /* illegal frame */
1566 return RX_DROP_MONITOR;
1567
1568 if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6){
1569 struct ieee80211_sub_if_data *sdata;
1570 struct mesh_path *mppath;
1571
1572 sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1573 rcu_read_lock();
1574 mppath = mpp_path_lookup(mesh_hdr->eaddr2, sdata);
1575 if (!mppath) {
1576 mpp_path_add(mesh_hdr->eaddr2, hdr->addr4, sdata);
1577 } else {
1578 spin_lock_bh(&mppath->state_lock);
1579 mppath->exp_time = jiffies;
1580 if (compare_ether_addr(mppath->mpp, hdr->addr4) != 0)
1581 memcpy(mppath->mpp, hdr->addr4, ETH_ALEN);
1582 spin_unlock_bh(&mppath->state_lock);
1583 }
1584 rcu_read_unlock();
1585 }
1586
1587 if (compare_ether_addr(rx->dev->dev_addr, hdr->addr3) == 0)
1588 return RX_CONTINUE;
1589
1590 mesh_hdr->ttl--;
1591
1592 if (rx->flags & IEEE80211_RX_RA_MATCH) {
1593 if (!mesh_hdr->ttl)
1594 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
1595 dropped_frames_ttl);
1596 else {
1597 struct ieee80211_hdr *fwd_hdr;
1598 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1599
1600 if (!fwd_skb && net_ratelimit())
1601 printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
1602 rx->dev->name);
1603
/* do not dereference fwd_skb if the copy failed */
if (fwd_skb) {
1604 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1605 /*
1606 * Save TA to addr1 to send TA a path error if a
1607 * suitable next hop is not found
1608 */
1609 memcpy(fwd_hdr->addr1, fwd_hdr->addr2, ETH_ALEN);
1610 memcpy(fwd_hdr->addr2, rx->dev->dev_addr, ETH_ALEN);
1611 fwd_skb->dev = rx->local->mdev;
1612 fwd_skb->iif = rx->dev->ifindex;
1613 dev_queue_xmit(fwd_skb);
}
1614 }
1615 }
1616
1617 if (is_multicast_ether_addr(hdr->addr3) ||
1618 rx->dev->flags & IFF_PROMISC)
1619 return RX_CONTINUE;
1620 else
1621 return RX_DROP_MONITOR;
1622 }
1623 #endif
1624
1625 static ieee80211_rx_result debug_noinline
1626 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1627 {
1628 struct net_device *dev = rx->dev;
1629 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1630 __le16 fc = hdr->frame_control;
1631 int err;
1632
1633 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
1634 return RX_CONTINUE;
1635
1636 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1637 return RX_DROP_MONITOR;
1638
1639 err = ieee80211_data_to_8023(rx);
1640 if (unlikely(err))
1641 return RX_DROP_UNUSABLE;
1642
1643 if (!ieee80211_frame_allowed(rx, fc))
1644 return RX_DROP_MONITOR;
1645
1646 rx->skb->dev = dev;
1647
1648 dev->stats.rx_packets++;
1649 dev->stats.rx_bytes += rx->skb->len;
1650
1651 ieee80211_deliver_skb(rx);
1652
1653 return RX_QUEUED;
1654 }
1655
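/*
 * Handle received control frames; currently only BlockAckReq is
 * processed, by flushing the reorder buffer up to the requested
 * starting sequence number.
 */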
1656 static ieee80211_rx_result debug_noinline
1657 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
1658 {
1659 struct ieee80211_local *local = rx->local;
1660 struct ieee80211_hw *hw = &local->hw;
1661 struct sk_buff *skb = rx->skb;
1662 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
1663 struct tid_ampdu_rx *tid_agg_rx;
1664 u16 start_seq_num;
1665 u16 tid;
1666
1667 if (likely(!ieee80211_is_ctl(bar->frame_control)))
1668 return RX_CONTINUE;
1669
1670 if (ieee80211_is_back_req(bar->frame_control)) {
1671 if (!rx->sta)
1672 return RX_CONTINUE;
1673 tid = le16_to_cpu(bar->control) >> 12;
1674 if (rx->sta->ampdu_mlme.tid_state_rx[tid]
1675 != HT_AGG_STATE_OPERATIONAL)
1676 return RX_CONTINUE;
1677 tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
1678
1679 start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
1680
1681 /* reset session timer */
1682 if (tid_agg_rx->timeout)
1683 mod_timer(&tid_agg_rx->session_timer,
1684 TU_TO_EXP_TIME(tid_agg_rx->timeout));
1685
1686 /* manage reordering buffer according to requested */
1687 /* sequence number */
1688 rcu_read_lock();
1689 ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL, NULL,
1690 start_seq_num, 1);
1691 rcu_read_unlock();
1692 return RX_DROP_UNUSABLE;
1693 }
1694
1695 return RX_CONTINUE;
1696 }
1697
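/*
 * Answer an SA Query request from the current AP with an SA Query
 * response carrying the same transaction ID (IEEE 802.11w).
 */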
1698 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
1699 struct ieee80211_mgmt *mgmt,
1700 size_t len)
1701 {
1702 struct ieee80211_local *local = sdata->local;
1703 struct sk_buff *skb;
1704 struct ieee80211_mgmt *resp;
1705
1706 if (compare_ether_addr(mgmt->da, sdata->dev->dev_addr) != 0) {
1707 /* Not addressed to our own unicast address */
1708 return;
1709 }
1710
1711 if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 ||
1712 compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) {
1713 /* Not from the current AP. */
1714 return;
1715 }
1716
1717 if (sdata->u.mgd.state == IEEE80211_STA_MLME_ASSOCIATE) {
1718 /* Association in progress; ignore SA Query */
1719 return;
1720 }
1721
1722 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
1723 /* Too short SA Query request frame */
1724 return;
1725 }
1726
1727 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
1728 if (skb == NULL)
1729 return;
1730
1731 skb_reserve(skb, local->hw.extra_tx_headroom);
1732 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
1733 memset(resp, 0, 24);
1734 memcpy(resp->da, mgmt->sa, ETH_ALEN);
1735 memcpy(resp->sa, sdata->dev->dev_addr, ETH_ALEN);
1736 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
1737 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1738 IEEE80211_STYPE_ACTION);
1739 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
1740 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
1741 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
1742 memcpy(resp->u.action.u.sa_query.trans_id,
1743 mgmt->u.action.u.sa_query.trans_id,
1744 WLAN_SA_QUERY_TR_ID_LEN);
1745
1746 ieee80211_tx_skb(sdata, skb, 1);
1747 }
1748
1749 static ieee80211_rx_result debug_noinline
1750 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1751 {
1752 struct ieee80211_local *local = rx->local;
1753 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1754 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1755 struct ieee80211_bss *bss;
1756 int len = rx->skb->len;
1757
1758 if (!ieee80211_is_action(mgmt->frame_control))
1759 return RX_CONTINUE;
1760
1761 if (!rx->sta)
1762 return RX_DROP_MONITOR;
1763
1764 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1765 return RX_DROP_MONITOR;
1766
1767 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control))
1768 return RX_DROP_MONITOR;
1769
1770 /* all categories we currently handle have action_code */
1771 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
1772 return RX_DROP_MONITOR;
1773
1774 switch (mgmt->u.action.category) {
1775 case WLAN_CATEGORY_BACK:
1776 /*
1777 * The aggregation code is not prepared to handle
1778 * anything but STA/AP due to the BSSID handling;
1779 * IBSS could work in the code but isn't supported
1780 * by drivers or the standard.
1781 */
1782 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
1783 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
1784 sdata->vif.type != NL80211_IFTYPE_AP)
1785 return RX_DROP_MONITOR;
1786
1787 switch (mgmt->u.action.u.addba_req.action_code) {
1788 case WLAN_ACTION_ADDBA_REQ:
1789 if (len < (IEEE80211_MIN_ACTION_SIZE +
1790 sizeof(mgmt->u.action.u.addba_req)))
1791 return RX_DROP_MONITOR;
1792 ieee80211_process_addba_request(local, rx->sta, mgmt, len);
1793 break;
1794 case WLAN_ACTION_ADDBA_RESP:
1795 if (len < (IEEE80211_MIN_ACTION_SIZE +
1796 sizeof(mgmt->u.action.u.addba_resp)))
1797 return RX_DROP_MONITOR;
1798 ieee80211_process_addba_resp(local, rx->sta, mgmt, len);
1799 break;
1800 case WLAN_ACTION_DELBA:
1801 if (len < (IEEE80211_MIN_ACTION_SIZE +
1802 sizeof(mgmt->u.action.u.delba)))
1803 return RX_DROP_MONITOR;
1804 ieee80211_process_delba(sdata, rx->sta, mgmt, len);
1805 break;
1806 }
1807 break;
1808 case WLAN_CATEGORY_SPECTRUM_MGMT:
1809 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
1810 return RX_DROP_MONITOR;
1811
1812 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1813 return RX_DROP_MONITOR;
1814
1815 switch (mgmt->u.action.u.measurement.action_code) {
1816 case WLAN_ACTION_SPCT_MSR_REQ:
1817 if (len < (IEEE80211_MIN_ACTION_SIZE +
1818 sizeof(mgmt->u.action.u.measurement)))
1819 return RX_DROP_MONITOR;
1820 ieee80211_process_measurement_req(sdata, mgmt, len);
1821 break;
1822 case WLAN_ACTION_SPCT_CHL_SWITCH:
1823 if (len < (IEEE80211_MIN_ACTION_SIZE +
1824 sizeof(mgmt->u.action.u.chan_switch)))
1825 return RX_DROP_MONITOR;
1826
1827 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
1828 return RX_DROP_MONITOR;
1829
1830 bss = ieee80211_rx_bss_get(local, sdata->u.mgd.bssid,
1831 local->hw.conf.channel->center_freq,
1832 sdata->u.mgd.ssid,
1833 sdata->u.mgd.ssid_len);
1834 if (!bss)
1835 return RX_DROP_MONITOR;
1836
1837 ieee80211_process_chanswitch(sdata,
1838 &mgmt->u.action.u.chan_switch.sw_elem, bss);
1839 ieee80211_rx_bss_put(local, bss);
1840 break;
1841 }
1842 break;
1843 case WLAN_CATEGORY_SA_QUERY:
1844 if (len < (IEEE80211_MIN_ACTION_SIZE +
1845 sizeof(mgmt->u.action.u.sa_query)))
1846 return RX_DROP_MONITOR;
1847 switch (mgmt->u.action.u.sa_query.action) {
1848 case WLAN_ACTION_SA_QUERY_REQUEST:
1849 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1850 return RX_DROP_MONITOR;
1851 ieee80211_process_sa_query_req(sdata, mgmt, len);
1852 break;
1853 case WLAN_ACTION_SA_QUERY_RESPONSE:
1854 /*
1855 * SA Query response is currently only used in AP mode
1856 * and it is processed in user space.
1857 */
1858 return RX_CONTINUE;
1859 }
1860 break;
1861 default:
1862 return RX_CONTINUE;
1863 }
1864
1865 rx->sta->rx_packets++;
1866 dev_kfree_skb(rx->skb);
1867 return RX_QUEUED;
1868 }
1869
1870 static ieee80211_rx_result debug_noinline
1871 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
1872 {
1873 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1874 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1875
1876 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1877 return RX_DROP_MONITOR;
1878
1879 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control))
1880 return RX_DROP_MONITOR;
1881
1882 if (ieee80211_vif_is_mesh(&sdata->vif))
1883 return ieee80211_mesh_rx_mgmt(sdata, rx->skb, rx->status);
1884
1885 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
1886 return ieee80211_ibss_rx_mgmt(sdata, rx->skb, rx->status);
1887
1888 if (sdata->vif.type == NL80211_IFTYPE_STATION)
1889 return ieee80211_sta_rx_mgmt(sdata, rx->skb, rx->status);
1890
1891 return RX_DROP_MONITOR;
1892 }
1893
1894 static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1895 struct ieee80211_hdr *hdr,
1896 struct ieee80211_rx_data *rx)
1897 {
1898 int keyidx;
1899 unsigned int hdrlen;
1900
1901 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1902 if (rx->skb->len >= hdrlen + 4)
1903 keyidx = rx->skb->data[hdrlen + 3] >> 6;
1904 else
1905 keyidx = -1;
1906
1907 if (!rx->sta) {
1908 /*
1909 * Some hardware seems to generate incorrect Michael MIC
1910 * reports; ignore them to avoid triggering countermeasures.
1911 */
1912 goto ignore;
1913 }
1914
1915 if (!ieee80211_has_protected(hdr->frame_control))
1916 goto ignore;
1917
1918 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) {
1919 /*
1920 * APs with pairwise keys should never receive Michael MIC
1921 * errors for non-zero keyidx because these are reserved for
1922 * group keys and only the AP is sending real multicast
1923 * frames in the BSS.
1924 */
1925 goto ignore;
1926 }
1927
1928 if (!ieee80211_is_data(hdr->frame_control) &&
1929 !ieee80211_is_auth(hdr->frame_control))
1930 goto ignore;
1931
1932 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL);
1933 ignore:
1934 dev_kfree_skb(rx->skb);
1935 rx->skb = NULL;
1936 }
1937
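/*
 * "Cooked" monitoring: frames that the RX handlers did not consume are
 * handed to monitor interfaces that set MONITOR_FLAG_COOK_FRAMES, with a
 * minimal radiotap header (flags, rate, channel) prepended so userspace
 * sniffers still receive basic PHY information.
 */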
1938 /* TODO: use IEEE80211_RX_FRAGMENTED */
1939 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx)
1940 {
1941 struct ieee80211_sub_if_data *sdata;
1942 struct ieee80211_local *local = rx->local;
1943 struct ieee80211_rtap_hdr {
1944 struct ieee80211_radiotap_header hdr;
1945 u8 flags;
1946 u8 rate;
1947 __le16 chan_freq;
1948 __le16 chan_flags;
1949 } __attribute__ ((packed)) *rthdr;
1950 struct sk_buff *skb = rx->skb, *skb2;
1951 struct net_device *prev_dev = NULL;
1952 struct ieee80211_rx_status *status = rx->status;
1953
1954 if (rx->flags & IEEE80211_RX_CMNTR_REPORTED)
1955 goto out_free_skb;
1956
1957 if (skb_headroom(skb) < sizeof(*rthdr) &&
1958 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
1959 goto out_free_skb;
1960
1961 rthdr = (void *)skb_push(skb, sizeof(*rthdr));
1962 memset(rthdr, 0, sizeof(*rthdr));
1963 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
1964 rthdr->hdr.it_present =
1965 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
1966 (1 << IEEE80211_RADIOTAP_RATE) |
1967 (1 << IEEE80211_RADIOTAP_CHANNEL));
1968
1969 rthdr->rate = rx->rate->bitrate / 5;
1970 rthdr->chan_freq = cpu_to_le16(status->freq);
1971
1972 if (status->band == IEEE80211_BAND_5GHZ)
1973 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
1974 IEEE80211_CHAN_5GHZ);
1975 else
1976 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
1977 IEEE80211_CHAN_2GHZ);
1978
1979 skb_set_mac_header(skb, 0);
1980 skb->ip_summed = CHECKSUM_UNNECESSARY;
1981 skb->pkt_type = PACKET_OTHERHOST;
1982 skb->protocol = htons(ETH_P_802_2);
1983
1984 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
1985 if (!netif_running(sdata->dev))
1986 continue;
1987
1988 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
1989 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
1990 continue;
1991
1992 if (prev_dev) {
1993 skb2 = skb_clone(skb, GFP_ATOMIC);
1994 if (skb2) {
1995 skb2->dev = prev_dev;
1996 netif_rx(skb2);
1997 }
1998 }
1999
2000 prev_dev = sdata->dev;
2001 sdata->dev->stats.rx_packets++;
2002 sdata->dev->stats.rx_bytes += skb->len;
2003 }
2004
2005 if (prev_dev) {
2006 skb->dev = prev_dev;
2007 netif_rx(skb);
2008 skb = NULL;
2009 } else
2010 goto out_free_skb;
2011
2012 rx->flags |= IEEE80211_RX_CMNTR_REPORTED;
2013 return;
2014
2015 out_free_skb:
2016 dev_kfree_skb(skb);
2017 }
2018
2019
2020 static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
2021 struct ieee80211_rx_data *rx,
2022 struct sk_buff *skb)
2023 {
2024 ieee80211_rx_result res = RX_DROP_MONITOR;
2025
2026 rx->skb = skb;
2027 rx->sdata = sdata;
2028 rx->dev = sdata->dev;
2029
2030 #define CALL_RXH(rxh) \
2031 do { \
2032 res = rxh(rx); \
2033 if (res != RX_CONTINUE) \
2034 goto rxh_done; \
2035 } while (0);
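/*
 * Each handler returns an ieee80211_rx_result; anything other than
 * RX_CONTINUE short-circuits the chain and jumps to rxh_done, where the
 * frame is queued, dropped, or passed to the cooked monitor path.
 */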
2036
2037 CALL_RXH(ieee80211_rx_h_passive_scan)
2038 CALL_RXH(ieee80211_rx_h_check)
2039 CALL_RXH(ieee80211_rx_h_decrypt)
2040 CALL_RXH(ieee80211_rx_h_check_more_data)
2041 CALL_RXH(ieee80211_rx_h_sta_process)
2042 CALL_RXH(ieee80211_rx_h_defragment)
2043 CALL_RXH(ieee80211_rx_h_ps_poll)
2044 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
2045 /* must be after MMIC verify so header is counted in MPDU mic */
2046 CALL_RXH(ieee80211_rx_h_remove_qos_control)
2047 CALL_RXH(ieee80211_rx_h_amsdu)
2048 #ifdef CONFIG_MAC80211_MESH
2049 if (ieee80211_vif_is_mesh(&sdata->vif))
2050 CALL_RXH(ieee80211_rx_h_mesh_fwding);
2051 #endif
2052 CALL_RXH(ieee80211_rx_h_data)
2053 CALL_RXH(ieee80211_rx_h_ctrl)
2054 CALL_RXH(ieee80211_rx_h_action)
2055 CALL_RXH(ieee80211_rx_h_mgmt)
2056
2057 #undef CALL_RXH
2058
2059 rxh_done:
2060 switch (res) {
2061 case RX_DROP_MONITOR:
2062 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
2063 if (rx->sta)
2064 rx->sta->rx_dropped++;
2065 /* fall through */
2066 case RX_CONTINUE:
2067 ieee80211_rx_cooked_monitor(rx);
2068 break;
2069 case RX_DROP_UNUSABLE:
2070 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
2071 if (rx->sta)
2072 rx->sta->rx_dropped++;
2073 dev_kfree_skb(rx->skb);
2074 break;
2075 case RX_QUEUED:
2076 I802_DEBUG_INC(sdata->local->rx_handlers_queued);
2077 break;
2078 }
2079 }
2080
2081 /* main receive path */
2082
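/*
 * Decide whether a frame is of interest to the given interface: returns 0
 * to skip this interface entirely, 1 to run the RX handlers on it. When a
 * frame is accepted only for scanning or promiscuous purposes, the
 * IEEE80211_RX_RA_MATCH flag is cleared so later handlers treat it as not
 * directly addressed to us.
 */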
2083 static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2084 struct ieee80211_rx_data *rx,
2085 struct ieee80211_hdr *hdr)
2086 {
2087 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, sdata->vif.type);
2088 int multicast = is_multicast_ether_addr(hdr->addr1);
2089
2090 switch (sdata->vif.type) {
2091 case NL80211_IFTYPE_STATION:
2092 if (!bssid)
2093 return 0;
2094 if (!ieee80211_bssid_match(bssid, sdata->u.mgd.bssid)) {
2095 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2096 return 0;
2097 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2098 } else if (!multicast &&
2099 compare_ether_addr(sdata->dev->dev_addr,
2100 hdr->addr1) != 0) {
2101 if (!(sdata->dev->flags & IFF_PROMISC))
2102 return 0;
2103 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2104 }
2105 break;
2106 case NL80211_IFTYPE_ADHOC:
2107 if (!bssid)
2108 return 0;
2109 if (ieee80211_is_beacon(hdr->frame_control)) {
2110 return 1;
2111 } else if (!ieee80211_bssid_match(bssid,
2112 sdata->u.ibss.bssid)) {
2113 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2114 return 0;
2115 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2116 } else if (!multicast &&
2117 compare_ether_addr(sdata->dev->dev_addr,
2118 hdr->addr1) != 0) {
2119 if (!(sdata->dev->flags & IFF_PROMISC))
2120 return 0;
2121 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2122 } else if (!rx->sta) {
2123 int rate_idx;
2124 if (rx->status->flag & RX_FLAG_HT)
2125 rate_idx = 0; /* TODO: HT rates */
2126 else
2127 rate_idx = rx->status->rate_idx;
2128 rx->sta = ieee80211_ibss_add_sta(sdata, bssid, hdr->addr2,
2129 BIT(rate_idx));
2130 }
2131 break;
2132 case NL80211_IFTYPE_MESH_POINT:
2133 if (!multicast &&
2134 compare_ether_addr(sdata->dev->dev_addr,
2135 hdr->addr1) != 0) {
2136 if (!(sdata->dev->flags & IFF_PROMISC))
2137 return 0;
2138
2139 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2140 }
2141 break;
2142 case NL80211_IFTYPE_AP_VLAN:
2143 case NL80211_IFTYPE_AP:
2144 if (!bssid) {
2145 if (compare_ether_addr(sdata->dev->dev_addr,
2146 hdr->addr1))
2147 return 0;
2148 } else if (!ieee80211_bssid_match(bssid,
2149 sdata->dev->dev_addr)) {
2150 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2151 return 0;
2152 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2153 }
2154 break;
2155 case NL80211_IFTYPE_WDS:
2156 if (bssid || !ieee80211_is_data(hdr->frame_control))
2157 return 0;
2158 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
2159 return 0;
2160 break;
2161 case NL80211_IFTYPE_MONITOR:
2162 /* take everything */
2163 break;
2164 case NL80211_IFTYPE_UNSPECIFIED:
2165 case __NL80211_IFTYPE_AFTER_LAST:
2166 /* should never get here */
2167 WARN_ON(1);
2168 break;
2169 }
2170
2171 return 1;
2172 }
2173
2174 /*
2175 * This is the actual Rx frames handler. As it belongs to the Rx path it
2176 * must be called with rcu_read_lock protection.
2177 */
2178 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2179 struct sk_buff *skb,
2180 struct ieee80211_rx_status *status,
2181 struct ieee80211_rate *rate)
2182 {
2183 struct ieee80211_local *local = hw_to_local(hw);
2184 struct ieee80211_sub_if_data *sdata;
2185 struct ieee80211_hdr *hdr;
2186 struct ieee80211_rx_data rx;
2187 int prepares;
2188 struct ieee80211_sub_if_data *prev = NULL;
2189 struct sk_buff *skb_new;
2190
2191 hdr = (struct ieee80211_hdr *)skb->data;
2192 memset(&rx, 0, sizeof(rx));
2193 rx.skb = skb;
2194 rx.local = local;
2195
2196 rx.status = status;
2197 rx.rate = rate;
2198
2199 if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control))
2200 local->dot11ReceivedFragmentCount++;
2201
2202 rx.sta = sta_info_get(local, hdr->addr2);
2203 if (rx.sta) {
2204 rx.sdata = rx.sta->sdata;
2205 rx.dev = rx.sta->sdata->dev;
2206 }
2207
2208 if ((status->flag & RX_FLAG_MMIC_ERROR)) {
2209 ieee80211_rx_michael_mic_report(local->mdev, hdr, &rx);
2210 return;
2211 }
2212
2213 if (unlikely(local->sw_scanning || local->hw_scanning))
2214 rx.flags |= IEEE80211_RX_IN_SCAN;
2215
2216 ieee80211_parse_qos(&rx);
2217 ieee80211_verify_alignment(&rx);
2218
2219 skb = rx.skb;
2220
2221 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2222 if (!netif_running(sdata->dev))
2223 continue;
2224
2225 if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
2226 continue;
2227
2228 rx.flags |= IEEE80211_RX_RA_MATCH;
2229 prepares = prepare_for_handlers(sdata, &rx, hdr);
2230
2231 if (!prepares)
2232 continue;
2233
2234 /*
2235 * frame is destined for this interface, but if it's not
2236 * also for the previous one we handle that after the
2237 * loop to avoid copying the SKB once too much
2238 */
2239
2240 if (!prev) {
2241 prev = sdata;
2242 continue;
2243 }
2244
2245 /*
2246 * frame was destined for the previous interface
2247 * so invoke RX handlers for it
2248 */
2249
2250 skb_new = skb_copy(skb, GFP_ATOMIC);
2251 if (!skb_new) {
2252 if (net_ratelimit())
2253 printk(KERN_DEBUG "%s: failed to copy "
2254 "multicast frame for %s\n",
2255 wiphy_name(local->hw.wiphy),
2256 prev->dev->name);
2257 continue;
2258 }
2259 ieee80211_invoke_rx_handlers(prev, &rx, skb_new);
2260 prev = sdata;
2261 }
2262 if (prev)
2263 ieee80211_invoke_rx_handlers(prev, &rx, skb);
2264 else
2265 dev_kfree_skb(skb);
2266 }
2267
2268 #define SEQ_MODULO 0x1000
2269 #define SEQ_MASK 0xfff
2270
2271 static inline int seq_less(u16 sq1, u16 sq2)
2272 {
2273 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
2274 }
2275
2276 static inline u16 seq_inc(u16 sq)
2277 {
2278 return (sq + 1) & SEQ_MASK;
2279 }
2280
2281 static inline u16 seq_sub(u16 sq1, u16 sq2)
2282 {
2283 return (sq1 - sq2) & SEQ_MASK;
2284 }
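/*
 * Illustrative example of the 12-bit wrap-around arithmetic above:
 * seq_less(0xffe, 0x002) is true because (0xffe - 0x002) & 0xfff = 0xffc,
 * which is greater than SEQ_MODULO >> 1, i.e. 0xffe is considered to be
 * "before" 0x002 once the sequence number space wraps.
 */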
2285
2286
2287 static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
2288 struct tid_ampdu_rx *tid_agg_rx,
2289 int index)
2290 {
2291 struct ieee80211_supported_band *sband;
2292 struct ieee80211_rate *rate;
2293 struct ieee80211_rx_status status;
2294
2295 if (!tid_agg_rx->reorder_buf[index])
2296 goto no_frame;
2297
2298 /* release the reordered frames to stack */
2299 memcpy(&status, tid_agg_rx->reorder_buf[index]->cb, sizeof(status));
2300 sband = hw->wiphy->bands[status.band];
2301 if (status.flag & RX_FLAG_HT)
2302 rate = sband->bitrates; /* TODO: HT rates */
2303 else
2304 rate = &sband->bitrates[status.rate_idx];
2305 __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index],
2306 &status, rate);
2307 tid_agg_rx->stored_mpdu_num--;
2308 tid_agg_rx->reorder_buf[index] = NULL;
2309
2310 no_frame:
2311 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
2312 }
2313
2314
2315 /*
2316 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
2317 * the skb was added to the buffer longer than this time ago, the earlier
2318 * frames that have not yet been received are assumed to be lost and the skb
2319 * can be released for processing. This may also release other skb's from the
2320 * reorder buffer if there are no additional gaps between the frames.
2321 */
2322 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
2323
2324 /*
2325 * As this function belongs to the Rx path it must be called with
2326 * the proper rcu_read_lock protection for its flow.
2327 */
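/*
 * Returns 1 when the skb has been consumed by the reorder machinery
 * (buffered, released via ieee80211_release_reorder_frame(), or freed),
 * and 0 when the caller should process the frame right away.
 */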
2328 static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2329 struct tid_ampdu_rx *tid_agg_rx,
2330 struct sk_buff *skb,
2331 struct ieee80211_rx_status *rxstatus,
2332 u16 mpdu_seq_num,
2333 int bar_req)
2334 {
2335 u16 head_seq_num, buf_size;
2336 int index;
2337
2338 buf_size = tid_agg_rx->buf_size;
2339 head_seq_num = tid_agg_rx->head_seq_num;
2340
2341 /* frame with out of date sequence number */
2342 if (seq_less(mpdu_seq_num, head_seq_num)) {
2343 dev_kfree_skb(skb);
2344 return 1;
2345 }
2346
2347 /* if the frame sequence number exceeds our buffering window or a
2348 * Block Ack Request arrived - release stored frames */
2349 if ((!seq_less(mpdu_seq_num, head_seq_num + buf_size)) || (bar_req)) {
2350 /* new head to the ordering buffer */
2351 if (bar_req)
2352 head_seq_num = mpdu_seq_num;
2353 else
2354 head_seq_num =
2355 seq_inc(seq_sub(mpdu_seq_num, buf_size));
2356 /* release stored frames up to new head to stack */
2357 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
2358 index = seq_sub(tid_agg_rx->head_seq_num,
2359 tid_agg_rx->ssn)
2360 % tid_agg_rx->buf_size;
2361 ieee80211_release_reorder_frame(hw, tid_agg_rx,
2362 index);
2363 }
2364 if (bar_req)
2365 return 1;
2366 }
2367
2368 /* now the new frame is always within the range of the
2369 * reordering buffer window */
2370 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn)
2371 % tid_agg_rx->buf_size;
2372 /* check if we already stored this frame */
2373 if (tid_agg_rx->reorder_buf[index]) {
2374 dev_kfree_skb(skb);
2375 return 1;
2376 }
2377
2378 /* if the arriving MPDU is in order and nothing else is stored,
2379 * release it immediately */
2380 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
2381 tid_agg_rx->stored_mpdu_num == 0) {
2382 tid_agg_rx->head_seq_num =
2383 seq_inc(tid_agg_rx->head_seq_num);
2384 return 0;
2385 }
2386
2387 /* put the frame in the reordering buffer */
2388 tid_agg_rx->reorder_buf[index] = skb;
2389 tid_agg_rx->reorder_time[index] = jiffies;
2390 memcpy(tid_agg_rx->reorder_buf[index]->cb, rxstatus,
2391 sizeof(*rxstatus));
2392 tid_agg_rx->stored_mpdu_num++;
2393 /* release consecutive buffered frames up to the next missing frame */
2394 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn)
2395 % tid_agg_rx->buf_size;
2396 if (!tid_agg_rx->reorder_buf[index] &&
2397 tid_agg_rx->stored_mpdu_num > 1) {
2398 /*
2399 * No buffers ready to be released, but check whether any
2400 * frames in the reorder buffer have timed out.
2401 */
2402 int j;
2403 int skipped = 1;
2404 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
2405 j = (j + 1) % tid_agg_rx->buf_size) {
2406 if (tid_agg_rx->reorder_buf[j] == NULL) {
2407 skipped++;
2408 continue;
2409 }
2410 if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
2411 HT_RX_REORDER_BUF_TIMEOUT))
2412 break;
2413
2414 #ifdef CONFIG_MAC80211_HT_DEBUG
2415 if (net_ratelimit())
2416 printk(KERN_DEBUG "%s: release an RX reorder "
2417 "frame due to timeout on earlier "
2418 "frames\n",
2419 wiphy_name(hw->wiphy));
2420 #endif
2421 ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
2422
2423 /*
2424 * Increment the head seq# also for the skipped slots.
2425 */
2426 tid_agg_rx->head_seq_num =
2427 (tid_agg_rx->head_seq_num + skipped) &
2428 SEQ_MASK;
2429 skipped = 0;
2430 }
2431 } else while (tid_agg_rx->reorder_buf[index]) {
2432 ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
2433 index = seq_sub(tid_agg_rx->head_seq_num,
2434 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
2435 }
2436 return 1;
2437 }
2438
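/*
 * Returns 1 if the frame was taken over by the A-MPDU reorder path (the
 * caller must not touch the skb any further), 0 if it should be delivered
 * through the normal __ieee80211_rx_handle_packet() path.
 */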
2439 static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2440 struct sk_buff *skb,
2441 struct ieee80211_rx_status *status)
2442 {
2443 struct ieee80211_hw *hw = &local->hw;
2444 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2445 struct sta_info *sta;
2446 struct tid_ampdu_rx *tid_agg_rx;
2447 u16 sc;
2448 u16 mpdu_seq_num;
2449 u8 ret = 0;
2450 int tid;
2451
2452 sta = sta_info_get(local, hdr->addr2);
2453 if (!sta)
2454 return ret;
2455
2456 /* filter the QoS data rx stream according to
2457 * STA/TID and check if this STA/TID has an operational RX aggregation session */
2458 if (!ieee80211_is_data_qos(hdr->frame_control))
2459 goto end_reorder;
2460
2461 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
2462
2463 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
2464 goto end_reorder;
2465
2466 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
2467
2468 /* qos null data frames are excluded */
2469 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
2470 goto end_reorder;
2471
2472 /* new un-ordered ampdu frame - process it */
2473
2474 /* reset session timer */
2475 if (tid_agg_rx->timeout)
2476 mod_timer(&tid_agg_rx->session_timer,
2477 TU_TO_EXP_TIME(tid_agg_rx->timeout));
2478
2479 /* if this mpdu is fragmented - terminate rx aggregation session */
2480 sc = le16_to_cpu(hdr->seq_ctrl);
2481 if (sc & IEEE80211_SCTL_FRAG) {
2482 ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
2483 tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
2484 ret = 1;
2485 goto end_reorder;
2486 }
2487
2488 /* hand the frame to the reordering buffer based on its MPDU sequence number */
2489 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
2490 ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, status,
2491 mpdu_seq_num, 0);
2492 end_reorder:
2493 return ret;
2494 }
2495
2496 /*
2497 * This is the receive path handler. It is called by a low level driver when an
2498 * 802.11 MPDU is received from the hardware.
2499 */
2500 void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
2501 struct ieee80211_rx_status *status)
2502 {
2503 struct ieee80211_local *local = hw_to_local(hw);
2504 struct ieee80211_rate *rate = NULL;
2505 struct ieee80211_supported_band *sband;
2506
2507 if (status->band < 0 ||
2508 status->band >= IEEE80211_NUM_BANDS) {
2509 WARN_ON(1);
2510 return;
2511 }
2512
2513 sband = local->hw.wiphy->bands[status->band];
2514 if (!sband) {
2515 WARN_ON(1);
2516 return;
2517 }
2518
2519 if (status->flag & RX_FLAG_HT) {
2520 /* rate_idx is MCS index */
2521 if (WARN_ON(status->rate_idx < 0 ||
2522 status->rate_idx >= 76))
2523 return;
2524 /* HT rates are not in the table - use the highest legacy rate
2525 * for now since other parts of mac80211 may not yet be fully
2526 * MCS aware. */
2527 rate = &sband->bitrates[sband->n_bitrates - 1];
2528 } else {
2529 if (WARN_ON(status->rate_idx < 0 ||
2530 status->rate_idx >= sband->n_bitrates))
2531 return;
2532 rate = &sband->bitrates[status->rate_idx];
2533 }
2534
2535 /*
2536 * key references and virtual interfaces are protected using RCU
2537 * and this requires that we are in a read-side RCU section during
2538 * receive processing
2539 */
2540 rcu_read_lock();
2541
2542 /*
2543 * Frames with a failed FCS/PLCP checksum are not returned;
2544 * all other frames are returned with the radiotap header
2545 * stripped if one was previously present.
2546 * Also, frames shorter than 16 bytes are dropped.
2547 */
2548 skb = ieee80211_rx_monitor(local, skb, status, rate);
2549 if (!skb) {
2550 rcu_read_unlock();
2551 return;
2552 }
2553
2554 /*
2555 * In theory, the block ack reordering should happen after duplicate
2556 * removal (ieee80211_rx_h_check(), which is an RX handler). As such,
2557 * the call to ieee80211_rx_reorder_ampdu() should really be moved to
2558 * happen as a new RX handler between ieee80211_rx_h_check and
2559 * ieee80211_rx_h_decrypt. This cleanup may eventually happen, but for
2560 * the time being, the call can be here since RX reorder buf processing
2561 * will implicitly skip duplicates. We could, in theory at least,
2562 * process frames that ieee80211_rx_h_passive_scan would drop (e.g.,
2563 * frames from other than operational channel), but that should not
2564 * happen in normal networks.
2565 */
2566 if (!ieee80211_rx_reorder_ampdu(local, skb, status))
2567 __ieee80211_rx_handle_packet(hw, skb, status, rate);
2568
2569 rcu_read_unlock();
2570 }
2571 EXPORT_SYMBOL(__ieee80211_rx);
2572
2573 /* This is a version of the rx handler that can be called from hard irq
2574 * context. Post the skb on the queue and schedule the tasklet */
2575 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb,
2576 struct ieee80211_rx_status *status)
2577 {
2578 struct ieee80211_local *local = hw_to_local(hw);
2579
2580 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
2581
2582 skb->dev = local->mdev;
2583 /* copy status into skb->cb for use by tasklet */
2584 memcpy(skb->cb, status, sizeof(*status));
2585 skb->pkt_type = IEEE80211_RX_MSG;
2586 skb_queue_tail(&local->skb_queue, skb);
2587 tasklet_schedule(&local->tasklet);
2588 }
2589 EXPORT_SYMBOL(ieee80211_rx_irqsafe);