net/mac80211/rx.c
1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/kernel.h>
14 #include <linux/skbuff.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/rcupdate.h>
18 #include <net/mac80211.h>
19 #include <net/ieee80211_radiotap.h>
20
21 #include "ieee80211_i.h"
22 #include "driver-ops.h"
23 #include "led.h"
24 #include "mesh.h"
25 #include "wep.h"
26 #include "wpa.h"
27 #include "tkip.h"
28 #include "wme.h"
29
30 static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
31 struct tid_ampdu_rx *tid_agg_rx,
32 struct sk_buff *skb,
33 u16 mpdu_seq_num,
34 int bar_req);
35 /*
36 * monitor mode reception
37 *
38 * This function cleans up the SKB, i.e. it removes all the stuff
39 * only useful for monitoring.
40 */
41 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
42 struct sk_buff *skb)
43 {
44 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
45 if (likely(skb->len > FCS_LEN))
46 skb_trim(skb, skb->len - FCS_LEN);
47 else {
48 /* driver bug */
49 WARN_ON(1);
50 dev_kfree_skb(skb);
51 skb = NULL;
52 }
53 }
54
55 return skb;
56 }
57
58 static inline int should_drop_frame(struct sk_buff *skb,
59 int present_fcs_len)
60 {
61 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
62 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
63
64 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
65 return 1;
66 if (unlikely(skb->len < 16 + present_fcs_len))
67 return 1;
68 if (ieee80211_is_ctl(hdr->frame_control) &&
69 !ieee80211_is_pspoll(hdr->frame_control) &&
70 !ieee80211_is_back_req(hdr->frame_control))
71 return 1;
72 return 0;
73 }
74
75 static int
76 ieee80211_rx_radiotap_len(struct ieee80211_local *local,
77 struct ieee80211_rx_status *status)
78 {
79 int len;
80
81 /* always present fields */
82 len = sizeof(struct ieee80211_radiotap_header) + 9;
83
84 if (status->flag & RX_FLAG_TSFT)
85 len += 8;
86 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
87 len += 1;
88 if (local->hw.flags & IEEE80211_HW_NOISE_DBM)
89 len += 1;
90
91 if (len & 1) /* padding for RX_FLAGS if necessary */
92 len++;
93
94 return len;
95 }
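/*
 * For example, with RX_FLAG_TSFT set and a driver reporting both
 * signal and noise in dBm, this works out to 8 (radiotap header) +
 * 9 (flags, rate, channel, antenna, RX flags) + 8 + 1 + 1 = 27,
 * padded to 28 so that the 16-bit RX flags field stays aligned.
 */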
96
97 /*
98 * ieee80211_add_rx_radiotap_header - add radiotap header
99 *
100 * add a radiotap header containing all the fields which the hardware provided.
101 */
102 static void
103 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
104 struct sk_buff *skb,
105 struct ieee80211_rate *rate,
106 int rtap_len)
107 {
108 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
109 struct ieee80211_radiotap_header *rthdr;
110 unsigned char *pos;
111 u16 rx_flags = 0;
112
113 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
114 memset(rthdr, 0, rtap_len);
115
116 /* radiotap header, set always present flags */
117 rthdr->it_present =
118 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
119 (1 << IEEE80211_RADIOTAP_CHANNEL) |
120 (1 << IEEE80211_RADIOTAP_ANTENNA) |
121 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
122 rthdr->it_len = cpu_to_le16(rtap_len);
123
124 pos = (unsigned char *)(rthdr+1);
125
126 /* the order of the following fields is important */
127
128 /* IEEE80211_RADIOTAP_TSFT */
129 if (status->flag & RX_FLAG_TSFT) {
130 put_unaligned_le64(status->mactime, pos);
131 rthdr->it_present |=
132 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
133 pos += 8;
134 }
135
136 /* IEEE80211_RADIOTAP_FLAGS */
137 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
138 *pos |= IEEE80211_RADIOTAP_F_FCS;
139 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
140 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
141 if (status->flag & RX_FLAG_SHORTPRE)
142 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
143 pos++;
144
145 /* IEEE80211_RADIOTAP_RATE */
146 if (status->flag & RX_FLAG_HT) {
147 /*
148 * TODO: add following information into radiotap header once
149 * suitable fields are defined for it:
150 * - MCS index (status->rate_idx)
151 * - HT40 (status->flag & RX_FLAG_40MHZ)
152 * - short-GI (status->flag & RX_FLAG_SHORT_GI)
153 */
154 *pos = 0;
155 } else {
156 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
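/* struct ieee80211_rate gives the bitrate in units of 100 kbit/s,
 * while the radiotap rate field is in units of 500 kbit/s */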
157 *pos = rate->bitrate / 5;
158 }
159 pos++;
160
161 /* IEEE80211_RADIOTAP_CHANNEL */
162 put_unaligned_le16(status->freq, pos);
163 pos += 2;
164 if (status->band == IEEE80211_BAND_5GHZ)
165 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
166 pos);
167 else if (rate->flags & IEEE80211_RATE_ERP_G)
168 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
169 pos);
170 else
171 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
172 pos);
173 pos += 2;
174
175 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
176 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
177 *pos = status->signal;
178 rthdr->it_present |=
179 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
180 pos++;
181 }
182
183 /* IEEE80211_RADIOTAP_DBM_ANTNOISE */
184 if (local->hw.flags & IEEE80211_HW_NOISE_DBM) {
185 *pos = status->noise;
186 rthdr->it_present |=
187 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
188 pos++;
189 }
190
191 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
192
193 /* IEEE80211_RADIOTAP_ANTENNA */
194 *pos = status->antenna;
195 pos++;
196
197 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
198
199 /* IEEE80211_RADIOTAP_RX_FLAGS */
200 /* ensure 2 byte alignment for the 2 byte field as required */
201 if ((pos - (u8 *)rthdr) & 1)
202 pos++;
203 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
204 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
205 put_unaligned_le16(rx_flags, pos);
206 pos += 2;
207 }
208
209 /*
210 * This function copies a received frame to all monitor interfaces and
211 * returns a cleaned-up SKB that no longer includes the FCS nor the
212 * radiotap header the driver might have added.
213 */
214 static struct sk_buff *
215 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
216 struct ieee80211_rate *rate)
217 {
218 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
219 struct ieee80211_sub_if_data *sdata;
220 int needed_headroom = 0;
221 struct sk_buff *skb, *skb2;
222 struct net_device *prev_dev = NULL;
223 int present_fcs_len = 0;
224
225 /*
226 * First, we may need to make a copy of the skb because
227 * (1) we need to modify it for radiotap (if not present), and
228 * (2) the other RX handlers will modify the skb we got.
229 *
230 * We don't need to, of course, if we aren't going to return
231 * the SKB because it has a bad FCS/PLCP checksum.
232 */
233
234 /* room for the radiotap header based on driver features */
235 needed_headroom = ieee80211_rx_radiotap_len(local, status);
236
237 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
238 present_fcs_len = FCS_LEN;
239
240 if (!local->monitors) {
241 if (should_drop_frame(origskb, present_fcs_len)) {
242 dev_kfree_skb(origskb);
243 return NULL;
244 }
245
246 return remove_monitor_info(local, origskb);
247 }
248
249 if (should_drop_frame(origskb, present_fcs_len)) {
250 /* only need to expand headroom if necessary */
251 skb = origskb;
252 origskb = NULL;
253
254 /*
255 * This shouldn't trigger often because most devices have an
256 * RX header they pull before we get here, and that should
257 * be big enough for our radiotap information. We should
258 * probably export the length to drivers so that we can have
259 * them allocate enough headroom to start with.
260 */
261 if (skb_headroom(skb) < needed_headroom &&
262 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
263 dev_kfree_skb(skb);
264 return NULL;
265 }
266 } else {
267 /*
268 * Need to make a copy and possibly remove radiotap header
269 * and FCS from the original.
270 */
271 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
272
273 origskb = remove_monitor_info(local, origskb);
274
275 if (!skb)
276 return origskb;
277 }
278
279 /* prepend radiotap information */
280 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom);
281
282 skb_reset_mac_header(skb);
283 skb->ip_summed = CHECKSUM_UNNECESSARY;
284 skb->pkt_type = PACKET_OTHERHOST;
285 skb->protocol = htons(ETH_P_802_2);
286
287 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
288 if (!netif_running(sdata->dev))
289 continue;
290
291 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
292 continue;
293
294 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
295 continue;
296
297 if (prev_dev) {
298 skb2 = skb_clone(skb, GFP_ATOMIC);
299 if (skb2) {
300 skb2->dev = prev_dev;
301 netif_rx(skb2);
302 }
303 }
304
305 prev_dev = sdata->dev;
306 sdata->dev->stats.rx_packets++;
307 sdata->dev->stats.rx_bytes += skb->len;
308 }
309
310 if (prev_dev) {
311 skb->dev = prev_dev;
312 netif_rx(skb);
313 } else
314 dev_kfree_skb(skb);
315
316 return origskb;
317 }
318
319
320 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
321 {
322 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
323 int tid;
324
325 /* does the frame have a qos control field? */
326 if (ieee80211_is_data_qos(hdr->frame_control)) {
327 u8 *qc = ieee80211_get_qos_ctl(hdr);
328 /* frame has qos control */
329 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
330 if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
331 rx->flags |= IEEE80211_RX_AMSDU;
332 else
333 rx->flags &= ~IEEE80211_RX_AMSDU;
334 } else {
335 /*
336 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
337 *
338 * Sequence numbers for management frames, QoS data
339 * frames with a broadcast/multicast address in the
340 * Address 1 field, and all non-QoS data frames sent
341 * by QoS STAs are assigned using an additional single
342 * modulo-4096 counter, [...]
343 *
344 * We also use that counter for non-QoS STAs.
345 */
346 tid = NUM_RX_DATA_QUEUES - 1;
347 }
348
349 rx->queue = tid;
350 /* Set skb->priority to the 802.1d tag if the highest-order bit of the TID
351 * is not set. For now, set skb->priority to 0 for other cases. */
352 rx->skb->priority = (tid > 7) ? 0 : tid;
353 }
354
355 /**
356 * DOC: Packet alignment
357 *
358 * Drivers always need to pass packets that are aligned to two-byte boundaries
359 * to the stack.
360 *
361 * Additionally, drivers should, if possible, align the payload data in a way that
362 * guarantees that the contained IP header is aligned to a four-byte
363 * boundary. In the case of regular frames, this simply means aligning the
364 * payload to a four-byte boundary (because either the IP header is directly
365 * contained, or IV/RFC1042 headers that have a length divisible by four are
366 * in front of it).
367 *
368 * With A-MSDU frames, however, the payload data address must be two modulo
369 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
370 * push the IP header further back to a multiple of four again. Thankfully, the
371 * specs were sane enough this time around to require padding each A-MSDU
372 * subframe to a length that is a multiple of four.
373 *
374 * Padding such as that added by Atheros hardware between the 802.11 header and
375 * the payload is not supported; in that case the driver is required to move the
376 * 802.11 header so that it sits directly in front of the payload.
377 */
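/*
 * For example, if an A-MSDU payload starts at an address with
 * (addr & 3) == 2, the 14-byte 802.3 subframe header brings the
 * following IP header back to (addr & 3) == 0, and the four-byte
 * subframe padding preserves that for every later subframe.
 */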
378 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
379 {
380 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
381 int hdrlen;
382
383 #ifndef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
384 return;
385 #endif
386
387 if (WARN_ONCE((unsigned long)rx->skb->data & 1,
388 "unaligned packet at 0x%p\n", rx->skb->data))
389 return;
390
391 if (!ieee80211_is_data_present(hdr->frame_control))
392 return;
393
394 hdrlen = ieee80211_hdrlen(hdr->frame_control);
395 if (rx->flags & IEEE80211_RX_AMSDU)
396 hdrlen += ETH_HLEN;
397 WARN_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3,
398 "unaligned IP payload at 0x%p\n", rx->skb->data + hdrlen);
399 }
400
401
402 /* rx handlers */
403
404 static ieee80211_rx_result debug_noinline
405 ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
406 {
407 struct ieee80211_local *local = rx->local;
408 struct sk_buff *skb = rx->skb;
409
410 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning)))
411 return ieee80211_scan_rx(rx->sdata, skb);
412
413 if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning) &&
414 (rx->flags & IEEE80211_RX_IN_SCAN))) {
415 /* drop all the other packets during a software scan anyway */
416 if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED)
417 dev_kfree_skb(skb);
418 return RX_QUEUED;
419 }
420
421 if (unlikely(rx->flags & IEEE80211_RX_IN_SCAN)) {
422 /* scanning finished during invoking of handlers */
423 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
424 return RX_DROP_UNUSABLE;
425 }
426
427 return RX_CONTINUE;
428 }
429
430
431 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
432 {
433 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
434
435 if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
436 return 0;
437
438 return ieee80211_is_robust_mgmt_frame(hdr);
439 }
440
441
442 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
443 {
444 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
445
446 if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
447 return 0;
448
449 return ieee80211_is_robust_mgmt_frame(hdr);
450 }
451
452
453 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
454 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
455 {
456 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
457 struct ieee80211_mmie *mmie;
458
459 if (skb->len < 24 + sizeof(*mmie) ||
460 !is_multicast_ether_addr(hdr->da))
461 return -1;
462
463 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
464 return -1; /* not a robust management frame */
465
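/* The MMIE is the last element in the frame body: element ID (1),
 * length (1), key ID (2), IPN (6) and MIC (8) octets, 18 in total. */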
466 mmie = (struct ieee80211_mmie *)
467 (skb->data + skb->len - sizeof(*mmie));
468 if (mmie->element_id != WLAN_EID_MMIE ||
469 mmie->length != sizeof(*mmie) - 2)
470 return -1;
471
472 return le16_to_cpu(mmie->key_id);
473 }
474
475
476 static ieee80211_rx_result
477 ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
478 {
479 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
480 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
481 char *dev_addr = rx->dev->dev_addr;
482
483 if (ieee80211_is_data(hdr->frame_control)) {
484 if (is_multicast_ether_addr(hdr->addr1)) {
485 if (ieee80211_has_tods(hdr->frame_control) ||
486 !ieee80211_has_fromds(hdr->frame_control))
487 return RX_DROP_MONITOR;
488 if (memcmp(hdr->addr3, dev_addr, ETH_ALEN) == 0)
489 return RX_DROP_MONITOR;
490 } else {
491 if (!ieee80211_has_a4(hdr->frame_control))
492 return RX_DROP_MONITOR;
493 if (memcmp(hdr->addr4, dev_addr, ETH_ALEN) == 0)
494 return RX_DROP_MONITOR;
495 }
496 }
497
498 /* If there is no established peer link and this is not a peer link
499 * establishment frame, beacon or probe, drop the frame.
500 */
501
502 if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) {
503 struct ieee80211_mgmt *mgmt;
504
505 if (!ieee80211_is_mgmt(hdr->frame_control))
506 return RX_DROP_MONITOR;
507
508 if (ieee80211_is_action(hdr->frame_control)) {
509 mgmt = (struct ieee80211_mgmt *)hdr;
510 if (mgmt->u.action.category != PLINK_CATEGORY)
511 return RX_DROP_MONITOR;
512 return RX_CONTINUE;
513 }
514
515 if (ieee80211_is_probe_req(hdr->frame_control) ||
516 ieee80211_is_probe_resp(hdr->frame_control) ||
517 ieee80211_is_beacon(hdr->frame_control))
518 return RX_CONTINUE;
519
520 return RX_DROP_MONITOR;
521
522 }
523
524 #define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))
525
526 if (ieee80211_is_data(hdr->frame_control) &&
527 is_multicast_ether_addr(hdr->addr1) &&
528 mesh_rmc_check(hdr->addr3, msh_h_get(hdr, hdrlen), rx->sdata))
529 return RX_DROP_MONITOR;
530 #undef msh_h_get
531
532 return RX_CONTINUE;
533 }
534
535
536 static ieee80211_rx_result debug_noinline
537 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
538 {
539 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
540
541 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
542 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
543 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
544 rx->sta->last_seq_ctrl[rx->queue] ==
545 hdr->seq_ctrl)) {
546 if (rx->flags & IEEE80211_RX_RA_MATCH) {
547 rx->local->dot11FrameDuplicateCount++;
548 rx->sta->num_duplicates++;
549 }
550 return RX_DROP_MONITOR;
551 } else
552 rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl;
553 }
554
555 if (unlikely(rx->skb->len < 16)) {
556 I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
557 return RX_DROP_MONITOR;
558 }
559
560 /* Drop disallowed frame classes based on STA auth/assoc state;
561 * IEEE 802.11, Chap 5.5.
562 *
563 * mac80211 filters only based on association state, i.e. it drops
564 * Class 3 frames from not associated stations. hostapd sends
565 * deauth/disassoc frames when needed. In addition, hostapd is
566 * responsible for filtering on both auth and assoc states.
567 */
568
569 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
570 return ieee80211_rx_mesh_check(rx);
571
572 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
573 ieee80211_is_pspoll(hdr->frame_control)) &&
574 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
575 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
576 if ((!ieee80211_has_fromds(hdr->frame_control) &&
577 !ieee80211_has_tods(hdr->frame_control) &&
578 ieee80211_is_data(hdr->frame_control)) ||
579 !(rx->flags & IEEE80211_RX_RA_MATCH)) {
580 /* Drop IBSS frames and frames for other hosts
581 * silently. */
582 return RX_DROP_MONITOR;
583 }
584
585 return RX_DROP_MONITOR;
586 }
587
588 return RX_CONTINUE;
589 }
590
591
592 static ieee80211_rx_result debug_noinline
593 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
594 {
595 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
596 int keyidx;
597 int hdrlen;
598 ieee80211_rx_result result = RX_DROP_UNUSABLE;
599 struct ieee80211_key *stakey = NULL;
600 int mmie_keyidx = -1;
601
602 /*
603 * Key selection 101
604 *
605 * There are four types of keys:
606 * - GTK (group keys)
607 * - IGTK (group keys for management frames)
608 * - PTK (pairwise keys)
609 * - STK (station-to-station pairwise keys)
610 *
611 * When selecting a key, we have to distinguish between multicast
612 * (including broadcast) and unicast frames, the latter can only
613 * use PTKs and STKs while the former always use GTKs and IGTKs.
614 * Unless, of course, actual WEP keys ("pre-RSNA") are used; then
615 * unicast frames can also use key indices like GTKs. Hence, if we
616 * don't have a PTK/STK we check the key index for a WEP key.
617 *
618 * Note that in a regular BSS, multicast frames are sent by the
619 * AP only, associated stations unicast the frame to the AP first
620 * which then multicasts it on their behalf.
621 *
622 * There is also a slight problem in IBSS mode: GTKs are negotiated
623 * with each station, which is something we don't currently handle.
624 * The spec seems to expect that one negotiates the same key with
625 * every station but there's no such requirement; VLANs could be
626 * possible.
627 */
628
629 /*
630 * No point in finding a key and decrypting if the frame is neither
631 * addressed to us nor a multicast frame.
632 */
633 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
634 return RX_CONTINUE;
635
636 if (rx->sta)
637 stakey = rcu_dereference(rx->sta->key);
638
639 if (!ieee80211_has_protected(hdr->frame_control))
640 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
641
642 if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
643 rx->key = stakey;
644 /* Skip decryption if the frame is not protected. */
645 if (!ieee80211_has_protected(hdr->frame_control))
646 return RX_CONTINUE;
647 } else if (mmie_keyidx >= 0) {
648 /* Broadcast/multicast robust management frame / BIP */
649 if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
650 (rx->status->flag & RX_FLAG_IV_STRIPPED))
651 return RX_CONTINUE;
652
653 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
654 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
655 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
656 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
657 } else if (!ieee80211_has_protected(hdr->frame_control)) {
658 /*
659 * The frame was not protected, so skip decryption. However, we
660 * need to set rx->key if there is a key that could have been
661 * used so that the frame may be dropped if encryption would
662 * have been expected.
663 */
664 struct ieee80211_key *key = NULL;
665 if (ieee80211_is_mgmt(hdr->frame_control) &&
666 is_multicast_ether_addr(hdr->addr1) &&
667 (key = rcu_dereference(rx->sdata->default_mgmt_key)))
668 rx->key = key;
669 else if ((key = rcu_dereference(rx->sdata->default_key)))
670 rx->key = key;
671 return RX_CONTINUE;
672 } else {
673 /*
674 * The device doesn't give us the IV so we won't be
675 * able to look up the key. That's ok though, we
676 * don't need to decrypt the frame, we just won't
677 * be able to keep statistics accurate.
678 * Except for key threshold notifications, should
679 * we somehow allow the driver to tell us which key
680 * the hardware used if this flag is set?
681 */
682 if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
683 (rx->status->flag & RX_FLAG_IV_STRIPPED))
684 return RX_CONTINUE;
685
686 hdrlen = ieee80211_hdrlen(hdr->frame_control);
687
688 if (rx->skb->len < 8 + hdrlen)
689 return RX_DROP_UNUSABLE; /* TODO: count this? */
690
691 /*
692 * no need to call ieee80211_wep_get_keyidx,
693 * it verifies a bunch of things we've done already
694 */
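/* the key index sits in the two most significant bits of the
 * fourth IV octet, which directly follows the 802.11 header */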
695 keyidx = rx->skb->data[hdrlen + 3] >> 6;
696
697 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
698
699 /*
700 * RSNA-protected unicast frames should always be sent with
701 * pairwise or station-to-station keys, but for WEP we allow
702 * using a key index as well.
703 */
704 if (rx->key && rx->key->conf.alg != ALG_WEP &&
705 !is_multicast_ether_addr(hdr->addr1))
706 rx->key = NULL;
707 }
708
709 if (rx->key) {
710 rx->key->tx_rx_count++;
711 /* TODO: add threshold stuff again */
712 } else {
713 return RX_DROP_MONITOR;
714 }
715
716 /* Check for weak IVs if possible */
717 if (rx->sta && rx->key->conf.alg == ALG_WEP &&
718 ieee80211_is_data(hdr->frame_control) &&
719 (!(rx->status->flag & RX_FLAG_IV_STRIPPED) ||
720 !(rx->status->flag & RX_FLAG_DECRYPTED)) &&
721 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
722 rx->sta->wep_weak_iv_count++;
723
724 switch (rx->key->conf.alg) {
725 case ALG_WEP:
726 result = ieee80211_crypto_wep_decrypt(rx);
727 break;
728 case ALG_TKIP:
729 result = ieee80211_crypto_tkip_decrypt(rx);
730 break;
731 case ALG_CCMP:
732 result = ieee80211_crypto_ccmp_decrypt(rx);
733 break;
734 case ALG_AES_CMAC:
735 result = ieee80211_crypto_aes_cmac_decrypt(rx);
736 break;
737 }
738
739 /* either the frame has been decrypted or will be dropped */
740 rx->status->flag |= RX_FLAG_DECRYPTED;
741
742 return result;
743 }
744
745 static ieee80211_rx_result debug_noinline
746 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
747 {
748 struct ieee80211_local *local;
749 struct ieee80211_hdr *hdr;
750 struct sk_buff *skb;
751
752 local = rx->local;
753 skb = rx->skb;
754 hdr = (struct ieee80211_hdr *) skb->data;
755
756 if (!local->pspolling)
757 return RX_CONTINUE;
758
759 if (!ieee80211_has_fromds(hdr->frame_control))
760 /* this is not from AP */
761 return RX_CONTINUE;
762
763 if (!ieee80211_is_data(hdr->frame_control))
764 return RX_CONTINUE;
765
766 if (!ieee80211_has_moredata(hdr->frame_control)) {
767 /* AP has no more frames buffered for us */
768 local->pspolling = false;
769 return RX_CONTINUE;
770 }
771
772 /* more data bit is set, let's request a new frame from the AP */
773 ieee80211_send_pspoll(local, rx->sdata);
774
775 return RX_CONTINUE;
776 }
777
778 static void ap_sta_ps_start(struct sta_info *sta)
779 {
780 struct ieee80211_sub_if_data *sdata = sta->sdata;
781 struct ieee80211_local *local = sdata->local;
782
783 atomic_inc(&sdata->bss->num_sta_ps);
784 set_sta_flags(sta, WLAN_STA_PS_STA);
785 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_SLEEP, &sta->sta);
786 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
787 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
788 sdata->dev->name, sta->sta.addr, sta->sta.aid);
789 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
790 }
791
792 static void ap_sta_ps_end(struct sta_info *sta)
793 {
794 struct ieee80211_sub_if_data *sdata = sta->sdata;
795
796 atomic_dec(&sdata->bss->num_sta_ps);
797
798 clear_sta_flags(sta, WLAN_STA_PS_STA);
799
800 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
801 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
802 sdata->dev->name, sta->sta.addr, sta->sta.aid);
803 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
804
805 if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) {
806 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
807 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
808 sdata->dev->name, sta->sta.addr, sta->sta.aid);
809 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
810 return;
811 }
812
813 ieee80211_sta_ps_deliver_wakeup(sta);
814 }
815
816 static ieee80211_rx_result debug_noinline
817 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
818 {
819 struct sta_info *sta = rx->sta;
820 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
821
822 if (!sta)
823 return RX_CONTINUE;
824
825 /*
826 * Update last_rx only for IBSS packets which are for the current
827 * BSSID to avoid keeping the current IBSS network alive in cases
828 * where other STAs start using different BSSID.
829 */
830 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
831 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
832 NL80211_IFTYPE_ADHOC);
833 if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0)
834 sta->last_rx = jiffies;
835 } else if (!is_multicast_ether_addr(hdr->addr1)) {
836 /*
837 * Mesh beacons will update last_rx if they are found to match
838 * the current local configuration when processed.
839 */
840 sta->last_rx = jiffies;
841 }
842
843 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
844 return RX_CONTINUE;
845
846 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
847 ieee80211_sta_rx_notify(rx->sdata, hdr);
848
849 sta->rx_fragments++;
850 sta->rx_bytes += rx->skb->len;
851 sta->last_signal = rx->status->signal;
852 sta->last_noise = rx->status->noise;
853
854 /*
855 * Change STA power saving mode only at the end of a frame
856 * exchange sequence.
857 */
858 if (!ieee80211_has_morefrags(hdr->frame_control) &&
859 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
860 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
861 if (test_sta_flags(sta, WLAN_STA_PS_STA)) {
862 /*
863 * Ignore doze->wake transitions that are
864 * indicated by non-data frames, the standard
865 * is unclear here, but for example going to
866 * PS mode and then scanning would cause a
867 * doze->wake transition for the probe request,
868 * and that is clearly undesirable.
869 */
870 if (ieee80211_is_data(hdr->frame_control) &&
871 !ieee80211_has_pm(hdr->frame_control))
872 ap_sta_ps_end(sta);
873 } else {
874 if (ieee80211_has_pm(hdr->frame_control))
875 ap_sta_ps_start(sta);
876 }
877 }
878
879 /*
880 * Drop (qos-)data::nullfunc frames silently, since they
881 * are used only to control station power saving mode.
882 */
883 if (ieee80211_is_nullfunc(hdr->frame_control) ||
884 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
885 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
886 /*
887 * Update counter and free packet here to avoid
888 * counting this as a dropped packet.
889 */
890 sta->rx_packets++;
891 dev_kfree_skb(rx->skb);
892 return RX_QUEUED;
893 }
894
895 return RX_CONTINUE;
896 } /* ieee80211_rx_h_sta_process */
897
898 static inline struct ieee80211_fragment_entry *
899 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
900 unsigned int frag, unsigned int seq, int rx_queue,
901 struct sk_buff **skb)
902 {
903 struct ieee80211_fragment_entry *entry;
904 int idx;
905
906 idx = sdata->fragment_next;
907 entry = &sdata->fragments[sdata->fragment_next++];
908 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
909 sdata->fragment_next = 0;
910
911 if (!skb_queue_empty(&entry->skb_list)) {
912 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
913 struct ieee80211_hdr *hdr =
914 (struct ieee80211_hdr *) entry->skb_list.next->data;
915 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
916 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
917 "addr1=%pM addr2=%pM\n",
918 sdata->dev->name, idx,
919 jiffies - entry->first_frag_time, entry->seq,
920 entry->last_frag, hdr->addr1, hdr->addr2);
921 #endif
922 __skb_queue_purge(&entry->skb_list);
923 }
924
925 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
926 *skb = NULL;
927 entry->first_frag_time = jiffies;
928 entry->seq = seq;
929 entry->rx_queue = rx_queue;
930 entry->last_frag = frag;
931 entry->ccmp = 0;
932 entry->extra_len = 0;
933
934 return entry;
935 }
936
937 static inline struct ieee80211_fragment_entry *
938 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
939 unsigned int frag, unsigned int seq,
940 int rx_queue, struct ieee80211_hdr *hdr)
941 {
942 struct ieee80211_fragment_entry *entry;
943 int i, idx;
944
945 idx = sdata->fragment_next;
946 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
947 struct ieee80211_hdr *f_hdr;
948
949 idx--;
950 if (idx < 0)
951 idx = IEEE80211_FRAGMENT_MAX - 1;
952
953 entry = &sdata->fragments[idx];
954 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
955 entry->rx_queue != rx_queue ||
956 entry->last_frag + 1 != frag)
957 continue;
958
959 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
960
961 /*
962 * Check ftype and addresses are equal, else check next fragment
963 */
964 if (((hdr->frame_control ^ f_hdr->frame_control) &
965 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
966 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
967 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
968 continue;
969
970 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
971 __skb_queue_purge(&entry->skb_list);
972 continue;
973 }
974 return entry;
975 }
976
977 return NULL;
978 }
979
980 static ieee80211_rx_result debug_noinline
981 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
982 {
983 struct ieee80211_hdr *hdr;
984 u16 sc;
985 __le16 fc;
986 unsigned int frag, seq;
987 struct ieee80211_fragment_entry *entry;
988 struct sk_buff *skb;
989
990 hdr = (struct ieee80211_hdr *)rx->skb->data;
991 fc = hdr->frame_control;
992 sc = le16_to_cpu(hdr->seq_ctrl);
993 frag = sc & IEEE80211_SCTL_FRAG;
994
995 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
996 (rx->skb)->len < 24 ||
997 is_multicast_ether_addr(hdr->addr1))) {
998 /* not fragmented */
999 goto out;
1000 }
1001 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1002
1003 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
1004
1005 if (frag == 0) {
1006 /* This is the first fragment of a new frame. */
1007 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
1008 rx->queue, &(rx->skb));
1009 if (rx->key && rx->key->conf.alg == ALG_CCMP &&
1010 ieee80211_has_protected(fc)) {
1011 /* Store CCMP PN so that we can verify that the next
1012 * fragment has a sequential PN value. */
1013 entry->ccmp = 1;
1014 memcpy(entry->last_pn,
1015 rx->key->u.ccmp.rx_pn[rx->queue],
1016 CCMP_PN_LEN);
1017 }
1018 return RX_QUEUED;
1019 }
1020
1021 /* This is a fragment for a frame that should already be pending in
1022 * the fragment cache. Add this fragment to the end of the pending entry.
1023 */
1024 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr);
1025 if (!entry) {
1026 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1027 return RX_DROP_MONITOR;
1028 }
1029
1030 /* Verify that MPDUs within one MSDU have sequential PN values.
1031 * (IEEE 802.11i, 8.3.3.4.5) */
1032 if (entry->ccmp) {
1033 int i;
1034 u8 pn[CCMP_PN_LEN], *rpn;
1035 if (!rx->key || rx->key->conf.alg != ALG_CCMP)
1036 return RX_DROP_UNUSABLE;
1037 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
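/* increment the 6-octet PN as a big-endian counter, e.g.
 * 00 00 00 00 00 ff becomes 00 00 00 00 01 00 */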
1038 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
1039 pn[i]++;
1040 if (pn[i])
1041 break;
1042 }
1043 rpn = rx->key->u.ccmp.rx_pn[rx->queue];
1044 if (memcmp(pn, rpn, CCMP_PN_LEN))
1045 return RX_DROP_UNUSABLE;
1046 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
1047 }
1048
1049 skb_pull(rx->skb, ieee80211_hdrlen(fc));
1050 __skb_queue_tail(&entry->skb_list, rx->skb);
1051 entry->last_frag = frag;
1052 entry->extra_len += rx->skb->len;
1053 if (ieee80211_has_morefrags(fc)) {
1054 rx->skb = NULL;
1055 return RX_QUEUED;
1056 }
1057
1058 rx->skb = __skb_dequeue(&entry->skb_list);
1059 if (skb_tailroom(rx->skb) < entry->extra_len) {
1060 I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
1061 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
1062 GFP_ATOMIC))) {
1063 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1064 __skb_queue_purge(&entry->skb_list);
1065 return RX_DROP_UNUSABLE;
1066 }
1067 }
1068 while ((skb = __skb_dequeue(&entry->skb_list))) {
1069 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
1070 dev_kfree_skb(skb);
1071 }
1072
1073 /* Complete frame has been reassembled - process it now */
1074 rx->flags |= IEEE80211_RX_FRAGMENTED;
1075
1076 out:
1077 if (rx->sta)
1078 rx->sta->rx_packets++;
1079 if (is_multicast_ether_addr(hdr->addr1))
1080 rx->local->dot11MulticastReceivedFrameCount++;
1081 else
1082 ieee80211_led_rx(rx->local);
1083 return RX_CONTINUE;
1084 }
1085
1086 static ieee80211_rx_result debug_noinline
1087 ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1088 {
1089 struct ieee80211_sub_if_data *sdata = rx->sdata;
1090 __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
1091
1092 if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
1093 !(rx->flags & IEEE80211_RX_RA_MATCH)))
1094 return RX_CONTINUE;
1095
1096 if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
1097 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1098 return RX_DROP_UNUSABLE;
1099
1100 if (!test_sta_flags(rx->sta, WLAN_STA_PS_DRIVER))
1101 ieee80211_sta_ps_deliver_poll_response(rx->sta);
1102 else
1103 set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
1104
1105 /* Free PS Poll skb here instead of returning RX_DROP that would
1106 * count as a dropped frame. */
1107 dev_kfree_skb(rx->skb);
1108
1109 return RX_QUEUED;
1110 }
1111
1112 static ieee80211_rx_result debug_noinline
1113 ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1114 {
1115 u8 *data = rx->skb->data;
1116 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
1117
1118 if (!ieee80211_is_data_qos(hdr->frame_control))
1119 return RX_CONTINUE;
1120
1121 /* remove the qos control field, update frame type and meta-data */
1122 memmove(data + IEEE80211_QOS_CTL_LEN, data,
1123 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
1124 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1125 /* change frame type to non-QoS */
1126 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1127
1128 return RX_CONTINUE;
1129 }
1130
1131 static int
1132 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1133 {
1134 if (unlikely(!rx->sta ||
1135 !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
1136 return -EACCES;
1137
1138 return 0;
1139 }
1140
1141 static int
1142 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1143 {
1144 /*
1145 * Pass through unencrypted frames if the hardware has
1146 * decrypted them already.
1147 */
1148 if (rx->status->flag & RX_FLAG_DECRYPTED)
1149 return 0;
1150
1151 /* Drop unencrypted frames if key is set. */
1152 if (unlikely(!ieee80211_has_protected(fc) &&
1153 !ieee80211_is_nullfunc(fc) &&
1154 ieee80211_is_data(fc) &&
1155 (rx->key || rx->sdata->drop_unencrypted)))
1156 return -EACCES;
1157 if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
1158 if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1159 rx->key))
1160 return -EACCES;
1161 /* BIP does not use the Protected field, so we need to check the MMIE */
1162 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb)
1163 && ieee80211_get_mmie_keyidx(rx->skb) < 0 &&
1164 rx->key))
1165 return -EACCES;
1166 /*
1167 * When using MFP, Action frames are not allowed prior to
1168 * having configured keys.
1169 */
1170 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
1171 ieee80211_is_robust_mgmt_frame(
1172 (struct ieee80211_hdr *) rx->skb->data)))
1173 return -EACCES;
1174 }
1175
1176 return 0;
1177 }
1178
1179 static int
1180 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1181 {
1182 struct net_device *dev = rx->dev;
1183 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1184
1185 return ieee80211_data_to_8023(rx->skb, dev->dev_addr, sdata->vif.type);
1186 }
1187
1188 /*
1189 * requires that rx->skb is a frame with an ethernet header
1190 */
1191 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1192 {
1193 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1194 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1195 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1196
1197 /*
1198 * Allow EAPOL frames to us/the PAE group address regardless
1199 * of whether the frame was encrypted or not.
1200 */
1201 if (ehdr->h_proto == htons(ETH_P_PAE) &&
1202 (compare_ether_addr(ehdr->h_dest, rx->dev->dev_addr) == 0 ||
1203 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1204 return true;
1205
1206 if (ieee80211_802_1x_port_control(rx) ||
1207 ieee80211_drop_unencrypted(rx, fc))
1208 return false;
1209
1210 return true;
1211 }
1212
1213 /*
1214 * requires that rx->skb is a frame with an ethernet header
1215 */
1216 static void
1217 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1218 {
1219 struct net_device *dev = rx->dev;
1220 struct ieee80211_local *local = rx->local;
1221 struct sk_buff *skb, *xmit_skb;
1222 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1223 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1224 struct sta_info *dsta;
1225
1226 skb = rx->skb;
1227 xmit_skb = NULL;
1228
1229 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
1230 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1231 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
1232 (rx->flags & IEEE80211_RX_RA_MATCH)) {
1233 if (is_multicast_ether_addr(ehdr->h_dest)) {
1234 /*
1235 * send multicast frames both to higher layers in
1236 * local net stack and back to the wireless medium
1237 */
1238 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1239 if (!xmit_skb && net_ratelimit())
1240 printk(KERN_DEBUG "%s: failed to clone "
1241 "multicast frame\n", dev->name);
1242 } else {
1243 dsta = sta_info_get(local, skb->data);
1244 if (dsta && dsta->sdata->dev == dev) {
1245 /*
1246 * The destination station is associated to
1247 * this AP (in this VLAN), so send the frame
1248 * directly to it and do not pass it to local
1249 * net stack.
1250 */
1251 xmit_skb = skb;
1252 skb = NULL;
1253 }
1254 }
1255 }
1256
1257 if (skb) {
1258 int align __maybe_unused;
1259
1260 #if defined(CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT) || !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
1261 /*
1262 * 'align' will only take the values 0 or 2 here
1263 * since all frames are required to be aligned
1264 * to 2-byte boundaries when being passed to
1265 * mac80211. That also explains the __skb_push()
1266 * below.
1267 */
1268 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
1269 if (align) {
1270 if (WARN_ON(skb_headroom(skb) < 3)) {
1271 dev_kfree_skb(skb);
1272 skb = NULL;
1273 } else {
1274 u8 *data = skb->data;
1275 size_t len = skb_headlen(skb);
1276 skb->data -= align;
1277 memmove(skb->data, data, len);
1278 skb_set_tail_pointer(skb, len);
1279 }
1280 }
1281 #endif
1282
1283 if (skb) {
1284 /* deliver to local stack */
1285 skb->protocol = eth_type_trans(skb, dev);
1286 memset(skb->cb, 0, sizeof(skb->cb));
1287 netif_rx(skb);
1288 }
1289 }
1290
1291 if (xmit_skb) {
1292 /* send to wireless media */
1293 xmit_skb->protocol = htons(ETH_P_802_3);
1294 skb_reset_network_header(xmit_skb);
1295 skb_reset_mac_header(xmit_skb);
1296 dev_queue_xmit(xmit_skb);
1297 }
1298 }
1299
1300 static ieee80211_rx_result debug_noinline
1301 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1302 {
1303 struct net_device *dev = rx->dev;
1304 struct ieee80211_local *local = rx->local;
1305 u16 ethertype;
1306 u8 *payload;
1307 struct sk_buff *skb = rx->skb, *frame = NULL;
1308 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1309 __le16 fc = hdr->frame_control;
1310 const struct ethhdr *eth;
1311 int remaining, err;
1312 u8 dst[ETH_ALEN];
1313 u8 src[ETH_ALEN];
1314
1315 if (unlikely(!ieee80211_is_data(fc)))
1316 return RX_CONTINUE;
1317
1318 if (unlikely(!ieee80211_is_data_present(fc)))
1319 return RX_DROP_MONITOR;
1320
1321 if (!(rx->flags & IEEE80211_RX_AMSDU))
1322 return RX_CONTINUE;
1323
1324 err = __ieee80211_data_to_8023(rx);
1325 if (unlikely(err))
1326 return RX_DROP_UNUSABLE;
1327
1328 skb->dev = dev;
1329
1330 dev->stats.rx_packets++;
1331 dev->stats.rx_bytes += skb->len;
1332
1333 /* skip the wrapping header */
1334 eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
1335 if (!eth)
1336 return RX_DROP_UNUSABLE;
1337
1338 while (skb != frame) {
1339 u8 padding;
1340 __be16 len = eth->h_proto;
1341 unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
1342
1343 remaining = skb->len;
1344 memcpy(dst, eth->h_dest, ETH_ALEN);
1345 memcpy(src, eth->h_source, ETH_ALEN);
1346
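/* round the subframe up to a multiple of four octets, e.g. a
 * 53-octet subframe needs 3 octets of padding, a 56-octet one none */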
1347 padding = ((4 - subframe_len) & 0x3);
1348 /* the last MSDU has no padding */
1349 if (subframe_len > remaining)
1350 return RX_DROP_UNUSABLE;
1351
1352 skb_pull(skb, sizeof(struct ethhdr));
1353 /* if last subframe reuse skb */
1354 if (remaining <= subframe_len + padding)
1355 frame = skb;
1356 else {
1357 /*
1358 * Allocate and reserve two bytes more for payload
1359 * alignment since sizeof(struct ethhdr) is 14.
1360 */
1361 frame = dev_alloc_skb(
1362 ALIGN(local->hw.extra_tx_headroom, 4) +
1363 subframe_len + 2);
1364
1365 if (frame == NULL)
1366 return RX_DROP_UNUSABLE;
1367
1368 skb_reserve(frame,
1369 ALIGN(local->hw.extra_tx_headroom, 4) +
1370 sizeof(struct ethhdr) + 2);
1371 memcpy(skb_put(frame, ntohs(len)), skb->data,
1372 ntohs(len));
1373
1374 eth = (struct ethhdr *) skb_pull(skb, ntohs(len) +
1375 padding);
1376 if (!eth) {
1377 dev_kfree_skb(frame);
1378 return RX_DROP_UNUSABLE;
1379 }
1380 }
1381
1382 skb_reset_network_header(frame);
1383 frame->dev = dev;
1384 frame->priority = skb->priority;
1385 rx->skb = frame;
1386
1387 payload = frame->data;
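/* if the payload starts with an RFC 1042 (LLC/SNAP) header, octets
 * 6 and 7 carry the EtherType in network byte order */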
1388 ethertype = (payload[6] << 8) | payload[7];
1389
1390 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
1391 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1392 compare_ether_addr(payload,
1393 bridge_tunnel_header) == 0)) {
1394 /* remove RFC1042 or Bridge-Tunnel
1395 * encapsulation and replace EtherType */
1396 skb_pull(frame, 6);
1397 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1398 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1399 } else {
1400 memcpy(skb_push(frame, sizeof(__be16)),
1401 &len, sizeof(__be16));
1402 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1403 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1404 }
1405
1406 if (!ieee80211_frame_allowed(rx, fc)) {
1407 if (skb == frame) /* last frame */
1408 return RX_DROP_UNUSABLE;
1409 dev_kfree_skb(frame);
1410 continue;
1411 }
1412
1413 ieee80211_deliver_skb(rx);
1414 }
1415
1416 return RX_QUEUED;
1417 }
1418
1419 #ifdef CONFIG_MAC80211_MESH
1420 static ieee80211_rx_result
1421 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1422 {
1423 struct ieee80211_hdr *hdr;
1424 struct ieee80211s_hdr *mesh_hdr;
1425 unsigned int hdrlen;
1426 struct sk_buff *skb = rx->skb, *fwd_skb;
1427 struct ieee80211_local *local = rx->local;
1428 struct ieee80211_sub_if_data *sdata;
1429
1430 hdr = (struct ieee80211_hdr *) skb->data;
1431 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1432 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1433 sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1434
1435 if (!ieee80211_is_data(hdr->frame_control))
1436 return RX_CONTINUE;
1437
1438 if (!mesh_hdr->ttl)
1439 /* illegal frame */
1440 return RX_DROP_MONITOR;
1441
1442 if (mesh_hdr->flags & MESH_FLAGS_AE) {
1443 struct mesh_path *mppath;
1444 char *proxied_addr;
1445 char *mpp_addr;
1446
1447 if (is_multicast_ether_addr(hdr->addr1)) {
1448 mpp_addr = hdr->addr3;
1449 proxied_addr = mesh_hdr->eaddr1;
1450 } else {
1451 mpp_addr = hdr->addr4;
1452 proxied_addr = mesh_hdr->eaddr2;
1453 }
1454
1455 rcu_read_lock();
1456 mppath = mpp_path_lookup(proxied_addr, sdata);
1457 if (!mppath) {
1458 mpp_path_add(proxied_addr, mpp_addr, sdata);
1459 } else {
1460 spin_lock_bh(&mppath->state_lock);
1461 mppath->exp_time = jiffies;
1462 if (compare_ether_addr(mppath->mpp, mpp_addr) != 0)
1463 memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
1464 spin_unlock_bh(&mppath->state_lock);
1465 }
1466 rcu_read_unlock();
1467 }
1468
1469 /* Frame has reached destination. Don't forward */
1470 if (!is_multicast_ether_addr(hdr->addr1) &&
1471 compare_ether_addr(rx->dev->dev_addr, hdr->addr3) == 0)
1472 return RX_CONTINUE;
1473
1474 mesh_hdr->ttl--;
1475
1476 if (rx->flags & IEEE80211_RX_RA_MATCH) {
1477 if (!mesh_hdr->ttl)
1478 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
1479 dropped_frames_ttl);
1480 else {
1481 struct ieee80211_hdr *fwd_hdr;
1482 struct ieee80211_tx_info *info;
1483
1484 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1485 if (!fwd_skb) {
1486 if (net_ratelimit())
1487 printk(KERN_DEBUG "%s: failed to clone mesh frame\n", rx->dev->name);
1488 return RX_DROP_MONITOR;
1489 }
1490 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1491 memcpy(fwd_hdr->addr2, rx->dev->dev_addr, ETH_ALEN);
1492 info = IEEE80211_SKB_CB(fwd_skb);
1493 memset(info, 0, sizeof(*info));
1494 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1495 info->control.vif = &rx->sdata->vif;
1496 ieee80211_select_queue(local, fwd_skb);
1497 if (is_multicast_ether_addr(fwd_hdr->addr1))
1498 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1499 fwded_mcast);
1500 else {
1501 int err;
1502 /*
1503 * Save TA to addr1 to send TA a path error if a
1504 * suitable next hop is not found
1505 */
1506 memcpy(fwd_hdr->addr1, fwd_hdr->addr2,
1507 ETH_ALEN);
1508 err = mesh_nexthop_lookup(fwd_skb, sdata);
1509 /* Failed to immediately resolve next hop:
1510 * fwded frame was dropped or will be added
1511 * later to the pending skb queue. */
1512 if (err)
1513 return RX_DROP_MONITOR;
1514
1515 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1516 fwded_unicast);
1517 }
1518 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1519 fwded_frames);
1520 ieee80211_add_pending_skb(local, fwd_skb);
1521 }
1522 }
1523
1524 if (is_multicast_ether_addr(hdr->addr1) ||
1525 rx->dev->flags & IFF_PROMISC)
1526 return RX_CONTINUE;
1527 else
1528 return RX_DROP_MONITOR;
1529 }
1530 #endif
1531
1532 static ieee80211_rx_result debug_noinline
1533 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1534 {
1535 struct net_device *dev = rx->dev;
1536 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1537 __le16 fc = hdr->frame_control;
1538 int err;
1539
1540 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
1541 return RX_CONTINUE;
1542
1543 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1544 return RX_DROP_MONITOR;
1545
1546 err = __ieee80211_data_to_8023(rx);
1547 if (unlikely(err))
1548 return RX_DROP_UNUSABLE;
1549
1550 if (!ieee80211_frame_allowed(rx, fc))
1551 return RX_DROP_MONITOR;
1552
1553 rx->skb->dev = dev;
1554
1555 dev->stats.rx_packets++;
1556 dev->stats.rx_bytes += rx->skb->len;
1557
1558 ieee80211_deliver_skb(rx);
1559
1560 return RX_QUEUED;
1561 }
1562
1563 static ieee80211_rx_result debug_noinline
1564 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
1565 {
1566 struct ieee80211_local *local = rx->local;
1567 struct ieee80211_hw *hw = &local->hw;
1568 struct sk_buff *skb = rx->skb;
1569 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
1570 struct tid_ampdu_rx *tid_agg_rx;
1571 u16 start_seq_num;
1572 u16 tid;
1573
1574 if (likely(!ieee80211_is_ctl(bar->frame_control)))
1575 return RX_CONTINUE;
1576
1577 if (ieee80211_is_back_req(bar->frame_control)) {
1578 if (!rx->sta)
1579 return RX_CONTINUE;
1580 tid = le16_to_cpu(bar->control) >> 12;
1581 if (rx->sta->ampdu_mlme.tid_state_rx[tid]
1582 != HT_AGG_STATE_OPERATIONAL)
1583 return RX_CONTINUE;
1584 tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
1585
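/* the starting sequence control carries the fragment number in
 * bits 0-3 and the starting sequence number in bits 4-15 */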
1586 start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
1587
1588 /* reset session timer */
1589 if (tid_agg_rx->timeout)
1590 mod_timer(&tid_agg_rx->session_timer,
1591 TU_TO_EXP_TIME(tid_agg_rx->timeout));
1592
1593 /* manage reordering buffer according to requested
1594 * sequence number */
1595 rcu_read_lock();
1596 ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL,
1597 start_seq_num, 1);
1598 rcu_read_unlock();
1599 return RX_DROP_UNUSABLE;
1600 }
1601
1602 return RX_CONTINUE;
1603 }
1604
1605 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
1606 struct ieee80211_mgmt *mgmt,
1607 size_t len)
1608 {
1609 struct ieee80211_local *local = sdata->local;
1610 struct sk_buff *skb;
1611 struct ieee80211_mgmt *resp;
1612
1613 if (compare_ether_addr(mgmt->da, sdata->dev->dev_addr) != 0) {
1614 /* Not addressed to our own unicast address */
1615 return;
1616 }
1617
1618 if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 ||
1619 compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) {
1620 /* Not from the current AP or not associated yet. */
1621 return;
1622 }
1623
1624 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
1625 /* Too short SA Query request frame */
1626 return;
1627 }
1628
1629 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
1630 if (skb == NULL)
1631 return;
1632
1633 skb_reserve(skb, local->hw.extra_tx_headroom);
1634 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
1635 memset(resp, 0, 24);
1636 memcpy(resp->da, mgmt->sa, ETH_ALEN);
1637 memcpy(resp->sa, sdata->dev->dev_addr, ETH_ALEN);
1638 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
1639 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1640 IEEE80211_STYPE_ACTION);
1641 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
1642 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
1643 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
1644 memcpy(resp->u.action.u.sa_query.trans_id,
1645 mgmt->u.action.u.sa_query.trans_id,
1646 WLAN_SA_QUERY_TR_ID_LEN);
1647
1648 ieee80211_tx_skb(sdata, skb, 1);
1649 }
1650
1651 static ieee80211_rx_result debug_noinline
1652 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1653 {
1654 struct ieee80211_local *local = rx->local;
1655 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1656 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1657 int len = rx->skb->len;
1658
1659 if (!ieee80211_is_action(mgmt->frame_control))
1660 return RX_CONTINUE;
1661
1662 if (!rx->sta)
1663 return RX_DROP_MONITOR;
1664
1665 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1666 return RX_DROP_MONITOR;
1667
1668 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control))
1669 return RX_DROP_MONITOR;
1670
1671 /* all categories we currently handle have action_code */
1672 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
1673 return RX_DROP_MONITOR;
1674
1675 switch (mgmt->u.action.category) {
1676 case WLAN_CATEGORY_BACK:
1677 /*
1678 * The aggregation code is not prepared to handle
1679 * anything but STA/AP due to the BSSID handling;
1680 * IBSS could work in the code but isn't supported
1681 * by drivers or the standard.
1682 */
1683 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
1684 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
1685 sdata->vif.type != NL80211_IFTYPE_AP)
1686 return RX_DROP_MONITOR;
1687
1688 switch (mgmt->u.action.u.addba_req.action_code) {
1689 case WLAN_ACTION_ADDBA_REQ:
1690 if (len < (IEEE80211_MIN_ACTION_SIZE +
1691 sizeof(mgmt->u.action.u.addba_req)))
1692 return RX_DROP_MONITOR;
1693 ieee80211_process_addba_request(local, rx->sta, mgmt, len);
1694 break;
1695 case WLAN_ACTION_ADDBA_RESP:
1696 if (len < (IEEE80211_MIN_ACTION_SIZE +
1697 sizeof(mgmt->u.action.u.addba_resp)))
1698 return RX_DROP_MONITOR;
1699 ieee80211_process_addba_resp(local, rx->sta, mgmt, len);
1700 break;
1701 case WLAN_ACTION_DELBA:
1702 if (len < (IEEE80211_MIN_ACTION_SIZE +
1703 sizeof(mgmt->u.action.u.delba)))
1704 return RX_DROP_MONITOR;
1705 ieee80211_process_delba(sdata, rx->sta, mgmt, len);
1706 break;
1707 }
1708 break;
1709 case WLAN_CATEGORY_SPECTRUM_MGMT:
1710 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
1711 return RX_DROP_MONITOR;
1712
1713 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1714 return RX_DROP_MONITOR;
1715
1716 switch (mgmt->u.action.u.measurement.action_code) {
1717 case WLAN_ACTION_SPCT_MSR_REQ:
1718 if (len < (IEEE80211_MIN_ACTION_SIZE +
1719 sizeof(mgmt->u.action.u.measurement)))
1720 return RX_DROP_MONITOR;
1721 ieee80211_process_measurement_req(sdata, mgmt, len);
1722 break;
1723 case WLAN_ACTION_SPCT_CHL_SWITCH:
1724 if (len < (IEEE80211_MIN_ACTION_SIZE +
1725 sizeof(mgmt->u.action.u.chan_switch)))
1726 return RX_DROP_MONITOR;
1727
1728 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1729 return RX_DROP_MONITOR;
1730
1731 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
1732 return RX_DROP_MONITOR;
1733
1734 return ieee80211_sta_rx_mgmt(sdata, rx->skb);
1735 }
1736 break;
1737 case WLAN_CATEGORY_SA_QUERY:
1738 if (len < (IEEE80211_MIN_ACTION_SIZE +
1739 sizeof(mgmt->u.action.u.sa_query)))
1740 return RX_DROP_MONITOR;
1741 switch (mgmt->u.action.u.sa_query.action) {
1742 case WLAN_ACTION_SA_QUERY_REQUEST:
1743 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1744 return RX_DROP_MONITOR;
1745 ieee80211_process_sa_query_req(sdata, mgmt, len);
1746 break;
1747 case WLAN_ACTION_SA_QUERY_RESPONSE:
1748 /*
1749 * SA Query response is currently only used in AP mode
1750 * and it is processed in user space.
1751 */
1752 return RX_CONTINUE;
1753 }
1754 break;
1755 default:
1756 return RX_CONTINUE;
1757 }
1758
1759 rx->sta->rx_packets++;
1760 dev_kfree_skb(rx->skb);
1761 return RX_QUEUED;
1762 }
1763
1764 static ieee80211_rx_result debug_noinline
1765 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
1766 {
1767 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1768 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1769
1770 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1771 return RX_DROP_MONITOR;
1772
1773 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control))
1774 return RX_DROP_MONITOR;
1775
1776 if (ieee80211_vif_is_mesh(&sdata->vif))
1777 return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
1778
1779 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
1780 return ieee80211_ibss_rx_mgmt(sdata, rx->skb);
1781
1782 if (sdata->vif.type == NL80211_IFTYPE_STATION)
1783 return ieee80211_sta_rx_mgmt(sdata, rx->skb);
1784
1785 return RX_DROP_MONITOR;
1786 }
1787
1788 static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr,
1789 struct ieee80211_rx_data *rx)
1790 {
1791 int keyidx;
1792 unsigned int hdrlen;
1793
1794 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1795 if (rx->skb->len >= hdrlen + 4)
1796 keyidx = rx->skb->data[hdrlen + 3] >> 6;
1797 else
1798 keyidx = -1;
1799
1800 if (!rx->sta) {
1801 /*
1802 * Some hardware seems to generate incorrect Michael MIC
1803 * reports; ignore them to avoid triggering countermeasures.
1804 */
1805 goto ignore;
1806 }
1807
1808 if (!ieee80211_has_protected(hdr->frame_control))
1809 goto ignore;
1810
1811 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) {
1812 /*
1813 * APs with pairwise keys should never receive Michael MIC
1814 * errors for non-zero keyidx because these are reserved for
1815 * group keys and only the AP is sending real multicast
1816 * frames in the BSS.
1817 */
1818 goto ignore;
1819 }
1820
1821 if (!ieee80211_is_data(hdr->frame_control) &&
1822 !ieee80211_is_auth(hdr->frame_control))
1823 goto ignore;
1824
1825 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL,
1826 GFP_ATOMIC);
1827 ignore:
1828 dev_kfree_skb(rx->skb);
1829 rx->skb = NULL;
1830 }
1831
1832 /* TODO: use IEEE80211_RX_FRAGMENTED */
1833 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx)
1834 {
1835 struct ieee80211_sub_if_data *sdata;
1836 struct ieee80211_local *local = rx->local;
1837 struct ieee80211_rtap_hdr {
1838 struct ieee80211_radiotap_header hdr;
1839 u8 flags;
1840 u8 rate;
1841 __le16 chan_freq;
1842 __le16 chan_flags;
1843 } __attribute__ ((packed)) *rthdr;
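/*
 * For illustration, assuming the standard 8-byte struct
 * ieee80211_radiotap_header (it_version, it_pad, it_len, it_present), the
 * cooked-monitor header built below is laid out as:
 *
 *	offset  0	radiotap header (version, pad, length, present bitmap)
 *	offset  8	flags		(u8)
 *	offset  9	rate		(u8, units of 500 kbps)
 *	offset 10	chan_freq	(__le16, MHz)
 *	offset 12	chan_flags	(__le16)
 *
 * i.e. sizeof(*rthdr) == 14 bytes.
 */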
1844 struct sk_buff *skb = rx->skb, *skb2;
1845 struct net_device *prev_dev = NULL;
1846 struct ieee80211_rx_status *status = rx->status;
1847
1848 if (rx->flags & IEEE80211_RX_CMNTR_REPORTED)
1849 goto out_free_skb;
1850
1851 if (skb_headroom(skb) < sizeof(*rthdr) &&
1852 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
1853 goto out_free_skb;
1854
1855 rthdr = (void *)skb_push(skb, sizeof(*rthdr));
1856 memset(rthdr, 0, sizeof(*rthdr));
1857 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
1858 rthdr->hdr.it_present =
1859 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
1860 (1 << IEEE80211_RADIOTAP_RATE) |
1861 (1 << IEEE80211_RADIOTAP_CHANNEL));
1862
1863 rthdr->rate = rx->rate->bitrate / 5;
1864 rthdr->chan_freq = cpu_to_le16(status->freq);
1865
1866 if (status->band == IEEE80211_BAND_5GHZ)
1867 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
1868 IEEE80211_CHAN_5GHZ);
1869 else
1870 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
1871 IEEE80211_CHAN_2GHZ);
1872
1873 skb_set_mac_header(skb, 0);
1874 skb->ip_summed = CHECKSUM_UNNECESSARY;
1875 skb->pkt_type = PACKET_OTHERHOST;
1876 skb->protocol = htons(ETH_P_802_2);
1877
1878 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
1879 if (!netif_running(sdata->dev))
1880 continue;
1881
1882 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
1883 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
1884 continue;
1885
1886 if (prev_dev) {
1887 skb2 = skb_clone(skb, GFP_ATOMIC);
1888 if (skb2) {
1889 skb2->dev = prev_dev;
1890 netif_rx(skb2);
1891 }
1892 }
1893
1894 prev_dev = sdata->dev;
1895 sdata->dev->stats.rx_packets++;
1896 sdata->dev->stats.rx_bytes += skb->len;
1897 }
1898
1899 if (prev_dev) {
1900 skb->dev = prev_dev;
1901 netif_rx(skb);
1902 skb = NULL;
1903 } else
1904 goto out_free_skb;
1905
1906 rx->flags |= IEEE80211_RX_CMNTR_REPORTED;
1907 return;
1908
1909 out_free_skb:
1910 dev_kfree_skb(skb);
1911 }
1912
1913
1914 static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
1915 struct ieee80211_rx_data *rx,
1916 struct sk_buff *skb)
1917 {
1918 ieee80211_rx_result res = RX_DROP_MONITOR;
1919
1920 rx->skb = skb;
1921 rx->sdata = sdata;
1922 rx->dev = sdata->dev;
1923
1924 #define CALL_RXH(rxh) \
1925 do { \
1926 res = rxh(rx); \
1927 if (res != RX_CONTINUE) \
1928 goto rxh_done; \
1929 } while (0);
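/*
 * For illustration, a single CALL_RXH(ieee80211_rx_h_check) below
 * expands to
 *
 *	do {
 *		res = ieee80211_rx_h_check(rx);
 *		if (res != RX_CONTINUE)
 *			goto rxh_done;
 *	} while (0);
 *
 * so the first handler that returns anything other than RX_CONTINUE
 * short-circuits the rest of the chain.
 */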
1930
1931 CALL_RXH(ieee80211_rx_h_passive_scan)
1932 CALL_RXH(ieee80211_rx_h_check)
1933 CALL_RXH(ieee80211_rx_h_decrypt)
1934 CALL_RXH(ieee80211_rx_h_check_more_data)
1935 CALL_RXH(ieee80211_rx_h_sta_process)
1936 CALL_RXH(ieee80211_rx_h_defragment)
1937 CALL_RXH(ieee80211_rx_h_ps_poll)
1938 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
1939 /* must be after MMIC verify so header is counted in MPDU mic */
1940 CALL_RXH(ieee80211_rx_h_remove_qos_control)
1941 CALL_RXH(ieee80211_rx_h_amsdu)
1942 #ifdef CONFIG_MAC80211_MESH
1943 if (ieee80211_vif_is_mesh(&sdata->vif))
1944 CALL_RXH(ieee80211_rx_h_mesh_fwding);
1945 #endif
1946 CALL_RXH(ieee80211_rx_h_data)
1947 CALL_RXH(ieee80211_rx_h_ctrl)
1948 CALL_RXH(ieee80211_rx_h_action)
1949 CALL_RXH(ieee80211_rx_h_mgmt)
1950
1951 #undef CALL_RXH
1952
1953 rxh_done:
1954 switch (res) {
1955 case RX_DROP_MONITOR:
1956 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
1957 if (rx->sta)
1958 rx->sta->rx_dropped++;
1959 /* fall through */
1960 case RX_CONTINUE:
1961 ieee80211_rx_cooked_monitor(rx);
1962 break;
1963 case RX_DROP_UNUSABLE:
1964 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
1965 if (rx->sta)
1966 rx->sta->rx_dropped++;
1967 dev_kfree_skb(rx->skb);
1968 break;
1969 case RX_QUEUED:
1970 I802_DEBUG_INC(sdata->local->rx_handlers_queued);
1971 break;
1972 }
1973 }
1974
1975 /* main receive path */
1976
1977 static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1978 struct ieee80211_rx_data *rx,
1979 struct ieee80211_hdr *hdr)
1980 {
1981 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, sdata->vif.type);
1982 int multicast = is_multicast_ether_addr(hdr->addr1);
1983
1984 switch (sdata->vif.type) {
1985 case NL80211_IFTYPE_STATION:
1986 if (!bssid)
1987 return 0;
1988 if (!multicast &&
1989 compare_ether_addr(sdata->dev->dev_addr, hdr->addr1) != 0) {
1990 if (!(sdata->dev->flags & IFF_PROMISC))
1991 return 0;
1992 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1993 }
1994 break;
1995 case NL80211_IFTYPE_ADHOC:
1996 if (!bssid)
1997 return 0;
1998 if (ieee80211_is_beacon(hdr->frame_control)) {
1999 return 1;
2000 }
2001 else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
2002 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2003 return 0;
2004 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2005 } else if (!multicast &&
2006 compare_ether_addr(sdata->dev->dev_addr,
2007 hdr->addr1) != 0) {
2008 if (!(sdata->dev->flags & IFF_PROMISC))
2009 return 0;
2010 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2011 } else if (!rx->sta) {
2012 int rate_idx;
2013 if (rx->status->flag & RX_FLAG_HT)
2014 rate_idx = 0; /* TODO: HT rates */
2015 else
2016 rate_idx = rx->status->rate_idx;
2017 rx->sta = ieee80211_ibss_add_sta(sdata, bssid, hdr->addr2,
2018 BIT(rate_idx));
2019 }
2020 break;
2021 case NL80211_IFTYPE_MESH_POINT:
2022 if (!multicast &&
2023 compare_ether_addr(sdata->dev->dev_addr,
2024 hdr->addr1) != 0) {
2025 if (!(sdata->dev->flags & IFF_PROMISC))
2026 return 0;
2027
2028 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2029 }
2030 break;
2031 case NL80211_IFTYPE_AP_VLAN:
2032 case NL80211_IFTYPE_AP:
2033 if (!bssid) {
2034 if (compare_ether_addr(sdata->dev->dev_addr,
2035 hdr->addr1))
2036 return 0;
2037 } else if (!ieee80211_bssid_match(bssid,
2038 sdata->dev->dev_addr)) {
2039 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2040 return 0;
2041 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2042 }
2043 break;
2044 case NL80211_IFTYPE_WDS:
2045 if (bssid || !ieee80211_is_data(hdr->frame_control))
2046 return 0;
2047 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
2048 return 0;
2049 break;
2050 case NL80211_IFTYPE_MONITOR:
2051 /* take everything */
2052 break;
2053 case NL80211_IFTYPE_UNSPECIFIED:
2054 case __NL80211_IFTYPE_AFTER_LAST:
2055 /* should never get here */
2056 WARN_ON(1);
2057 break;
2058 }
2059
2060 return 1;
2061 }
2062
2063 /*
2064 * This is the actual Rx frames handler. As it belongs to the Rx path it
2065 * must be called with rcu_read_lock protection.
2066 */
2067 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2068 struct sk_buff *skb,
2069 struct ieee80211_rate *rate)
2070 {
2071 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2072 struct ieee80211_local *local = hw_to_local(hw);
2073 struct ieee80211_sub_if_data *sdata;
2074 struct ieee80211_hdr *hdr;
2075 struct ieee80211_rx_data rx;
2076 int prepares;
2077 struct ieee80211_sub_if_data *prev = NULL;
2078 struct sk_buff *skb_new;
2079
2080 hdr = (struct ieee80211_hdr *)skb->data;
2081 memset(&rx, 0, sizeof(rx));
2082 rx.skb = skb;
2083 rx.local = local;
2084
2085 rx.status = status;
2086 rx.rate = rate;
2087
2088 if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control))
2089 local->dot11ReceivedFragmentCount++;
2090
2091 rx.sta = sta_info_get(local, hdr->addr2);
2092 if (rx.sta) {
2093 rx.sdata = rx.sta->sdata;
2094 rx.dev = rx.sta->sdata->dev;
2095 }
2096
2097 if ((status->flag & RX_FLAG_MMIC_ERROR)) {
2098 ieee80211_rx_michael_mic_report(hdr, &rx);
2099 return;
2100 }
2101
2102 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
2103 test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
2104 rx.flags |= IEEE80211_RX_IN_SCAN;
2105
2106 ieee80211_parse_qos(&rx);
2107 ieee80211_verify_alignment(&rx);
2108
2109 skb = rx.skb;
2110
2111 if (rx.sdata && ieee80211_is_data(hdr->frame_control)) {
2112 rx.flags |= IEEE80211_RX_RA_MATCH;
2113 prepares = prepare_for_handlers(rx.sdata, &rx, hdr);
2114 if (prepares)
2115 prev = rx.sdata;
2116 } else list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2117 if (!netif_running(sdata->dev))
2118 continue;
2119
2120 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
2121 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
2122 continue;
2123
2124 rx.flags |= IEEE80211_RX_RA_MATCH;
2125 prepares = prepare_for_handlers(sdata, &rx, hdr);
2126
2127 if (!prepares)
2128 continue;
2129
2130 /*
2131 * frame is destined for this interface, but if it's not
2132 * also for the previous one we handle that after the
2133 * loop to avoid copying the SKB one time too many
2134 */
2135
2136 if (!prev) {
2137 prev = sdata;
2138 continue;
2139 }
2140
2141 /*
2142 * frame was destined for the previous interface
2143 * so invoke RX handlers for it
2144 */
2145
2146 skb_new = skb_copy(skb, GFP_ATOMIC);
2147 if (!skb_new) {
2148 if (net_ratelimit())
2149 printk(KERN_DEBUG "%s: failed to copy "
2150 "multicast frame for %s\n",
2151 wiphy_name(local->hw.wiphy),
2152 prev->dev->name);
2153 continue;
2154 }
2155 ieee80211_invoke_rx_handlers(prev, &rx, skb_new);
2156 prev = sdata;
2157 }
2158 if (prev)
2159 ieee80211_invoke_rx_handlers(prev, &rx, skb);
2160 else
2161 dev_kfree_skb(skb);
2162 }
2163
2164 #define SEQ_MODULO 0x1000
2165 #define SEQ_MASK 0xfff
2166
2167 static inline int seq_less(u16 sq1, u16 sq2)
2168 {
2169 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
2170 }
2171
2172 static inline u16 seq_inc(u16 sq)
2173 {
2174 return (sq + 1) & SEQ_MASK;
2175 }
2176
2177 static inline u16 seq_sub(u16 sq1, u16 sq2)
2178 {
2179 return (sq1 - sq2) & SEQ_MASK;
2180 }
2181
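/*
 * Worked example for the 12-bit sequence arithmetic above (values are
 * illustrative): with SEQ_MODULO = 0x1000 and SEQ_MASK = 0xfff,
 *
 *	seq_less(0x010, 0x020) == 1	(0x010 is older than 0x020)
 *	seq_less(0xffe, 0x004) == 1	(0xffe still precedes 0x004 across
 *					 the wrap: (0xffe - 0x004) & 0xfff =
 *					 0xffa, which is above 0x800)
 *	seq_inc(0xfff)         == 0x000
 *	seq_sub(0x004, 0xffe)  == 0x006
 */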
2182
2183 static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
2184 struct tid_ampdu_rx *tid_agg_rx,
2185 int index)
2186 {
2187 struct ieee80211_supported_band *sband;
2188 struct ieee80211_rate *rate;
2189 struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
2190 struct ieee80211_rx_status *status;
2191
2192 if (!skb)
2193 goto no_frame;
2194
2195 status = IEEE80211_SKB_RXCB(skb);
2196
2197 /* release the reordered frame to the stack */
2198 sband = hw->wiphy->bands[status->band];
2199 if (status->flag & RX_FLAG_HT)
2200 rate = sband->bitrates; /* TODO: HT rates */
2201 else
2202 rate = &sband->bitrates[status->rate_idx];
2203 __ieee80211_rx_handle_packet(hw, skb, rate);
2204 tid_agg_rx->stored_mpdu_num--;
2205 tid_agg_rx->reorder_buf[index] = NULL;
2206
2207 no_frame:
2208 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
2209 }
2210
2211
2212 /*
2213 * Timeout (in jiffies) for skbs that are waiting in the RX reorder buffer. If
2214 * the skb was added to the buffer longer than this time ago, the earlier
2215 * frames that have not yet been received are assumed to be lost and the skb
2216 * can be released for processing. This may also release other skbs from the
2217 * reorder buffer if there are no additional gaps between the frames.
2218 */
2219 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
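/*
 * Illustrative example: with the definition above, a buffered slot j in
 * ieee80211_sta_manage_reorder_buf() counts as expired once
 *
 *	time_after(jiffies, tid_agg_rx->reorder_time[j] + HZ / 10)
 *
 * becomes true, i.e. roughly 100 ms (HT_RX_REORDER_BUF_TIMEOUT) after the
 * skb was added to the reorder buffer.
 */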
2220
2221 /*
2222 * As this function belongs to the Rx path it must be called with
2223 * the proper rcu_read_lock protection for its flow.
2224 */
2225 static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2226 struct tid_ampdu_rx *tid_agg_rx,
2227 struct sk_buff *skb,
2228 u16 mpdu_seq_num,
2229 int bar_req)
2230 {
2231 u16 head_seq_num, buf_size;
2232 int index;
2233
2234 buf_size = tid_agg_rx->buf_size;
2235 head_seq_num = tid_agg_rx->head_seq_num;
2236
2237 /* frame with an out-of-date sequence number - drop it */
2238 if (seq_less(mpdu_seq_num, head_seq_num)) {
2239 dev_kfree_skb(skb);
2240 return 1;
2241 }
2242
2243 /* if the frame sequence number exceeds our buffering window size or a
2244 * Block Ack Request arrived - release stored frames */
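/*
 * For example (illustrative values): with buf_size = 64 and head_seq_num =
 * 100, an MPDU with sequence number 200 falls outside the window, so
 * head_seq_num is advanced to seq_inc(seq_sub(200, 64)) = 137 and any
 * frames buffered for sequence numbers 100..136 are released to the stack
 * first.
 */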
2245 if ((!seq_less(mpdu_seq_num, head_seq_num + buf_size)) || (bar_req)) {
2246 /* new head for the reordering buffer */
2247 if (bar_req)
2248 head_seq_num = mpdu_seq_num;
2249 else
2250 head_seq_num =
2251 seq_inc(seq_sub(mpdu_seq_num, buf_size));
2252 /* release stored frames up to the new head to the stack */
2253 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
2254 index = seq_sub(tid_agg_rx->head_seq_num,
2255 tid_agg_rx->ssn)
2256 % tid_agg_rx->buf_size;
2257 ieee80211_release_reorder_frame(hw, tid_agg_rx,
2258 index);
2259 }
2260 if (bar_req)
2261 return 1;
2262 }
2263
2264 /* the new frame now always falls within the reordering buffer window */
2265
2266 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn)
2267 % tid_agg_rx->buf_size;
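/*
 * Example (illustrative values): with ssn = 0x010 and buf_size = 64, an
 * MPDU with sequence number 0x014 lands in slot
 * index = seq_sub(0x014, 0x010) % 64 = 4.
 */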
2268 /* check if we already stored this frame */
2269 if (tid_agg_rx->reorder_buf[index]) {
2270 dev_kfree_skb(skb);
2271 return 1;
2272 }
2273
2274 /* if the arriving MPDU is in order and nothing else is stored, */
2275 /* release it immediately */
2276 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
2277 tid_agg_rx->stored_mpdu_num == 0) {
2278 tid_agg_rx->head_seq_num =
2279 seq_inc(tid_agg_rx->head_seq_num);
2280 return 0;
2281 }
2282
2283 /* put the frame in the reordering buffer */
2284 tid_agg_rx->reorder_buf[index] = skb;
2285 tid_agg_rx->reorder_time[index] = jiffies;
2286 tid_agg_rx->stored_mpdu_num++;
2287 /* release buffered frames up to the next missing frame */
2288 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn)
2289 % tid_agg_rx->buf_size;
2290 if (!tid_agg_rx->reorder_buf[index] &&
2291 tid_agg_rx->stored_mpdu_num > 1) {
2292 /*
2293 * No buffers ready to be released, but check whether any
2294 * frames in the reorder buffer have timed out.
2295 */
2296 int j;
2297 int skipped = 1;
2298 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
2299 j = (j + 1) % tid_agg_rx->buf_size) {
2300 if (tid_agg_rx->reorder_buf[j] == NULL) {
2301 skipped++;
2302 continue;
2303 }
2304 if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
2305 HZ / 10))
2306 break;
2307
2308 #ifdef CONFIG_MAC80211_HT_DEBUG
2309 if (net_ratelimit())
2310 printk(KERN_DEBUG "%s: release an RX reorder "
2311 "frame due to timeout on earlier "
2312 "frames\n",
2313 wiphy_name(hw->wiphy));
2314 #endif
2315 ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
2316
2317 /*
2318 * Increment the head seq# also for the skipped slots.
2319 */
2320 tid_agg_rx->head_seq_num =
2321 (tid_agg_rx->head_seq_num + skipped) &
2322 SEQ_MASK;
2323 skipped = 0;
2324 }
2325 } else while (tid_agg_rx->reorder_buf[index]) {
2326 ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
2327 index = seq_sub(tid_agg_rx->head_seq_num,
2328 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
2329 }
2330 return 1;
2331 }
2332
2333 static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2334 struct sk_buff *skb)
2335 {
2336 struct ieee80211_hw *hw = &local->hw;
2337 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2338 struct sta_info *sta;
2339 struct tid_ampdu_rx *tid_agg_rx;
2340 u16 sc;
2341 u16 mpdu_seq_num;
2342 u8 ret = 0;
2343 int tid;
2344
2345 sta = sta_info_get(local, hdr->addr2);
2346 if (!sta)
2347 return ret;
2348
2349 /* filter the QoS data rx stream according to
2350 * STA/TID and check if this STA/TID has an active aggregation session */
2351 if (!ieee80211_is_data_qos(hdr->frame_control))
2352 goto end_reorder;
2353
2354 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
2355
2356 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
2357 goto end_reorder;
2358
2359 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
2360
2361 /* QoS Null Data frames are excluded */
2362 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
2363 goto end_reorder;
2364
2365 /* new, possibly unordered A-MPDU frame - process it */
2366
2367 /* reset session timer */
2368 if (tid_agg_rx->timeout)
2369 mod_timer(&tid_agg_rx->session_timer,
2370 TU_TO_EXP_TIME(tid_agg_rx->timeout));
2371
2372 /* if this mpdu is fragmented - terminate rx aggregation session */
2373 sc = le16_to_cpu(hdr->seq_ctrl);
2374 if (sc & IEEE80211_SCTL_FRAG) {
2375 ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
2376 tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
2377 ret = 1;
2378 goto end_reorder;
2379 }
2380
2381 /* hand the MPDU to the reordering buffer according to its sequence number */
2382 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
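/*
 * Illustrative example: IEEE80211_SCTL_SEQ masks the upper 12 bits of the
 * Sequence Control field, so sc = 0x0153 carries fragment number 3 and
 * yields mpdu_seq_num = (0x0153 & 0xfff0) >> 4 = 0x015.
 */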
2383 ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb,
2384 mpdu_seq_num, 0);
2385 end_reorder:
2386 return ret;
2387 }
2388
2389 /*
2390 * This is the receive path handler. It is called by a low level driver when an
2391 * 802.11 MPDU is received from the hardware.
2392 */
2393 void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
2394 {
2395 struct ieee80211_local *local = hw_to_local(hw);
2396 struct ieee80211_rate *rate = NULL;
2397 struct ieee80211_supported_band *sband;
2398 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2399
2400 WARN_ON_ONCE(softirq_count() == 0);
2401
2402 if (WARN_ON(status->band < 0 ||
2403 status->band >= IEEE80211_NUM_BANDS))
2404 goto drop;
2405
2406 sband = local->hw.wiphy->bands[status->band];
2407 if (WARN_ON(!sband))
2408 goto drop;
2409
2410 /*
2411 * If we're suspending, it is possible although not too likely
2412 * that we'd be receiving frames after having already partially
2413 * quiesced the stack. We can't process such frames then since
2414 * that might, for example, cause stations to be added or other
2415 * driver callbacks be invoked.
2416 */
2417 if (unlikely(local->quiescing || local->suspended))
2418 goto drop;
2419
2420 /*
2421 * The same happens when we're not even started,
2422 * but that's worth a warning.
2423 */
2424 if (WARN_ON(!local->started))
2425 goto drop;
2426
2427 if (status->flag & RX_FLAG_HT) {
2428 /* rate_idx is MCS index */
2429 if (WARN_ON(status->rate_idx < 0 ||
2430 status->rate_idx >= 76))
2431 goto drop;
2432 /* HT rates are not in the table - use the highest legacy rate
2433 * for now since other parts of mac80211 may not yet be fully
2434 * MCS aware. */
2435 rate = &sband->bitrates[sband->n_bitrates - 1];
2436 } else {
2437 if (WARN_ON(status->rate_idx < 0 ||
2438 status->rate_idx >= sband->n_bitrates))
2439 goto drop;
2440 rate = &sband->bitrates[status->rate_idx];
2441 }
2442
2443 /*
2444 * key references and virtual interfaces are protected using RCU
2445 * and this requires that we are in a read-side RCU section during
2446 * receive processing
2447 */
2448 rcu_read_lock();
2449
2450 /*
2451 * Frames with failed FCS/PLCP checksum are not returned,
2452 * all other frames are returned without radiotap header
2453 * if it was previously present.
2454 * Also, frames with fewer than 16 bytes are dropped.
2455 */
2456 skb = ieee80211_rx_monitor(local, skb, rate);
2457 if (!skb) {
2458 rcu_read_unlock();
2459 return;
2460 }
2461
2462 /*
2463 * In theory, the block ack reordering should happen after duplicate
2464 * removal (ieee80211_rx_h_check(), which is an RX handler). As such,
2465 * the call to ieee80211_rx_reorder_ampdu() should really be moved to
2466 * happen as a new RX handler between ieee80211_rx_h_check and
2467 * ieee80211_rx_h_decrypt. This cleanup may eventually happen, but for
2468 * the time being, the call can be here since RX reorder buf processing
2469 * will implicitly skip duplicates. We could, in theory at least,
2470 * process frames that ieee80211_rx_h_passive_scan would drop (e.g.,
2471 * frames from other than operational channel), but that should not
2472 * happen in normal networks.
2473 */
2474 if (!ieee80211_rx_reorder_ampdu(local, skb))
2475 __ieee80211_rx_handle_packet(hw, skb, rate);
2476
2477 rcu_read_unlock();
2478
2479 return;
2480 drop:
2481 kfree_skb(skb);
2482 }
2483 EXPORT_SYMBOL(ieee80211_rx);
2484
2485 /* This is a version of the rx handler that can be called from hard irq
2486 * context. Post the skb on the queue and schedule the tasklet */
2487 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
2488 {
2489 struct ieee80211_local *local = hw_to_local(hw);
2490
2491 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
2492
2493 skb->pkt_type = IEEE80211_RX_MSG;
2494 skb_queue_tail(&local->skb_queue, skb);
2495 tasklet_schedule(&local->tasklet);
2496 }
2497 EXPORT_SYMBOL(ieee80211_rx_irqsafe);
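#if 0
/*
 * Minimal usage sketch for ieee80211_rx_irqsafe(): a driver's interrupt
 * handler fills in the RX status control block and hands the frame to
 * mac80211 without leaving hard-irq context. The mydrv_* names and the way
 * the skb is obtained are hypothetical and for illustration only; a real
 * driver would also include <linux/interrupt.h> for the irqreturn_t bits.
 */
static irqreturn_t mydrv_isr(int irq, void *dev_id)
{
	struct ieee80211_hw *hw = dev_id;
	struct sk_buff *skb = mydrv_dequeue_rx_frame(hw);	/* hypothetical helper */
	struct ieee80211_rx_status *status;

	if (!skb)
		return IRQ_NONE;

	status = IEEE80211_SKB_RXCB(skb);
	memset(status, 0, sizeof(*status));
	status->band = IEEE80211_BAND_2GHZ;
	status->freq = 2437;		/* channel 6 */
	status->rate_idx = 0;		/* index into the band's bitrate table */
	status->signal = -60;		/* dBm, when IEEE80211_HW_SIGNAL_DBM is advertised */

	ieee80211_rx_irqsafe(hw, skb);	/* queue the skb and schedule the RX tasklet */
	return IRQ_HANDLED;
}
#endif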