net/mac80211/rx.c
1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/slab.h>
14 #include <linux/kernel.h>
15 #include <linux/skbuff.h>
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/rcupdate.h>
19 #include <net/mac80211.h>
20 #include <net/ieee80211_radiotap.h>
21
22 #include "ieee80211_i.h"
23 #include "driver-ops.h"
24 #include "led.h"
25 #include "mesh.h"
26 #include "wep.h"
27 #include "wpa.h"
28 #include "tkip.h"
29 #include "wme.h"
30
31 /*
32 * monitor mode reception
33 *
34 * This function cleans up the SKB, i.e. it removes all the stuff
35 * only useful for monitoring.
36 */
37 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
38 struct sk_buff *skb)
39 {
40 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
41 if (likely(skb->len > FCS_LEN))
42 __pskb_trim(skb, skb->len - FCS_LEN);
43 else {
44 /* driver bug */
45 WARN_ON(1);
46 dev_kfree_skb(skb);
47 skb = NULL;
48 }
49 }
50
51 return skb;
52 }
53
54 static inline int should_drop_frame(struct sk_buff *skb,
55 int present_fcs_len)
56 {
57 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
58 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
59
60 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
61 return 1;
62 if (unlikely(skb->len < 16 + present_fcs_len))
63 return 1;
64 if (ieee80211_is_ctl(hdr->frame_control) &&
65 !ieee80211_is_pspoll(hdr->frame_control) &&
66 !ieee80211_is_back_req(hdr->frame_control))
67 return 1;
68 return 0;
69 }
70
71 static int
72 ieee80211_rx_radiotap_len(struct ieee80211_local *local,
73 struct ieee80211_rx_status *status)
74 {
75 int len;
76
77 /* always present fields */
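/* (flags 1 byte + rate 1 byte + channel 2+2 bytes + antenna 1 byte +
 * RX flags 2 bytes = 9, matching what ieee80211_add_rx_radiotap_header()
 * writes below) */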
78 len = sizeof(struct ieee80211_radiotap_header) + 9;
79
80 if (status->flag & RX_FLAG_TSFT)
81 len += 8;
82 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
83 len += 1;
84
85 if (len & 1) /* padding for RX_FLAGS if necessary */
86 len++;
87
88 return len;
89 }
90
91 /*
92 * ieee80211_add_rx_radiotap_header - add radiotap header
93 *
94 * add a radiotap header containing all the fields which the hardware provided.
95 */
96 static void
97 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
98 struct sk_buff *skb,
99 struct ieee80211_rate *rate,
100 int rtap_len)
101 {
102 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
103 struct ieee80211_radiotap_header *rthdr;
104 unsigned char *pos;
105 u16 rx_flags = 0;
106
107 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
108 memset(rthdr, 0, rtap_len);
109
110 /* radiotap header, set always present flags */
111 rthdr->it_present =
112 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
113 (1 << IEEE80211_RADIOTAP_CHANNEL) |
114 (1 << IEEE80211_RADIOTAP_ANTENNA) |
115 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
116 rthdr->it_len = cpu_to_le16(rtap_len);
117
118 pos = (unsigned char *)(rthdr+1);
119
120 /* the order of the following fields is important */
121
122 /* IEEE80211_RADIOTAP_TSFT */
123 if (status->flag & RX_FLAG_TSFT) {
124 put_unaligned_le64(status->mactime, pos);
125 rthdr->it_present |=
126 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
127 pos += 8;
128 }
129
130 /* IEEE80211_RADIOTAP_FLAGS */
131 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
132 *pos |= IEEE80211_RADIOTAP_F_FCS;
133 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
134 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
135 if (status->flag & RX_FLAG_SHORTPRE)
136 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
137 pos++;
138
139 /* IEEE80211_RADIOTAP_RATE */
140 if (status->flag & RX_FLAG_HT) {
141 /*
142 * TODO: add following information into radiotap header once
143 * suitable fields are defined for it:
144 * - MCS index (status->rate_idx)
145 * - HT40 (status->flag & RX_FLAG_40MHZ)
146 * - short-GI (status->flag & RX_FLAG_SHORT_GI)
147 */
148 *pos = 0;
149 } else {
150 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
151 *pos = rate->bitrate / 5;
152 }
153 pos++;
154
155 /* IEEE80211_RADIOTAP_CHANNEL */
156 put_unaligned_le16(status->freq, pos);
157 pos += 2;
158 if (status->band == IEEE80211_BAND_5GHZ)
159 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
160 pos);
161 else if (status->flag & RX_FLAG_HT)
162 put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
163 pos);
164 else if (rate->flags & IEEE80211_RATE_ERP_G)
165 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
166 pos);
167 else
168 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
169 pos);
170 pos += 2;
171
172 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
173 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
174 *pos = status->signal;
175 rthdr->it_present |=
176 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
177 pos++;
178 }
179
180 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
181
182 /* IEEE80211_RADIOTAP_ANTENNA */
183 *pos = status->antenna;
184 pos++;
185
186 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
187
188 /* IEEE80211_RADIOTAP_RX_FLAGS */
189 /* ensure 2 byte alignment for the 2 byte field as required */
190 if ((pos - (u8 *)rthdr) & 1)
191 pos++;
192 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
193 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
194 put_unaligned_le16(rx_flags, pos);
195 pos += 2;
196 }
197
198 /*
199 * This function copies a received frame to all monitor interfaces and
200 * returns a cleaned-up SKB that no longer includes the FCS nor the
201 * radiotap header the driver might have added.
202 */
203 static struct sk_buff *
204 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
205 struct ieee80211_rate *rate)
206 {
207 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
208 struct ieee80211_sub_if_data *sdata;
209 int needed_headroom = 0;
210 struct sk_buff *skb, *skb2;
211 struct net_device *prev_dev = NULL;
212 int present_fcs_len = 0;
213
214 /*
215 * First, we may need to make a copy of the skb because
216 * (1) we need to modify it for radiotap (if not present), and
217 * (2) the other RX handlers will modify the skb we got.
218 *
219 * We don't need to, of course, if we aren't going to return
220 * the SKB because it has a bad FCS/PLCP checksum.
221 */
222
223 /* room for the radiotap header based on driver features */
224 needed_headroom = ieee80211_rx_radiotap_len(local, status);
225
226 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
227 present_fcs_len = FCS_LEN;
228
229 /* make sure hdr->frame_control is on the linear part */
230 if (!pskb_may_pull(origskb, 2)) {
231 dev_kfree_skb(origskb);
232 return NULL;
233 }
234
235 if (!local->monitors) {
236 if (should_drop_frame(origskb, present_fcs_len)) {
237 dev_kfree_skb(origskb);
238 return NULL;
239 }
240
241 return remove_monitor_info(local, origskb);
242 }
243
244 if (should_drop_frame(origskb, present_fcs_len)) {
245 /* only need to expand headroom if necessary */
246 skb = origskb;
247 origskb = NULL;
248
249 /*
250 * This shouldn't trigger often because most devices have an
251 * RX header they pull before we get here, and that should
252 * be big enough for our radiotap information. We should
253 * probably export the length to drivers so that we can have
254 * them allocate enough headroom to start with.
255 */
256 if (skb_headroom(skb) < needed_headroom &&
257 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
258 dev_kfree_skb(skb);
259 return NULL;
260 }
261 } else {
262 /*
263 * Need to make a copy and possibly remove radiotap header
264 * and FCS from the original.
265 */
266 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
267
268 origskb = remove_monitor_info(local, origskb);
269
270 if (!skb)
271 return origskb;
272 }
273
274 /* prepend radiotap information */
275 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom);
276
277 skb_reset_mac_header(skb);
278 skb->ip_summed = CHECKSUM_UNNECESSARY;
279 skb->pkt_type = PACKET_OTHERHOST;
280 skb->protocol = htons(ETH_P_802_2);
281
282 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
283 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
284 continue;
285
286 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
287 continue;
288
289 if (!ieee80211_sdata_running(sdata))
290 continue;
291
292 if (prev_dev) {
293 skb2 = skb_clone(skb, GFP_ATOMIC);
294 if (skb2) {
295 skb2->dev = prev_dev;
296 netif_receive_skb(skb2);
297 }
298 }
299
300 prev_dev = sdata->dev;
301 sdata->dev->stats.rx_packets++;
302 sdata->dev->stats.rx_bytes += skb->len;
303 }
304
305 if (prev_dev) {
306 skb->dev = prev_dev;
307 netif_receive_skb(skb);
308 } else
309 dev_kfree_skb(skb);
310
311 return origskb;
312 }
313
314
315 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
316 {
317 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
318 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
319 int tid;
320
321 /* does the frame have a qos control field? */
322 if (ieee80211_is_data_qos(hdr->frame_control)) {
323 u8 *qc = ieee80211_get_qos_ctl(hdr);
324 /* frame has qos control */
325 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
326 if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
327 status->rx_flags |= IEEE80211_RX_AMSDU;
328 } else {
329 /*
330 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
331 *
332 * Sequence numbers for management frames, QoS data
333 * frames with a broadcast/multicast address in the
334 * Address 1 field, and all non-QoS data frames sent
335 * by QoS STAs are assigned using an additional single
336 * modulo-4096 counter, [...]
337 *
338 * We also use that counter for non-QoS STAs.
339 */
340 tid = NUM_RX_DATA_QUEUES - 1;
341 }
342
343 rx->queue = tid;
344 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
345 * For now, set skb->priority to 0 for other cases. */
346 rx->skb->priority = (tid > 7) ? 0 : tid;
347 }
348
349 /**
350 * DOC: Packet alignment
351 *
352 * Drivers always need to pass packets that are aligned to two-byte boundaries
353 * to the stack.
354 *
355 * Additionally, drivers should, if possible, align the payload data in a way that
356 * guarantees that the contained IP header is aligned to a four-byte
357 * boundary. In the case of regular frames, this simply means aligning the
358 * payload to a four-byte boundary (because either the IP header is directly
359 * contained, or IV/RFC1042 headers that have a length divisible by four are
360 * in front of it). If the payload data is not properly aligned and the
361 * architecture doesn't support efficient unaligned operations, mac80211
362 * will align the data.
363 *
364 * With A-MSDU frames, however, the payload data address must be two modulo
365 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
366 * push the IP header further back to a multiple of four again. Thankfully, the
367 * specs were sane enough this time around to require padding each A-MSDU
368 * subframe to a length that is a multiple of four.
369 *
370 * Padding like Atheros hardware adds, which sits in between the 802.11 header
371 * and the payload, is not supported; the driver is required to move the 802.11
372 * header to be directly in front of the payload in that case.
373 */
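/*
 * As a worked example: a QoS data frame has a 26-byte 802.11 header and
 * typically an 8-byte RFC 1042 (LLC/SNAP) encapsulation in front of the IP
 * header.  Starting the frame at an address that is 2 modulo 4 puts the
 * payload at a multiple of four (2 + 26 = 28) and therefore the IP header
 * too (28 + 8 = 36).
 */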
374 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
375 {
376 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
377 WARN_ONCE((unsigned long)rx->skb->data & 1,
378 "unaligned packet at 0x%p\n", rx->skb->data);
379 #endif
380 }
381
382
383 /* rx handlers */
384
385 static ieee80211_rx_result debug_noinline
386 ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
387 {
388 struct ieee80211_local *local = rx->local;
389 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
390 struct sk_buff *skb = rx->skb;
391
392 if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN)))
393 return RX_CONTINUE;
394
395 if (test_bit(SCAN_HW_SCANNING, &local->scanning))
396 return ieee80211_scan_rx(rx->sdata, skb);
397
398 if (test_bit(SCAN_SW_SCANNING, &local->scanning)) {
399 /* drop all the other packets during a software scan anyway */
400 if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED)
401 dev_kfree_skb(skb);
402 return RX_QUEUED;
403 }
404
405 /* scanning finished during invoking of handlers */
406 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
407 return RX_DROP_UNUSABLE;
408 }
409
410
411 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
412 {
413 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
414
415 if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
416 return 0;
417
418 return ieee80211_is_robust_mgmt_frame(hdr);
419 }
420
421
422 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
423 {
424 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
425
426 if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
427 return 0;
428
429 return ieee80211_is_robust_mgmt_frame(hdr);
430 }
431
432
433 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
434 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
435 {
436 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
437 struct ieee80211_mmie *mmie;
438
439 if (skb->len < 24 + sizeof(*mmie) ||
440 !is_multicast_ether_addr(hdr->da))
441 return -1;
442
443 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
444 return -1; /* not a robust management frame */
445
446 mmie = (struct ieee80211_mmie *)
447 (skb->data + skb->len - sizeof(*mmie));
448 if (mmie->element_id != WLAN_EID_MMIE ||
449 mmie->length != sizeof(*mmie) - 2)
450 return -1;
451
452 return le16_to_cpu(mmie->key_id);
453 }
454
455
456 static ieee80211_rx_result
457 ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
458 {
459 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
460 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
461 char *dev_addr = rx->sdata->vif.addr;
462
463 if (ieee80211_is_data(hdr->frame_control)) {
464 if (is_multicast_ether_addr(hdr->addr1)) {
465 if (ieee80211_has_tods(hdr->frame_control) ||
466 !ieee80211_has_fromds(hdr->frame_control))
467 return RX_DROP_MONITOR;
468 if (memcmp(hdr->addr3, dev_addr, ETH_ALEN) == 0)
469 return RX_DROP_MONITOR;
470 } else {
471 if (!ieee80211_has_a4(hdr->frame_control))
472 return RX_DROP_MONITOR;
473 if (memcmp(hdr->addr4, dev_addr, ETH_ALEN) == 0)
474 return RX_DROP_MONITOR;
475 }
476 }
477
478 /* If there is no established peer link and this is not a peer link
479 * establishment frame, beacon or probe, drop the frame.
480 */
481
482 if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) {
483 struct ieee80211_mgmt *mgmt;
484
485 if (!ieee80211_is_mgmt(hdr->frame_control))
486 return RX_DROP_MONITOR;
487
488 if (ieee80211_is_action(hdr->frame_control)) {
489 mgmt = (struct ieee80211_mgmt *)hdr;
490 if (mgmt->u.action.category != WLAN_CATEGORY_MESH_PLINK)
491 return RX_DROP_MONITOR;
492 return RX_CONTINUE;
493 }
494
495 if (ieee80211_is_probe_req(hdr->frame_control) ||
496 ieee80211_is_probe_resp(hdr->frame_control) ||
497 ieee80211_is_beacon(hdr->frame_control))
498 return RX_CONTINUE;
499
500 return RX_DROP_MONITOR;
501
502 }
503
504 #define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))
505
506 if (ieee80211_is_data(hdr->frame_control) &&
507 is_multicast_ether_addr(hdr->addr1) &&
508 mesh_rmc_check(hdr->addr3, msh_h_get(hdr, hdrlen), rx->sdata))
509 return RX_DROP_MONITOR;
510 #undef msh_h_get
511
512 return RX_CONTINUE;
513 }
514
515 #define SEQ_MODULO 0x1000
516 #define SEQ_MASK 0xfff
517
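/*
 * Sequence numbers live in a circular 12-bit space (0..4095): seq_less()
 * treats sq1 as "before" sq2 when the distance from sq2 back to sq1 is
 * more than half the space, so e.g. seq_less(4095, 3) is true because 3
 * is only four steps ahead of 4095 after the wrap-around.
 */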
518 static inline int seq_less(u16 sq1, u16 sq2)
519 {
520 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
521 }
522
523 static inline u16 seq_inc(u16 sq)
524 {
525 return (sq + 1) & SEQ_MASK;
526 }
527
528 static inline u16 seq_sub(u16 sq1, u16 sq2)
529 {
530 return (sq1 - sq2) & SEQ_MASK;
531 }
532
533
534 static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
535 struct tid_ampdu_rx *tid_agg_rx,
536 int index,
537 struct sk_buff_head *frames)
538 {
539 struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
540
541 lockdep_assert_held(&tid_agg_rx->reorder_lock);
542
543 if (!skb)
544 goto no_frame;
545
546 /* release the frame from the reorder ring buffer */
547 tid_agg_rx->stored_mpdu_num--;
548 tid_agg_rx->reorder_buf[index] = NULL;
549 __skb_queue_tail(frames, skb);
550
551 no_frame:
552 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
553 }
554
555 static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
556 struct tid_ampdu_rx *tid_agg_rx,
557 u16 head_seq_num,
558 struct sk_buff_head *frames)
559 {
560 int index;
561
562 lockdep_assert_held(&tid_agg_rx->reorder_lock);
563
564 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
565 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
566 tid_agg_rx->buf_size;
567 ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
568 }
569 }
570
571 /*
572 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
573 * the skb was added to the buffer longer than this time ago, the earlier
574 * frames that have not yet been received are assumed to be lost and the skb
575 * can be released for processing. This may also release other skb's from the
576 * reorder buffer if there are no additional gaps between the frames.
577 *
578 * Callers must hold tid_agg_rx->reorder_lock.
579 */
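/* HZ jiffies always make up one second, so HZ / 10 is roughly 100 ms
 * whatever tick rate the kernel was configured with. */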
580 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
581
582 static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
583 struct tid_ampdu_rx *tid_agg_rx,
584 struct sk_buff_head *frames)
585 {
586 int index, j;
587
588 lockdep_assert_held(&tid_agg_rx->reorder_lock);
589
590 /* release the buffer until next missing frame */
591 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
592 tid_agg_rx->buf_size;
593 if (!tid_agg_rx->reorder_buf[index] &&
594 tid_agg_rx->stored_mpdu_num > 1) {
595 /*
596 * No buffers ready to be released, but check whether any
597 * frames in the reorder buffer have timed out.
598 */
599 int skipped = 1;
600 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
601 j = (j + 1) % tid_agg_rx->buf_size) {
602 if (!tid_agg_rx->reorder_buf[j]) {
603 skipped++;
604 continue;
605 }
606 if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
607 HT_RX_REORDER_BUF_TIMEOUT))
608 goto set_release_timer;
609
610 #ifdef CONFIG_MAC80211_HT_DEBUG
611 if (net_ratelimit())
612 wiphy_debug(hw->wiphy,
613 "release an RX reorder frame due to timeout on earlier frames\n");
614 #endif
615 ieee80211_release_reorder_frame(hw, tid_agg_rx,
616 j, frames);
617
618 /*
619 * Increment the head seq# also for the skipped slots.
620 */
621 tid_agg_rx->head_seq_num =
622 (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
623 skipped = 0;
624 }
625 } else while (tid_agg_rx->reorder_buf[index]) {
626 ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
627 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
628 tid_agg_rx->buf_size;
629 }
630
631 /*
632 * Disable the reorder release timer for now.
633 *
634 * The current implementation lacks a proper locking scheme
635 * which would protect vital statistic and debug counters
636 * from being updated by two different but concurrent BHs.
637 *
638 * More information about the topic is available from:
639 * - thread: http://marc.info/?t=128635927000001
640 *
641 * What was wrong:
642 * => http://marc.info/?l=linux-wireless&m=128636170811964
643 * "Basically the thing is that until your patch, the data
644 * in the struct didn't actually need locking because it
645 * was accessed by the RX path only which is not concurrent."
646 *
647 * List of what needs to be fixed:
648 * => http://marc.info/?l=linux-wireless&m=128656352920957
649 *
650
651 if (tid_agg_rx->stored_mpdu_num) {
652 j = index = seq_sub(tid_agg_rx->head_seq_num,
653 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
654
655 for (; j != (index - 1) % tid_agg_rx->buf_size;
656 j = (j + 1) % tid_agg_rx->buf_size) {
657 if (tid_agg_rx->reorder_buf[j])
658 break;
659 }
660
661 set_release_timer:
662
663 mod_timer(&tid_agg_rx->reorder_timer,
664 tid_agg_rx->reorder_time[j] +
665 HT_RX_REORDER_BUF_TIMEOUT);
666 } else {
667 del_timer(&tid_agg_rx->reorder_timer);
668 }
669 */
670
671 set_release_timer:
672 return;
673 }
674
675 /*
676 * As this function belongs to the RX path it must be under
677 * rcu_read_lock protection. It returns false if the frame
678 * can be processed immediately, true if it was consumed.
679 */
680 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
681 struct tid_ampdu_rx *tid_agg_rx,
682 struct sk_buff *skb,
683 struct sk_buff_head *frames)
684 {
685 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
686 u16 sc = le16_to_cpu(hdr->seq_ctrl);
687 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
688 u16 head_seq_num, buf_size;
689 int index;
690 bool ret = true;
691
692 spin_lock(&tid_agg_rx->reorder_lock);
693
694 buf_size = tid_agg_rx->buf_size;
695 head_seq_num = tid_agg_rx->head_seq_num;
696
697 /* frame with out of date sequence number */
698 if (seq_less(mpdu_seq_num, head_seq_num)) {
699 dev_kfree_skb(skb);
700 goto out;
701 }
702
703 /*
704 * If the frame's sequence number exceeds our buffering window
705 * size, release some previous frames to make room for this one.
706 */
707 if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
708 head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
709 /* release stored frames up to new head to stack */
710 ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num,
711 frames);
712 }
713
714 /* Now the new frame is always in the range of the reordering buffer */
715
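/* the ring slot is this MPDU's offset from the session's starting
 * sequence number (ssn), wrapped into the reorder buffer */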
716 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;
717
718 /* check if we already stored this frame */
719 if (tid_agg_rx->reorder_buf[index]) {
720 dev_kfree_skb(skb);
721 goto out;
722 }
723
724 /*
725 * If the current MPDU is in the right order and nothing else
726 * is stored we can process it directly, no need to buffer it.
727 */
728 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
729 tid_agg_rx->stored_mpdu_num == 0) {
730 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
731 ret = false;
732 goto out;
733 }
734
735 /* put the frame in the reordering buffer */
736 tid_agg_rx->reorder_buf[index] = skb;
737 tid_agg_rx->reorder_time[index] = jiffies;
738 tid_agg_rx->stored_mpdu_num++;
739 ieee80211_sta_reorder_release(hw, tid_agg_rx, frames);
740
741 out:
742 spin_unlock(&tid_agg_rx->reorder_lock);
743 return ret;
744 }
745
746 /*
747 * Reorder MPDUs from A-MPDUs, keeping them on a buffer; frames that can
748 * be processed right away are appended to the frames queue instead.
749 */
750 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
751 struct sk_buff_head *frames)
752 {
753 struct sk_buff *skb = rx->skb;
754 struct ieee80211_local *local = rx->local;
755 struct ieee80211_hw *hw = &local->hw;
756 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
757 struct sta_info *sta = rx->sta;
758 struct tid_ampdu_rx *tid_agg_rx;
759 u16 sc;
760 int tid;
761
762 if (!ieee80211_is_data_qos(hdr->frame_control))
763 goto dont_reorder;
764
765 /*
766 * filter the QoS data rx stream according to
767 * STA/TID and check if this STA/TID is on aggregation
768 */
769
770 if (!sta)
771 goto dont_reorder;
772
773 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
774
775 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
776 if (!tid_agg_rx)
777 goto dont_reorder;
778
779 /* qos null data frames are excluded */
780 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
781 goto dont_reorder;
782
783 /* new, potentially un-ordered, ampdu frame - process it */
784
785 /* reset session timer */
786 if (tid_agg_rx->timeout)
787 mod_timer(&tid_agg_rx->session_timer,
788 TU_TO_EXP_TIME(tid_agg_rx->timeout));
789
790 /* if this mpdu is fragmented - terminate rx aggregation session */
791 sc = le16_to_cpu(hdr->seq_ctrl);
792 if (sc & IEEE80211_SCTL_FRAG) {
793 skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
794 skb_queue_tail(&rx->sdata->skb_queue, skb);
795 ieee80211_queue_work(&local->hw, &rx->sdata->work);
796 return;
797 }
798
799 /*
800 * No locking needed -- we will only ever process one
801 * RX packet at a time, and thus own tid_agg_rx. All
802 * other code manipulating it needs to (and does) make
803 * sure that we cannot get to it any more before doing
804 * anything with it.
805 */
806 if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames))
807 return;
808
809 dont_reorder:
810 __skb_queue_tail(frames, skb);
811 }
812
813 static ieee80211_rx_result debug_noinline
814 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
815 {
816 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
817 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
818
819 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
820 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
821 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
822 rx->sta->last_seq_ctrl[rx->queue] ==
823 hdr->seq_ctrl)) {
824 if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
825 rx->local->dot11FrameDuplicateCount++;
826 rx->sta->num_duplicates++;
827 }
828 return RX_DROP_MONITOR;
829 } else
830 rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl;
831 }
832
833 if (unlikely(rx->skb->len < 16)) {
834 I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
835 return RX_DROP_MONITOR;
836 }
837
838 /* Drop disallowed frame classes based on STA auth/assoc state;
839 * IEEE 802.11, Chap 5.5.
840 *
841 * mac80211 filters only based on association state, i.e. it drops
842 * Class 3 frames from not associated stations. hostapd sends
843 * deauth/disassoc frames when needed. In addition, hostapd is
844 * responsible for filtering on both auth and assoc states.
845 */
846
847 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
848 return ieee80211_rx_mesh_check(rx);
849
850 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
851 ieee80211_is_pspoll(hdr->frame_control)) &&
852 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
853 rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
854 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
855 if ((!ieee80211_has_fromds(hdr->frame_control) &&
856 !ieee80211_has_tods(hdr->frame_control) &&
857 ieee80211_is_data(hdr->frame_control)) ||
858 !(status->rx_flags & IEEE80211_RX_RA_MATCH)) {
859 /* Drop IBSS frames and frames for other hosts
860 * silently. */
861 return RX_DROP_MONITOR;
862 }
863
864 return RX_DROP_MONITOR;
865 }
866
867 return RX_CONTINUE;
868 }
869
870
871 static ieee80211_rx_result debug_noinline
872 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
873 {
874 struct sk_buff *skb = rx->skb;
875 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
876 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
877 int keyidx;
878 int hdrlen;
879 ieee80211_rx_result result = RX_DROP_UNUSABLE;
880 struct ieee80211_key *sta_ptk = NULL;
881 int mmie_keyidx = -1;
882 __le16 fc;
883
884 /*
885 * Key selection 101
886 *
887 * There are four types of keys:
888 * - GTK (group keys)
889 * - IGTK (group keys for management frames)
890 * - PTK (pairwise keys)
891 * - STK (station-to-station pairwise keys)
892 *
893 * When selecting a key, we have to distinguish between multicast
894 * (including broadcast) and unicast frames, the latter can only
895 * use PTKs and STKs while the former always use GTKs and IGTKs.
896 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
897 * unicast frames can also use key indices like GTKs. Hence, if we
898 * don't have a PTK/STK we check the key index for a WEP key.
899 *
900 * Note that in a regular BSS, multicast frames are sent by the
901 * AP only, associated stations unicast the frame to the AP first
902 * which then multicasts it on their behalf.
903 *
904 * There is also a slight problem in IBSS mode: GTKs are negotiated
905 * with each station, that is something we don't currently handle.
906 * The spec seems to expect that one negotiates the same key with
907 * every station but there's no such requirement; VLANs could be
908 * possible.
909 */
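/*
 * The lookup below therefore goes: pairwise key for unicast frames,
 * IGTK (found via the MMIE key index) for BIP-protected group
 * management frames, the default (management) key for frames that are
 * not protected at all, and otherwise whichever group/WEP key the key
 * index in the IV names.
 */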
910
911 /*
912 * No point in finding a key and decrypting if the frame is neither
913 * addressed to us nor a multicast frame.
914 */
915 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
916 return RX_CONTINUE;
917
918 /* start without a key */
919 rx->key = NULL;
920
921 if (rx->sta)
922 sta_ptk = rcu_dereference(rx->sta->ptk);
923
924 fc = hdr->frame_control;
925
926 if (!ieee80211_has_protected(fc))
927 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
928
929 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
930 rx->key = sta_ptk;
931 if ((status->flag & RX_FLAG_DECRYPTED) &&
932 (status->flag & RX_FLAG_IV_STRIPPED))
933 return RX_CONTINUE;
934 /* Skip decryption if the frame is not protected. */
935 if (!ieee80211_has_protected(fc))
936 return RX_CONTINUE;
937 } else if (mmie_keyidx >= 0) {
938 /* Broadcast/multicast robust management frame / BIP */
939 if ((status->flag & RX_FLAG_DECRYPTED) &&
940 (status->flag & RX_FLAG_IV_STRIPPED))
941 return RX_CONTINUE;
942
943 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
944 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
945 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
946 if (rx->sta)
947 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
948 if (!rx->key)
949 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
950 } else if (!ieee80211_has_protected(fc)) {
951 /*
952 * The frame was not protected, so skip decryption. However, we
953 * need to set rx->key if there is a key that could have been
954 * used so that the frame may be dropped if encryption would
955 * have been expected.
956 */
957 struct ieee80211_key *key = NULL;
958 if (ieee80211_is_mgmt(fc) &&
959 is_multicast_ether_addr(hdr->addr1) &&
960 (key = rcu_dereference(rx->sdata->default_mgmt_key)))
961 rx->key = key;
962 else if ((key = rcu_dereference(rx->sdata->default_key)))
963 rx->key = key;
964 return RX_CONTINUE;
965 } else {
966 u8 keyid;
967 /*
968 * The device doesn't give us the IV so we won't be
969 * able to look up the key. That's ok though, we
970 * don't need to decrypt the frame, we just won't
971 * be able to keep statistics accurate.
972 * Except for key threshold notifications, should
973 * we somehow allow the driver to tell us which key
974 * the hardware used if this flag is set?
975 */
976 if ((status->flag & RX_FLAG_DECRYPTED) &&
977 (status->flag & RX_FLAG_IV_STRIPPED))
978 return RX_CONTINUE;
979
980 hdrlen = ieee80211_hdrlen(fc);
981
982 if (rx->skb->len < 8 + hdrlen)
983 return RX_DROP_UNUSABLE; /* TODO: count this? */
984
985 /*
986 * no need to call ieee80211_wep_get_keyidx,
987 * it verifies a bunch of things we've done already
988 */
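/* the key index sits in the two most significant bits of the fourth IV
 * octet (offset hdrlen + 3) for WEP as well as the TKIP/CCMP extended IV */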
989 skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
990 keyidx = keyid >> 6;
991
992 /* check per-station GTK first, if multicast packet */
993 if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
994 rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
995
996 /* if not found, try default key */
997 if (!rx->key) {
998 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
999
1000 /*
1001 * RSNA-protected unicast frames should always be
1002 * sent with pairwise or station-to-station keys,
1003 * but for WEP we allow using a key index as well.
1004 */
1005 if (rx->key &&
1006 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
1007 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
1008 !is_multicast_ether_addr(hdr->addr1))
1009 rx->key = NULL;
1010 }
1011 }
1012
1013 if (rx->key) {
1014 rx->key->tx_rx_count++;
1015 /* TODO: add threshold stuff again */
1016 } else {
1017 return RX_DROP_MONITOR;
1018 }
1019
1020 if (skb_linearize(rx->skb))
1021 return RX_DROP_UNUSABLE;
1022 /* the hdr variable is invalid now! */
1023
1024 switch (rx->key->conf.cipher) {
1025 case WLAN_CIPHER_SUITE_WEP40:
1026 case WLAN_CIPHER_SUITE_WEP104:
1027 /* Check for weak IVs if possible */
1028 if (rx->sta && ieee80211_is_data(fc) &&
1029 (!(status->flag & RX_FLAG_IV_STRIPPED) ||
1030 !(status->flag & RX_FLAG_DECRYPTED)) &&
1031 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
1032 rx->sta->wep_weak_iv_count++;
1033
1034 result = ieee80211_crypto_wep_decrypt(rx);
1035 break;
1036 case WLAN_CIPHER_SUITE_TKIP:
1037 result = ieee80211_crypto_tkip_decrypt(rx);
1038 break;
1039 case WLAN_CIPHER_SUITE_CCMP:
1040 result = ieee80211_crypto_ccmp_decrypt(rx);
1041 break;
1042 case WLAN_CIPHER_SUITE_AES_CMAC:
1043 result = ieee80211_crypto_aes_cmac_decrypt(rx);
1044 break;
1045 default:
1046 /*
1047 * We can reach here only with HW-only algorithms
1048 * but why didn't it decrypt the frame?!
1049 */
1050 return RX_DROP_UNUSABLE;
1051 }
1052
1053 /* either the frame has been decrypted or will be dropped */
1054 status->flag |= RX_FLAG_DECRYPTED;
1055
1056 return result;
1057 }
1058
1059 static ieee80211_rx_result debug_noinline
1060 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
1061 {
1062 struct ieee80211_local *local;
1063 struct ieee80211_hdr *hdr;
1064 struct sk_buff *skb;
1065
1066 local = rx->local;
1067 skb = rx->skb;
1068 hdr = (struct ieee80211_hdr *) skb->data;
1069
1070 if (!local->pspolling)
1071 return RX_CONTINUE;
1072
1073 if (!ieee80211_has_fromds(hdr->frame_control))
1074 /* this is not from AP */
1075 return RX_CONTINUE;
1076
1077 if (!ieee80211_is_data(hdr->frame_control))
1078 return RX_CONTINUE;
1079
1080 if (!ieee80211_has_moredata(hdr->frame_control)) {
1081 /* AP has no more frames buffered for us */
1082 local->pspolling = false;
1083 return RX_CONTINUE;
1084 }
1085
1086 /* more data bit is set, let's request a new frame from the AP */
1087 ieee80211_send_pspoll(local, rx->sdata);
1088
1089 return RX_CONTINUE;
1090 }
1091
1092 static void ap_sta_ps_start(struct sta_info *sta)
1093 {
1094 struct ieee80211_sub_if_data *sdata = sta->sdata;
1095 struct ieee80211_local *local = sdata->local;
1096
1097 atomic_inc(&sdata->bss->num_sta_ps);
1098 set_sta_flags(sta, WLAN_STA_PS_STA);
1099 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1100 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1101 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
1102 sdata->name, sta->sta.addr, sta->sta.aid);
1103 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1104 }
1105
1106 static void ap_sta_ps_end(struct sta_info *sta)
1107 {
1108 struct ieee80211_sub_if_data *sdata = sta->sdata;
1109
1110 atomic_dec(&sdata->bss->num_sta_ps);
1111
1112 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1113 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
1114 sdata->name, sta->sta.addr, sta->sta.aid);
1115 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1116
1117 if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) {
1118 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1119 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
1120 sdata->name, sta->sta.addr, sta->sta.aid);
1121 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1122 return;
1123 }
1124
1125 ieee80211_sta_ps_deliver_wakeup(sta);
1126 }
1127
1128 static ieee80211_rx_result debug_noinline
1129 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1130 {
1131 struct sta_info *sta = rx->sta;
1132 struct sk_buff *skb = rx->skb;
1133 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1134 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1135
1136 if (!sta)
1137 return RX_CONTINUE;
1138
1139 /*
1140 * Update last_rx only for IBSS packets which are for the current
1141 * BSSID to avoid keeping the current IBSS network alive in cases
1142 * where other STAs start using different BSSID.
1143 */
1144 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1145 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1146 NL80211_IFTYPE_ADHOC);
1147 if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0)
1148 sta->last_rx = jiffies;
1149 } else if (!is_multicast_ether_addr(hdr->addr1)) {
1150 /*
1151 * Mesh beacons will update last_rx if they are found to
1152 * match the current local configuration when processed.
1153 */
1154 sta->last_rx = jiffies;
1155 }
1156
1157 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
1158 return RX_CONTINUE;
1159
1160 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
1161 ieee80211_sta_rx_notify(rx->sdata, hdr);
1162
1163 sta->rx_fragments++;
1164 sta->rx_bytes += rx->skb->len;
1165 sta->last_signal = status->signal;
1166
1167 /*
1168 * Change STA power saving mode only at the end of a frame
1169 * exchange sequence.
1170 */
1171 if (!ieee80211_has_morefrags(hdr->frame_control) &&
1172 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1173 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1174 if (test_sta_flags(sta, WLAN_STA_PS_STA)) {
1175 /*
1176 * Ignore doze->wake transitions that are
1177 * indicated by non-data frames, the standard
1178 * is unclear here, but for example going to
1179 * PS mode and then scanning would cause a
1180 * doze->wake transition for the probe request,
1181 * and that is clearly undesirable.
1182 */
1183 if (ieee80211_is_data(hdr->frame_control) &&
1184 !ieee80211_has_pm(hdr->frame_control))
1185 ap_sta_ps_end(sta);
1186 } else {
1187 if (ieee80211_has_pm(hdr->frame_control))
1188 ap_sta_ps_start(sta);
1189 }
1190 }
1191
1192 /*
1193 * Drop (qos-)data::nullfunc frames silently, since they
1194 * are used only to control station power saving mode.
1195 */
1196 if (ieee80211_is_nullfunc(hdr->frame_control) ||
1197 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1198 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1199
1200 /*
1201 * If we receive a 4-addr nullfunc frame from a STA
1202 * that was not moved to a 4-addr STA vlan yet, drop
1203 * the frame to the monitor interface, to make sure
1204 * that hostapd sees it
1205 */
1206 if (ieee80211_has_a4(hdr->frame_control) &&
1207 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1208 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1209 !rx->sdata->u.vlan.sta)))
1210 return RX_DROP_MONITOR;
1211 /*
1212 * Update counter and free packet here to avoid
1213 * counting this as a dropped packet.
1214 */
1215 sta->rx_packets++;
1216 dev_kfree_skb(rx->skb);
1217 return RX_QUEUED;
1218 }
1219
1220 return RX_CONTINUE;
1221 } /* ieee80211_rx_h_sta_process */
1222
1223 static inline struct ieee80211_fragment_entry *
1224 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
1225 unsigned int frag, unsigned int seq, int rx_queue,
1226 struct sk_buff **skb)
1227 {
1228 struct ieee80211_fragment_entry *entry;
1229 int idx;
1230
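/* the fragment cache is a small ring of IEEE80211_FRAGMENT_MAX entries;
 * when it wraps around, the oldest pending reassembly in the reused slot
 * is purged below */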
1231 idx = sdata->fragment_next;
1232 entry = &sdata->fragments[sdata->fragment_next++];
1233 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
1234 sdata->fragment_next = 0;
1235
1236 if (!skb_queue_empty(&entry->skb_list)) {
1237 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1238 struct ieee80211_hdr *hdr =
1239 (struct ieee80211_hdr *) entry->skb_list.next->data;
1240 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
1241 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
1242 "addr1=%pM addr2=%pM\n",
1243 sdata->name, idx,
1244 jiffies - entry->first_frag_time, entry->seq,
1245 entry->last_frag, hdr->addr1, hdr->addr2);
1246 #endif
1247 __skb_queue_purge(&entry->skb_list);
1248 }
1249
1250 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
1251 *skb = NULL;
1252 entry->first_frag_time = jiffies;
1253 entry->seq = seq;
1254 entry->rx_queue = rx_queue;
1255 entry->last_frag = frag;
1256 entry->ccmp = 0;
1257 entry->extra_len = 0;
1258
1259 return entry;
1260 }
1261
1262 static inline struct ieee80211_fragment_entry *
1263 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
1264 unsigned int frag, unsigned int seq,
1265 int rx_queue, struct ieee80211_hdr *hdr)
1266 {
1267 struct ieee80211_fragment_entry *entry;
1268 int i, idx;
1269
1270 idx = sdata->fragment_next;
1271 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
1272 struct ieee80211_hdr *f_hdr;
1273
1274 idx--;
1275 if (idx < 0)
1276 idx = IEEE80211_FRAGMENT_MAX - 1;
1277
1278 entry = &sdata->fragments[idx];
1279 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
1280 entry->rx_queue != rx_queue ||
1281 entry->last_frag + 1 != frag)
1282 continue;
1283
1284 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
1285
1286 /*
1287 * Check that frame type and addresses are equal, else check next fragment
1288 */
1289 if (((hdr->frame_control ^ f_hdr->frame_control) &
1290 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
1291 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
1292 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
1293 continue;
1294
1295 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
1296 __skb_queue_purge(&entry->skb_list);
1297 continue;
1298 }
1299 return entry;
1300 }
1301
1302 return NULL;
1303 }
1304
1305 static ieee80211_rx_result debug_noinline
1306 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1307 {
1308 struct ieee80211_hdr *hdr;
1309 u16 sc;
1310 __le16 fc;
1311 unsigned int frag, seq;
1312 struct ieee80211_fragment_entry *entry;
1313 struct sk_buff *skb;
1314 struct ieee80211_rx_status *status;
1315
1316 hdr = (struct ieee80211_hdr *)rx->skb->data;
1317 fc = hdr->frame_control;
1318 sc = le16_to_cpu(hdr->seq_ctrl);
1319 frag = sc & IEEE80211_SCTL_FRAG;
1320
1321 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
1322 (rx->skb)->len < 24 ||
1323 is_multicast_ether_addr(hdr->addr1))) {
1324 /* not fragmented */
1325 goto out;
1326 }
1327 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1328
1329 if (skb_linearize(rx->skb))
1330 return RX_DROP_UNUSABLE;
1331
1332 /*
1333 * skb_linearize() might change the skb->data and
1334 * previously cached variables (in this case, hdr) need to
1335 * be refreshed with the new data.
1336 */
1337 hdr = (struct ieee80211_hdr *)rx->skb->data;
1338 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
1339
1340 if (frag == 0) {
1341 /* This is the first fragment of a new frame. */
1342 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
1343 rx->queue, &(rx->skb));
1344 if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP &&
1345 ieee80211_has_protected(fc)) {
1346 int queue = ieee80211_is_mgmt(fc) ?
1347 NUM_RX_DATA_QUEUES : rx->queue;
1348 /* Store CCMP PN so that we can verify that the next
1349 * fragment has a sequential PN value. */
1350 entry->ccmp = 1;
1351 memcpy(entry->last_pn,
1352 rx->key->u.ccmp.rx_pn[queue],
1353 CCMP_PN_LEN);
1354 }
1355 return RX_QUEUED;
1356 }
1357
1358 /* This is a fragment for a frame that should already be pending in
1359 * fragment cache. Add this fragment to the end of the pending entry.
1360 */
1361 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr);
1362 if (!entry) {
1363 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1364 return RX_DROP_MONITOR;
1365 }
1366
1367 /* Verify that MPDUs within one MSDU have sequential PN values.
1368 * (IEEE 802.11i, 8.3.3.4.5) */
1369 if (entry->ccmp) {
1370 int i;
1371 u8 pn[CCMP_PN_LEN], *rpn;
1372 int queue;
1373 if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP)
1374 return RX_DROP_UNUSABLE;
1375 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
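/* advance the expected 48-bit PN by one; pn[] stores the most
 * significant octet first, so the carry runs from the end backwards */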
1376 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
1377 pn[i]++;
1378 if (pn[i])
1379 break;
1380 }
1381 queue = ieee80211_is_mgmt(fc) ?
1382 NUM_RX_DATA_QUEUES : rx->queue;
1383 rpn = rx->key->u.ccmp.rx_pn[queue];
1384 if (memcmp(pn, rpn, CCMP_PN_LEN))
1385 return RX_DROP_UNUSABLE;
1386 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
1387 }
1388
1389 skb_pull(rx->skb, ieee80211_hdrlen(fc));
1390 __skb_queue_tail(&entry->skb_list, rx->skb);
1391 entry->last_frag = frag;
1392 entry->extra_len += rx->skb->len;
1393 if (ieee80211_has_morefrags(fc)) {
1394 rx->skb = NULL;
1395 return RX_QUEUED;
1396 }
1397
1398 rx->skb = __skb_dequeue(&entry->skb_list);
1399 if (skb_tailroom(rx->skb) < entry->extra_len) {
1400 I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
1401 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
1402 GFP_ATOMIC))) {
1403 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1404 __skb_queue_purge(&entry->skb_list);
1405 return RX_DROP_UNUSABLE;
1406 }
1407 }
1408 while ((skb = __skb_dequeue(&entry->skb_list))) {
1409 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
1410 dev_kfree_skb(skb);
1411 }
1412
1413 /* Complete frame has been reassembled - process it now */
1414 status = IEEE80211_SKB_RXCB(rx->skb);
1415 status->rx_flags |= IEEE80211_RX_FRAGMENTED;
1416
1417 out:
1418 if (rx->sta)
1419 rx->sta->rx_packets++;
1420 if (is_multicast_ether_addr(hdr->addr1))
1421 rx->local->dot11MulticastReceivedFrameCount++;
1422 else
1423 ieee80211_led_rx(rx->local);
1424 return RX_CONTINUE;
1425 }
1426
1427 static ieee80211_rx_result debug_noinline
1428 ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1429 {
1430 struct ieee80211_sub_if_data *sdata = rx->sdata;
1431 __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
1432 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1433
1434 if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
1435 !(status->rx_flags & IEEE80211_RX_RA_MATCH)))
1436 return RX_CONTINUE;
1437
1438 if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
1439 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1440 return RX_DROP_UNUSABLE;
1441
1442 if (!test_sta_flags(rx->sta, WLAN_STA_PS_DRIVER))
1443 ieee80211_sta_ps_deliver_poll_response(rx->sta);
1444 else
1445 set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
1446
1447 /* Free PS Poll skb here instead of returning RX_DROP that would
1448 * count as a dropped frame. */
1449 dev_kfree_skb(rx->skb);
1450
1451 return RX_QUEUED;
1452 }
1453
1454 static ieee80211_rx_result debug_noinline
1455 ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1456 {
1457 u8 *data = rx->skb->data;
1458 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
1459
1460 if (!ieee80211_is_data_qos(hdr->frame_control))
1461 return RX_CONTINUE;
1462
1463 /* remove the qos control field, update frame type and meta-data */
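/* the header bytes in front of the QoS control field are slid two bytes
 * towards the payload so that they overwrite it; the now-stale first two
 * bytes of the frame are then pulled off */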
1464 memmove(data + IEEE80211_QOS_CTL_LEN, data,
1465 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
1466 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1467 /* change frame type to non QOS */
1468 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1469
1470 return RX_CONTINUE;
1471 }
1472
1473 static int
1474 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1475 {
1476 if (unlikely(!rx->sta ||
1477 !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
1478 return -EACCES;
1479
1480 return 0;
1481 }
1482
1483 static int
1484 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1485 {
1486 struct sk_buff *skb = rx->skb;
1487 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1488
1489 /*
1490 * Pass through unencrypted frames if the hardware has
1491 * decrypted them already.
1492 */
1493 if (status->flag & RX_FLAG_DECRYPTED)
1494 return 0;
1495
1496 /* Drop unencrypted frames if key is set. */
1497 if (unlikely(!ieee80211_has_protected(fc) &&
1498 !ieee80211_is_nullfunc(fc) &&
1499 ieee80211_is_data(fc) &&
1500 (rx->key || rx->sdata->drop_unencrypted)))
1501 return -EACCES;
1502
1503 return 0;
1504 }
1505
1506 static int
1507 ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
1508 {
1509 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1510 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1511 __le16 fc = hdr->frame_control;
1512
1513 /*
1514 * Pass through unencrypted frames if the hardware has
1515 * decrypted them already.
1516 */
1517 if (status->flag & RX_FLAG_DECRYPTED)
1518 return 0;
1519
1520 if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
1521 if (unlikely(!ieee80211_has_protected(fc) &&
1522 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1523 rx->key))
1524 return -EACCES;
1525 /* BIP does not use Protected field, so need to check MMIE */
1526 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
1527 ieee80211_get_mmie_keyidx(rx->skb) < 0))
1528 return -EACCES;
1529 /*
1530 * When using MFP, Action frames are not allowed prior to
1531 * having configured keys.
1532 */
1533 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
1534 ieee80211_is_robust_mgmt_frame(
1535 (struct ieee80211_hdr *) rx->skb->data)))
1536 return -EACCES;
1537 }
1538
1539 return 0;
1540 }
1541
1542 static int
1543 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1544 {
1545 struct ieee80211_sub_if_data *sdata = rx->sdata;
1546 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1547
1548 if (ieee80211_has_a4(hdr->frame_control) &&
1549 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
1550 return -1;
1551
1552 if (is_multicast_ether_addr(hdr->addr1) &&
1553 ((sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) ||
1554 (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr)))
1555 return -1;
1556
1557 return ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
1558 }
1559
1560 /*
1561 * requires that rx->skb is a frame with ethernet header
1562 */
1563 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1564 {
1565 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1566 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1567 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1568
1569 /*
1570 * Allow EAPOL frames to us/the PAE group address regardless
1571 * of whether the frame was encrypted or not.
1572 */
1573 if (ehdr->h_proto == rx->sdata->control_port_protocol &&
1574 (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 ||
1575 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1576 return true;
1577
1578 if (ieee80211_802_1x_port_control(rx) ||
1579 ieee80211_drop_unencrypted(rx, fc))
1580 return false;
1581
1582 return true;
1583 }
1584
1585 /*
1586 * requires that rx->skb is a frame with ethernet header
1587 */
1588 static void
1589 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1590 {
1591 struct ieee80211_sub_if_data *sdata = rx->sdata;
1592 struct net_device *dev = sdata->dev;
1593 struct sk_buff *skb, *xmit_skb;
1594 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1595 struct sta_info *dsta;
1596 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1597
1598 skb = rx->skb;
1599 xmit_skb = NULL;
1600
1601 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
1602 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1603 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
1604 (status->rx_flags & IEEE80211_RX_RA_MATCH) &&
1605 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
1606 if (is_multicast_ether_addr(ehdr->h_dest)) {
1607 /*
1608 * send multicast frames both to higher layers in
1609 * local net stack and back to the wireless medium
1610 */
1611 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1612 if (!xmit_skb && net_ratelimit())
1613 printk(KERN_DEBUG "%s: failed to clone "
1614 "multicast frame\n", dev->name);
1615 } else {
1616 dsta = sta_info_get(sdata, skb->data);
1617 if (dsta) {
1618 /*
1619 * The destination station is associated to
1620 * this AP (in this VLAN), so send the frame
1621 * directly to it and do not pass it to local
1622 * net stack.
1623 */
1624 xmit_skb = skb;
1625 skb = NULL;
1626 }
1627 }
1628 }
1629
1630 if (skb) {
1631 int align __maybe_unused;
1632
1633 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1634 /*
1635 * 'align' will only take the values 0 or 2 here
1636 * since all frames are required to be aligned
1637 * to 2-byte boundaries when being passed to
1638 * mac80211. That also explains the __skb_push()
1639 * below.
1640 */
1641 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
1642 if (align) {
1643 if (WARN_ON(skb_headroom(skb) < 3)) {
1644 dev_kfree_skb(skb);
1645 skb = NULL;
1646 } else {
1647 u8 *data = skb->data;
1648 size_t len = skb_headlen(skb);
1649 skb->data -= align;
1650 memmove(skb->data, data, len);
1651 skb_set_tail_pointer(skb, len);
1652 }
1653 }
1654 #endif
1655
1656 if (skb) {
1657 /* deliver to local stack */
1658 skb->protocol = eth_type_trans(skb, dev);
1659 memset(skb->cb, 0, sizeof(skb->cb));
1660 netif_receive_skb(skb);
1661 }
1662 }
1663
1664 if (xmit_skb) {
1665 /* send to wireless media */
1666 xmit_skb->protocol = htons(ETH_P_802_3);
1667 skb_reset_network_header(xmit_skb);
1668 skb_reset_mac_header(xmit_skb);
1669 dev_queue_xmit(xmit_skb);
1670 }
1671 }
1672
1673 static ieee80211_rx_result debug_noinline
1674 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1675 {
1676 struct net_device *dev = rx->sdata->dev;
1677 struct sk_buff *skb = rx->skb;
1678 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1679 __le16 fc = hdr->frame_control;
1680 struct sk_buff_head frame_list;
1681 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1682
1683 if (unlikely(!ieee80211_is_data(fc)))
1684 return RX_CONTINUE;
1685
1686 if (unlikely(!ieee80211_is_data_present(fc)))
1687 return RX_DROP_MONITOR;
1688
1689 if (!(status->rx_flags & IEEE80211_RX_AMSDU))
1690 return RX_CONTINUE;
1691
1692 if (ieee80211_has_a4(hdr->frame_control) &&
1693 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1694 !rx->sdata->u.vlan.sta)
1695 return RX_DROP_UNUSABLE;
1696
1697 if (is_multicast_ether_addr(hdr->addr1) &&
1698 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1699 rx->sdata->u.vlan.sta) ||
1700 (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1701 rx->sdata->u.mgd.use_4addr)))
1702 return RX_DROP_UNUSABLE;
1703
1704 skb->dev = dev;
1705 __skb_queue_head_init(&frame_list);
1706
1707 if (skb_linearize(skb))
1708 return RX_DROP_UNUSABLE;
1709
1710 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
1711 rx->sdata->vif.type,
1712 rx->local->hw.extra_tx_headroom);
1713
1714 while (!skb_queue_empty(&frame_list)) {
1715 rx->skb = __skb_dequeue(&frame_list);
1716
1717 if (!ieee80211_frame_allowed(rx, fc)) {
1718 dev_kfree_skb(rx->skb);
1719 continue;
1720 }
1721 dev->stats.rx_packets++;
1722 dev->stats.rx_bytes += rx->skb->len;
1723
1724 ieee80211_deliver_skb(rx);
1725 }
1726
1727 return RX_QUEUED;
1728 }
1729
1730 #ifdef CONFIG_MAC80211_MESH
1731 static ieee80211_rx_result
1732 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1733 {
1734 struct ieee80211_hdr *hdr;
1735 struct ieee80211s_hdr *mesh_hdr;
1736 unsigned int hdrlen;
1737 struct sk_buff *skb = rx->skb, *fwd_skb;
1738 struct ieee80211_local *local = rx->local;
1739 struct ieee80211_sub_if_data *sdata = rx->sdata;
1740 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1741
1742 hdr = (struct ieee80211_hdr *) skb->data;
1743 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1744 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1745
1746 if (!ieee80211_is_data(hdr->frame_control))
1747 return RX_CONTINUE;
1748
1749 if (!mesh_hdr->ttl)
1750 /* illegal frame */
1751 return RX_DROP_MONITOR;
1752
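/* the 802.11s address extension names the proxied (non-mesh) endpoint;
 * learn or refresh the mesh proxy path for it here */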
1753 if (mesh_hdr->flags & MESH_FLAGS_AE) {
1754 struct mesh_path *mppath;
1755 char *proxied_addr;
1756 char *mpp_addr;
1757
1758 if (is_multicast_ether_addr(hdr->addr1)) {
1759 mpp_addr = hdr->addr3;
1760 proxied_addr = mesh_hdr->eaddr1;
1761 } else {
1762 mpp_addr = hdr->addr4;
1763 proxied_addr = mesh_hdr->eaddr2;
1764 }
1765
1766 rcu_read_lock();
1767 mppath = mpp_path_lookup(proxied_addr, sdata);
1768 if (!mppath) {
1769 mpp_path_add(proxied_addr, mpp_addr, sdata);
1770 } else {
1771 spin_lock_bh(&mppath->state_lock);
1772 if (compare_ether_addr(mppath->mpp, mpp_addr) != 0)
1773 memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
1774 spin_unlock_bh(&mppath->state_lock);
1775 }
1776 rcu_read_unlock();
1777 }
1778
1779 /* Frame has reached destination. Don't forward */
1780 if (!is_multicast_ether_addr(hdr->addr1) &&
1781 compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
1782 return RX_CONTINUE;
1783
1784 mesh_hdr->ttl--;
1785
1786 if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
1787 if (!mesh_hdr->ttl)
1788 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
1789 dropped_frames_ttl);
1790 else {
1791 struct ieee80211_hdr *fwd_hdr;
1792 struct ieee80211_tx_info *info;
1793
1794 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1795
1796 if (!fwd_skb) {
1797 if (net_ratelimit())
1798 printk(KERN_DEBUG "%s: failed to clone mesh frame\n", sdata->name);
1799 goto out;
}

1800 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1801 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
1802 info = IEEE80211_SKB_CB(fwd_skb);
1803 memset(info, 0, sizeof(*info));
1804 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1805 info->control.vif = &rx->sdata->vif;
1806 skb_set_queue_mapping(fwd_skb,
1807 ieee80211_select_queue(rx->sdata, fwd_skb));
1808 ieee80211_set_qos_hdr(local, fwd_skb);
1809 if (is_multicast_ether_addr(fwd_hdr->addr1))
1810 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1811 fwded_mcast);
1812 else {
1813 int err;
1814 /*
1815 * Save TA to addr1 to send TA a path error if a
1816 * suitable next hop is not found
1817 */
1818 memcpy(fwd_hdr->addr1, fwd_hdr->addr2,
1819 ETH_ALEN);
1820 err = mesh_nexthop_lookup(fwd_skb, sdata);
1821 /* Failed to immediately resolve next hop:
1822 * fwded frame was dropped or will be added
1823 * later to the pending skb queue. */
1824 if (err)
1825 return RX_DROP_MONITOR;
1826
1827 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1828 fwded_unicast);
1829 }
1830 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1831 fwded_frames);
1832 ieee80211_add_pending_skb(local, fwd_skb);
1833 }
1834 }
1835
1836 out:
1837 if (is_multicast_ether_addr(hdr->addr1) ||
1838 sdata->dev->flags & IFF_PROMISC)
1839 return RX_CONTINUE;
1840 return RX_DROP_MONITOR;
1841 }
1842 #endif
1843
1844 static ieee80211_rx_result debug_noinline
1845 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1846 {
1847 struct ieee80211_sub_if_data *sdata = rx->sdata;
1848 struct ieee80211_local *local = rx->local;
1849 struct net_device *dev = sdata->dev;
1850 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1851 __le16 fc = hdr->frame_control;
1852 int err;
1853
1854 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
1855 return RX_CONTINUE;
1856
1857 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1858 return RX_DROP_MONITOR;
1859
1860 /*
1861 * Allow the cooked monitor interface of an AP to see 4-addr frames so
1862 * that a 4-addr station can be detected and moved into a separate VLAN
1863 */
1864 if (ieee80211_has_a4(hdr->frame_control) &&
1865 sdata->vif.type == NL80211_IFTYPE_AP)
1866 return RX_DROP_MONITOR;
1867
1868 err = __ieee80211_data_to_8023(rx);
1869 if (unlikely(err))
1870 return RX_DROP_UNUSABLE;
1871
1872 if (!ieee80211_frame_allowed(rx, fc))
1873 return RX_DROP_MONITOR;
1874
1875 rx->skb->dev = dev;
1876
1877 dev->stats.rx_packets++;
1878 dev->stats.rx_bytes += rx->skb->len;
1879
1880 if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
1881 !is_multicast_ether_addr(((struct ethhdr *)rx->skb->data)->h_dest)) {
1882 mod_timer(&local->dynamic_ps_timer, jiffies +
1883 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
1884 }
1885
1886 ieee80211_deliver_skb(rx);
1887
1888 return RX_QUEUED;
1889 }
1890
1891 static ieee80211_rx_result debug_noinline
1892 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
1893 {
1894 struct ieee80211_local *local = rx->local;
1895 struct ieee80211_hw *hw = &local->hw;
1896 struct sk_buff *skb = rx->skb;
1897 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
1898 struct tid_ampdu_rx *tid_agg_rx;
1899 u16 start_seq_num;
1900 u16 tid;
1901
1902 if (likely(!ieee80211_is_ctl(bar->frame_control)))
1903 return RX_CONTINUE;
1904
1905 if (ieee80211_is_back_req(bar->frame_control)) {
1906 struct {
1907 __le16 control, start_seq_num;
1908 } __packed bar_data;
1909
1910 if (!rx->sta)
1911 return RX_DROP_MONITOR;
1912
1913 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
1914 &bar_data, sizeof(bar_data)))
1915 return RX_DROP_MONITOR;
1916
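/*
 * The TID sits in the top four bits of the BAR control field and the
 * starting sequence number in the upper twelve bits of the SSC field,
 * hence the two shifts below.
 */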
1917 tid = le16_to_cpu(bar_data.control) >> 12;
1918
1919 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
1920 if (!tid_agg_rx)
1921 return RX_DROP_MONITOR;
1922
1923 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
1924
1925 /* reset session timer */
1926 if (tid_agg_rx->timeout)
1927 mod_timer(&tid_agg_rx->session_timer,
1928 TU_TO_EXP_TIME(tid_agg_rx->timeout));
1929
1930 spin_lock(&tid_agg_rx->reorder_lock);
1931 /* release stored frames up to start of BAR */
1932 ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num,
1933 frames);
1934 spin_unlock(&tid_agg_rx->reorder_lock);
1935
1936 kfree_skb(skb);
1937 return RX_QUEUED;
1938 }
1939
1940 /*
1941 * After this point, we only want management frames,
1942 * so we can drop all remaining control frames to
1943 * cooked monitor interfaces.
1944 */
1945 return RX_DROP_MONITOR;
1946 }
1947
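/*
 * ieee80211_process_sa_query_req - reply to an SA Query request
 *
 * Checks that the request is addressed to us and comes from our current
 * AP, then transmits an SA Query response echoing the transaction ID.
 */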
1948 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
1949 struct ieee80211_mgmt *mgmt,
1950 size_t len)
1951 {
1952 struct ieee80211_local *local = sdata->local;
1953 struct sk_buff *skb;
1954 struct ieee80211_mgmt *resp;
1955
1956 if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) {
1957 /* Not to own unicast address */
1958 return;
1959 }
1960
1961 if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 ||
1962 compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) {
1963 /* Not from the current AP or not associated yet. */
1964 return;
1965 }
1966
1967 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
1968 /* Too short SA Query request frame */
1969 return;
1970 }
1971
1972 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
1973 if (skb == NULL)
1974 return;
1975
1976 skb_reserve(skb, local->hw.extra_tx_headroom);
1977 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
1978 memset(resp, 0, 24);
1979 memcpy(resp->da, mgmt->sa, ETH_ALEN);
1980 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
1981 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
1982 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1983 IEEE80211_STYPE_ACTION);
1984 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
1985 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
1986 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
1987 memcpy(resp->u.action.u.sa_query.trans_id,
1988 mgmt->u.action.u.sa_query.trans_id,
1989 WLAN_SA_QUERY_TR_ID_LEN);
1990
1991 ieee80211_tx_skb(sdata, skb);
1992 }
1993
1994 static ieee80211_rx_result debug_noinline
1995 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
1996 {
1997 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1998 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1999
2000 /*
2001 * From here on, look only at management frames.
2002 * Data and control frames are already handled,
2003 * and unknown (reserved) frames are useless.
2004 */
2005 if (rx->skb->len < 24)
2006 return RX_DROP_MONITOR;
2007
2008 if (!ieee80211_is_mgmt(mgmt->frame_control))
2009 return RX_DROP_MONITOR;
2010
2011 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
2012 return RX_DROP_MONITOR;
2013
2014 if (ieee80211_drop_unencrypted_mgmt(rx))
2015 return RX_DROP_UNUSABLE;
2016
2017 return RX_CONTINUE;
2018 }
2019
2020 static ieee80211_rx_result debug_noinline
2021 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2022 {
2023 struct ieee80211_local *local = rx->local;
2024 struct ieee80211_sub_if_data *sdata = rx->sdata;
2025 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2026 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2027 int len = rx->skb->len;
2028
2029 if (!ieee80211_is_action(mgmt->frame_control))
2030 return RX_CONTINUE;
2031
2032 /* drop too small frames */
2033 if (len < IEEE80211_MIN_ACTION_SIZE)
2034 return RX_DROP_UNUSABLE;
2035
2036 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
2037 return RX_DROP_UNUSABLE;
2038
2039 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
2040 return RX_DROP_UNUSABLE;
2041
2042 switch (mgmt->u.action.category) {
2043 case WLAN_CATEGORY_BACK:
2044 /*
2045 * The aggregation code is not prepared to handle
2046 * anything but STA/AP due to the BSSID handling;
2047 * IBSS could work in the code but isn't supported
2048 * by drivers or the standard.
2049 */
2050 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
2051 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
2052 sdata->vif.type != NL80211_IFTYPE_AP)
2053 break;
2054
2055 /* verify action_code is present */
2056 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2057 break;
2058
2059 switch (mgmt->u.action.u.addba_req.action_code) {
2060 case WLAN_ACTION_ADDBA_REQ:
2061 if (len < (IEEE80211_MIN_ACTION_SIZE +
2062 sizeof(mgmt->u.action.u.addba_req)))
2063 goto invalid;
2064 break;
2065 case WLAN_ACTION_ADDBA_RESP:
2066 if (len < (IEEE80211_MIN_ACTION_SIZE +
2067 sizeof(mgmt->u.action.u.addba_resp)))
2068 goto invalid;
2069 break;
2070 case WLAN_ACTION_DELBA:
2071 if (len < (IEEE80211_MIN_ACTION_SIZE +
2072 sizeof(mgmt->u.action.u.delba)))
2073 goto invalid;
2074 break;
2075 default:
2076 goto invalid;
2077 }
2078
2079 goto queue;
2080 case WLAN_CATEGORY_SPECTRUM_MGMT:
2081 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
2082 break;
2083
2084 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2085 break;
2086
2087 /* verify action_code is present */
2088 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2089 break;
2090
2091 switch (mgmt->u.action.u.measurement.action_code) {
2092 case WLAN_ACTION_SPCT_MSR_REQ:
2093 if (len < (IEEE80211_MIN_ACTION_SIZE +
2094 sizeof(mgmt->u.action.u.measurement)))
2095 break;
2096 ieee80211_process_measurement_req(sdata, mgmt, len);
2097 goto handled;
2098 case WLAN_ACTION_SPCT_CHL_SWITCH:
2099 if (len < (IEEE80211_MIN_ACTION_SIZE +
2100 sizeof(mgmt->u.action.u.chan_switch)))
2101 break;
2102
2103 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2104 break;
2105
2106 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
2107 break;
2108
2109 goto queue;
2110 }
2111 break;
2112 case WLAN_CATEGORY_SA_QUERY:
2113 if (len < (IEEE80211_MIN_ACTION_SIZE +
2114 sizeof(mgmt->u.action.u.sa_query)))
2115 break;
2116
2117 switch (mgmt->u.action.u.sa_query.action) {
2118 case WLAN_ACTION_SA_QUERY_REQUEST:
2119 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2120 break;
2121 ieee80211_process_sa_query_req(sdata, mgmt, len);
2122 goto handled;
2123 }
2124 break;
2125 case WLAN_CATEGORY_MESH_PLINK:
2126 case WLAN_CATEGORY_MESH_PATH_SEL:
2127 if (!ieee80211_vif_is_mesh(&sdata->vif))
2128 break;
2129 goto queue;
2130 }
2131
2132 return RX_CONTINUE;
2133
2134 invalid:
2135 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
2136 /* will return in the next handlers */
2137 return RX_CONTINUE;
2138
2139 handled:
2140 if (rx->sta)
2141 rx->sta->rx_packets++;
2142 dev_kfree_skb(rx->skb);
2143 return RX_QUEUED;
2144
2145 queue:
2146 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2147 skb_queue_tail(&sdata->skb_queue, rx->skb);
2148 ieee80211_queue_work(&local->hw, &sdata->work);
2149 if (rx->sta)
2150 rx->sta->rx_packets++;
2151 return RX_QUEUED;
2152 }
2153
2154 static ieee80211_rx_result debug_noinline
2155 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
2156 {
2157 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2158
2159 /* skip known-bad action frames and return them in the next handler */
2160 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
2161 return RX_CONTINUE;
2162
2163 /*
2164 * Getting here means the kernel doesn't know how to handle
2165 * it, but maybe userspace does ... include returned frames
2166 * so userspace can register for those to know whether frames
2167 * it transmitted were processed or returned.
2168 */
2169
2170 if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq,
2171 rx->skb->data, rx->skb->len,
2172 GFP_ATOMIC)) {
2173 if (rx->sta)
2174 rx->sta->rx_packets++;
2175 dev_kfree_skb(rx->skb);
2176 return RX_QUEUED;
2177 }
2178
2179
2180 return RX_CONTINUE;
2181 }
2182
2183 static ieee80211_rx_result debug_noinline
2184 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
2185 {
2186 struct ieee80211_local *local = rx->local;
2187 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2188 struct sk_buff *nskb;
2189 struct ieee80211_sub_if_data *sdata = rx->sdata;
2190 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2191
2192 if (!ieee80211_is_action(mgmt->frame_control))
2193 return RX_CONTINUE;
2194
2195 /*
2196 * For AP mode, hostapd is responsible for handling any action
2197 * frames that we didn't handle, including returning unknown
2198 * ones. For all other modes we will return them to the sender,
2199 * setting the 0x80 bit in the action category, as required by
2200 * 802.11-2007 7.3.1.11.
2201 * Newer versions of hostapd shall also use the management frame
2202 * registration mechanisms, but older ones still use cooked
2203 * monitor interfaces so push all frames there.
2204 */
2205 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
2206 (sdata->vif.type == NL80211_IFTYPE_AP ||
2207 sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
2208 return RX_DROP_MONITOR;
2209
2210 /* do not return rejected action frames */
2211 if (mgmt->u.action.category & 0x80)
2212 return RX_DROP_UNUSABLE;
2213
2214 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
2215 GFP_ATOMIC);
2216 if (nskb) {
2217 struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
2218
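/* mark the copy as rejected and send it back to the originator */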
2219 nmgmt->u.action.category |= 0x80;
2220 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
2221 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
2222
2223 memset(nskb->cb, 0, sizeof(nskb->cb));
2224
2225 ieee80211_tx_skb(rx->sdata, nskb);
2226 }
2227 dev_kfree_skb(rx->skb);
2228 return RX_QUEUED;
2229 }
2230
2231 static ieee80211_rx_result debug_noinline
2232 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2233 {
2234 struct ieee80211_sub_if_data *sdata = rx->sdata;
2235 ieee80211_rx_result rxs;
2236 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
2237 __le16 stype;
2238
2239 rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb);
2240 if (rxs != RX_CONTINUE)
2241 return rxs;
2242
2243 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
2244
2245 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
2246 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
2247 sdata->vif.type != NL80211_IFTYPE_STATION)
2248 return RX_DROP_MONITOR;
2249
2250 switch (stype) {
2251 case cpu_to_le16(IEEE80211_STYPE_BEACON):
2252 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
2253 /* process for all: mesh, mlme, ibss */
2254 break;
2255 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2256 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
2257 if (is_multicast_ether_addr(mgmt->da) &&
2258 !is_broadcast_ether_addr(mgmt->da))
2259 return RX_DROP_MONITOR;
2260
2261 /* process only for station */
2262 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2263 return RX_DROP_MONITOR;
2264 break;
2265 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
2266 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2267 /* process only for ibss */
2268 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
2269 return RX_DROP_MONITOR;
2270 break;
2271 default:
2272 return RX_DROP_MONITOR;
2273 }
2274
2275 /* queue up frame and kick off work to process it */
2276 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2277 skb_queue_tail(&sdata->skb_queue, rx->skb);
2278 ieee80211_queue_work(&rx->local->hw, &sdata->work);
2279 if (rx->sta)
2280 rx->sta->rx_packets++;
2281
2282 return RX_QUEUED;
2283 }
2284
2285 static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr,
2286 struct ieee80211_rx_data *rx)
2287 {
2288 int keyidx;
2289 unsigned int hdrlen;
2290
2291 hdrlen = ieee80211_hdrlen(hdr->frame_control);
2292 if (rx->skb->len >= hdrlen + 4)
2293 keyidx = rx->skb->data[hdrlen + 3] >> 6;
2294 else
2295 keyidx = -1;
2296
2297 if (!rx->sta) {
2298 /*
2299 * Some hardware seem to generate incorrect Michael MIC
2300 * reports; ignore them to avoid triggering countermeasures.
2301 */
2302 return;
2303 }
2304
2305 if (!ieee80211_has_protected(hdr->frame_control))
2306 return;
2307
2308 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) {
2309 /*
2310 * APs with pairwise keys should never receive Michael MIC
2311 * errors for non-zero keyidx because these are reserved for
2312 * group keys and only the AP is sending real multicast
2313 * frames in the BSS.
2314 */
2315 return;
2316 }
2317
2318 if (!ieee80211_is_data(hdr->frame_control) &&
2319 !ieee80211_is_auth(hdr->frame_control))
2320 return;
2321
2322 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL,
2323 GFP_ATOMIC);
2324 }
2325
2326 /* TODO: use IEEE80211_RX_FRAGMENTED */
2327 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2328 struct ieee80211_rate *rate)
2329 {
2330 struct ieee80211_sub_if_data *sdata;
2331 struct ieee80211_local *local = rx->local;
2332 struct ieee80211_rtap_hdr {
2333 struct ieee80211_radiotap_header hdr;
2334 u8 flags;
2335 u8 rate_or_pad;
2336 __le16 chan_freq;
2337 __le16 chan_flags;
2338 } __packed *rthdr;
2339 struct sk_buff *skb = rx->skb, *skb2;
2340 struct net_device *prev_dev = NULL;
2341 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2342
2343 /*
2344 * If cooked monitor has been processed already, then
2345 * don't do it again. If not, set the flag.
2346 */
2347 if (rx->flags & IEEE80211_RX_CMNTR)
2348 goto out_free_skb;
2349 rx->flags |= IEEE80211_RX_CMNTR;
2350
2351 if (skb_headroom(skb) < sizeof(*rthdr) &&
2352 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
2353 goto out_free_skb;
2354
2355 rthdr = (void *)skb_push(skb, sizeof(*rthdr));
2356 memset(rthdr, 0, sizeof(*rthdr));
2357 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
2358 rthdr->hdr.it_present =
2359 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
2360 (1 << IEEE80211_RADIOTAP_CHANNEL));
2361
2362 if (rate) {
2363 rthdr->rate_or_pad = rate->bitrate / 5;
2364 rthdr->hdr.it_present |=
2365 cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
2366 }
2367 rthdr->chan_freq = cpu_to_le16(status->freq);
2368
2369 if (status->band == IEEE80211_BAND_5GHZ)
2370 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
2371 IEEE80211_CHAN_5GHZ);
2372 else
2373 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
2374 IEEE80211_CHAN_2GHZ);
2375
2376 skb_set_mac_header(skb, 0);
2377 skb->ip_summed = CHECKSUM_UNNECESSARY;
2378 skb->pkt_type = PACKET_OTHERHOST;
2379 skb->protocol = htons(ETH_P_802_2);
2380
2381 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2382 if (!ieee80211_sdata_running(sdata))
2383 continue;
2384
2385 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
2386 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
2387 continue;
2388
2389 if (prev_dev) {
2390 skb2 = skb_clone(skb, GFP_ATOMIC);
2391 if (skb2) {
2392 skb2->dev = prev_dev;
2393 netif_receive_skb(skb2);
2394 }
2395 }
2396
2397 prev_dev = sdata->dev;
2398 sdata->dev->stats.rx_packets++;
2399 sdata->dev->stats.rx_bytes += skb->len;
2400 }
2401
2402 if (prev_dev) {
2403 skb->dev = prev_dev;
2404 netif_receive_skb(skb);
2405 return;
2406 }
2407
2408 out_free_skb:
2409 dev_kfree_skb(skb);
2410 }
2411
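/*
 * Dispatch the verdict from the rx handler chain: RX_DROP_MONITOR (and
 * whatever is left over at RX_CONTINUE) still goes to cooked monitor
 * interfaces, RX_DROP_UNUSABLE frames are freed outright and RX_QUEUED
 * frames have already been consumed by one of the handlers.
 */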
2412 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
2413 ieee80211_rx_result res)
2414 {
2415 switch (res) {
2416 case RX_DROP_MONITOR:
2417 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2418 if (rx->sta)
2419 rx->sta->rx_dropped++;
2420 /* fall through */
2421 case RX_CONTINUE: {
2422 struct ieee80211_rate *rate = NULL;
2423 struct ieee80211_supported_band *sband;
2424 struct ieee80211_rx_status *status;
2425
2426 status = IEEE80211_SKB_RXCB((rx->skb));
2427
2428 sband = rx->local->hw.wiphy->bands[status->band];
2429 if (!(status->flag & RX_FLAG_HT))
2430 rate = &sband->bitrates[status->rate_idx];
2431
2432 ieee80211_rx_cooked_monitor(rx, rate);
2433 break;
2434 }
2435 case RX_DROP_UNUSABLE:
2436 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2437 if (rx->sta)
2438 rx->sta->rx_dropped++;
2439 dev_kfree_skb(rx->skb);
2440 break;
2441 case RX_QUEUED:
2442 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
2443 break;
2444 }
2445 }
2446
2447 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
2448 struct sk_buff_head *frames)
2449 {
2450 ieee80211_rx_result res = RX_DROP_MONITOR;
2451 struct sk_buff *skb;
2452
2453 #define CALL_RXH(rxh) \
2454 do { \
2455 res = rxh(rx); \
2456 if (res != RX_CONTINUE) \
2457 goto rxh_next; \
2458 } while (0);
2459
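/*
 * Run the handlers in order on each released frame; the first handler
 * that returns something other than RX_CONTINUE decides the frame's
 * fate, which ieee80211_rx_handlers_result() then acts upon.
 */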
2460 while ((skb = __skb_dequeue(frames))) {
2461 /*
2462 * all the other fields are valid across frames
2463 * that belong to an A-MPDU since they are on the
2464 * same TID from the same station
2465 */
2466 rx->skb = skb;
2467 rx->flags = 0;
2468
2469 CALL_RXH(ieee80211_rx_h_decrypt)
2470 CALL_RXH(ieee80211_rx_h_check_more_data)
2471 CALL_RXH(ieee80211_rx_h_sta_process)
2472 CALL_RXH(ieee80211_rx_h_defragment)
2473 CALL_RXH(ieee80211_rx_h_ps_poll)
2474 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
2475 /* must be after MMIC verify so header is counted in MPDU mic */
2476 CALL_RXH(ieee80211_rx_h_remove_qos_control)
2477 CALL_RXH(ieee80211_rx_h_amsdu)
2478 #ifdef CONFIG_MAC80211_MESH
2479 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
2480 CALL_RXH(ieee80211_rx_h_mesh_fwding);
2481 #endif
2482 CALL_RXH(ieee80211_rx_h_data)
2483
2484 /* special treatment -- needs the queue */
2485 res = ieee80211_rx_h_ctrl(rx, frames);
2486 if (res != RX_CONTINUE)
2487 goto rxh_next;
2488
2489 CALL_RXH(ieee80211_rx_h_mgmt_check)
2490 CALL_RXH(ieee80211_rx_h_action)
2491 CALL_RXH(ieee80211_rx_h_userspace_mgmt)
2492 CALL_RXH(ieee80211_rx_h_action_return)
2493 CALL_RXH(ieee80211_rx_h_mgmt)
2494
2495 rxh_next:
2496 ieee80211_rx_handlers_result(rx, res);
2497
2498 #undef CALL_RXH
2499 }
2500 }
2501
2502 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
2503 {
2504 struct sk_buff_head reorder_release;
2505 ieee80211_rx_result res = RX_DROP_MONITOR;
2506
2507 __skb_queue_head_init(&reorder_release);
2508
2509 #define CALL_RXH(rxh) \
2510 do { \
2511 res = rxh(rx); \
2512 if (res != RX_CONTINUE) \
2513 goto rxh_next; \
2514 } while (0);
2515
2516 CALL_RXH(ieee80211_rx_h_passive_scan)
2517 CALL_RXH(ieee80211_rx_h_check)
2518
2519 ieee80211_rx_reorder_ampdu(rx, &reorder_release);
2520
2521 ieee80211_rx_handlers(rx, &reorder_release);
2522 return;
2523
2524 rxh_next:
2525 ieee80211_rx_handlers_result(rx, res);
2526
2527 #undef CALL_RXH
2528 }
2529
2530 /*
2531 * This function makes calls into the RX path, therefore
2532 * it has to be invoked under RCU read lock.
2533 */
2534 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
2535 {
2536 struct sk_buff_head frames;
2537 struct ieee80211_rx_data rx = {
2538 .sta = sta,
2539 .sdata = sta->sdata,
2540 .local = sta->local,
2541 .queue = tid,
2542 };
2543 struct tid_ampdu_rx *tid_agg_rx;
2544
2545 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
2546 if (!tid_agg_rx)
2547 return;
2548
2549 __skb_queue_head_init(&frames);
2550
2551 spin_lock(&tid_agg_rx->reorder_lock);
2552 ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx, &frames);
2553 spin_unlock(&tid_agg_rx->reorder_lock);
2554
2555 ieee80211_rx_handlers(&rx, &frames);
2556 }
2557
2558 /* main receive path */
2559
2560 static int prepare_for_handlers(struct ieee80211_rx_data *rx,
2561 struct ieee80211_hdr *hdr)
2562 {
2563 struct ieee80211_sub_if_data *sdata = rx->sdata;
2564 struct sk_buff *skb = rx->skb;
2565 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2566 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
2567 int multicast = is_multicast_ether_addr(hdr->addr1);
2568
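/*
 * Decide per interface type whether this frame concerns us at all;
 * clearing IEEE80211_RX_RA_MATCH lets the frame continue through the
 * handlers (e.g. during scans or in promiscuous mode) without being
 * treated as addressed to this interface.
 */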
2569 switch (sdata->vif.type) {
2570 case NL80211_IFTYPE_STATION:
2571 if (!bssid && !sdata->u.mgd.use_4addr)
2572 return 0;
2573 if (!multicast &&
2574 compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
2575 if (!(sdata->dev->flags & IFF_PROMISC))
2576 return 0;
2577 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2578 }
2579 break;
2580 case NL80211_IFTYPE_ADHOC:
2581 if (!bssid)
2582 return 0;
2583 if (ieee80211_is_beacon(hdr->frame_control)) {
2584 return 1;
2585 }
2586 else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
2587 if (!(status->rx_flags & IEEE80211_RX_IN_SCAN))
2588 return 0;
2589 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2590 } else if (!multicast &&
2591 compare_ether_addr(sdata->vif.addr,
2592 hdr->addr1) != 0) {
2593 if (!(sdata->dev->flags & IFF_PROMISC))
2594 return 0;
2595 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2596 } else if (!rx->sta) {
2597 int rate_idx;
2598 if (status->flag & RX_FLAG_HT)
2599 rate_idx = 0; /* TODO: HT rates */
2600 else
2601 rate_idx = status->rate_idx;
2602 rx->sta = ieee80211_ibss_add_sta(sdata, bssid,
2603 hdr->addr2, BIT(rate_idx), GFP_ATOMIC);
2604 }
2605 break;
2606 case NL80211_IFTYPE_MESH_POINT:
2607 if (!multicast &&
2608 compare_ether_addr(sdata->vif.addr,
2609 hdr->addr1) != 0) {
2610 if (!(sdata->dev->flags & IFF_PROMISC))
2611 return 0;
2612
2613 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2614 }
2615 break;
2616 case NL80211_IFTYPE_AP_VLAN:
2617 case NL80211_IFTYPE_AP:
2618 if (!bssid) {
2619 if (compare_ether_addr(sdata->vif.addr,
2620 hdr->addr1))
2621 return 0;
2622 } else if (!ieee80211_bssid_match(bssid,
2623 sdata->vif.addr)) {
2624 if (!(status->rx_flags & IEEE80211_RX_IN_SCAN))
2625 return 0;
2626 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2627 }
2628 break;
2629 case NL80211_IFTYPE_WDS:
2630 if (bssid || !ieee80211_is_data(hdr->frame_control))
2631 return 0;
2632 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
2633 return 0;
2634 break;
2635 default:
2636 /* should never get here */
2637 WARN_ON(1);
2638 break;
2639 }
2640
2641 return 1;
2642 }
2643
2644 /*
2645 * This function returns whether or not the SKB
2646 * was destined for RX processing, which,
2647 * if consume is true, is equivalent to whether
2648 * or not the skb was consumed.
2649 */
2650 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
2651 struct sk_buff *skb, bool consume)
2652 {
2653 struct ieee80211_local *local = rx->local;
2654 struct ieee80211_sub_if_data *sdata = rx->sdata;
2655 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2656 struct ieee80211_hdr *hdr = (void *)skb->data;
2657 int prepares;
2658
2659 rx->skb = skb;
2660 status->rx_flags |= IEEE80211_RX_RA_MATCH;
2661 prepares = prepare_for_handlers(rx, hdr);
2662
2663 if (!prepares)
2664 return false;
2665
2666 if (status->flag & RX_FLAG_MMIC_ERROR) {
2667 if (status->rx_flags & IEEE80211_RX_RA_MATCH)
2668 ieee80211_rx_michael_mic_report(hdr, rx);
2669 return false;
2670 }
2671
2672 if (!consume) {
2673 skb = skb_copy(skb, GFP_ATOMIC);
2674 if (!skb) {
2675 if (net_ratelimit())
2676 wiphy_debug(local->hw.wiphy,
2677 "failed to copy multicast frame for %s\n",
2678 sdata->name);
2679 return true;
2680 }
2681
2682 rx->skb = skb;
2683 }
2684
2685 ieee80211_invoke_rx_handlers(rx);
2686 return true;
2687 }
2688
2689 /*
2690 * This is the actual Rx frames handler. As it belongs to the Rx path it must
2691 * be called with rcu_read_lock protection.
2692 */
2693 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2694 struct sk_buff *skb)
2695 {
2696 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2697 struct ieee80211_local *local = hw_to_local(hw);
2698 struct ieee80211_sub_if_data *sdata;
2699 struct ieee80211_hdr *hdr;
2700 __le16 fc;
2701 struct ieee80211_rx_data rx;
2702 struct ieee80211_sub_if_data *prev;
2703 struct sta_info *sta, *tmp, *prev_sta;
2704 int err = 0;
2705
2706 fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
2707 memset(&rx, 0, sizeof(rx));
2708 rx.skb = skb;
2709 rx.local = local;
2710
2711 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
2712 local->dot11ReceivedFragmentCount++;
2713
2714 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
2715 test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
2716 status->rx_flags |= IEEE80211_RX_IN_SCAN;
2717
2718 if (ieee80211_is_mgmt(fc))
2719 err = skb_linearize(skb);
2720 else
2721 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
2722
2723 if (err) {
2724 dev_kfree_skb(skb);
2725 return;
2726 }
2727
2728 hdr = (struct ieee80211_hdr *)skb->data;
2729 ieee80211_parse_qos(&rx);
2730 ieee80211_verify_alignment(&rx);
2731
2732 if (ieee80211_is_data(fc)) {
2733 prev_sta = NULL;
2734
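/*
 * Give every station entry matching the transmitter address a look at
 * the frame; only the last match may consume the skb, earlier matches
 * get a copy (same pattern as the interface loop further down).
 */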
2735 for_each_sta_info(local, hdr->addr2, sta, tmp) {
2736 if (!prev_sta) {
2737 prev_sta = sta;
2738 continue;
2739 }
2740
2741 rx.sta = prev_sta;
2742 rx.sdata = prev_sta->sdata;
2743 ieee80211_prepare_and_rx_handle(&rx, skb, false);
2744
2745 prev_sta = sta;
2746 }
2747
2748 if (prev_sta) {
2749 rx.sta = prev_sta;
2750 rx.sdata = prev_sta->sdata;
2751
2752 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2753 return;
2754 goto out;
2755 }
2756 }
2757
2758 prev = NULL;
2759
2760 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2761 if (!ieee80211_sdata_running(sdata))
2762 continue;
2763
2764 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
2765 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
2766 continue;
2767
2768 /*
2769 * frame is destined for this interface, but if it's
2770 * not also for the previous one we handle that after
2771 * the loop to avoid copying the SKB once too much
2772 */
2773
2774 if (!prev) {
2775 prev = sdata;
2776 continue;
2777 }
2778
2779 rx.sta = sta_info_get_bss(prev, hdr->addr2);
2780 rx.sdata = prev;
2781 ieee80211_prepare_and_rx_handle(&rx, skb, false);
2782
2783 prev = sdata;
2784 }
2785
2786 if (prev) {
2787 rx.sta = sta_info_get_bss(prev, hdr->addr2);
2788 rx.sdata = prev;
2789
2790 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2791 return;
2792 }
2793
2794 out:
2795 dev_kfree_skb(skb);
2796 }
2797
2798 /*
2799 * This is the receive path handler. It is called by a low level driver when an
2800 * 802.11 MPDU is received from the hardware.
2801 */
2802 void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
2803 {
2804 struct ieee80211_local *local = hw_to_local(hw);
2805 struct ieee80211_rate *rate = NULL;
2806 struct ieee80211_supported_band *sband;
2807 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2808
2809 WARN_ON_ONCE(softirq_count() == 0);
2810
2811 if (WARN_ON(status->band < 0 ||
2812 status->band >= IEEE80211_NUM_BANDS))
2813 goto drop;
2814
2815 sband = local->hw.wiphy->bands[status->band];
2816 if (WARN_ON(!sband))
2817 goto drop;
2818
2819 /*
2820 * If we're suspending, it is possible although not too likely
2821 * that we'd be receiving frames after having already partially
2822 * quiesced the stack. We can't process such frames then since
2823 * that might, for example, cause stations to be added or other
2824 * driver callbacks be invoked.
2825 */
2826 if (unlikely(local->quiescing || local->suspended))
2827 goto drop;
2828
2829 /*
2830 * The same happens when we're not even started,
2831 * but that's worth a warning.
2832 */
2833 if (WARN_ON(!local->started))
2834 goto drop;
2835
2836 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
2837 /*
2838 * Validate the rate, unless a PLCP error means that
2839 * we probably can't have a valid rate here anyway.
2840 */
2841
2842 if (status->flag & RX_FLAG_HT) {
2843 /*
2844 * rate_idx is MCS index, which can be [0-76]
2845 * as documented on:
2846 *
2847 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
2848 *
2849 * Anything else would be some sort of driver or
2850 * hardware error. The driver should catch hardware
2851 * errors.
2852 */
2853 if (WARN((status->rate_idx < 0 ||
2854 status->rate_idx > 76),
2855 "Rate marked as an HT rate but passed "
2856 "status->rate_idx is not "
2857 "an MCS index [0-76]: %d (0x%02x)\n",
2858 status->rate_idx,
2859 status->rate_idx))
2860 goto drop;
2861 } else {
2862 if (WARN_ON(status->rate_idx < 0 ||
2863 status->rate_idx >= sband->n_bitrates))
2864 goto drop;
2865 rate = &sband->bitrates[status->rate_idx];
2866 }
2867 }
2868
2869 status->rx_flags = 0;
2870
2871 /*
2872 * key references and virtual interfaces are protected using RCU
2873 * and this requires that we are in a read-side RCU section during
2874 * receive processing
2875 */
2876 rcu_read_lock();
2877
2878 /*
2879 * Frames with failed FCS/PLCP checksum are not returned,
2880 * all other frames are returned without radiotap header
2881 * if it was previously present.
2882 * Also, frames with less than 16 bytes are dropped.
2883 */
2884 skb = ieee80211_rx_monitor(local, skb, rate);
2885 if (!skb) {
2886 rcu_read_unlock();
2887 return;
2888 }
2889
2890 __ieee80211_rx_handle_packet(hw, skb);
2891
2892 rcu_read_unlock();
2893
2894 return;
2895 drop:
2896 kfree_skb(skb);
2897 }
2898 EXPORT_SYMBOL(ieee80211_rx);
2899
2900 /* This is a version of the rx handler that can be called from hard irq
2901 * context. Post the skb on the queue and schedule the tasklet */
2902 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
2903 {
2904 struct ieee80211_local *local = hw_to_local(hw);
2905
2906 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
2907
2908 skb->pkt_type = IEEE80211_RX_MSG;
2909 skb_queue_tail(&local->skb_queue, skb);
2910 tasklet_schedule(&local->tasklet);
2911 }
2912 EXPORT_SYMBOL(ieee80211_rx_irqsafe);
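/*
 * A rough sketch of how a low level driver might feed this receive path
 * from its interrupt handler; the mydrv_rx_done() name and the hard coded
 * status values are purely illustrative and not part of mac80211:
 *
 *	static void mydrv_rx_done(struct ieee80211_hw *hw, struct sk_buff *skb)
 *	{
 *		struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 *
 *		memset(status, 0, sizeof(*status));
 *		status->band = IEEE80211_BAND_2GHZ;
 *		status->freq = 2412;		(channel 1, in MHz)
 *		status->signal = -55;		(dBm, with IEEE80211_HW_SIGNAL_DBM)
 *		status->rate_idx = 0;		(index into the band's bitrates)
 *
 *		ieee80211_rx_irqsafe(hw, skb);	(or ieee80211_rx() when already
 *						 running in softirq context)
 *	}
 */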