1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/slab.h>
14 #include <linux/kernel.h>
15 #include <linux/skbuff.h>
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/rcupdate.h>
19 #include <linux/export.h>
20 #include <net/mac80211.h>
21 #include <net/ieee80211_radiotap.h>
22 #include <asm/unaligned.h>
23
24 #include "ieee80211_i.h"
25 #include "driver-ops.h"
26 #include "led.h"
27 #include "mesh.h"
28 #include "wep.h"
29 #include "wpa.h"
30 #include "tkip.h"
31 #include "wme.h"
32 #include "rate.h"
33
34 /*
35 * monitor mode reception
36 *
37 * This function cleans up the SKB, i.e. it removes all the stuff
38 * only useful for monitoring.
39 */
40 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
41 struct sk_buff *skb)
42 {
43 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
44 if (likely(skb->len > FCS_LEN))
45 __pskb_trim(skb, skb->len - FCS_LEN);
46 else {
47 /* driver bug */
48 WARN_ON(1);
49 dev_kfree_skb(skb);
50 skb = NULL;
51 }
52 }
53
54 return skb;
55 }
56
57 static inline int should_drop_frame(struct sk_buff *skb,
58 int present_fcs_len)
59 {
60 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
61 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
62
63 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
64 return 1;
65 if (unlikely(skb->len < 16 + present_fcs_len))
66 return 1;
67 if (ieee80211_is_ctl(hdr->frame_control) &&
68 !ieee80211_is_pspoll(hdr->frame_control) &&
69 !ieee80211_is_back_req(hdr->frame_control))
70 return 1;
71 return 0;
72 }
73
74 static int
75 ieee80211_rx_radiotap_len(struct ieee80211_local *local,
76 struct ieee80211_rx_status *status)
77 {
78 int len;
79
80 /* always present fields */
81 len = sizeof(struct ieee80211_radiotap_header) + 9;
82
83 if (status->flag & RX_FLAG_MACTIME_MPDU)
84 len += 8;
85 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
86 len += 1;
87
88 if (len & 1) /* padding for RX_FLAGS if necessary */
89 len++;
90
91 if (status->flag & RX_FLAG_HT) /* HT info */
92 len += 3;
93
94 return len;
95 }
96
97 /**
98 * ieee80211_add_rx_radiotap_header - add radiotap header
99 *
100 * add a radiotap header containing all the fields which the hardware provided.
101 */
102 static void
103 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
104 struct sk_buff *skb,
105 struct ieee80211_rate *rate,
106 int rtap_len, bool has_fcs)
107 {
108 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
109 struct ieee80211_radiotap_header *rthdr;
110 unsigned char *pos;
111 u16 rx_flags = 0;
112
113 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
114 memset(rthdr, 0, rtap_len);
115
116 /* radiotap header, set always present flags */
117 rthdr->it_present =
118 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
119 (1 << IEEE80211_RADIOTAP_CHANNEL) |
120 (1 << IEEE80211_RADIOTAP_ANTENNA) |
121 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
122 rthdr->it_len = cpu_to_le16(rtap_len);
123
124 pos = (unsigned char *)(rthdr+1);
125
126 /* the order of the following fields is important */
127
128 /* IEEE80211_RADIOTAP_TSFT */
129 if (status->flag & RX_FLAG_MACTIME_MPDU) {
130 put_unaligned_le64(status->mactime, pos);
131 rthdr->it_present |=
132 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
133 pos += 8;
134 }
135
136 /* IEEE80211_RADIOTAP_FLAGS */
137 if (has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS))
138 *pos |= IEEE80211_RADIOTAP_F_FCS;
139 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
140 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
141 if (status->flag & RX_FLAG_SHORTPRE)
142 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
143 pos++;
144
145 /* IEEE80211_RADIOTAP_RATE */
146 if (!rate || status->flag & RX_FLAG_HT) {
147 /*
148 * Without rate information don't add it. If this is HT,
149 * the MCS information goes into a separate radiotap field
150 * added below. The byte here is still needed as padding
151 * for the channel field though, so initialise it to 0.
152 */
153 *pos = 0;
154 } else {
155 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
156 *pos = rate->bitrate / 5;
157 }
158 pos++;
159
160 /* IEEE80211_RADIOTAP_CHANNEL */
161 put_unaligned_le16(status->freq, pos);
162 pos += 2;
163 if (status->band == IEEE80211_BAND_5GHZ)
164 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
165 pos);
166 else if (status->flag & RX_FLAG_HT)
167 put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
168 pos);
169 else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
170 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
171 pos);
172 else if (rate)
173 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
174 pos);
175 else
176 put_unaligned_le16(IEEE80211_CHAN_2GHZ, pos);
177 pos += 2;
178
179 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
180 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM &&
181 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
182 *pos = status->signal;
183 rthdr->it_present |=
184 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
185 pos++;
186 }
187
188 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
189
190 /* IEEE80211_RADIOTAP_ANTENNA */
191 *pos = status->antenna;
192 pos++;
193
194 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
195
196 /* IEEE80211_RADIOTAP_RX_FLAGS */
197 /* ensure 2 byte alignment for the 2 byte field as required */
198 if ((pos - (u8 *)rthdr) & 1)
199 pos++;
200 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
201 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
202 put_unaligned_le16(rx_flags, pos);
203 pos += 2;
204
205 if (status->flag & RX_FLAG_HT) {
206 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
207 *pos++ = local->hw.radiotap_mcs_details;
208 *pos = 0;
209 if (status->flag & RX_FLAG_SHORT_GI)
210 *pos |= IEEE80211_RADIOTAP_MCS_SGI;
211 if (status->flag & RX_FLAG_40MHZ)
212 *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
213 if (status->flag & RX_FLAG_HT_GF)
214 *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
215 pos++;
216 *pos++ = status->rate_idx;
217 }
218 }
219
220 /*
221 * This function copies a received frame to all monitor interfaces and
222 * returns a cleaned-up SKB that no longer includes the FCS nor the
223 * radiotap header the driver might have added.
224 */
225 static struct sk_buff *
226 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
227 struct ieee80211_rate *rate)
228 {
229 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
230 struct ieee80211_sub_if_data *sdata;
231 int needed_headroom;
232 struct sk_buff *skb, *skb2;
233 struct net_device *prev_dev = NULL;
234 int present_fcs_len = 0;
235
236 /*
237 * First, we may need to make a copy of the skb because
238 * (1) we need to modify it for radiotap (if not present), and
239 * (2) the other RX handlers will modify the skb we got.
240 *
241 * We don't need to, of course, if we aren't going to return
242 * the SKB because it has a bad FCS/PLCP checksum.
243 */
244
245 /* room for the radiotap header based on driver features */
246 needed_headroom = ieee80211_rx_radiotap_len(local, status);
247
248 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
249 present_fcs_len = FCS_LEN;
250
251 /* make sure hdr->frame_control is on the linear part */
252 if (!pskb_may_pull(origskb, 2)) {
253 dev_kfree_skb(origskb);
254 return NULL;
255 }
256
257 if (!local->monitors) {
258 if (should_drop_frame(origskb, present_fcs_len)) {
259 dev_kfree_skb(origskb);
260 return NULL;
261 }
262
263 return remove_monitor_info(local, origskb);
264 }
265
266 if (should_drop_frame(origskb, present_fcs_len)) {
267 /* only need to expand headroom if necessary */
268 skb = origskb;
269 origskb = NULL;
270
271 /*
272 * This shouldn't trigger often because most devices have an
273 * RX header they pull before we get here, and that should
274 * be big enough for our radiotap information. We should
275 * probably export the length to drivers so that we can have
276 * them allocate enough headroom to start with.
277 */
278 if (skb_headroom(skb) < needed_headroom &&
279 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
280 dev_kfree_skb(skb);
281 return NULL;
282 }
283 } else {
284 /*
285 * Need to make a copy and possibly remove radiotap header
286 * and FCS from the original.
287 */
288 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
289
290 origskb = remove_monitor_info(local, origskb);
291
292 if (!skb)
293 return origskb;
294 }
295
296 /* prepend radiotap information */
297 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
298 true);
299
300 skb_reset_mac_header(skb);
301 skb->ip_summed = CHECKSUM_UNNECESSARY;
302 skb->pkt_type = PACKET_OTHERHOST;
303 skb->protocol = htons(ETH_P_802_2);
304
305 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
306 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
307 continue;
308
309 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
310 continue;
311
312 if (!ieee80211_sdata_running(sdata))
313 continue;
314
315 if (prev_dev) {
316 skb2 = skb_clone(skb, GFP_ATOMIC);
317 if (skb2) {
318 skb2->dev = prev_dev;
319 netif_receive_skb(skb2);
320 }
321 }
322
323 prev_dev = sdata->dev;
324 sdata->dev->stats.rx_packets++;
325 sdata->dev->stats.rx_bytes += skb->len;
326 }
327
328 if (prev_dev) {
329 skb->dev = prev_dev;
330 netif_receive_skb(skb);
331 } else
332 dev_kfree_skb(skb);
333
334 return origskb;
335 }
336
337
338 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
339 {
340 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
341 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
342 int tid, seqno_idx, security_idx;
343
344 /* does the frame have a qos control field? */
345 if (ieee80211_is_data_qos(hdr->frame_control)) {
346 u8 *qc = ieee80211_get_qos_ctl(hdr);
347 /* frame has qos control */
348 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
349 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
350 status->rx_flags |= IEEE80211_RX_AMSDU;
351
352 seqno_idx = tid;
353 security_idx = tid;
354 } else {
355 /*
356 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
357 *
358 * Sequence numbers for management frames, QoS data
359 * frames with a broadcast/multicast address in the
360 * Address 1 field, and all non-QoS data frames sent
361 * by QoS STAs are assigned using an additional single
362 * modulo-4096 counter, [...]
363 *
364 * We also use that counter for non-QoS STAs.
365 */
366 seqno_idx = NUM_RX_DATA_QUEUES;
367 security_idx = 0;
368 if (ieee80211_is_mgmt(hdr->frame_control))
369 security_idx = NUM_RX_DATA_QUEUES;
370 tid = 0;
371 }
372
373 rx->seqno_idx = seqno_idx;
374 rx->security_idx = security_idx;
375 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
376 * For now, set skb->priority to 0 for other cases. */
377 rx->skb->priority = (tid > 7) ? 0 : tid;
378 }
379
380 /**
381 * DOC: Packet alignment
382 *
383 * Drivers always need to pass packets that are aligned to two-byte boundaries
384 * to the stack.
385 *
386 * Additionally, drivers should, if possible, align the payload data in a way that
387 * guarantees that the contained IP header is aligned to a four-byte
388 * boundary. In the case of regular frames, this simply means aligning the
389 * payload to a four-byte boundary (because either the IP header is directly
390 * contained, or IV/RFC1042 headers that have a length divisible by four are
391 * in front of it). If the payload data is not properly aligned and the
392 * architecture doesn't support efficient unaligned operations, mac80211
393 * will align the data.
394 *
395 * With A-MSDU frames, however, the payload data address must yield two modulo
396 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
397 * push the IP header further back to a multiple of four again. Thankfully, the
398 * specs were sane enough this time around to require padding each A-MSDU
399 * subframe to a length that is a multiple of four.
400 *
401 * Padding like that added by Atheros hardware between the 802.11 header and
402 * the payload is not supported; the driver is required to move the 802.11
403 * header so that it is directly in front of the payload in that case.
404 */
405 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
406 {
407 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
408 WARN_ONCE((unsigned long)rx->skb->data & 1,
409 "unaligned packet at 0x%p\n", rx->skb->data);
410 #endif
411 }
412
413
414 /* rx handlers */
415
416 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
417 {
418 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
419
420 if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
421 return 0;
422
423 return ieee80211_is_robust_mgmt_frame(hdr);
424 }
425
426
427 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
428 {
429 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
430
431 if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
432 return 0;
433
434 return ieee80211_is_robust_mgmt_frame(hdr);
435 }
436
437
438 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
439 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
440 {
441 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
442 struct ieee80211_mmie *mmie;
443
444 if (skb->len < 24 + sizeof(*mmie) ||
445 !is_multicast_ether_addr(hdr->da))
446 return -1;
447
448 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
449 return -1; /* not a robust management frame */
450
451 mmie = (struct ieee80211_mmie *)
452 (skb->data + skb->len - sizeof(*mmie));
453 if (mmie->element_id != WLAN_EID_MMIE ||
454 mmie->length != sizeof(*mmie) - 2)
455 return -1;
456
457 return le16_to_cpu(mmie->key_id);
458 }
459
460
461 static ieee80211_rx_result
462 ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
463 {
464 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
465 char *dev_addr = rx->sdata->vif.addr;
466
467 if (ieee80211_is_data(hdr->frame_control)) {
468 if (is_multicast_ether_addr(hdr->addr1)) {
469 if (ieee80211_has_tods(hdr->frame_control) ||
470 !ieee80211_has_fromds(hdr->frame_control))
471 return RX_DROP_MONITOR;
472 if (ether_addr_equal(hdr->addr3, dev_addr))
473 return RX_DROP_MONITOR;
474 } else {
475 if (!ieee80211_has_a4(hdr->frame_control))
476 return RX_DROP_MONITOR;
477 if (ether_addr_equal(hdr->addr4, dev_addr))
478 return RX_DROP_MONITOR;
479 }
480 }
481
482 /* If there is not an established peer link and this is not a peer link
483 * establishment frame, beacon or probe, drop the frame.
484 */
485
486 if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
487 struct ieee80211_mgmt *mgmt;
488
489 if (!ieee80211_is_mgmt(hdr->frame_control))
490 return RX_DROP_MONITOR;
491
492 if (ieee80211_is_action(hdr->frame_control)) {
493 u8 category;
494 mgmt = (struct ieee80211_mgmt *)hdr;
495 category = mgmt->u.action.category;
496 if (category != WLAN_CATEGORY_MESH_ACTION &&
497 category != WLAN_CATEGORY_SELF_PROTECTED)
498 return RX_DROP_MONITOR;
499 return RX_CONTINUE;
500 }
501
502 if (ieee80211_is_probe_req(hdr->frame_control) ||
503 ieee80211_is_probe_resp(hdr->frame_control) ||
504 ieee80211_is_beacon(hdr->frame_control) ||
505 ieee80211_is_auth(hdr->frame_control))
506 return RX_CONTINUE;
507
508 return RX_DROP_MONITOR;
509
510 }
511
512 return RX_CONTINUE;
513 }
514
515 #define SEQ_MODULO 0x1000
516 #define SEQ_MASK 0xfff
517
518 static inline int seq_less(u16 sq1, u16 sq2)
519 {
520 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
521 }
522
523 static inline u16 seq_inc(u16 sq)
524 {
525 return (sq + 1) & SEQ_MASK;
526 }
527
528 static inline u16 seq_sub(u16 sq1, u16 sq2)
529 {
530 return (sq1 - sq2) & SEQ_MASK;
531 }
532
533
534 static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
535 struct tid_ampdu_rx *tid_agg_rx,
536 int index)
537 {
538 struct ieee80211_local *local = sdata->local;
539 struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
540 struct ieee80211_rx_status *status;
541
542 lockdep_assert_held(&tid_agg_rx->reorder_lock);
543
544 if (!skb)
545 goto no_frame;
546
547 /* release the frame from the reorder ring buffer */
548 tid_agg_rx->stored_mpdu_num--;
549 tid_agg_rx->reorder_buf[index] = NULL;
550 status = IEEE80211_SKB_RXCB(skb);
551 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
552 skb_queue_tail(&local->rx_skb_queue, skb);
553
554 no_frame:
555 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
556 }
557
558 static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
559 struct tid_ampdu_rx *tid_agg_rx,
560 u16 head_seq_num)
561 {
562 int index;
563
564 lockdep_assert_held(&tid_agg_rx->reorder_lock);
565
566 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
567 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
568 tid_agg_rx->buf_size;
569 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index);
570 }
571 }
572
573 /*
574 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
575 * the skb was added to the buffer longer than this time ago, the earlier
576 * frames that have not yet been received are assumed to be lost and the skb
577 * can be released for processing. This may also release other skb's from the
578 * reorder buffer if there are no additional gaps between the frames.
579 *
580 * Callers must hold tid_agg_rx->reorder_lock.
581 */
582 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
583
584 static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
585 struct tid_ampdu_rx *tid_agg_rx)
586 {
587 int index, j;
588
589 lockdep_assert_held(&tid_agg_rx->reorder_lock);
590
591 /* release the buffer until next missing frame */
592 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
593 tid_agg_rx->buf_size;
594 if (!tid_agg_rx->reorder_buf[index] &&
595 tid_agg_rx->stored_mpdu_num) {
596 /*
597 * No buffers ready to be released, but check whether any
598 * frames in the reorder buffer have timed out.
599 */
600 int skipped = 1;
601 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
602 j = (j + 1) % tid_agg_rx->buf_size) {
603 if (!tid_agg_rx->reorder_buf[j]) {
604 skipped++;
605 continue;
606 }
607 if (skipped &&
608 !time_after(jiffies, tid_agg_rx->reorder_time[j] +
609 HT_RX_REORDER_BUF_TIMEOUT))
610 goto set_release_timer;
611
612 ht_dbg_ratelimited(sdata,
613 "release an RX reorder frame due to timeout on earlier frames\n");
614 ieee80211_release_reorder_frame(sdata, tid_agg_rx, j);
615
616 /*
617 * Increment the head seq# also for the skipped slots.
618 */
619 tid_agg_rx->head_seq_num =
620 (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
621 skipped = 0;
622 }
623 } else while (tid_agg_rx->reorder_buf[index]) {
624 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index);
625 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
626 tid_agg_rx->buf_size;
627 }
628
629 if (tid_agg_rx->stored_mpdu_num) {
630 j = index = seq_sub(tid_agg_rx->head_seq_num,
631 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
632
633 for (; j != (index - 1) % tid_agg_rx->buf_size;
634 j = (j + 1) % tid_agg_rx->buf_size) {
635 if (tid_agg_rx->reorder_buf[j])
636 break;
637 }
638
639 set_release_timer:
640
641 mod_timer(&tid_agg_rx->reorder_timer,
642 tid_agg_rx->reorder_time[j] + 1 +
643 HT_RX_REORDER_BUF_TIMEOUT);
644 } else {
645 del_timer(&tid_agg_rx->reorder_timer);
646 }
647 }
648
649 /*
650 * As this function belongs to the RX path it must be under
651 * rcu_read_lock protection. It returns false if the frame
652 * can be processed immediately, true if it was consumed.
653 */
654 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
655 struct tid_ampdu_rx *tid_agg_rx,
656 struct sk_buff *skb)
657 {
658 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
659 u16 sc = le16_to_cpu(hdr->seq_ctrl);
660 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
661 u16 head_seq_num, buf_size;
662 int index;
663 bool ret = true;
664
665 spin_lock(&tid_agg_rx->reorder_lock);
666
667 buf_size = tid_agg_rx->buf_size;
668 head_seq_num = tid_agg_rx->head_seq_num;
669
670 /* frame with out of date sequence number */
671 if (seq_less(mpdu_seq_num, head_seq_num)) {
672 dev_kfree_skb(skb);
673 goto out;
674 }
675
676 /*
677 * If the frame's sequence number exceeds our buffering window
678 * size, release some previous frames to make room for this one.
679 */
680 if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
681 head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
682 /* release stored frames up to new head to stack */
683 ieee80211_release_reorder_frames(sdata, tid_agg_rx,
684 head_seq_num);
685 }
686
687 /* Now the new frame is always in the range of the reordering buffer */
688
689 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;
690
691 /* check if we already stored this frame */
692 if (tid_agg_rx->reorder_buf[index]) {
693 dev_kfree_skb(skb);
694 goto out;
695 }
696
697 /*
698 * If the current MPDU is in the right order and nothing else
699 * is stored we can process it directly, no need to buffer it.
700 * If it is first but there's something stored, we may be able
701 * to release frames after this one.
702 */
703 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
704 tid_agg_rx->stored_mpdu_num == 0) {
705 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
706 ret = false;
707 goto out;
708 }
709
710 /* put the frame in the reordering buffer */
711 tid_agg_rx->reorder_buf[index] = skb;
712 tid_agg_rx->reorder_time[index] = jiffies;
713 tid_agg_rx->stored_mpdu_num++;
714 ieee80211_sta_reorder_release(sdata, tid_agg_rx);
715
716 out:
717 spin_unlock(&tid_agg_rx->reorder_lock);
718 return ret;
719 }
720
721 /*
722 * Reorder MPDUs from A-MPDUs, keeping them in a buffer. The frame is
723 * either queued for immediate processing or held by the reorder buffer.
724 */
725 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
726 {
727 struct sk_buff *skb = rx->skb;
728 struct ieee80211_local *local = rx->local;
729 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
730 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
731 struct sta_info *sta = rx->sta;
732 struct tid_ampdu_rx *tid_agg_rx;
733 u16 sc;
734 u8 tid, ack_policy;
735
736 if (!ieee80211_is_data_qos(hdr->frame_control))
737 goto dont_reorder;
738
739 /*
740 * filter the QoS data rx stream according to
741 * STA/TID and check if this STA/TID is on aggregation
742 */
743
744 if (!sta)
745 goto dont_reorder;
746
747 ack_policy = *ieee80211_get_qos_ctl(hdr) &
748 IEEE80211_QOS_CTL_ACK_POLICY_MASK;
749 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
750
751 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
752 if (!tid_agg_rx)
753 goto dont_reorder;
754
755 /* qos null data frames are excluded */
756 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
757 goto dont_reorder;
758
759 /* not part of a BA session */
760 if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
761 ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
762 goto dont_reorder;
763
764 /* not actually part of this BA session */
765 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
766 goto dont_reorder;
767
768 /* new, potentially un-ordered, ampdu frame - process it */
769
770 /* reset session timer */
771 if (tid_agg_rx->timeout)
772 tid_agg_rx->last_rx = jiffies;
773
774 /* if this mpdu is fragmented - terminate rx aggregation session */
775 sc = le16_to_cpu(hdr->seq_ctrl);
776 if (sc & IEEE80211_SCTL_FRAG) {
777 skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
778 skb_queue_tail(&rx->sdata->skb_queue, skb);
779 ieee80211_queue_work(&local->hw, &rx->sdata->work);
780 return;
781 }
782
783 /*
784 * No locking needed -- we will only ever process one
785 * RX packet at a time, and thus own tid_agg_rx. All
786 * other code manipulating it needs to (and does) make
787 * sure that we cannot get to it any more before doing
788 * anything with it.
789 */
790 if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb))
791 return;
792
793 dont_reorder:
794 skb_queue_tail(&local->rx_skb_queue, skb);
795 }
796
797 static ieee80211_rx_result debug_noinline
798 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
799 {
800 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
801 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
802
803 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
804 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
805 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
806 rx->sta->last_seq_ctrl[rx->seqno_idx] ==
807 hdr->seq_ctrl)) {
808 if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
809 rx->local->dot11FrameDuplicateCount++;
810 rx->sta->num_duplicates++;
811 }
812 return RX_DROP_UNUSABLE;
813 } else
814 rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
815 }
816
817 if (unlikely(rx->skb->len < 16)) {
818 I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
819 return RX_DROP_MONITOR;
820 }
821
822 /* Drop disallowed frame classes based on STA auth/assoc state;
823 * IEEE 802.11, Chap 5.5.
824 *
825 * mac80211 filters only based on association state, i.e. it drops
826 * Class 3 frames from not associated stations. hostapd sends
827 * deauth/disassoc frames when needed. In addition, hostapd is
828 * responsible for filtering on both auth and assoc states.
829 */
830
831 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
832 return ieee80211_rx_mesh_check(rx);
833
834 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
835 ieee80211_is_pspoll(hdr->frame_control)) &&
836 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
837 rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
838 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
839 /*
840 * accept port control frames from the AP even when it's not
841 * yet marked ASSOC to prevent a race where we don't set the
842 * assoc bit quickly enough before it sends the first frame
843 */
844 if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
845 ieee80211_is_data_present(hdr->frame_control)) {
846 u16 ethertype;
847 u8 *payload;
848
849 payload = rx->skb->data +
850 ieee80211_hdrlen(hdr->frame_control);
851 ethertype = (payload[6] << 8) | payload[7];
852 if (cpu_to_be16(ethertype) ==
853 rx->sdata->control_port_protocol)
854 return RX_CONTINUE;
855 }
856
857 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
858 cfg80211_rx_spurious_frame(rx->sdata->dev,
859 hdr->addr2,
860 GFP_ATOMIC))
861 return RX_DROP_UNUSABLE;
862
863 return RX_DROP_MONITOR;
864 }
865
866 return RX_CONTINUE;
867 }
868
869
870 static ieee80211_rx_result debug_noinline
871 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
872 {
873 struct sk_buff *skb = rx->skb;
874 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
875 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
876 int keyidx;
877 int hdrlen;
878 ieee80211_rx_result result = RX_DROP_UNUSABLE;
879 struct ieee80211_key *sta_ptk = NULL;
880 int mmie_keyidx = -1;
881 __le16 fc;
882
883 /*
884 * Key selection 101
885 *
886 * There are four types of keys:
887 * - GTK (group keys)
888 * - IGTK (group keys for management frames)
889 * - PTK (pairwise keys)
890 * - STK (station-to-station pairwise keys)
891 *
892 * When selecting a key, we have to distinguish between multicast
893 * (including broadcast) and unicast frames, the latter can only
894 * use PTKs and STKs while the former always use GTKs and IGTKs.
895 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
896 * unicast frames can also use key indices like GTKs. Hence, if we
897 * don't have a PTK/STK we check the key index for a WEP key.
898 *
899 * Note that in a regular BSS, multicast frames are sent by the
900 * AP only, associated stations unicast the frame to the AP first
901 * which then multicasts it on their behalf.
902 *
903 * There is also a slight problem in IBSS mode: GTKs are negotiated
904 * with each station, that is something we don't currently handle.
905 * The spec seems to expect that one negotiates the same key with
906 * every station but there's no such requirement; VLANs could be
907 * possible.
908 */
909
910 /*
911 * No point in finding a key and decrypting if the frame is neither
912 * addressed to us nor a multicast frame.
913 */
914 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
915 return RX_CONTINUE;
916
917 /* start without a key */
918 rx->key = NULL;
919
920 if (rx->sta)
921 sta_ptk = rcu_dereference(rx->sta->ptk);
922
923 fc = hdr->frame_control;
924
925 if (!ieee80211_has_protected(fc))
926 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
927
928 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
929 rx->key = sta_ptk;
930 if ((status->flag & RX_FLAG_DECRYPTED) &&
931 (status->flag & RX_FLAG_IV_STRIPPED))
932 return RX_CONTINUE;
933 /* Skip decryption if the frame is not protected. */
934 if (!ieee80211_has_protected(fc))
935 return RX_CONTINUE;
936 } else if (mmie_keyidx >= 0) {
937 /* Broadcast/multicast robust management frame / BIP */
938 if ((status->flag & RX_FLAG_DECRYPTED) &&
939 (status->flag & RX_FLAG_IV_STRIPPED))
940 return RX_CONTINUE;
941
942 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
943 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
944 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
945 if (rx->sta)
946 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
947 if (!rx->key)
948 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
949 } else if (!ieee80211_has_protected(fc)) {
950 /*
951 * The frame was not protected, so skip decryption. However, we
952 * need to set rx->key if there is a key that could have been
953 * used so that the frame may be dropped if encryption would
954 * have been expected.
955 */
956 struct ieee80211_key *key = NULL;
957 struct ieee80211_sub_if_data *sdata = rx->sdata;
958 int i;
959
960 if (ieee80211_is_mgmt(fc) &&
961 is_multicast_ether_addr(hdr->addr1) &&
962 (key = rcu_dereference(rx->sdata->default_mgmt_key)))
963 rx->key = key;
964 else {
965 if (rx->sta) {
966 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
967 key = rcu_dereference(rx->sta->gtk[i]);
968 if (key)
969 break;
970 }
971 }
972 if (!key) {
973 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
974 key = rcu_dereference(sdata->keys[i]);
975 if (key)
976 break;
977 }
978 }
979 if (key)
980 rx->key = key;
981 }
982 return RX_CONTINUE;
983 } else {
984 u8 keyid;
985 /*
986 * The device doesn't give us the IV so we won't be
987 * able to look up the key. That's ok though, we
988 * don't need to decrypt the frame, we just won't
989 * be able to keep statistics accurate.
990 * Except for key threshold notifications, should
991 * we somehow allow the driver to tell us which key
992 * the hardware used if this flag is set?
993 */
994 if ((status->flag & RX_FLAG_DECRYPTED) &&
995 (status->flag & RX_FLAG_IV_STRIPPED))
996 return RX_CONTINUE;
997
998 hdrlen = ieee80211_hdrlen(fc);
999
1000 if (rx->skb->len < 8 + hdrlen)
1001 return RX_DROP_UNUSABLE; /* TODO: count this? */
1002
1003 /*
1004 * no need to call ieee80211_wep_get_keyidx,
1005 * it verifies a bunch of things we've done already
1006 */
1007 skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
1008 keyidx = keyid >> 6;
1009
1010 /* check per-station GTK first, if multicast packet */
1011 if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
1012 rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
1013
1014 /* if not found, try default key */
1015 if (!rx->key) {
1016 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
1017
1018 /*
1019 * RSNA-protected unicast frames should always be
1020 * sent with pairwise or station-to-station keys,
1021 * but for WEP we allow using a key index as well.
1022 */
1023 if (rx->key &&
1024 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
1025 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
1026 !is_multicast_ether_addr(hdr->addr1))
1027 rx->key = NULL;
1028 }
1029 }
1030
1031 if (rx->key) {
1032 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
1033 return RX_DROP_MONITOR;
1034
1035 rx->key->tx_rx_count++;
1036 /* TODO: add threshold stuff again */
1037 } else {
1038 return RX_DROP_MONITOR;
1039 }
1040
1041 switch (rx->key->conf.cipher) {
1042 case WLAN_CIPHER_SUITE_WEP40:
1043 case WLAN_CIPHER_SUITE_WEP104:
1044 result = ieee80211_crypto_wep_decrypt(rx);
1045 break;
1046 case WLAN_CIPHER_SUITE_TKIP:
1047 result = ieee80211_crypto_tkip_decrypt(rx);
1048 break;
1049 case WLAN_CIPHER_SUITE_CCMP:
1050 result = ieee80211_crypto_ccmp_decrypt(rx);
1051 break;
1052 case WLAN_CIPHER_SUITE_AES_CMAC:
1053 result = ieee80211_crypto_aes_cmac_decrypt(rx);
1054 break;
1055 default:
1056 /*
1057 * We can reach here only with HW-only algorithms
1058 * but why didn't it decrypt the frame?!
1059 */
1060 return RX_DROP_UNUSABLE;
1061 }
1062
1063 /* the hdr variable is invalid after the decrypt handlers */
1064
1065 /* either the frame has been decrypted or will be dropped */
1066 status->flag |= RX_FLAG_DECRYPTED;
1067
1068 return result;
1069 }
1070
1071 static ieee80211_rx_result debug_noinline
1072 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
1073 {
1074 struct ieee80211_local *local;
1075 struct ieee80211_hdr *hdr;
1076 struct sk_buff *skb;
1077
1078 local = rx->local;
1079 skb = rx->skb;
1080 hdr = (struct ieee80211_hdr *) skb->data;
1081
1082 if (!local->pspolling)
1083 return RX_CONTINUE;
1084
1085 if (!ieee80211_has_fromds(hdr->frame_control))
1086 /* this is not from AP */
1087 return RX_CONTINUE;
1088
1089 if (!ieee80211_is_data(hdr->frame_control))
1090 return RX_CONTINUE;
1091
1092 if (!ieee80211_has_moredata(hdr->frame_control)) {
1093 /* AP has no more frames buffered for us */
1094 local->pspolling = false;
1095 return RX_CONTINUE;
1096 }
1097
1098 /* more data bit is set, let's request a new frame from the AP */
1099 ieee80211_send_pspoll(local, rx->sdata);
1100
1101 return RX_CONTINUE;
1102 }
1103
1104 static void ap_sta_ps_start(struct sta_info *sta)
1105 {
1106 struct ieee80211_sub_if_data *sdata = sta->sdata;
1107 struct ieee80211_local *local = sdata->local;
1108
1109 atomic_inc(&sdata->bss->num_sta_ps);
1110 set_sta_flag(sta, WLAN_STA_PS_STA);
1111 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
1112 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1113 ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
1114 sta->sta.addr, sta->sta.aid);
1115 }
1116
1117 static void ap_sta_ps_end(struct sta_info *sta)
1118 {
1119 ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
1120 sta->sta.addr, sta->sta.aid);
1121
1122 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
1123 ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
1124 sta->sta.addr, sta->sta.aid);
1125 return;
1126 }
1127
1128 ieee80211_sta_ps_deliver_wakeup(sta);
1129 }
1130
1131 int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
1132 {
1133 struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
1134 bool in_ps;
1135
1136 WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS));
1137
1138 /* Don't let the same PS state be set twice */
1139 in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
1140 if ((start && in_ps) || (!start && !in_ps))
1141 return -EINVAL;
1142
1143 if (start)
1144 ap_sta_ps_start(sta_inf);
1145 else
1146 ap_sta_ps_end(sta_inf);
1147
1148 return 0;
1149 }
1150 EXPORT_SYMBOL(ieee80211_sta_ps_transition);
1151
1152 static ieee80211_rx_result debug_noinline
1153 ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
1154 {
1155 struct ieee80211_sub_if_data *sdata = rx->sdata;
1156 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
1157 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1158 int tid, ac;
1159
1160 if (!rx->sta || !(status->rx_flags & IEEE80211_RX_RA_MATCH))
1161 return RX_CONTINUE;
1162
1163 if (sdata->vif.type != NL80211_IFTYPE_AP &&
1164 sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
1165 return RX_CONTINUE;
1166
1167 /*
1168 * The device handles station powersave, so don't do anything about
1169 * uAPSD and PS-Poll frames (the latter shouldn't even be passed
1170 * up to mac80211 by the device since they're handled there.)
1171 */
1172 if (sdata->local->hw.flags & IEEE80211_HW_AP_LINK_PS)
1173 return RX_CONTINUE;
1174
1175 /*
1176 * Don't do anything if the station isn't already asleep. In
1177 * the uAPSD case, the station will probably be marked asleep,
1178 * in the PS-Poll case the station must be confused ...
1179 */
1180 if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
1181 return RX_CONTINUE;
1182
1183 if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
1184 if (!test_sta_flag(rx->sta, WLAN_STA_SP)) {
1185 if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
1186 ieee80211_sta_ps_deliver_poll_response(rx->sta);
1187 else
1188 set_sta_flag(rx->sta, WLAN_STA_PSPOLL);
1189 }
1190
1191 /* Free PS Poll skb here instead of returning RX_DROP that would
1192 * count as a dropped frame. */
1193 dev_kfree_skb(rx->skb);
1194
1195 return RX_QUEUED;
1196 } else if (!ieee80211_has_morefrags(hdr->frame_control) &&
1197 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1198 ieee80211_has_pm(hdr->frame_control) &&
1199 (ieee80211_is_data_qos(hdr->frame_control) ||
1200 ieee80211_is_qos_nullfunc(hdr->frame_control))) {
1201 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
1202 ac = ieee802_1d_to_ac[tid & 7];
1203
1204 /*
1205 * If this AC is not trigger-enabled do nothing.
1206 *
1207 * NB: This could/should check a separate bitmap of trigger-
1208 * enabled queues, but for now we only implement uAPSD w/o
1209 * TSPEC changes to the ACs, so they're always the same.
1210 */
1211 if (!(rx->sta->sta.uapsd_queues & BIT(ac)))
1212 return RX_CONTINUE;
1213
1214 /* if we are in a service period, do nothing */
1215 if (test_sta_flag(rx->sta, WLAN_STA_SP))
1216 return RX_CONTINUE;
1217
1218 if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
1219 ieee80211_sta_ps_deliver_uapsd(rx->sta);
1220 else
1221 set_sta_flag(rx->sta, WLAN_STA_UAPSD);
1222 }
1223
1224 return RX_CONTINUE;
1225 }
1226
1227 static ieee80211_rx_result debug_noinline
1228 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1229 {
1230 struct sta_info *sta = rx->sta;
1231 struct sk_buff *skb = rx->skb;
1232 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1233 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1234
1235 if (!sta)
1236 return RX_CONTINUE;
1237
1238 /*
1239 * Update last_rx only for IBSS packets which are for the current
1240 * BSSID to avoid keeping the current IBSS network alive in cases
1241 * where other STAs start using a different BSSID.
1242 */
1243 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1244 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1245 NL80211_IFTYPE_ADHOC);
1246 if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid)) {
1247 sta->last_rx = jiffies;
1248 if (ieee80211_is_data(hdr->frame_control)) {
1249 sta->last_rx_rate_idx = status->rate_idx;
1250 sta->last_rx_rate_flag = status->flag;
1251 }
1252 }
1253 } else if (!is_multicast_ether_addr(hdr->addr1)) {
1254 /*
1255 * Mesh beacons will update last_rx if they are found to
1256 * match the current local configuration when processed.
1257 */
1258 sta->last_rx = jiffies;
1259 if (ieee80211_is_data(hdr->frame_control)) {
1260 sta->last_rx_rate_idx = status->rate_idx;
1261 sta->last_rx_rate_flag = status->flag;
1262 }
1263 }
1264
1265 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
1266 return RX_CONTINUE;
1267
1268 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
1269 ieee80211_sta_rx_notify(rx->sdata, hdr);
1270
1271 sta->rx_fragments++;
1272 sta->rx_bytes += rx->skb->len;
1273 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
1274 sta->last_signal = status->signal;
1275 ewma_add(&sta->avg_signal, -status->signal);
1276 }
1277
1278 /*
1279 * Change STA power saving mode only at the end of a frame
1280 * exchange sequence.
1281 */
1282 if (!(sta->local->hw.flags & IEEE80211_HW_AP_LINK_PS) &&
1283 !ieee80211_has_morefrags(hdr->frame_control) &&
1284 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1285 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1286 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1287 if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
1288 /*
1289 * Ignore doze->wake transitions that are
1290 * indicated by non-data frames, the standard
1291 * is unclear here, but for example going to
1292 * PS mode and then scanning would cause a
1293 * doze->wake transition for the probe request,
1294 * and that is clearly undesirable.
1295 */
1296 if (ieee80211_is_data(hdr->frame_control) &&
1297 !ieee80211_has_pm(hdr->frame_control))
1298 ap_sta_ps_end(sta);
1299 } else {
1300 if (ieee80211_has_pm(hdr->frame_control))
1301 ap_sta_ps_start(sta);
1302 }
1303 }
1304
1305 /*
1306 * Drop (qos-)data::nullfunc frames silently, since they
1307 * are used only to control station power saving mode.
1308 */
1309 if (ieee80211_is_nullfunc(hdr->frame_control) ||
1310 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1311 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1312
1313 /*
1314 * If we receive a 4-addr nullfunc frame from a STA
1315 * that was not moved to a 4-addr STA VLAN yet, send
1316 * the event to userspace and, for older hostapd, drop
1317 * the frame to the monitor interface.
1318 */
1319 if (ieee80211_has_a4(hdr->frame_control) &&
1320 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1321 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1322 !rx->sdata->u.vlan.sta))) {
1323 if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
1324 cfg80211_rx_unexpected_4addr_frame(
1325 rx->sdata->dev, sta->sta.addr,
1326 GFP_ATOMIC);
1327 return RX_DROP_MONITOR;
1328 }
1329 /*
1330 * Update counter and free packet here to avoid
1331 * counting this as a dropped packed.
1332 */
1333 sta->rx_packets++;
1334 dev_kfree_skb(rx->skb);
1335 return RX_QUEUED;
1336 }
1337
1338 return RX_CONTINUE;
1339 } /* ieee80211_rx_h_sta_process */
1340
1341 static inline struct ieee80211_fragment_entry *
1342 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
1343 unsigned int frag, unsigned int seq, int rx_queue,
1344 struct sk_buff **skb)
1345 {
1346 struct ieee80211_fragment_entry *entry;
1347 int idx;
1348
1349 idx = sdata->fragment_next;
1350 entry = &sdata->fragments[sdata->fragment_next++];
1351 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
1352 sdata->fragment_next = 0;
1353
1354 if (!skb_queue_empty(&entry->skb_list))
1355 __skb_queue_purge(&entry->skb_list);
1356
1357 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
1358 *skb = NULL;
1359 entry->first_frag_time = jiffies;
1360 entry->seq = seq;
1361 entry->rx_queue = rx_queue;
1362 entry->last_frag = frag;
1363 entry->ccmp = 0;
1364 entry->extra_len = 0;
1365
1366 return entry;
1367 }
1368
1369 static inline struct ieee80211_fragment_entry *
1370 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
1371 unsigned int frag, unsigned int seq,
1372 int rx_queue, struct ieee80211_hdr *hdr)
1373 {
1374 struct ieee80211_fragment_entry *entry;
1375 int i, idx;
1376
1377 idx = sdata->fragment_next;
1378 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
1379 struct ieee80211_hdr *f_hdr;
1380
1381 idx--;
1382 if (idx < 0)
1383 idx = IEEE80211_FRAGMENT_MAX - 1;
1384
1385 entry = &sdata->fragments[idx];
1386 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
1387 entry->rx_queue != rx_queue ||
1388 entry->last_frag + 1 != frag)
1389 continue;
1390
1391 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
1392
1393 /*
1394 * Check ftype and addresses are equal, else check next fragment
1395 */
1396 if (((hdr->frame_control ^ f_hdr->frame_control) &
1397 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
1398 !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
1399 !ether_addr_equal(hdr->addr2, f_hdr->addr2))
1400 continue;
1401
1402 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
1403 __skb_queue_purge(&entry->skb_list);
1404 continue;
1405 }
1406 return entry;
1407 }
1408
1409 return NULL;
1410 }
1411
1412 static ieee80211_rx_result debug_noinline
1413 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1414 {
1415 struct ieee80211_hdr *hdr;
1416 u16 sc;
1417 __le16 fc;
1418 unsigned int frag, seq;
1419 struct ieee80211_fragment_entry *entry;
1420 struct sk_buff *skb;
1421 struct ieee80211_rx_status *status;
1422
1423 hdr = (struct ieee80211_hdr *)rx->skb->data;
1424 fc = hdr->frame_control;
1425 sc = le16_to_cpu(hdr->seq_ctrl);
1426 frag = sc & IEEE80211_SCTL_FRAG;
1427
1428 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
1429 (rx->skb)->len < 24 ||
1430 is_multicast_ether_addr(hdr->addr1))) {
1431 /* not fragmented */
1432 goto out;
1433 }
1434 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1435
1436 if (skb_linearize(rx->skb))
1437 return RX_DROP_UNUSABLE;
1438
1439 /*
1440 * skb_linearize() might change the skb->data and
1441 * previously cached variables (in this case, hdr) need to
1442 * be refreshed with the new data.
1443 */
1444 hdr = (struct ieee80211_hdr *)rx->skb->data;
1445 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
1446
1447 if (frag == 0) {
1448 /* This is the first fragment of a new frame. */
1449 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
1450 rx->seqno_idx, &(rx->skb));
1451 if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP &&
1452 ieee80211_has_protected(fc)) {
1453 int queue = rx->security_idx;
1454 /* Store CCMP PN so that we can verify that the next
1455 * fragment has a sequential PN value. */
1456 entry->ccmp = 1;
1457 memcpy(entry->last_pn,
1458 rx->key->u.ccmp.rx_pn[queue],
1459 CCMP_PN_LEN);
1460 }
1461 return RX_QUEUED;
1462 }
1463
1464 /* This is a fragment for a frame that should already be pending in
1465 * the fragment cache. Add this fragment to the end of the pending entry.
1466 */
1467 entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
1468 rx->seqno_idx, hdr);
1469 if (!entry) {
1470 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1471 return RX_DROP_MONITOR;
1472 }
1473
1474 /* Verify that MPDUs within one MSDU have sequential PN values.
1475 * (IEEE 802.11i, 8.3.3.4.5) */
1476 if (entry->ccmp) {
1477 int i;
1478 u8 pn[CCMP_PN_LEN], *rpn;
1479 int queue;
1480 if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP)
1481 return RX_DROP_UNUSABLE;
1482 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
1483 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
1484 pn[i]++;
1485 if (pn[i])
1486 break;
1487 }
1488 queue = rx->security_idx;
1489 rpn = rx->key->u.ccmp.rx_pn[queue];
1490 if (memcmp(pn, rpn, CCMP_PN_LEN))
1491 return RX_DROP_UNUSABLE;
1492 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
1493 }
1494
1495 skb_pull(rx->skb, ieee80211_hdrlen(fc));
1496 __skb_queue_tail(&entry->skb_list, rx->skb);
1497 entry->last_frag = frag;
1498 entry->extra_len += rx->skb->len;
1499 if (ieee80211_has_morefrags(fc)) {
1500 rx->skb = NULL;
1501 return RX_QUEUED;
1502 }
1503
1504 rx->skb = __skb_dequeue(&entry->skb_list);
1505 if (skb_tailroom(rx->skb) < entry->extra_len) {
1506 I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
1507 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
1508 GFP_ATOMIC))) {
1509 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1510 __skb_queue_purge(&entry->skb_list);
1511 return RX_DROP_UNUSABLE;
1512 }
1513 }
1514 while ((skb = __skb_dequeue(&entry->skb_list))) {
1515 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
1516 dev_kfree_skb(skb);
1517 }
1518
1519 /* Complete frame has been reassembled - process it now */
1520 status = IEEE80211_SKB_RXCB(rx->skb);
1521 status->rx_flags |= IEEE80211_RX_FRAGMENTED;
1522
1523 out:
1524 if (rx->sta)
1525 rx->sta->rx_packets++;
1526 if (is_multicast_ether_addr(hdr->addr1))
1527 rx->local->dot11MulticastReceivedFrameCount++;
1528 else
1529 ieee80211_led_rx(rx->local);
1530 return RX_CONTINUE;
1531 }
1532
1533 static int
1534 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1535 {
1536 if (unlikely(!rx->sta ||
1537 !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
1538 return -EACCES;
1539
1540 return 0;
1541 }
1542
1543 static int
1544 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1545 {
1546 struct sk_buff *skb = rx->skb;
1547 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1548
1549 /*
1550 * Pass through unencrypted frames if the hardware has
1551 * decrypted them already.
1552 */
1553 if (status->flag & RX_FLAG_DECRYPTED)
1554 return 0;
1555
1556 /* Drop unencrypted frames if key is set. */
1557 if (unlikely(!ieee80211_has_protected(fc) &&
1558 !ieee80211_is_nullfunc(fc) &&
1559 ieee80211_is_data(fc) &&
1560 (rx->key || rx->sdata->drop_unencrypted)))
1561 return -EACCES;
1562
1563 return 0;
1564 }
1565
1566 static int
1567 ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
1568 {
1569 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1570 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1571 __le16 fc = hdr->frame_control;
1572
1573 /*
1574 * Pass through unencrypted frames if the hardware has
1575 * decrypted them already.
1576 */
1577 if (status->flag & RX_FLAG_DECRYPTED)
1578 return 0;
1579
1580 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
1581 if (unlikely(!ieee80211_has_protected(fc) &&
1582 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1583 rx->key)) {
1584 if (ieee80211_is_deauth(fc))
1585 cfg80211_send_unprot_deauth(rx->sdata->dev,
1586 rx->skb->data,
1587 rx->skb->len);
1588 else if (ieee80211_is_disassoc(fc))
1589 cfg80211_send_unprot_disassoc(rx->sdata->dev,
1590 rx->skb->data,
1591 rx->skb->len);
1592 return -EACCES;
1593 }
1594 /* BIP does not use Protected field, so need to check MMIE */
1595 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
1596 ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
1597 if (ieee80211_is_deauth(fc))
1598 cfg80211_send_unprot_deauth(rx->sdata->dev,
1599 rx->skb->data,
1600 rx->skb->len);
1601 else if (ieee80211_is_disassoc(fc))
1602 cfg80211_send_unprot_disassoc(rx->sdata->dev,
1603 rx->skb->data,
1604 rx->skb->len);
1605 return -EACCES;
1606 }
1607 /*
1608 * When using MFP, Action frames are not allowed prior to
1609 * having configured keys.
1610 */
1611 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
1612 ieee80211_is_robust_mgmt_frame(
1613 (struct ieee80211_hdr *) rx->skb->data)))
1614 return -EACCES;
1615 }
1616
1617 return 0;
1618 }
1619
1620 static int
1621 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
1622 {
1623 struct ieee80211_sub_if_data *sdata = rx->sdata;
1624 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1625 bool check_port_control = false;
1626 struct ethhdr *ehdr;
1627 int ret;
1628
1629 *port_control = false;
1630 if (ieee80211_has_a4(hdr->frame_control) &&
1631 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
1632 return -1;
1633
1634 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
1635 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
1636
1637 if (!sdata->u.mgd.use_4addr)
1638 return -1;
1639 else
1640 check_port_control = true;
1641 }
1642
1643 if (is_multicast_ether_addr(hdr->addr1) &&
1644 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
1645 return -1;
1646
1647 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
1648 if (ret < 0)
1649 return ret;
1650
1651 ehdr = (struct ethhdr *) rx->skb->data;
1652 if (ehdr->h_proto == rx->sdata->control_port_protocol)
1653 *port_control = true;
1654 else if (check_port_control)
1655 return -1;
1656
1657 return 0;
1658 }
1659
1660 /*
1661 * requires that rx->skb is a frame with ethernet header
1662 */
1663 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1664 {
1665 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1666 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1667 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1668
1669 /*
1670 * Allow EAPOL frames to us/the PAE group address regardless
1671 * of whether the frame was encrypted or not.
1672 */
1673 if (ehdr->h_proto == rx->sdata->control_port_protocol &&
1674 (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
1675 ether_addr_equal(ehdr->h_dest, pae_group_addr)))
1676 return true;
1677
1678 if (ieee80211_802_1x_port_control(rx) ||
1679 ieee80211_drop_unencrypted(rx, fc))
1680 return false;
1681
1682 return true;
1683 }
1684
1685 /*
1686 * requires that rx->skb is a frame with ethernet header
1687 */
1688 static void
1689 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1690 {
1691 struct ieee80211_sub_if_data *sdata = rx->sdata;
1692 struct net_device *dev = sdata->dev;
1693 struct sk_buff *skb, *xmit_skb;
1694 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1695 struct sta_info *dsta;
1696 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1697
1698 skb = rx->skb;
1699 xmit_skb = NULL;
1700
1701 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
1702 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1703 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
1704 (status->rx_flags & IEEE80211_RX_RA_MATCH) &&
1705 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
1706 if (is_multicast_ether_addr(ehdr->h_dest)) {
1707 /*
1708 * send multicast frames both to higher layers in
1709 * local net stack and back to the wireless medium
1710 */
1711 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1712 if (!xmit_skb)
1713 net_info_ratelimited("%s: failed to clone multicast frame\n",
1714 dev->name);
1715 } else {
1716 dsta = sta_info_get(sdata, skb->data);
1717 if (dsta) {
1718 /*
1719 * The destination station is associated to
1720 * this AP (in this VLAN), so send the frame
1721 * directly to it and do not pass it to local
1722 * net stack.
1723 */
1724 xmit_skb = skb;
1725 skb = NULL;
1726 }
1727 }
1728 }
1729
1730 if (skb) {
1731 int align __maybe_unused;
1732
1733 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1734 /*
1735 * 'align' will only take the values 0 or 2 here
1736 * since all frames are required to be aligned
1737 * to 2-byte boundaries when being passed to
1738 * mac80211. That also explains the two-byte
1739 * realignment of skb->data done below.
1740 */
1741 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
1742 if (align) {
1743 if (WARN_ON(skb_headroom(skb) < 3)) {
1744 dev_kfree_skb(skb);
1745 skb = NULL;
1746 } else {
1747 u8 *data = skb->data;
1748 size_t len = skb_headlen(skb);
1749 skb->data -= align;
1750 memmove(skb->data, data, len);
1751 skb_set_tail_pointer(skb, len);
1752 }
1753 }
1754 #endif
1755
1756 if (skb) {
1757 /* deliver to local stack */
1758 skb->protocol = eth_type_trans(skb, dev);
1759 memset(skb->cb, 0, sizeof(skb->cb));
1760 netif_receive_skb(skb);
1761 }
1762 }
1763
1764 if (xmit_skb) {
1765 /*
1766 * Send to wireless media and increase priority by 256 to
1767 * keep the received priority instead of reclassifying
1768 * the frame (see cfg80211_classify8021d).
1769 */
1770 xmit_skb->priority += 256;
1771 xmit_skb->protocol = htons(ETH_P_802_3);
1772 skb_reset_network_header(xmit_skb);
1773 skb_reset_mac_header(xmit_skb);
1774 dev_queue_xmit(xmit_skb);
1775 }
1776 }
1777
1778 static ieee80211_rx_result debug_noinline
1779 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1780 {
1781 struct net_device *dev = rx->sdata->dev;
1782 struct sk_buff *skb = rx->skb;
1783 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1784 __le16 fc = hdr->frame_control;
1785 struct sk_buff_head frame_list;
1786 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1787
1788 if (unlikely(!ieee80211_is_data(fc)))
1789 return RX_CONTINUE;
1790
1791 if (unlikely(!ieee80211_is_data_present(fc)))
1792 return RX_DROP_MONITOR;
1793
1794 if (!(status->rx_flags & IEEE80211_RX_AMSDU))
1795 return RX_CONTINUE;
1796
1797 if (ieee80211_has_a4(hdr->frame_control) &&
1798 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1799 !rx->sdata->u.vlan.sta)
1800 return RX_DROP_UNUSABLE;
1801
1802 if (is_multicast_ether_addr(hdr->addr1) &&
1803 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1804 rx->sdata->u.vlan.sta) ||
1805 (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1806 rx->sdata->u.mgd.use_4addr)))
1807 return RX_DROP_UNUSABLE;
1808
1809 skb->dev = dev;
1810 __skb_queue_head_init(&frame_list);
1811
1812 if (skb_linearize(skb))
1813 return RX_DROP_UNUSABLE;
1814
1815 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
1816 rx->sdata->vif.type,
1817 rx->local->hw.extra_tx_headroom, true);
1818
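/*
 * frame_list now holds one 802.3 frame per A-MSDU subframe (each
 * subframe carries its own DA/SA/length header, which
 * ieee80211_amsdu_to_8023s() has converted above), so every entry
 * can be filtered and delivered like an ordinary MSDU below.
 */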
1819 while (!skb_queue_empty(&frame_list)) {
1820 rx->skb = __skb_dequeue(&frame_list);
1821
1822 if (!ieee80211_frame_allowed(rx, fc)) {
1823 dev_kfree_skb(rx->skb);
1824 continue;
1825 }
1826 dev->stats.rx_packets++;
1827 dev->stats.rx_bytes += rx->skb->len;
1828
1829 ieee80211_deliver_skb(rx);
1830 }
1831
1832 return RX_QUEUED;
1833 }
1834
1835 #ifdef CONFIG_MAC80211_MESH
1836 static ieee80211_rx_result
1837 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1838 {
1839 struct ieee80211_hdr *fwd_hdr, *hdr;
1840 struct ieee80211_tx_info *info;
1841 struct ieee80211s_hdr *mesh_hdr;
1842 struct sk_buff *skb = rx->skb, *fwd_skb;
1843 struct ieee80211_local *local = rx->local;
1844 struct ieee80211_sub_if_data *sdata = rx->sdata;
1845 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1846 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1847 __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);
1848 u16 q, hdrlen;
1849
1850 hdr = (struct ieee80211_hdr *) skb->data;
1851 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1852 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1853
1854 /* frame is in the RMC (recent multicast cache), don't forward */
1855 if (ieee80211_is_data(hdr->frame_control) &&
1856 is_multicast_ether_addr(hdr->addr1) &&
1857 mesh_rmc_check(hdr->addr3, mesh_hdr, rx->sdata))
1858 return RX_DROP_MONITOR;
1859
1860 if (!ieee80211_is_data(hdr->frame_control))
1861 return RX_CONTINUE;
1862
1863 if (!mesh_hdr->ttl)
1864 return RX_DROP_MONITOR;
1865
1866 if (mesh_hdr->flags & MESH_FLAGS_AE) {
1867 struct mesh_path *mppath;
1868 char *proxied_addr;
1869 char *mpp_addr;
1870
1871 if (is_multicast_ether_addr(hdr->addr1)) {
1872 mpp_addr = hdr->addr3;
1873 proxied_addr = mesh_hdr->eaddr1;
1874 } else {
1875 mpp_addr = hdr->addr4;
1876 proxied_addr = mesh_hdr->eaddr2;
1877 }
1878
1879 rcu_read_lock();
1880 mppath = mpp_path_lookup(proxied_addr, sdata);
1881 if (!mppath) {
1882 mpp_path_add(proxied_addr, mpp_addr, sdata);
1883 } else {
1884 spin_lock_bh(&mppath->state_lock);
1885 if (!ether_addr_equal(mppath->mpp, mpp_addr))
1886 memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
1887 spin_unlock_bh(&mppath->state_lock);
1888 }
1889 rcu_read_unlock();
1890 }
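/*
 * The MPP entry updated above records that frames for proxied_addr
 * (a node outside the mesh) are reachable via mpp_addr, the mesh STA
 * proxying for it; the mesh TX path is meant to consult this mapping
 * when resolving external destinations.
 */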
1891
1892 /* Frame has reached destination. Don't forward */
1893 if (!is_multicast_ether_addr(hdr->addr1) &&
1894 ether_addr_equal(sdata->vif.addr, hdr->addr3))
1895 return RX_CONTINUE;
1896
1897 q = ieee80211_select_queue_80211(sdata, skb, hdr);
1898 if (ieee80211_queue_stopped(&local->hw, q)) {
1899 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
1900 return RX_DROP_MONITOR;
1901 }
1902 skb_set_queue_mapping(skb, q);
1903
1904 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
1905 goto out;
1906
1907 if (!--mesh_hdr->ttl) {
1908 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
1909 return RX_DROP_MONITOR;
1910 }
1911
1912 if (!ifmsh->mshcfg.dot11MeshForwarding)
1913 goto out;
1914
1915 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1916 if (!fwd_skb) {
1917 net_info_ratelimited("%s: failed to clone mesh frame\n",
1918 sdata->name);
1919 goto out;
1920 }
1921
1922 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1923 info = IEEE80211_SKB_CB(fwd_skb);
1924 memset(info, 0, sizeof(*info));
1925 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1926 info->control.vif = &rx->sdata->vif;
1927 info->control.jiffies = jiffies;
1928 if (is_multicast_ether_addr(fwd_hdr->addr1)) {
1929 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
1930 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
1931 } else if (!mesh_nexthop_lookup(fwd_skb, sdata)) {
1932 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
1933 } else {
1934 /* unable to resolve next hop */
1935 mesh_path_error_tx(ifmsh->mshcfg.element_ttl, fwd_hdr->addr3,
1936 0, reason, fwd_hdr->addr2, sdata);
1937 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
1938 kfree_skb(fwd_skb);
1939 return RX_DROP_MONITOR;
1940 }
1941
1942 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
1943 ieee80211_add_pending_skb(local, fwd_skb);
1944 out:
1945 if (is_multicast_ether_addr(hdr->addr1) ||
1946 sdata->dev->flags & IFF_PROMISC)
1947 return RX_CONTINUE;
1948 else
1949 return RX_DROP_MONITOR;
1950 }
1951 #endif
1952
1953 static ieee80211_rx_result debug_noinline
1954 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1955 {
1956 struct ieee80211_sub_if_data *sdata = rx->sdata;
1957 struct ieee80211_local *local = rx->local;
1958 struct net_device *dev = sdata->dev;
1959 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1960 __le16 fc = hdr->frame_control;
1961 bool port_control;
1962 int err;
1963
1964 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
1965 return RX_CONTINUE;
1966
1967 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1968 return RX_DROP_MONITOR;
1969
1970 /*
1971 * Send unexpected-4addr-frame event to hostapd. For older versions,
1972 * also drop the frame to cooked monitor interfaces.
1973 */
1974 if (ieee80211_has_a4(hdr->frame_control) &&
1975 sdata->vif.type == NL80211_IFTYPE_AP) {
1976 if (rx->sta &&
1977 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
1978 cfg80211_rx_unexpected_4addr_frame(
1979 rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
1980 return RX_DROP_MONITOR;
1981 }
1982
1983 err = __ieee80211_data_to_8023(rx, &port_control);
1984 if (unlikely(err))
1985 return RX_DROP_UNUSABLE;
1986
1987 if (!ieee80211_frame_allowed(rx, fc))
1988 return RX_DROP_MONITOR;
1989
1990 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1991 unlikely(port_control) && sdata->bss) {
1992 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
1993 u.ap);
1994 dev = sdata->dev;
1995 rx->sdata = sdata;
1996 }
1997
1998 rx->skb->dev = dev;
1999
2000 dev->stats.rx_packets++;
2001 dev->stats.rx_bytes += rx->skb->len;
2002
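/*
 * When dynamic powersave is in use, received unicast data (while
 * neither scanning nor off-channel) counts as activity, so push the
 * dynamic PS timeout further into the future.
 */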
2003 if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
2004 !is_multicast_ether_addr(
2005 ((struct ethhdr *)rx->skb->data)->h_dest) &&
2006 (!local->scanning &&
2007 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) {
2008 mod_timer(&local->dynamic_ps_timer, jiffies +
2009 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
2010 }
2011
2012 ieee80211_deliver_skb(rx);
2013
2014 return RX_QUEUED;
2015 }
2016
2017 static ieee80211_rx_result debug_noinline
2018 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
2019 {
2020 struct sk_buff *skb = rx->skb;
2021 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
2022 struct tid_ampdu_rx *tid_agg_rx;
2023 u16 start_seq_num;
2024 u16 tid;
2025
2026 if (likely(!ieee80211_is_ctl(bar->frame_control)))
2027 return RX_CONTINUE;
2028
2029 if (ieee80211_is_back_req(bar->frame_control)) {
2030 struct {
2031 __le16 control, start_seq_num;
2032 } __packed bar_data;
2033
2034 if (!rx->sta)
2035 return RX_DROP_MONITOR;
2036
2037 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
2038 &bar_data, sizeof(bar_data)))
2039 return RX_DROP_MONITOR;
2040
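/*
 * The BAR Control field carries the TID in its top four bits, and
 * the Starting Sequence Control field keeps the fragment number in
 * its low four bits, hence the shifts below.
 */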
2041 tid = le16_to_cpu(bar_data.control) >> 12;
2042
2043 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
2044 if (!tid_agg_rx)
2045 return RX_DROP_MONITOR;
2046
2047 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
2048
2049 /* reset session timer */
2050 if (tid_agg_rx->timeout)
2051 mod_timer(&tid_agg_rx->session_timer,
2052 TU_TO_EXP_TIME(tid_agg_rx->timeout));
2053
2054 spin_lock(&tid_agg_rx->reorder_lock);
2055 /* release stored frames up to start of BAR */
2056 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
2057 start_seq_num);
2058 spin_unlock(&tid_agg_rx->reorder_lock);
2059
2060 kfree_skb(skb);
2061 return RX_QUEUED;
2062 }
2063
2064 /*
2065 * After this point, we only want management frames,
2066 * so we can drop all remaining control frames to
2067 * cooked monitor interfaces.
2068 */
2069 return RX_DROP_MONITOR;
2070 }
2071
2072 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
2073 struct ieee80211_mgmt *mgmt,
2074 size_t len)
2075 {
2076 struct ieee80211_local *local = sdata->local;
2077 struct sk_buff *skb;
2078 struct ieee80211_mgmt *resp;
2079
2080 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) {
2081 /* Not addressed to our own unicast address */
2082 return;
2083 }
2084
2085 if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) ||
2086 !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) {
2087 /* Not from the current AP or not associated yet. */
2088 return;
2089 }
2090
2091 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
2092 /* SA Query request frame too short */
2093 return;
2094 }
2095
2096 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
2097 if (skb == NULL)
2098 return;
2099
2100 skb_reserve(skb, local->hw.extra_tx_headroom);
2101 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
2102 memset(resp, 0, 24);
2103 memcpy(resp->da, mgmt->sa, ETH_ALEN);
2104 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
2105 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
2106 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2107 IEEE80211_STYPE_ACTION);
2108 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
2109 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
2110 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
2111 memcpy(resp->u.action.u.sa_query.trans_id,
2112 mgmt->u.action.u.sa_query.trans_id,
2113 WLAN_SA_QUERY_TR_ID_LEN);
2114
2115 ieee80211_tx_skb(sdata, skb);
2116 }
2117
2118 static ieee80211_rx_result debug_noinline
2119 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
2120 {
2121 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2122 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2123
2124 /*
2125 * From here on, look only at management frames.
2126 * Data and control frames are already handled,
2127 * and unknown (reserved) frames are useless.
2128 */
2129 if (rx->skb->len < 24)
2130 return RX_DROP_MONITOR;
2131
2132 if (!ieee80211_is_mgmt(mgmt->frame_control))
2133 return RX_DROP_MONITOR;
2134
2135 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
2136 ieee80211_is_beacon(mgmt->frame_control) &&
2137 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
2138 int sig = 0;
2139
2140 if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
2141 sig = status->signal;
2142
2143 cfg80211_report_obss_beacon(rx->local->hw.wiphy,
2144 rx->skb->data, rx->skb->len,
2145 status->freq, sig, GFP_ATOMIC);
2146 rx->flags |= IEEE80211_RX_BEACON_REPORTED;
2147 }
2148
2149 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
2150 return RX_DROP_MONITOR;
2151
2152 if (ieee80211_drop_unencrypted_mgmt(rx))
2153 return RX_DROP_UNUSABLE;
2154
2155 return RX_CONTINUE;
2156 }
2157
2158 static ieee80211_rx_result debug_noinline
2159 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2160 {
2161 struct ieee80211_local *local = rx->local;
2162 struct ieee80211_sub_if_data *sdata = rx->sdata;
2163 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2164 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2165 int len = rx->skb->len;
2166
2167 if (!ieee80211_is_action(mgmt->frame_control))
2168 return RX_CONTINUE;
2169
2170 /* drop too small frames */
2171 if (len < IEEE80211_MIN_ACTION_SIZE)
2172 return RX_DROP_UNUSABLE;
2173
2174 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
2175 return RX_DROP_UNUSABLE;
2176
2177 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
2178 return RX_DROP_UNUSABLE;
2179
2180 switch (mgmt->u.action.category) {
2181 case WLAN_CATEGORY_HT:
2182 /* reject HT action frames from stations not supporting HT */
2183 if (!rx->sta->sta.ht_cap.ht_supported)
2184 goto invalid;
2185
2186 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
2187 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
2188 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
2189 sdata->vif.type != NL80211_IFTYPE_AP &&
2190 sdata->vif.type != NL80211_IFTYPE_ADHOC)
2191 break;
2192
2193 /* verify action & smps_control are present */
2194 if (len < IEEE80211_MIN_ACTION_SIZE + 2)
2195 goto invalid;
2196
2197 switch (mgmt->u.action.u.ht_smps.action) {
2198 case WLAN_HT_ACTION_SMPS: {
2199 struct ieee80211_supported_band *sband;
2200 u8 smps;
2201
2202 /* convert to HT capability */
2203 switch (mgmt->u.action.u.ht_smps.smps_control) {
2204 case WLAN_HT_SMPS_CONTROL_DISABLED:
2205 smps = WLAN_HT_CAP_SM_PS_DISABLED;
2206 break;
2207 case WLAN_HT_SMPS_CONTROL_STATIC:
2208 smps = WLAN_HT_CAP_SM_PS_STATIC;
2209 break;
2210 case WLAN_HT_SMPS_CONTROL_DYNAMIC:
2211 smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
2212 break;
2213 default:
2214 goto invalid;
2215 }
2216 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
2217
2218 /* if no change do nothing */
2219 if ((rx->sta->sta.ht_cap.cap &
2220 IEEE80211_HT_CAP_SM_PS) == smps)
2221 goto handled;
2222
2223 rx->sta->sta.ht_cap.cap &= ~IEEE80211_HT_CAP_SM_PS;
2224 rx->sta->sta.ht_cap.cap |= smps;
2225
2226 sband = rx->local->hw.wiphy->bands[status->band];
2227
2228 rate_control_rate_update(local, sband, rx->sta,
2229 IEEE80211_RC_SMPS_CHANGED);
2230 goto handled;
2231 }
2232 default:
2233 goto invalid;
2234 }
2235
2236 break;
2237 case WLAN_CATEGORY_BACK:
2238 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
2239 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
2240 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
2241 sdata->vif.type != NL80211_IFTYPE_AP &&
2242 sdata->vif.type != NL80211_IFTYPE_ADHOC)
2243 break;
2244
2245 /* verify action_code is present */
2246 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2247 break;
2248
2249 switch (mgmt->u.action.u.addba_req.action_code) {
2250 case WLAN_ACTION_ADDBA_REQ:
2251 if (len < (IEEE80211_MIN_ACTION_SIZE +
2252 sizeof(mgmt->u.action.u.addba_req)))
2253 goto invalid;
2254 break;
2255 case WLAN_ACTION_ADDBA_RESP:
2256 if (len < (IEEE80211_MIN_ACTION_SIZE +
2257 sizeof(mgmt->u.action.u.addba_resp)))
2258 goto invalid;
2259 break;
2260 case WLAN_ACTION_DELBA:
2261 if (len < (IEEE80211_MIN_ACTION_SIZE +
2262 sizeof(mgmt->u.action.u.delba)))
2263 goto invalid;
2264 break;
2265 default:
2266 goto invalid;
2267 }
2268
2269 goto queue;
2270 case WLAN_CATEGORY_SPECTRUM_MGMT:
2271 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
2272 break;
2273
2274 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2275 break;
2276
2277 /* verify action_code is present */
2278 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2279 break;
2280
2281 switch (mgmt->u.action.u.measurement.action_code) {
2282 case WLAN_ACTION_SPCT_MSR_REQ:
2283 if (len < (IEEE80211_MIN_ACTION_SIZE +
2284 sizeof(mgmt->u.action.u.measurement)))
2285 break;
2286 ieee80211_process_measurement_req(sdata, mgmt, len);
2287 goto handled;
2288 case WLAN_ACTION_SPCT_CHL_SWITCH:
2289 if (len < (IEEE80211_MIN_ACTION_SIZE +
2290 sizeof(mgmt->u.action.u.chan_switch)))
2291 break;
2292
2293 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2294 break;
2295
2296 if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid))
2297 break;
2298
2299 goto queue;
2300 }
2301 break;
2302 case WLAN_CATEGORY_SA_QUERY:
2303 if (len < (IEEE80211_MIN_ACTION_SIZE +
2304 sizeof(mgmt->u.action.u.sa_query)))
2305 break;
2306
2307 switch (mgmt->u.action.u.sa_query.action) {
2308 case WLAN_ACTION_SA_QUERY_REQUEST:
2309 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2310 break;
2311 ieee80211_process_sa_query_req(sdata, mgmt, len);
2312 goto handled;
2313 }
2314 break;
2315 case WLAN_CATEGORY_SELF_PROTECTED:
2316 switch (mgmt->u.action.u.self_prot.action_code) {
2317 case WLAN_SP_MESH_PEERING_OPEN:
2318 case WLAN_SP_MESH_PEERING_CLOSE:
2319 case WLAN_SP_MESH_PEERING_CONFIRM:
2320 if (!ieee80211_vif_is_mesh(&sdata->vif))
2321 goto invalid;
2322 if (sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)
2323 /* userspace handles this frame */
2324 break;
2325 goto queue;
2326 case WLAN_SP_MGK_INFORM:
2327 case WLAN_SP_MGK_ACK:
2328 if (!ieee80211_vif_is_mesh(&sdata->vif))
2329 goto invalid;
2330 break;
2331 }
2332 break;
2333 case WLAN_CATEGORY_MESH_ACTION:
2334 if (!ieee80211_vif_is_mesh(&sdata->vif))
2335 break;
2336 if (mesh_action_is_path_sel(mgmt) &&
2337 (!mesh_path_sel_is_hwmp(sdata)))
2338 break;
2339 goto queue;
2340 }
2341
2342 return RX_CONTINUE;
2343
2344 invalid:
2345 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
2346 /* will return in the next handlers */
2347 return RX_CONTINUE;
2348
2349 handled:
2350 if (rx->sta)
2351 rx->sta->rx_packets++;
2352 dev_kfree_skb(rx->skb);
2353 return RX_QUEUED;
2354
2355 queue:
2356 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2357 skb_queue_tail(&sdata->skb_queue, rx->skb);
2358 ieee80211_queue_work(&local->hw, &sdata->work);
2359 if (rx->sta)
2360 rx->sta->rx_packets++;
2361 return RX_QUEUED;
2362 }
2363
2364 static ieee80211_rx_result debug_noinline
2365 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
2366 {
2367 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2368 int sig = 0;
2369
2370 /* skip known-bad action frames and return them in the next handler */
2371 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
2372 return RX_CONTINUE;
2373
2374 /*
2375 * Getting here means the kernel doesn't know how to handle
2376 * it, but maybe userspace does ... include returned frames
2377 * so userspace can register for those to know whether ones
2378 * it transmitted were processed or returned.
2379 */
2380
2381 if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
2382 sig = status->signal;
2383
2384 if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig,
2385 rx->skb->data, rx->skb->len,
2386 GFP_ATOMIC)) {
2387 if (rx->sta)
2388 rx->sta->rx_packets++;
2389 dev_kfree_skb(rx->skb);
2390 return RX_QUEUED;
2391 }
2392
2393
2394 return RX_CONTINUE;
2395 }
2396
2397 static ieee80211_rx_result debug_noinline
2398 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
2399 {
2400 struct ieee80211_local *local = rx->local;
2401 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2402 struct sk_buff *nskb;
2403 struct ieee80211_sub_if_data *sdata = rx->sdata;
2404 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2405
2406 if (!ieee80211_is_action(mgmt->frame_control))
2407 return RX_CONTINUE;
2408
2409 /*
2410 * For AP mode, hostapd is responsible for handling any action
2411 * frames that we didn't handle, including returning unknown
2412 * ones. For all other modes we will return them to the sender,
2413 * setting the 0x80 bit in the action category, as required by
2414 * 802.11-2012 9.24.4.
2415 * Newer versions of hostapd should also use the management frame
2416 * registration mechanisms, but older ones still use cooked
2417 * monitor interfaces so push all frames there.
2418 */
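/*
 * In other words, an unhandled action frame of category N is bounced
 * back with category N | 0x80, the DA set to the original transmitter
 * and the SA set to our own address, which is what the code below
 * does.
 */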
2419 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
2420 (sdata->vif.type == NL80211_IFTYPE_AP ||
2421 sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
2422 return RX_DROP_MONITOR;
2423
2424 if (is_multicast_ether_addr(mgmt->da))
2425 return RX_DROP_MONITOR;
2426
2427 /* do not return rejected action frames */
2428 if (mgmt->u.action.category & 0x80)
2429 return RX_DROP_UNUSABLE;
2430
2431 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
2432 GFP_ATOMIC);
2433 if (nskb) {
2434 struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
2435
2436 nmgmt->u.action.category |= 0x80;
2437 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
2438 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
2439
2440 memset(nskb->cb, 0, sizeof(nskb->cb));
2441
2442 ieee80211_tx_skb(rx->sdata, nskb);
2443 }
2444 dev_kfree_skb(rx->skb);
2445 return RX_QUEUED;
2446 }
2447
2448 static ieee80211_rx_result debug_noinline
2449 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2450 {
2451 struct ieee80211_sub_if_data *sdata = rx->sdata;
2452 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
2453 __le16 stype;
2454
2455 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
2456
2457 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
2458 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
2459 sdata->vif.type != NL80211_IFTYPE_STATION)
2460 return RX_DROP_MONITOR;
2461
2462 switch (stype) {
2463 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2464 case cpu_to_le16(IEEE80211_STYPE_BEACON):
2465 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
2466 /* process for all: mesh, mlme, ibss */
2467 break;
2468 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
2469 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
2470 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2471 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
2472 if (is_multicast_ether_addr(mgmt->da) &&
2473 !is_broadcast_ether_addr(mgmt->da))
2474 return RX_DROP_MONITOR;
2475
2476 /* process only for station */
2477 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2478 return RX_DROP_MONITOR;
2479 break;
2480 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
2481 /* process only for ibss */
2482 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
2483 return RX_DROP_MONITOR;
2484 break;
2485 default:
2486 return RX_DROP_MONITOR;
2487 }
2488
2489 /* queue up frame and kick off work to process it */
2490 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2491 skb_queue_tail(&sdata->skb_queue, rx->skb);
2492 ieee80211_queue_work(&rx->local->hw, &sdata->work);
2493 if (rx->sta)
2494 rx->sta->rx_packets++;
2495
2496 return RX_QUEUED;
2497 }
2498
2499 /* TODO: use IEEE80211_RX_FRAGMENTED */
2500 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2501 struct ieee80211_rate *rate)
2502 {
2503 struct ieee80211_sub_if_data *sdata;
2504 struct ieee80211_local *local = rx->local;
2505 struct sk_buff *skb = rx->skb, *skb2;
2506 struct net_device *prev_dev = NULL;
2507 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2508 int needed_headroom;
2509
2510 /*
2511 * If cooked monitor has been processed already, then
2512 * don't do it again. If not, set the flag.
2513 */
2514 if (rx->flags & IEEE80211_RX_CMNTR)
2515 goto out_free_skb;
2516 rx->flags |= IEEE80211_RX_CMNTR;
2517
2518 /* If there are no cooked monitor interfaces, just free the SKB */
2519 if (!local->cooked_mntrs)
2520 goto out_free_skb;
2521
2522 /* room for the radiotap header based on driver features */
2523 needed_headroom = ieee80211_rx_radiotap_len(local, status);
2524
2525 if (skb_headroom(skb) < needed_headroom &&
2526 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC))
2527 goto out_free_skb;
2528
2529 /* prepend radiotap information */
2530 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
2531 false);
2532
2533 skb_set_mac_header(skb, 0);
2534 skb->ip_summed = CHECKSUM_UNNECESSARY;
2535 skb->pkt_type = PACKET_OTHERHOST;
2536 skb->protocol = htons(ETH_P_802_2);
2537
2538 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2539 if (!ieee80211_sdata_running(sdata))
2540 continue;
2541
2542 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
2543 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
2544 continue;
2545
2546 if (prev_dev) {
2547 skb2 = skb_clone(skb, GFP_ATOMIC);
2548 if (skb2) {
2549 skb2->dev = prev_dev;
2550 netif_receive_skb(skb2);
2551 }
2552 }
2553
2554 prev_dev = sdata->dev;
2555 sdata->dev->stats.rx_packets++;
2556 sdata->dev->stats.rx_bytes += skb->len;
2557 }
2558
2559 if (prev_dev) {
2560 skb->dev = prev_dev;
2561 netif_receive_skb(skb);
2562 return;
2563 }
2564
2565 out_free_skb:
2566 dev_kfree_skb(skb);
2567 }
2568
2569 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
2570 ieee80211_rx_result res)
2571 {
2572 switch (res) {
2573 case RX_DROP_MONITOR:
2574 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2575 if (rx->sta)
2576 rx->sta->rx_dropped++;
2577 /* fall through */
2578 case RX_CONTINUE: {
2579 struct ieee80211_rate *rate = NULL;
2580 struct ieee80211_supported_band *sband;
2581 struct ieee80211_rx_status *status;
2582
2583 status = IEEE80211_SKB_RXCB((rx->skb));
2584
2585 sband = rx->local->hw.wiphy->bands[status->band];
2586 if (!(status->flag & RX_FLAG_HT))
2587 rate = &sband->bitrates[status->rate_idx];
2588
2589 ieee80211_rx_cooked_monitor(rx, rate);
2590 break;
2591 }
2592 case RX_DROP_UNUSABLE:
2593 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2594 if (rx->sta)
2595 rx->sta->rx_dropped++;
2596 dev_kfree_skb(rx->skb);
2597 break;
2598 case RX_QUEUED:
2599 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
2600 break;
2601 }
2602 }
2603
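/*
 * Run the RX handler chain over all frames queued on
 * local->rx_skb_queue. The queue lock together with the
 * running_rx_handler flag serializes this: if another CPU is already
 * walking the chain, new frames are simply left on the queue for it
 * to pick up, so the handlers never run concurrently.
 */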
2604 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
2605 {
2606 ieee80211_rx_result res = RX_DROP_MONITOR;
2607 struct sk_buff *skb;
2608
2609 #define CALL_RXH(rxh) \
2610 do { \
2611 res = rxh(rx); \
2612 if (res != RX_CONTINUE) \
2613 goto rxh_next; \
2614 } while (0);
2615
2616 spin_lock(&rx->local->rx_skb_queue.lock);
2617 if (rx->local->running_rx_handler)
2618 goto unlock;
2619
2620 rx->local->running_rx_handler = true;
2621
2622 while ((skb = __skb_dequeue(&rx->local->rx_skb_queue))) {
2623 spin_unlock(&rx->local->rx_skb_queue.lock);
2624
2625 /*
2626 * all the other fields are valid across frames
2627 * that belong to an aMPDU since they are on the
2628 * same TID from the same station
2629 */
2630 rx->skb = skb;
2631
2632 CALL_RXH(ieee80211_rx_h_decrypt)
2633 CALL_RXH(ieee80211_rx_h_check_more_data)
2634 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll)
2635 CALL_RXH(ieee80211_rx_h_sta_process)
2636 CALL_RXH(ieee80211_rx_h_defragment)
2637 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
2638 /* must be after MMIC verify so header is counted in MPDU mic */
2639 #ifdef CONFIG_MAC80211_MESH
2640 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
2641 CALL_RXH(ieee80211_rx_h_mesh_fwding);
2642 #endif
2643 CALL_RXH(ieee80211_rx_h_amsdu)
2644 CALL_RXH(ieee80211_rx_h_data)
2645 CALL_RXH(ieee80211_rx_h_ctrl);
2646 CALL_RXH(ieee80211_rx_h_mgmt_check)
2647 CALL_RXH(ieee80211_rx_h_action)
2648 CALL_RXH(ieee80211_rx_h_userspace_mgmt)
2649 CALL_RXH(ieee80211_rx_h_action_return)
2650 CALL_RXH(ieee80211_rx_h_mgmt)
2651
2652 rxh_next:
2653 ieee80211_rx_handlers_result(rx, res);
2654 spin_lock(&rx->local->rx_skb_queue.lock);
2655 #undef CALL_RXH
2656 }
2657
2658 rx->local->running_rx_handler = false;
2659
2660 unlock:
2661 spin_unlock(&rx->local->rx_skb_queue.lock);
2662 }
2663
2664 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
2665 {
2666 ieee80211_rx_result res = RX_DROP_MONITOR;
2667
2668 #define CALL_RXH(rxh) \
2669 do { \
2670 res = rxh(rx); \
2671 if (res != RX_CONTINUE) \
2672 goto rxh_next; \
2673 } while (0);
2674
2675 CALL_RXH(ieee80211_rx_h_check)
2676
2677 ieee80211_rx_reorder_ampdu(rx);
2678
2679 ieee80211_rx_handlers(rx);
2680 return;
2681
2682 rxh_next:
2683 ieee80211_rx_handlers_result(rx, res);
2684
2685 #undef CALL_RXH
2686 }
2687
2688 /*
2689 * This function makes calls into the RX path, therefore
2690 * it has to be invoked under RCU read lock.
2691 */
2692 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
2693 {
2694 struct ieee80211_rx_data rx = {
2695 .sta = sta,
2696 .sdata = sta->sdata,
2697 .local = sta->local,
2698 /* This is OK -- must be QoS data frame */
2699 .security_idx = tid,
2700 .seqno_idx = tid,
2701 .flags = 0,
2702 };
2703 struct tid_ampdu_rx *tid_agg_rx;
2704
2705 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
2706 if (!tid_agg_rx)
2707 return;
2708
2709 spin_lock(&tid_agg_rx->reorder_lock);
2710 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx);
2711 spin_unlock(&tid_agg_rx->reorder_lock);
2712
2713 ieee80211_rx_handlers(&rx);
2714 }
2715
2716 /* main receive path */
2717
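/*
 * Decide whether the given interface should see this frame at all.
 * Returns 0 if the frame is of no interest to it; returns 1 if it
 * should be processed, possibly with IEEE80211_RX_RA_MATCH cleared
 * in the RX status to indicate that the frame was accepted only for
 * passive purposes (promiscuous mode, beacons, ...) and must not be
 * acted upon as if it had been addressed to us.
 */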
2718 static int prepare_for_handlers(struct ieee80211_rx_data *rx,
2719 struct ieee80211_hdr *hdr)
2720 {
2721 struct ieee80211_sub_if_data *sdata = rx->sdata;
2722 struct sk_buff *skb = rx->skb;
2723 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2724 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
2725 int multicast = is_multicast_ether_addr(hdr->addr1);
2726
2727 switch (sdata->vif.type) {
2728 case NL80211_IFTYPE_STATION:
2729 if (!bssid && !sdata->u.mgd.use_4addr)
2730 return 0;
2731 if (!multicast &&
2732 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
2733 if (!(sdata->dev->flags & IFF_PROMISC) ||
2734 sdata->u.mgd.use_4addr)
2735 return 0;
2736 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2737 }
2738 break;
2739 case NL80211_IFTYPE_ADHOC:
2740 if (!bssid)
2741 return 0;
2742 if (ieee80211_is_beacon(hdr->frame_control)) {
2743 return 1;
2744 } else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
2745 return 0;
2746 } else if (!multicast &&
2747 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
2748 if (!(sdata->dev->flags & IFF_PROMISC))
2749 return 0;
2750 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2751 } else if (!rx->sta) {
2752 int rate_idx;
2753 if (status->flag & RX_FLAG_HT)
2754 rate_idx = 0; /* TODO: HT rates */
2755 else
2756 rate_idx = status->rate_idx;
2757 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
2758 BIT(rate_idx));
2759 }
2760 break;
2761 case NL80211_IFTYPE_MESH_POINT:
2762 if (!multicast &&
2763 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
2764 if (!(sdata->dev->flags & IFF_PROMISC))
2765 return 0;
2766
2767 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2768 }
2769 break;
2770 case NL80211_IFTYPE_AP_VLAN:
2771 case NL80211_IFTYPE_AP:
2772 if (!bssid) {
2773 if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
2774 return 0;
2775 } else if (!ieee80211_bssid_match(bssid,
2776 sdata->vif.addr)) {
2777 /*
2778 * Accept public action frames even when the
2779 * BSSID doesn't match, this is used for P2P
2780 * and location updates. Note that mac80211
2781 * itself never looks at these frames.
2782 */
2783 if (ieee80211_is_public_action(hdr, skb->len))
2784 return 1;
2785 if (!ieee80211_is_beacon(hdr->frame_control))
2786 return 0;
2787 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2788 }
2789 break;
2790 case NL80211_IFTYPE_WDS:
2791 if (bssid || !ieee80211_is_data(hdr->frame_control))
2792 return 0;
2793 if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2))
2794 return 0;
2795 break;
2796 default:
2797 /* should never get here */
2798 WARN_ON(1);
2799 break;
2800 }
2801
2802 return 1;
2803 }
2804
2805 /*
2806 * This function returns whether or not the SKB was
2807 * destined for RX processing, which, if consume is
2808 * true, is equivalent to whether or not the skb was
2809 * consumed.
2810 */
2811 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
2812 struct sk_buff *skb, bool consume)
2813 {
2814 struct ieee80211_local *local = rx->local;
2815 struct ieee80211_sub_if_data *sdata = rx->sdata;
2816 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2817 struct ieee80211_hdr *hdr = (void *)skb->data;
2818 int prepares;
2819
2820 rx->skb = skb;
2821 status->rx_flags |= IEEE80211_RX_RA_MATCH;
2822 prepares = prepare_for_handlers(rx, hdr);
2823
2824 if (!prepares)
2825 return false;
2826
2827 if (!consume) {
2828 skb = skb_copy(skb, GFP_ATOMIC);
2829 if (!skb) {
2830 if (net_ratelimit())
2831 wiphy_debug(local->hw.wiphy,
2832 "failed to copy skb for %s\n",
2833 sdata->name);
2834 return true;
2835 }
2836
2837 rx->skb = skb;
2838 }
2839
2840 ieee80211_invoke_rx_handlers(rx);
2841 return true;
2842 }
2843
2844 /*
2845 * This is the actual Rx frame handler. As it belongs to the Rx path it must
2846 * be called with rcu_read_lock protection.
2847 */
2848 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2849 struct sk_buff *skb)
2850 {
2851 struct ieee80211_local *local = hw_to_local(hw);
2852 struct ieee80211_sub_if_data *sdata;
2853 struct ieee80211_hdr *hdr;
2854 __le16 fc;
2855 struct ieee80211_rx_data rx;
2856 struct ieee80211_sub_if_data *prev;
2857 struct sta_info *sta, *tmp, *prev_sta;
2858 int err = 0;
2859
2860 fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
2861 memset(&rx, 0, sizeof(rx));
2862 rx.skb = skb;
2863 rx.local = local;
2864
2865 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
2866 local->dot11ReceivedFragmentCount++;
2867
2868 if (ieee80211_is_mgmt(fc))
2869 err = skb_linearize(skb);
2870 else
2871 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
2872
2873 if (err) {
2874 dev_kfree_skb(skb);
2875 return;
2876 }
2877
2878 hdr = (struct ieee80211_hdr *)skb->data;
2879 ieee80211_parse_qos(&rx);
2880 ieee80211_verify_alignment(&rx);
2881
2882 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
2883 ieee80211_is_beacon(hdr->frame_control)))
2884 ieee80211_scan_rx(local, skb);
2885
2886 if (ieee80211_is_data(fc)) {
2887 prev_sta = NULL;
2888
2889 for_each_sta_info(local, hdr->addr2, sta, tmp) {
2890 if (!prev_sta) {
2891 prev_sta = sta;
2892 continue;
2893 }
2894
2895 rx.sta = prev_sta;
2896 rx.sdata = prev_sta->sdata;
2897 ieee80211_prepare_and_rx_handle(&rx, skb, false);
2898
2899 prev_sta = sta;
2900 }
2901
2902 if (prev_sta) {
2903 rx.sta = prev_sta;
2904 rx.sdata = prev_sta->sdata;
2905
2906 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2907 return;
2908 goto out;
2909 }
2910 }
2911
2912 prev = NULL;
2913
2914 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2915 if (!ieee80211_sdata_running(sdata))
2916 continue;
2917
2918 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
2919 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
2920 continue;
2921
2922 /*
2923 * frame is destined for this interface, but if it's
2924 * not also for the previous one we handle that after
2925 * the loop to avoid copying the SKB once too much
2926 */
2927
2928 if (!prev) {
2929 prev = sdata;
2930 continue;
2931 }
2932
2933 rx.sta = sta_info_get_bss(prev, hdr->addr2);
2934 rx.sdata = prev;
2935 ieee80211_prepare_and_rx_handle(&rx, skb, false);
2936
2937 prev = sdata;
2938 }
2939
2940 if (prev) {
2941 rx.sta = sta_info_get_bss(prev, hdr->addr2);
2942 rx.sdata = prev;
2943
2944 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2945 return;
2946 }
2947
2948 out:
2949 dev_kfree_skb(skb);
2950 }
2951
2952 /*
2953 * This is the receive path handler. It is called by a low level driver when an
2954 * 802.11 MPDU is received from the hardware.
2955 */
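/*
 * A rough sketch of what a driver is expected to have done before
 * calling in here (the field values are made up for illustration; the
 * RX status lives in skb->cb and must describe the received frame):
 *
 *	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 *
 *	memset(status, 0, sizeof(*status));
 *	status->band = IEEE80211_BAND_2GHZ;
 *	status->freq = 2437;		-- centre frequency in MHz
 *	status->signal = -42;		-- dBm, with IEEE80211_HW_SIGNAL_DBM
 *	status->rate_idx = 3;		-- index into the band's bitrate table
 *	ieee80211_rx(hw, skb);
 *
 * Drivers that receive frames in hardirq context should use
 * ieee80211_rx_irqsafe() below instead, which defers the processing
 * to the RX tasklet.
 */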
2956 void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
2957 {
2958 struct ieee80211_local *local = hw_to_local(hw);
2959 struct ieee80211_rate *rate = NULL;
2960 struct ieee80211_supported_band *sband;
2961 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2962
2963 WARN_ON_ONCE(softirq_count() == 0);
2964
2965 if (WARN_ON(status->band < 0 ||
2966 status->band >= IEEE80211_NUM_BANDS))
2967 goto drop;
2968
2969 sband = local->hw.wiphy->bands[status->band];
2970 if (WARN_ON(!sband))
2971 goto drop;
2972
2973 /*
2974 * If we're suspending, it is possible although not too likely
2975 * that we'd be receiving frames after having already partially
2976 * quiesced the stack. We can't process such frames then since
2977 * that might, for example, cause stations to be added or other
2978 * driver callbacks be invoked.
2979 */
2980 if (unlikely(local->quiescing || local->suspended))
2981 goto drop;
2982
2983 /* We might be in the middle of a HW reconfig; prevent Rx for the same reason */
2984 if (unlikely(local->in_reconfig))
2985 goto drop;
2986
2987 /*
2988 * The same happens when we're not even started,
2989 * but that's worth a warning.
2990 */
2991 if (WARN_ON(!local->started))
2992 goto drop;
2993
2994 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
2995 /*
2996 * Validate the rate, unless a PLCP error means that
2997 * we probably can't have a valid rate here anyway.
2998 */
2999
3000 if (status->flag & RX_FLAG_HT) {
3001 /*
3002 * rate_idx is MCS index, which can be [0-76]
3003 * as documented on:
3004 *
3005 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
3006 *
3007 * Anything else would be some sort of driver or
3008 * hardware error. The driver should catch hardware
3009 * errors.
3010 */
3011 if (WARN((status->rate_idx < 0 ||
3012 status->rate_idx > 76),
3013 "Rate marked as an HT rate but passed "
3014 "status->rate_idx is not "
3015 "an MCS index [0-76]: %d (0x%02x)\n",
3016 status->rate_idx,
3017 status->rate_idx))
3018 goto drop;
3019 } else {
3020 if (WARN_ON(status->rate_idx < 0 ||
3021 status->rate_idx >= sband->n_bitrates))
3022 goto drop;
3023 rate = &sband->bitrates[status->rate_idx];
3024 }
3025 }
3026
3027 status->rx_flags = 0;
3028
3029 /*
3030 * key references and virtual interfaces are protected using RCU
3031 * and this requires that we are in a read-side RCU section during
3032 * receive processing
3033 */
3034 rcu_read_lock();
3035
3036 /*
3037 * Frames with failed FCS/PLCP checksum are not returned,
3038 * all other frames are returned without radiotap header
3039 * if it was previously present.
3040 * Also, frames with less than 16 bytes are dropped.
3041 */
3042 skb = ieee80211_rx_monitor(local, skb, rate);
3043 if (!skb) {
3044 rcu_read_unlock();
3045 return;
3046 }
3047
3048 ieee80211_tpt_led_trig_rx(local,
3049 ((struct ieee80211_hdr *)skb->data)->frame_control,
3050 skb->len);
3051 __ieee80211_rx_handle_packet(hw, skb);
3052
3053 rcu_read_unlock();
3054
3055 return;
3056 drop:
3057 kfree_skb(skb);
3058 }
3059 EXPORT_SYMBOL(ieee80211_rx);
3060
3061 /* This is a version of the rx handler that can be called from hard irq
3062 * context. Post the skb on the queue and schedule the tasklet */
3063 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
3064 {
3065 struct ieee80211_local *local = hw_to_local(hw);
3066
3067 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
3068
3069 skb->pkt_type = IEEE80211_RX_MSG;
3070 skb_queue_tail(&local->skb_queue, skb);
3071 tasklet_schedule(&local->tasklet);
3072 }
3073 EXPORT_SYMBOL(ieee80211_rx_irqsafe);