drivers/staging/rtl8192e/rtllib_tx.c
/******************************************************************************

  Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.

  This program is free software; you can redistribute it and/or modify it
  under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc., 59
  Temple Place - Suite 330, Boston, MA 02111-1307, USA.

  The full GNU General Public License is included in this distribution in the
  file called LICENSE.

  Contact Information:
  James P. Ketrenos <ipw2100-admin@linux.intel.com>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

******************************************************************************

  A few modifications for Realtek's Wi-Fi drivers by
  Andrea Merello <andrea.merello@gmail.com>

  A special thanks goes to Realtek for their support!

******************************************************************************/

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
#include <linux/if_vlan.h>

#include "rtllib.h"

/*


802.11 Data Frame


802.11 frame_control for data frames - 2 bytes
     ,-----------------------------------------------------------------------------------------.
bits | 0  |  1  |  2  |  3  |  4  |  5  |  6  |  7  |  8  |  9  |  a  |  b  |  c  |  d  |  e   |
     |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
val  | 0  |  0  |  0  |  1  |  x  |  0  |  0  |  0  |  1  |  0  |  x  |  x  |  x  |  x  |  x   |
     |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
desc |  ^-ver-^  |  ^type-^  |  ^-----subtype-----^ | to  |from |more |retry| pwr |more |wep   |
     |           |           |x=0 data,x=1 data+ack | DS  | DS  |frag |     | mgm |data |      |
     '-----------------------------------------------------------------------------------------'
                                                   /\
                                                    |
802.11 Data Frame                                   |
           ,--------- 'ctrl' expands to >-----------'
          |
      ,--'---,-------------------------------------------------------------.
Bytes |  2   |  2   |    6    |    6    |    6    |  2   | 0..2312 |   4  |
      |------|------|---------|---------|---------|------|---------|------|
Desc. | ctrl | dura |  DA/RA  |   TA    |    SA   | Sequ |  Frame  |  fcs |
      |      | tion | (BSSID) |         |         | ence |  data   |      |
      `--------------------------------------------------|         |------'
Total: 28 non-data bytes                                 `----.----'
                                                              |
       .- 'Frame data' expands to <---------------------------'
       |
       V
      ,---------------------------------------------------.
Bytes |  1   |  1   |    1    |    3     |  2   |  0-2304 |
      |------|------|---------|----------|------|---------|
Desc. | SNAP | SNAP | Control |Eth Tunnel| Type |   IP    |
      | DSAP | SSAP |         |          |      | Packet  |
      | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8|      |         |
      `-----------------------------------------|         |
Total: 8 non-data bytes                         `----.----'
                                                     |
       .- 'IP Packet' expands, if WEP enabled, to <--'
       |
       V
      ,-----------------------.
Bytes |  4  |   0-2296  |  4  |
      |-----|-----------|-----|
Desc. | IV  | Encrypted | ICV |
      |     | IP Packet |     |
      `-----------------------'
Total: 8 non-data bytes


802.3 Ethernet Data Frame

      ,-----------------------------------------.
Bytes |   6   |   6   |  2   |  Variable |   4  |
      |-------|-------|------|-----------|------|
Desc. | Dest. | Source| Type | IP Packet |  fcs |
      |  MAC  |  MAC  |      |           |      |
      `-----------------------------------------'
Total: 18 non-data bytes

In the event that fragmentation is required, the incoming payload is split into
N parts of size ieee->fts.  The first fragment contains the SNAP header and the
remaining packets are just data.

If encryption is enabled, each fragment payload size is reduced by enough space
to add the prefix and postfix (IV and ICV totalling 8 bytes in the case of WEP).
So if you have 1500 bytes of payload with ieee->fts set to 500 without
encryption it will take 3 frames.  With WEP it will take 4 frames as the
payload of each frame is reduced to 492 bytes.

* SKB visualization
*
*  ,- skb->data
* |
* |    ETHERNET HEADER        ,-<-- PAYLOAD
* |                           |     14 bytes from skb->data
* |  2 bytes for Type --> ,T. |     (sizeof ethhdr)
* |                       | | |
* |,-Dest.--. ,--Src.---. | | |
* |  6 bytes| | 6 bytes | | | |
* v         | |         | | | |
* 0         | v       1 | v | v           2
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
*     ^     | ^         | ^ |
*     |     | |         | | |
*     |     | |         | `T' <---- 2 bytes for Type
*     |     | |         |
*     |     | '---SNAP--' <-------- 6 bytes for SNAP
*     |     |
*     `-IV--' <-------------------- 4 bytes for IV (WEP)
*
*      SNAP HEADER
*
*/

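/* SNAP OUIs: 802.1H encapsulation is used for the EtherTypes that need it
 * (IPX 0x8137 and AppleTalk AARP 0x80f3), RFC 1042 for everything else.
 */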
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };

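/* Write an 802.2 LLC/SNAP header plus the EtherType at 'data' and return
 * the number of bytes written (SNAP_SIZE + sizeof(u16)).
 */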
inline int rtllib_put_snap(u8 *data, u16 h_proto)
{
	struct rtllib_snap_hdr *snap;
	u8 *oui;

	snap = (struct rtllib_snap_hdr *)data;
	snap->dsap = 0xaa;
	snap->ssap = 0xaa;
	snap->ctrl = 0x03;

	if (h_proto == 0x8137 || h_proto == 0x80f3)
		oui = P802_1H_OUI;
	else
		oui = RFC1042_OUI;
	snap->oui[0] = oui[0];
	snap->oui[1] = oui[1];
	snap->oui[2] = oui[2];

	*(__be16 *)(data + SNAP_SIZE) = htons(h_proto);

	return SNAP_SIZE + sizeof(u16);
}

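/* Encrypt a single fragment in place with the current TX key, invoking
 * both the MSDU and MPDU encryption hooks (see the comment below).  On
 * failure the frame is counted as a TX discard and -1 is returned.
 */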
int rtllib_encrypt_fragment(struct rtllib_device *ieee, struct sk_buff *frag,
			    int hdr_len)
{
	struct lib80211_crypt_data *crypt = NULL;
	int res;

	crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];

	if (!(crypt && crypt->ops)) {
		printk(KERN_INFO "=========>%s(), crypt is null\n", __func__);
		return -1;
	}
	/* To encrypt, frame format is:
	 * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes)
	 */

	/* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
	 * call both MSDU and MPDU encryption functions from here.
	 */
	atomic_inc(&crypt->refcnt);
	res = 0;
	if (crypt->ops->encrypt_msdu)
		res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
	if (res == 0 && crypt->ops->encrypt_mpdu)
		res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);

	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
		       ieee->dev->name, frag->len);
		ieee->ieee_stats.tx_discards++;
		return -1;
	}

	return 0;
}

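/* Free a TX buffer descriptor.  Only the rtllib_txb structure itself is
 * freed here; the fragment skbs are assumed to have been handed off (or
 * released) by the TX path before this is called.
 */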
void rtllib_txb_free(struct rtllib_txb *txb)
{
	if (unlikely(!txb))
		return;
	kfree(txb);
}

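/* Allocate a TX buffer descriptor with room for nr_frags fragment
 * pointers and one skb of txb_size bytes per fragment.  Returns NULL,
 * with all partial allocations undone, if any allocation fails.
 */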
static struct rtllib_txb *rtllib_alloc_txb(int nr_frags, int txb_size,
					   gfp_t gfp_mask)
{
	struct rtllib_txb *txb;
	int i;

	txb = kmalloc(sizeof(struct rtllib_txb) + (sizeof(u8 *) * nr_frags),
		      gfp_mask);
	if (!txb)
		return NULL;

	memset(txb, 0, sizeof(struct rtllib_txb));
	txb->nr_frags = nr_frags;
	txb->frag_size = cpu_to_le16(txb_size);

	for (i = 0; i < nr_frags; i++) {
		txb->fragments[i] = dev_alloc_skb(txb_size);
		if (unlikely(!txb->fragments[i])) {
			i--;
			break;
		}
		memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
	}
	if (unlikely(i != nr_frags)) {
		while (i >= 0)
			dev_kfree_skb_any(txb->fragments[i--]);
		kfree(txb);
		return NULL;
	}
	return txb;
}

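/* Map the TOS/DSCP field of an outgoing IPv4 frame to an 802.11 user
 * priority (0-7); non-IP frames go out at priority 0 (best effort).
 */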
static int rtllib_classify(struct sk_buff *skb, u8 bIsAmsdu)
{
	struct ethhdr *eth;
	struct iphdr *ip;

	eth = (struct ethhdr *)skb->data;
	if (eth->h_proto != htons(ETH_P_IP))
		return 0;

	RTLLIB_DEBUG_DATA(RTLLIB_DL_DATA, skb->data, skb->len);
	ip = ip_hdr(skb);
	switch (ip->tos & 0xfc) {
	case 0x20:
		return 2;
	case 0x40:
		return 1;
	case 0x60:
		return 3;
	case 0x80:
		return 4;
	case 0xa0:
		return 5;
	case 0xc0:
		return 6;
	case 0xe0:
		return 7;
	default:
		return 0;
	}
}

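/* Decide whether this QoS data frame may go out as part of an A-MPDU.
 * Aggregation is skipped while scanning, for non-HT, non-QoS or multicast
 * traffic, for DHCP frames, right after link setup (CntAfterLink < 2) and
 * when IOT workarounds forbid it; otherwise an ADDBA exchange is started
 * if needed and the A-MPDU factor/density are copied into the descriptor,
 * possibly overridden by the forced A-MPDU mode.
 */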
static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
				    struct sk_buff *skb,
				    struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
	struct tx_ts_record *pTxTs = NULL;
	struct rtllib_hdr_1addr *hdr = (struct rtllib_hdr_1addr *)skb->data;

	if (rtllib_act_scanning(ieee, false))
		return;

	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;
	if (!IsQoSDataFrame(skb->data))
		return;
	if (is_multicast_ether_addr(hdr->addr1))
		return;

	if (tcb_desc->bdhcp || ieee->CntAfterLink < 2)
		return;

	if (pHTInfo->IOTAction & HT_IOT_ACT_TX_NO_AGGREGATION)
		return;

	if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
		return;
	if (pHTInfo->bCurrentAMPDUEnable) {
		if (!GetTs(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1,
			   skb->priority, TX_DIR, true)) {
			printk(KERN_INFO "%s: can't get TS\n", __func__);
			return;
		}
		if (!pTxTs->TxAdmittedBARecord.bValid) {
			if (ieee->wpa_ie_len && (ieee->pairwise_key_type ==
						 KEY_TYPE_NA)) {
				;
			} else if (tcb_desc->bdhcp == 1) {
				;
			} else if (!pTxTs->bDisable_AddBa) {
				TsStartAddBaProcess(ieee, pTxTs);
			}
			goto FORCED_AGG_SETTING;
		} else if (!pTxTs->bUsingBa) {
			if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum,
				    (pTxTs->TxCurSeq + 1) % 4096))
				pTxTs->bUsingBa = true;
			else
				goto FORCED_AGG_SETTING;
		}
		if (ieee->iw_mode == IW_MODE_INFRA) {
			tcb_desc->bAMPDUEnable = true;
			tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor;
			tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity;
		}
	}
FORCED_AGG_SETTING:
	switch (pHTInfo->ForcedAMPDUMode) {
	case HT_AGG_AUTO:
		break;

	case HT_AGG_FORCE_ENABLE:
		tcb_desc->bAMPDUEnable = true;
		tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
		tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
		break;

	case HT_AGG_FORCE_DISABLE:
		tcb_desc->bAMPDUEnable = false;
		tcb_desc->ampdu_density = 0;
		tcb_desc->ampdu_factor = 0;
		break;
	}
}

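/* Use a short preamble when the current network advertises the
 * short-preamble capability, except at the lowest (1 Mb/s) data rate.
 */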
static void rtllib_query_ShortPreambleMode(struct rtllib_device *ieee,
					   struct cb_desc *tcb_desc)
{
	tcb_desc->bUseShortPreamble = false;
	if (tcb_desc->data_rate == 2)
		return;
	else if (ieee->current_network.capability &
		 WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->bUseShortPreamble = true;
}

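/* Enable the short guard interval for this frame when HT is active and
 * short GI is either forced or supported at the current bandwidth.
 */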
static void rtllib_query_HTCapShortGI(struct rtllib_device *ieee,
				      struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

	tcb_desc->bUseShortGI = false;

	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;

	if (pHTInfo->bForcedShortGI) {
		tcb_desc->bUseShortGI = true;
		return;
	}

	if (pHTInfo->bCurBW40MHz && pHTInfo->bCurShortGI40MHz)
		tcb_desc->bUseShortGI = true;
	else if (!pHTInfo->bCurBW40MHz && pHTInfo->bCurShortGI20MHz)
		tcb_desc->bUseShortGI = true;
}

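/* Mark the frame for 40 MHz transmission when an HT (MCS) rate is in use,
 * the link runs at 40 MHz and no forced 20 MHz fallback is active;
 * multicast and broadcast frames always stay at 20 MHz.
 */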
static void rtllib_query_BandwidthMode(struct rtllib_device *ieee,
				       struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

	tcb_desc->bPacketBW = false;

	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;

	if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
		return;

	if ((tcb_desc->data_rate & 0x80) == 0)
		return;
	if (pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz &&
	    !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
		tcb_desc->bPacketBW = true;
}

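/* Choose RTS/CTS protection for this frame.  Legacy (pre-N) modes use a
 * plain length-based RTS threshold; in N mode protection may also be
 * forced by IOT workarounds, the AP's use-protection flag, the HT
 * operation mode or A-MPDU use.  The RTS rate is 24 Mb/s in every
 * protected case.
 */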
static void rtllib_query_protectionmode(struct rtllib_device *ieee,
					struct cb_desc *tcb_desc,
					struct sk_buff *skb)
{
	tcb_desc->bRTSSTBC = false;
	tcb_desc->bRTSUseShortGI = false;
	tcb_desc->bCTSEnable = false;
	tcb_desc->RTSSC = 0;
	tcb_desc->bRTSBW = false;

	if (tcb_desc->bBroadcast || tcb_desc->bMulticast)
		return;

	if (is_broadcast_ether_addr(skb->data + 16))
		return;

	if (ieee->mode < IEEE_N_24G) {
		if (skb->len > ieee->rts) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		} else if (ieee->current_network.buseprotection) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->bCTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		}
		return;
	} else {
		struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

		while (true) {
			if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) {
				tcb_desc->bCTSEnable = true;
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = true;
				break;
			} else if (pHTInfo->IOTAction & (HT_IOT_ACT_FORCED_RTS |
				   HT_IOT_ACT_PURE_N_MODE)) {
				tcb_desc->bRTSEnable = true;
				tcb_desc->rts_rate = MGN_24M;
				break;
			}
			if (ieee->current_network.buseprotection) {
				tcb_desc->bRTSEnable = true;
				tcb_desc->bCTSEnable = true;
				tcb_desc->rts_rate = MGN_24M;
				break;
			}
			if (pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT) {
				u8 HTOpMode = pHTInfo->CurrentOpMode;

				if ((pHTInfo->bCurBW40MHz && (HTOpMode == 2 ||
							      HTOpMode == 3)) ||
				    (!pHTInfo->bCurBW40MHz && HTOpMode == 3)) {
					tcb_desc->rts_rate = MGN_24M;
					tcb_desc->bRTSEnable = true;
					break;
				}
			}
			if (skb->len > ieee->rts) {
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = true;
				break;
			}
			if (tcb_desc->bAMPDUEnable) {
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = false;
				break;
			}
			goto NO_PROTECTION;
		}
	}
	if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->bUseShortPreamble = true;
	if (ieee->iw_mode == IW_MODE_MASTER)
		goto NO_PROTECTION;
	return;
NO_PROTECTION:
	tcb_desc->bRTSEnable = false;
	tcb_desc->bCTSEnable = false;
	tcb_desc->rts_rate = 0;
	tcb_desc->RTSSC = 0;
	tcb_desc->bRTSBW = false;
}

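/* Copy the device-wide rate-fallback and driver-assigned-rate overrides
 * into the per-packet descriptor, and select RATR index 0 for
 * infrastructure and ad-hoc links unless both overrides are set.
 */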
static void rtllib_txrate_selectmode(struct rtllib_device *ieee,
				     struct cb_desc *tcb_desc)
{
	if (ieee->bTxDisableRateFallBack)
		tcb_desc->bTxDisableRateFallBack = true;

	if (ieee->bTxUseDriverAssingedRate)
		tcb_desc->bTxUseDriverAssingedRate = true;
	if (!tcb_desc->bTxDisableRateFallBack ||
	    !tcb_desc->bTxUseDriverAssingedRate) {
		if (ieee->iw_mode == IW_MODE_INFRA ||
		    ieee->iw_mode == IW_MODE_ADHOC)
			tcb_desc->RATRIndex = 0;
	}
}

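/* Return the next TX sequence number for the (destination, TID) traffic
 * stream of a unicast QoS data frame and advance the per-TS counter
 * modulo 4096; multicast and non-QoS frames return 0.
 */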
u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb,
			u8 *dst)
{
	u16 seqnum = 0;

	if (is_multicast_ether_addr(dst))
		return 0;
	if (IsQoSDataFrame(skb->data)) {
		struct tx_ts_record *pTS = NULL;

		if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst,
			   skb->priority, TX_DIR, true))
			return 0;
		seqnum = pTS->TxCurSeq;
		pTS->TxCurSeq = (pTS->TxCurSeq + 1) % 4096;
		return seqnum;
	}
	return 0;
}

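/* Drop the WMM user priority of an skb by one access category
 * (VO -> VI -> BE -> BK), used when admission control is mandatory for
 * the current AC.  Returns -1 once no further downgrade is possible.
 */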
static int wme_downgrade_ac(struct sk_buff *skb)
{
	switch (skb->priority) {
	case 6:
	case 7:
		skb->priority = 5; /* VO -> VI */
		return 0;
	case 4:
	case 5:
		skb->priority = 3; /* VI -> BE */
		return 0;
	case 0:
	case 3:
		skb->priority = 1; /* BE -> BK */
		return 0;
	default:
		return -1;
	}
}

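/* Main 802.11 TX entry point: convert an outgoing Ethernet frame into a
 * TX block (rtllib_txb) of one or more 802.11 data fragments, applying
 * classification, fragmentation, SNAP encapsulation, encryption, sequence
 * numbering and the per-packet rate/aggregation/protection settings, then
 * hand the result to the softmac queue or the driver's hard_start_xmit.
 * In raw/monitor modes the payload is passed through unmodified.
 */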
int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
{
	struct rtllib_device *ieee = (struct rtllib_device *)
				     netdev_priv_rsl(dev);
	struct rtllib_txb *txb = NULL;
	struct rtllib_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type = 0, encrypt;
	int bytes, fc, qos_ctl = 0, hdr_len;
	struct sk_buff *skb_frag;
	struct rtllib_hdr_3addrqos header = { /* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	int qos_actived = ieee->current_network.qos_data.active;
	struct lib80211_crypt_data *crypt = NULL;
	struct cb_desc *tcb_desc;
	u8 bIsMulticast = false;
	u8 IsAmsdu = false;
	bool bdhcp = false;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, don't bother
	 * creating it...
	 */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features &
	     IEEE_SOFTMAC_TX_QUEUE)) ||
	    ((!ieee->softmac_data_hard_start_xmit &&
	     (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		printk(KERN_WARNING "%s: No xmit handler.\n",
		       ieee->dev->name);
		goto success;
	}

	if (likely(ieee->raw_tx == 0)) {
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}
		/* Save source and destination addresses */
		memcpy(dest, skb->data, ETH_ALEN);
		memcpy(src, skb->data + ETH_ALEN, ETH_ALEN);

		memset(skb->cb, 0, sizeof(skb->cb));
		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		if (ieee->iw_mode == IW_MODE_MONITOR) {
			txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
			if (unlikely(!txb)) {
				printk(KERN_WARNING "%s: Could not allocate TXB\n",
				       ieee->dev->name);
				goto failed;
			}

			txb->encrypted = 0;
			txb->payload_size = cpu_to_le16(skb->len);
			memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
			       skb->len);

			goto success;
		}

		if (skb->len > 282) {
			if (ETH_P_IP == ether_type) {
				const struct iphdr *ip = (struct iphdr *)
					((u8 *)skb->data + 14);

				if (IPPROTO_UDP == ip->protocol) {
					struct udphdr *udp;

					udp = (struct udphdr *)((u8 *)ip +
					      (ip->ihl << 2));
					if (((((u8 *)udp)[1] == 68) &&
					     (((u8 *)udp)[3] == 67)) ||
					    ((((u8 *)udp)[1] == 67) &&
					     (((u8 *)udp)[3] == 68))) {
						bdhcp = true;
						ieee->LPSDelayCnt = 200;
					}
				}
			} else if (ETH_P_ARP == ether_type) {
				printk(KERN_INFO "=================>DHCP Protocol start tx ARP pkt!!\n");
				bdhcp = true;
				ieee->LPSDelayCnt =
					 ieee->current_network.tim.tim_count;
			}
		}

		skb->priority = rtllib_classify(skb, IsAmsdu);
		crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			  ieee->host_encrypt && crypt && crypt->ops;
		if (!encrypt && ieee->ieee802_1x &&
		    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
					    sizeof(struct ethhdr) - SNAP_SIZE -
					    sizeof(u16));

			RTLLIB_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
					 eap_get_type(eap->type));
		}

		/* Advance the SKB to the start of the payload */
		skb_pull(skb, sizeof(struct ethhdr));

		/* Determine total amount of storage required for TXB packets */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		if (encrypt)
			fc = RTLLIB_FTYPE_DATA | RTLLIB_FCTL_WEP;
		else
			fc = RTLLIB_FTYPE_DATA;

		if (qos_actived)
			fc |= RTLLIB_STYPE_QOS_DATA;
		else
			fc |= RTLLIB_STYPE_DATA;

		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= RTLLIB_FCTL_TODS;
			/* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
			memcpy(&header.addr1, ieee->current_network.bssid,
			       ETH_ALEN);
			memcpy(&header.addr2, &src, ETH_ALEN);
			if (IsAmsdu)
				memcpy(&header.addr3,
				       ieee->current_network.bssid, ETH_ALEN);
			else
				memcpy(&header.addr3, &dest, ETH_ALEN);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* not From/To DS: Addr1 = DA, Addr2 = SA,
			 * Addr3 = BSSID
			 */
			memcpy(&header.addr1, dest, ETH_ALEN);
			memcpy(&header.addr2, src, ETH_ALEN);
			memcpy(&header.addr3, ieee->current_network.bssid,
			       ETH_ALEN);
		}

		bIsMulticast = is_multicast_ether_addr(header.addr1);

		header.frame_ctl = cpu_to_le16(fc);

		/* Determine fragmentation size based on destination (multicast
		 * and broadcast are not fragmented)
		 */
		if (bIsMulticast) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
		} else {
			frag_size = ieee->fts;
			qos_ctl = 0;
		}

		if (qos_actived) {
			hdr_len = RTLLIB_3ADDR_LEN + 2;

			/* In case we are a client, verify that ACM is not
			 * set for this AC.
			 */
			while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) {
				printk(KERN_INFO "skb->priority = %x\n",
				       skb->priority);
				if (wme_downgrade_ac(skb))
					break;
				printk(KERN_INFO "converted skb->priority = %x\n",
				       skb->priority);
			}
			qos_ctl |= skb->priority;
			header.qos_ctl = cpu_to_le16(qos_ctl & RTLLIB_QOS_TID);
		} else {
			hdr_len = RTLLIB_3ADDR_LEN;
		}
		/* Determine amount of payload per fragment.  Regardless of
		 * whether this stack is providing the full 802.11 header, one
		 * will eventually be affixed to this fragment -- so we must
		 * account for it when determining the amount of payload space.
		 */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		    (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
			bytes_per_frag -= RTLLIB_FCS_LEN;

		/* Each fragment may need to have room for encrypting
		 * pre/postfix
		 */
		if (encrypt) {
			bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
					  crypt->ops->extra_mpdu_postfix_len +
					  crypt->ops->extra_msdu_prefix_len +
					  crypt->ops->extra_msdu_postfix_len;
		}
		/* Number of fragments is the total bytes divided by the
		 * payload per fragment
		 */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;

		/* When we allocate the TXB we allocate enough space for the
		 * reserve and full fragment bytes (bytes_per_frag doesn't
		 * include prefix, postfix, header, FCS, etc.)
		 */
		txb = rtllib_alloc_txb(nr_frags, frag_size +
				       ieee->tx_headroom, GFP_ATOMIC);
		if (unlikely(!txb)) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = cpu_to_le16(bytes);

		if (qos_actived)
			txb->queue_index = UP2AC(skb->priority);
		else
			txb->queue_index = WME_AC_BE;

		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			tcb_desc = (struct cb_desc *)(skb_frag->cb +
				    MAX_DEV_ADDR_SIZE);
			if (qos_actived) {
				skb_frag->priority = skb->priority;
				tcb_desc->queue_index = UP2AC(skb->priority);
			} else {
				skb_frag->priority = WME_AC_BE;
				tcb_desc->queue_index = WME_AC_BE;
			}
			skb_reserve(skb_frag, ieee->tx_headroom);

			if (encrypt) {
				if (ieee->hwsec_active)
					tcb_desc->bHwSec = 1;
				else
					tcb_desc->bHwSec = 0;
				skb_reserve(skb_frag,
					    crypt->ops->extra_mpdu_prefix_len +
					    crypt->ops->extra_msdu_prefix_len);
			} else {
				tcb_desc->bHwSec = 0;
			}
			frag_hdr = (struct rtllib_hdr_3addrqos *)
				   skb_put(skb_frag, hdr_len);
			memcpy(frag_hdr, &header, hdr_len);

			/* If this is not the last fragment, then add the
			 * MOREFRAGS bit to the frame control
			 */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | RTLLIB_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;

			} else {
				/* The last fragment has the remaining length */
				bytes = bytes_last_frag;
			}
			if ((qos_actived) && (!bIsMulticast)) {
				frag_hdr->seq_ctl =
					 cpu_to_le16(rtllib_query_seqnum(ieee, skb_frag,
									 header.addr1));
				frag_hdr->seq_ctl =
					 cpu_to_le16(le16_to_cpu(frag_hdr->seq_ctl)<<4 | i);
			} else {
				frag_hdr->seq_ctl =
					 cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
			}
			/* Put a SNAP header on the first fragment */
			if (i == 0) {
				rtllib_put_snap(
					skb_put(skb_frag, SNAP_SIZE +
					sizeof(u16)), ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

			/* Advance the SKB... */
			skb_pull(skb, bytes);

			/* Encryption routine will move the header forward in
			 * order to insert the IV between the header and the
			 * payload
			 */
			if (encrypt)
				rtllib_encrypt_fragment(ieee, skb_frag,
							hdr_len);
			if (ieee->config &
			    (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}

		if ((qos_actived) && (!bIsMulticast)) {
			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
			else
				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;
			else
				ieee->seq_ctrl[0]++;
		}
	} else {
		if (unlikely(skb->len < sizeof(struct rtllib_hdr_3addr))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}

		txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
		if (!txb) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = cpu_to_le16(skb->len);
		memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
		       skb->len);
	}

success:
	if (txb) {
		struct cb_desc *tcb_desc = (struct cb_desc *)
			(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);

		tcb_desc->bTxEnableFwCalcDur = 1;
		tcb_desc->priority = skb->priority;

		if (ether_type == ETH_P_PAE) {
			if (ieee->pHTInfo->IOTAction &
			    HT_IOT_ACT_WA_IOT_Broadcom) {
				tcb_desc->data_rate =
					 MgntQuery_TxRateExcludeCCKRates(ieee);
				tcb_desc->bTxDisableRateFallBack = false;
			} else {
				tcb_desc->data_rate = ieee->basic_rate;
				tcb_desc->bTxDisableRateFallBack = 1;
			}

			tcb_desc->RATRIndex = 7;
			tcb_desc->bTxUseDriverAssingedRate = 1;
		} else {
			if (is_multicast_ether_addr(header.addr1))
				tcb_desc->bMulticast = 1;
			if (is_broadcast_ether_addr(header.addr1))
				tcb_desc->bBroadcast = 1;
			rtllib_txrate_selectmode(ieee, tcb_desc);
			if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
				tcb_desc->data_rate = ieee->basic_rate;
			else
				tcb_desc->data_rate = CURRENT_RATE(ieee->mode,
					 ieee->rate, ieee->HTCurrentOperaRate);

			if (bdhcp) {
				if (ieee->pHTInfo->IOTAction &
				    HT_IOT_ACT_WA_IOT_Broadcom) {
					tcb_desc->data_rate =
					   MgntQuery_TxRateExcludeCCKRates(ieee);
					tcb_desc->bTxDisableRateFallBack = false;
				} else {
					tcb_desc->data_rate = MGN_1M;
					tcb_desc->bTxDisableRateFallBack = 1;
				}

				tcb_desc->RATRIndex = 7;
				tcb_desc->bTxUseDriverAssingedRate = 1;
				tcb_desc->bdhcp = 1;
			}

			rtllib_query_ShortPreambleMode(ieee, tcb_desc);
			rtllib_tx_query_agg_cap(ieee, txb->fragments[0],
						tcb_desc);
			rtllib_query_HTCapShortGI(ieee, tcb_desc);
			rtllib_query_BandwidthMode(ieee, tcb_desc);
			rtllib_query_protectionmode(ieee, tcb_desc,
						    txb->fragments[0]);
		}
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += le16_to_cpu(txb->payload_size);
			rtllib_softmac_xmit(txb, ieee);
		} else {
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += le16_to_cpu(txb->payload_size);
				return 0;
			}
			rtllib_txb_free(txb);
		}
	}

	return 0;

failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;

}

int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
{
	memset(skb->cb, 0, sizeof(skb->cb));
	return rtllib_xmit_inter(skb, dev);
}
EXPORT_SYMBOL(rtllib_xmit);