1 /******************************************************************************
3 Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
5 This program is free software; you can redistribute it and/or modify it
6 under the terms of version 2 of the GNU General Public License as
7 published by the Free Software Foundation.
9 This program is distributed in the hope that it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 You should have received a copy of the GNU General Public License along with
15 this program; if not, write to the Free Software Foundation, Inc., 59
16 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 The full GNU General Public License is included in this distribution in the
22 James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 ******************************************************************************
27 Few modifications for Realtek's Wi-Fi drivers by
28 Andrea Merello <andrea.merello@gmail.com>
30 A special thanks goes to Realtek for their support !
32 ******************************************************************************/
34 #include <linux/compiler.h>
35 #include <linux/errno.h>
36 #include <linux/if_arp.h>
37 #include <linux/in6.h>
40 #include <linux/kernel.h>
41 #include <linux/module.h>
42 #include <linux/netdevice.h>
43 #include <linux/pci.h>
44 #include <linux/proc_fs.h>
45 #include <linux/skbuff.h>
46 #include <linux/slab.h>
47 #include <linux/tcp.h>
48 #include <linux/types.h>
49 #include <linux/wireless.h>
50 #include <linux/etherdevice.h>
51 #include <linux/uaccess.h>
52 #include <linux/if_vlan.h>
62 802.11 frame_control for data frames - 2 bytes
63 ,-----------------------------------------------------------------------------------------.
64 bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | a | b | c | d | e |
65 |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
66 val | 0 | 0 | 0 | 1 | x | 0 | 0 | 0 | 1 | 0 | x | x | x | x | x |
67 |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
68 desc | ^-ver-^ | ^type-^ | ^-----subtype-----^ | to |from |more |retry| pwr |more |wep |
69 | | | x=0 data,x=1 data+ack | DS | DS |frag | | mgm |data | |
70 '-----------------------------------------------------------------------------------------'
74 ,--------- 'ctrl' expands to >-----------'
76 ,--'---,-------------------------------------------------------------.
77 Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 |
78 |------|------|---------|---------|---------|------|---------|------|
79 Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | Frame | fcs |
80 | | tion | (BSSID) | | | ence | data | |
81 `--------------------------------------------------| |------'
82 Total: 28 non-data bytes `----.----'
84 .- 'Frame data' expands to <---------------------------'
87 ,---------------------------------------------------.
88 Bytes | 1 | 1 | 1 | 3 | 2 | 0-2304 |
89 |------|------|---------|----------|------|---------|
90 Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP |
91 | DSAP | SSAP | | | | Packet |
92 | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8| | |
93 `-----------------------------------------| |
94 Total: 8 non-data bytes `----.----'
96 .- 'IP Packet' expands, if WEP enabled, to <--'
99 ,-----------------------.
100 Bytes | 4 | 0-2296 | 4 |
101 |-----|-----------|-----|
102 Desc. | IV | Encrypted | ICV |
104 `-----------------------'
105 Total: 8 non-data bytes
108 802.3 Ethernet Data Frame
110 ,-----------------------------------------.
111 Bytes | 6 | 6 | 2 | Variable | 4 |
112 |-------|-------|------|-----------|------|
113 Desc. | Dest. | Source| Type | IP Packet | fcs |
115 `-----------------------------------------'
116 Total: 18 non-data bytes
118 In the event that fragmentation is required, the incoming payload is split into
119 N parts of size ieee->fts. The first fragment contains the SNAP header and the
120 remaining packets are just data.
122 If encryption is enabled, each fragment payload size is reduced by enough space
123 to add the prefix and postfix (IV and ICV totalling 8 bytes in the case of WEP)
124 So if you have 1500 bytes of payload with ieee->fts set to 500 without
125 encryption it will take 3 frames. With WEP it will take 4 frames as the
126 payload of each frame is reduced to 492 bytes.
132 * | ETHERNET HEADER ,-<-- PAYLOAD
133 * | | 14 bytes from skb->data
134 * | 2 bytes for Type --> ,T. | (sizeof ethhdr)
136 * |,-Dest.--. ,--Src.---. | | |
137 * | 6 bytes| | 6 bytes | | | |
140 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
143 * | | | | `T' <---- 2 bytes for Type
145 * | | '---SNAP--' <-------- 6 bytes for SNAP
147 * `-IV--' <-------------------- 4 bytes for IV (WEP)
153 static u8 P802_1H_OUI
[P80211_OUI_LEN
] = { 0x00, 0x00, 0xf8 };
154 static u8 RFC1042_OUI
[P80211_OUI_LEN
] = { 0x00, 0x00, 0x00 };
156 inline int rtllib_put_snap(u8
*data
, u16 h_proto
)
158 struct rtllib_snap_hdr
*snap
;
161 snap
= (struct rtllib_snap_hdr
*)data
;
166 if (h_proto
== 0x8137 || h_proto
== 0x80f3)
170 snap
->oui
[0] = oui
[0];
171 snap
->oui
[1] = oui
[1];
172 snap
->oui
[2] = oui
[2];
174 *(__be16
*)(data
+ SNAP_SIZE
) = htons(h_proto
);
176 return SNAP_SIZE
+ sizeof(u16
);
179 int rtllib_encrypt_fragment(struct rtllib_device
*ieee
, struct sk_buff
*frag
,
182 struct lib80211_crypt_data
*crypt
= NULL
;
185 crypt
= ieee
->crypt_info
.crypt
[ieee
->crypt_info
.tx_keyidx
];
187 if (!(crypt
&& crypt
->ops
)) {
188 netdev_info(ieee
->dev
, "=========>%s(), crypt is null\n",
192 /* To encrypt, frame format is:
193 * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */
195 /* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
196 * call both MSDU and MPDU encryption functions from here. */
197 atomic_inc(&crypt
->refcnt
);
199 if (crypt
->ops
->encrypt_msdu
)
200 res
= crypt
->ops
->encrypt_msdu(frag
, hdr_len
, crypt
->priv
);
201 if (res
== 0 && crypt
->ops
->encrypt_mpdu
)
202 res
= crypt
->ops
->encrypt_mpdu(frag
, hdr_len
, crypt
->priv
);
204 atomic_dec(&crypt
->refcnt
);
206 netdev_info(ieee
->dev
, "%s: Encryption failed: len=%d.\n",
207 ieee
->dev
->name
, frag
->len
);
208 ieee
->ieee_stats
.tx_discards
++;
/* Free a TXB container. The fragment skbs are owned by whoever consumed
 * them (or were never attached), so only the txb structure itself is
 * released here. Safe to call with NULL.
 */
void rtllib_txb_free(struct rtllib_txb *txb)
{
	if (unlikely(!txb))
		return;
	kfree(txb);
}
223 static struct rtllib_txb
*rtllib_alloc_txb(int nr_frags
, int txb_size
,
226 struct rtllib_txb
*txb
;
229 txb
= kmalloc(sizeof(struct rtllib_txb
) + (sizeof(u8
*) * nr_frags
),
234 memset(txb
, 0, sizeof(struct rtllib_txb
));
235 txb
->nr_frags
= nr_frags
;
236 txb
->frag_size
= cpu_to_le16(txb_size
);
238 for (i
= 0; i
< nr_frags
; i
++) {
239 txb
->fragments
[i
] = dev_alloc_skb(txb_size
);
240 if (unlikely(!txb
->fragments
[i
])) {
244 memset(txb
->fragments
[i
]->cb
, 0, sizeof(txb
->fragments
[i
]->cb
));
246 if (unlikely(i
!= nr_frags
)) {
248 dev_kfree_skb_any(txb
->fragments
[i
--]);
255 static int rtllib_classify(struct sk_buff
*skb
, u8 bIsAmsdu
)
260 eth
= (struct ethhdr
*)skb
->data
;
261 if (eth
->h_proto
!= htons(ETH_P_IP
))
264 RTLLIB_DEBUG_DATA(RTLLIB_DL_DATA
, skb
->data
, skb
->len
);
266 switch (ip
->tos
& 0xfc) {
286 static void rtllib_tx_query_agg_cap(struct rtllib_device
*ieee
,
288 struct cb_desc
*tcb_desc
)
290 struct rt_hi_throughput
*pHTInfo
= ieee
->pHTInfo
;
291 struct tx_ts_record
*pTxTs
= NULL
;
292 struct rtllib_hdr_1addr
*hdr
= (struct rtllib_hdr_1addr
*)skb
->data
;
294 if (rtllib_act_scanning(ieee
, false))
297 if (!pHTInfo
->bCurrentHTSupport
|| !pHTInfo
->bEnableHT
)
299 if (!IsQoSDataFrame(skb
->data
))
301 if (is_multicast_ether_addr(hdr
->addr1
))
304 if (tcb_desc
->bdhcp
|| ieee
->CntAfterLink
< 2)
307 if (pHTInfo
->IOTAction
& HT_IOT_ACT_TX_NO_AGGREGATION
)
310 if (!ieee
->GetNmodeSupportBySecCfg(ieee
->dev
))
312 if (pHTInfo
->bCurrentAMPDUEnable
) {
313 if (!GetTs(ieee
, (struct ts_common_info
**)(&pTxTs
), hdr
->addr1
,
314 skb
->priority
, TX_DIR
, true)) {
315 netdev_info(ieee
->dev
, "%s: can't get TS\n", __func__
);
318 if (pTxTs
->TxAdmittedBARecord
.bValid
== false) {
319 if (ieee
->wpa_ie_len
&& (ieee
->pairwise_key_type
==
322 } else if (tcb_desc
->bdhcp
== 1) {
324 } else if (!pTxTs
->bDisable_AddBa
) {
325 TsStartAddBaProcess(ieee
, pTxTs
);
327 goto FORCED_AGG_SETTING
;
328 } else if (pTxTs
->bUsingBa
== false) {
329 if (SN_LESS(pTxTs
->TxAdmittedBARecord
.BaStartSeqCtrl
.field
.SeqNum
,
330 (pTxTs
->TxCurSeq
+1)%4096))
331 pTxTs
->bUsingBa
= true;
333 goto FORCED_AGG_SETTING
;
335 if (ieee
->iw_mode
== IW_MODE_INFRA
) {
336 tcb_desc
->bAMPDUEnable
= true;
337 tcb_desc
->ampdu_factor
= pHTInfo
->CurrentAMPDUFactor
;
338 tcb_desc
->ampdu_density
= pHTInfo
->CurrentMPDUDensity
;
342 switch (pHTInfo
->ForcedAMPDUMode
) {
346 case HT_AGG_FORCE_ENABLE
:
347 tcb_desc
->bAMPDUEnable
= true;
348 tcb_desc
->ampdu_density
= pHTInfo
->ForcedMPDUDensity
;
349 tcb_desc
->ampdu_factor
= pHTInfo
->ForcedAMPDUFactor
;
352 case HT_AGG_FORCE_DISABLE
:
353 tcb_desc
->bAMPDUEnable
= false;
354 tcb_desc
->ampdu_density
= 0;
355 tcb_desc
->ampdu_factor
= 0;
360 static void rtllib_qurey_ShortPreambleMode(struct rtllib_device
*ieee
,
361 struct cb_desc
*tcb_desc
)
363 tcb_desc
->bUseShortPreamble
= false;
364 if (tcb_desc
->data_rate
== 2)
366 else if (ieee
->current_network
.capability
&
367 WLAN_CAPABILITY_SHORT_PREAMBLE
)
368 tcb_desc
->bUseShortPreamble
= true;
371 static void rtllib_query_HTCapShortGI(struct rtllib_device
*ieee
,
372 struct cb_desc
*tcb_desc
)
374 struct rt_hi_throughput
*pHTInfo
= ieee
->pHTInfo
;
376 tcb_desc
->bUseShortGI
= false;
378 if (!pHTInfo
->bCurrentHTSupport
|| !pHTInfo
->bEnableHT
)
381 if (pHTInfo
->bForcedShortGI
) {
382 tcb_desc
->bUseShortGI
= true;
386 if ((pHTInfo
->bCurBW40MHz
== true) && pHTInfo
->bCurShortGI40MHz
)
387 tcb_desc
->bUseShortGI
= true;
388 else if ((pHTInfo
->bCurBW40MHz
== false) && pHTInfo
->bCurShortGI20MHz
)
389 tcb_desc
->bUseShortGI
= true;
392 static void rtllib_query_BandwidthMode(struct rtllib_device
*ieee
,
393 struct cb_desc
*tcb_desc
)
395 struct rt_hi_throughput
*pHTInfo
= ieee
->pHTInfo
;
397 tcb_desc
->bPacketBW
= false;
399 if (!pHTInfo
->bCurrentHTSupport
|| !pHTInfo
->bEnableHT
)
402 if (tcb_desc
->bMulticast
|| tcb_desc
->bBroadcast
)
405 if ((tcb_desc
->data_rate
& 0x80) == 0)
407 if (pHTInfo
->bCurBW40MHz
&& pHTInfo
->bCurTxBW40MHz
&&
408 !ieee
->bandwidth_auto_switch
.bforced_tx20Mhz
)
409 tcb_desc
->bPacketBW
= true;
412 static void rtllib_query_protectionmode(struct rtllib_device
*ieee
,
413 struct cb_desc
*tcb_desc
,
416 tcb_desc
->bRTSSTBC
= false;
417 tcb_desc
->bRTSUseShortGI
= false;
418 tcb_desc
->bCTSEnable
= false;
420 tcb_desc
->bRTSBW
= false;
422 if (tcb_desc
->bBroadcast
|| tcb_desc
->bMulticast
)
425 if (is_broadcast_ether_addr(skb
->data
+16))
428 if (ieee
->mode
< IEEE_N_24G
) {
429 if (skb
->len
> ieee
->rts
) {
430 tcb_desc
->bRTSEnable
= true;
431 tcb_desc
->rts_rate
= MGN_24M
;
432 } else if (ieee
->current_network
.buseprotection
) {
433 tcb_desc
->bRTSEnable
= true;
434 tcb_desc
->bCTSEnable
= true;
435 tcb_desc
->rts_rate
= MGN_24M
;
439 struct rt_hi_throughput
*pHTInfo
= ieee
->pHTInfo
;
442 if (pHTInfo
->IOTAction
& HT_IOT_ACT_FORCED_CTS2SELF
) {
443 tcb_desc
->bCTSEnable
= true;
444 tcb_desc
->rts_rate
= MGN_24M
;
445 tcb_desc
->bRTSEnable
= true;
447 } else if (pHTInfo
->IOTAction
& (HT_IOT_ACT_FORCED_RTS
|
448 HT_IOT_ACT_PURE_N_MODE
)) {
449 tcb_desc
->bRTSEnable
= true;
450 tcb_desc
->rts_rate
= MGN_24M
;
453 if (ieee
->current_network
.buseprotection
) {
454 tcb_desc
->bRTSEnable
= true;
455 tcb_desc
->bCTSEnable
= true;
456 tcb_desc
->rts_rate
= MGN_24M
;
459 if (pHTInfo
->bCurrentHTSupport
&& pHTInfo
->bEnableHT
) {
460 u8 HTOpMode
= pHTInfo
->CurrentOpMode
;
462 if ((pHTInfo
->bCurBW40MHz
&& (HTOpMode
== 2 ||
464 (!pHTInfo
->bCurBW40MHz
&& HTOpMode
== 3)) {
465 tcb_desc
->rts_rate
= MGN_24M
;
466 tcb_desc
->bRTSEnable
= true;
470 if (skb
->len
> ieee
->rts
) {
471 tcb_desc
->rts_rate
= MGN_24M
;
472 tcb_desc
->bRTSEnable
= true;
475 if (tcb_desc
->bAMPDUEnable
) {
476 tcb_desc
->rts_rate
= MGN_24M
;
477 tcb_desc
->bRTSEnable
= false;
483 if (ieee
->current_network
.capability
& WLAN_CAPABILITY_SHORT_PREAMBLE
)
484 tcb_desc
->bUseShortPreamble
= true;
485 if (ieee
->iw_mode
== IW_MODE_MASTER
)
489 tcb_desc
->bRTSEnable
= false;
490 tcb_desc
->bCTSEnable
= false;
491 tcb_desc
->rts_rate
= 0;
493 tcb_desc
->bRTSBW
= false;
497 static void rtllib_txrate_selectmode(struct rtllib_device
*ieee
,
498 struct cb_desc
*tcb_desc
)
500 if (ieee
->bTxDisableRateFallBack
)
501 tcb_desc
->bTxDisableRateFallBack
= true;
503 if (ieee
->bTxUseDriverAssingedRate
)
504 tcb_desc
->bTxUseDriverAssingedRate
= true;
505 if (!tcb_desc
->bTxDisableRateFallBack
||
506 !tcb_desc
->bTxUseDriverAssingedRate
) {
507 if (ieee
->iw_mode
== IW_MODE_INFRA
||
508 ieee
->iw_mode
== IW_MODE_ADHOC
)
509 tcb_desc
->RATRIndex
= 0;
513 u16
rtllib_query_seqnum(struct rtllib_device
*ieee
, struct sk_buff
*skb
,
518 if (is_multicast_ether_addr(dst
))
520 if (IsQoSDataFrame(skb
->data
)) {
521 struct tx_ts_record
*pTS
= NULL
;
523 if (!GetTs(ieee
, (struct ts_common_info
**)(&pTS
), dst
,
524 skb
->priority
, TX_DIR
, true))
526 seqnum
= pTS
->TxCurSeq
;
527 pTS
->TxCurSeq
= (pTS
->TxCurSeq
+1)%4096;
533 static int wme_downgrade_ac(struct sk_buff
*skb
)
535 switch (skb
->priority
) {
538 skb
->priority
= 5; /* VO -> VI */
542 skb
->priority
= 3; /* VI -> BE */
546 skb
->priority
= 1; /* BE -> BK */
553 int rtllib_xmit_inter(struct sk_buff
*skb
, struct net_device
*dev
)
555 struct rtllib_device
*ieee
= (struct rtllib_device
*)
556 netdev_priv_rsl(dev
);
557 struct rtllib_txb
*txb
= NULL
;
558 struct rtllib_hdr_3addrqos
*frag_hdr
;
559 int i
, bytes_per_frag
, nr_frags
, bytes_last_frag
, frag_size
;
561 struct net_device_stats
*stats
= &ieee
->stats
;
562 int ether_type
= 0, encrypt
;
563 int bytes
, fc
, qos_ctl
= 0, hdr_len
;
564 struct sk_buff
*skb_frag
;
565 struct rtllib_hdr_3addrqos header
= { /* Ensure zero initialized */
570 u8 dest
[ETH_ALEN
], src
[ETH_ALEN
];
571 int qos_actived
= ieee
->current_network
.qos_data
.active
;
572 struct lib80211_crypt_data
*crypt
= NULL
;
573 struct cb_desc
*tcb_desc
;
574 u8 bIsMulticast
= false;
578 spin_lock_irqsave(&ieee
->lock
, flags
);
580 /* If there is no driver handler to take the TXB, don't bother
582 if ((!ieee
->hard_start_xmit
&& !(ieee
->softmac_features
&
583 IEEE_SOFTMAC_TX_QUEUE
)) ||
584 ((!ieee
->softmac_data_hard_start_xmit
&&
585 (ieee
->softmac_features
& IEEE_SOFTMAC_TX_QUEUE
)))) {
586 netdev_warn(ieee
->dev
, "No xmit handler.\n");
591 if (likely(ieee
->raw_tx
== 0)) {
592 if (unlikely(skb
->len
< SNAP_SIZE
+ sizeof(u16
))) {
593 netdev_warn(ieee
->dev
, "skb too small (%d).\n",
597 /* Save source and destination addresses */
598 memcpy(dest
, skb
->data
, ETH_ALEN
);
599 memcpy(src
, skb
->data
+ETH_ALEN
, ETH_ALEN
);
601 memset(skb
->cb
, 0, sizeof(skb
->cb
));
602 ether_type
= ntohs(((struct ethhdr
*)skb
->data
)->h_proto
);
604 if (ieee
->iw_mode
== IW_MODE_MONITOR
) {
605 txb
= rtllib_alloc_txb(1, skb
->len
, GFP_ATOMIC
);
606 if (unlikely(!txb
)) {
607 netdev_warn(ieee
->dev
,
608 "Could not allocate TXB\n");
613 txb
->payload_size
= cpu_to_le16(skb
->len
);
614 memcpy(skb_put(txb
->fragments
[0], skb
->len
), skb
->data
,
620 if (skb
->len
> 282) {
621 if (ETH_P_IP
== ether_type
) {
622 const struct iphdr
*ip
= (struct iphdr
*)
623 ((u8
*)skb
->data
+14);
624 if (IPPROTO_UDP
== ip
->protocol
) {
627 udp
= (struct udphdr
*)((u8
*)ip
+
629 if (((((u8
*)udp
)[1] == 68) &&
630 (((u8
*)udp
)[3] == 67)) ||
631 ((((u8
*)udp
)[1] == 67) &&
632 (((u8
*)udp
)[3] == 68))) {
634 ieee
->LPSDelayCnt
= 200;
637 } else if (ETH_P_ARP
== ether_type
) {
638 netdev_info(ieee
->dev
,
639 "=================>DHCP Protocol start tx ARP pkt!!\n");
642 ieee
->current_network
.tim
.tim_count
;
646 skb
->priority
= rtllib_classify(skb
, IsAmsdu
);
647 crypt
= ieee
->crypt_info
.crypt
[ieee
->crypt_info
.tx_keyidx
];
648 encrypt
= !(ether_type
== ETH_P_PAE
&& ieee
->ieee802_1x
) &&
649 ieee
->host_encrypt
&& crypt
&& crypt
->ops
;
650 if (!encrypt
&& ieee
->ieee802_1x
&&
651 ieee
->drop_unencrypted
&& ether_type
!= ETH_P_PAE
) {
655 if (crypt
&& !encrypt
&& ether_type
== ETH_P_PAE
) {
656 struct eapol
*eap
= (struct eapol
*)(skb
->data
+
657 sizeof(struct ethhdr
) - SNAP_SIZE
-
659 RTLLIB_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
660 eap_get_type(eap
->type
));
663 /* Advance the SKB to the start of the payload */
664 skb_pull(skb
, sizeof(struct ethhdr
));
666 /* Determine total amount of storage required for TXB packets */
667 bytes
= skb
->len
+ SNAP_SIZE
+ sizeof(u16
);
670 fc
= RTLLIB_FTYPE_DATA
| RTLLIB_FCTL_WEP
;
672 fc
= RTLLIB_FTYPE_DATA
;
675 fc
|= RTLLIB_STYPE_QOS_DATA
;
677 fc
|= RTLLIB_STYPE_DATA
;
679 if (ieee
->iw_mode
== IW_MODE_INFRA
) {
680 fc
|= RTLLIB_FCTL_TODS
;
681 /* To DS: Addr1 = BSSID, Addr2 = SA,
683 memcpy(&header
.addr1
, ieee
->current_network
.bssid
,
685 memcpy(&header
.addr2
, &src
, ETH_ALEN
);
687 memcpy(&header
.addr3
,
688 ieee
->current_network
.bssid
, ETH_ALEN
);
690 memcpy(&header
.addr3
, &dest
, ETH_ALEN
);
691 } else if (ieee
->iw_mode
== IW_MODE_ADHOC
) {
692 /* not From/To DS: Addr1 = DA, Addr2 = SA,
694 memcpy(&header
.addr1
, dest
, ETH_ALEN
);
695 memcpy(&header
.addr2
, src
, ETH_ALEN
);
696 memcpy(&header
.addr3
, ieee
->current_network
.bssid
,
700 bIsMulticast
= is_multicast_ether_addr(header
.addr1
);
702 header
.frame_ctl
= cpu_to_le16(fc
);
704 /* Determine fragmentation size based on destination (multicast
705 * and broadcast are not fragmented) */
707 frag_size
= MAX_FRAG_THRESHOLD
;
708 qos_ctl
|= QOS_CTL_NOTCONTAIN_ACK
;
710 frag_size
= ieee
->fts
;
715 hdr_len
= RTLLIB_3ADDR_LEN
+ 2;
717 /* in case we are a client verify acm is not set for this ac */
718 while (unlikely(ieee
->wmm_acm
& (0x01 << skb
->priority
))) {
719 netdev_info(ieee
->dev
, "skb->priority = %x\n",
721 if (wme_downgrade_ac(skb
))
723 netdev_info(ieee
->dev
, "converted skb->priority = %x\n",
726 qos_ctl
|= skb
->priority
;
727 header
.qos_ctl
= cpu_to_le16(qos_ctl
& RTLLIB_QOS_TID
);
729 hdr_len
= RTLLIB_3ADDR_LEN
;
731 /* Determine amount of payload per fragment. Regardless of if
732 * this stack is providing the full 802.11 header, one will
733 * eventually be affixed to this fragment -- so we must account
734 * for it when determining the amount of payload space. */
735 bytes_per_frag
= frag_size
- hdr_len
;
737 (CFG_RTLLIB_COMPUTE_FCS
| CFG_RTLLIB_RESERVE_FCS
))
738 bytes_per_frag
-= RTLLIB_FCS_LEN
;
740 /* Each fragment may need to have room for encrypting
743 bytes_per_frag
-= crypt
->ops
->extra_mpdu_prefix_len
+
744 crypt
->ops
->extra_mpdu_postfix_len
+
745 crypt
->ops
->extra_msdu_prefix_len
+
746 crypt
->ops
->extra_msdu_postfix_len
;
748 /* Number of fragments is the total bytes_per_frag /
749 * payload_per_fragment */
750 nr_frags
= bytes
/ bytes_per_frag
;
751 bytes_last_frag
= bytes
% bytes_per_frag
;
755 bytes_last_frag
= bytes_per_frag
;
757 /* When we allocate the TXB we allocate enough space for the
758 * reserve and full fragment bytes (bytes_per_frag doesn't
759 * include prefix, postfix, header, FCS, etc.) */
760 txb
= rtllib_alloc_txb(nr_frags
, frag_size
+
761 ieee
->tx_headroom
, GFP_ATOMIC
);
762 if (unlikely(!txb
)) {
763 netdev_warn(ieee
->dev
, "Could not allocate TXB\n");
766 txb
->encrypted
= encrypt
;
767 txb
->payload_size
= cpu_to_le16(bytes
);
770 txb
->queue_index
= UP2AC(skb
->priority
);
772 txb
->queue_index
= WME_AC_BE
;
774 for (i
= 0; i
< nr_frags
; i
++) {
775 skb_frag
= txb
->fragments
[i
];
776 tcb_desc
= (struct cb_desc
*)(skb_frag
->cb
+
779 skb_frag
->priority
= skb
->priority
;
780 tcb_desc
->queue_index
= UP2AC(skb
->priority
);
782 skb_frag
->priority
= WME_AC_BE
;
783 tcb_desc
->queue_index
= WME_AC_BE
;
785 skb_reserve(skb_frag
, ieee
->tx_headroom
);
788 if (ieee
->hwsec_active
)
789 tcb_desc
->bHwSec
= 1;
791 tcb_desc
->bHwSec
= 0;
792 skb_reserve(skb_frag
,
793 crypt
->ops
->extra_mpdu_prefix_len
+
794 crypt
->ops
->extra_msdu_prefix_len
);
796 tcb_desc
->bHwSec
= 0;
798 frag_hdr
= (struct rtllib_hdr_3addrqos
*)
799 skb_put(skb_frag
, hdr_len
);
800 memcpy(frag_hdr
, &header
, hdr_len
);
802 /* If this is not the last fragment, then add the
803 * MOREFRAGS bit to the frame control */
804 if (i
!= nr_frags
- 1) {
805 frag_hdr
->frame_ctl
= cpu_to_le16(
806 fc
| RTLLIB_FCTL_MOREFRAGS
);
807 bytes
= bytes_per_frag
;
810 /* The last fragment has the remaining length */
811 bytes
= bytes_last_frag
;
813 if ((qos_actived
) && (!bIsMulticast
)) {
815 cpu_to_le16(rtllib_query_seqnum(ieee
, skb_frag
,
818 cpu_to_le16(le16_to_cpu(frag_hdr
->seq_ctl
)<<4 | i
);
821 cpu_to_le16(ieee
->seq_ctrl
[0]<<4 | i
);
823 /* Put a SNAP header on the first fragment */
826 skb_put(skb_frag
, SNAP_SIZE
+
827 sizeof(u16
)), ether_type
);
828 bytes
-= SNAP_SIZE
+ sizeof(u16
);
831 memcpy(skb_put(skb_frag
, bytes
), skb
->data
, bytes
);
833 /* Advance the SKB... */
834 skb_pull(skb
, bytes
);
836 /* Encryption routine will move the header forward in
837 * order to insert the IV between the header and the
840 rtllib_encrypt_fragment(ieee
, skb_frag
,
843 (CFG_RTLLIB_COMPUTE_FCS
| CFG_RTLLIB_RESERVE_FCS
))
844 skb_put(skb_frag
, 4);
847 if ((qos_actived
) && (!bIsMulticast
)) {
848 if (ieee
->seq_ctrl
[UP2AC(skb
->priority
) + 1] == 0xFFF)
849 ieee
->seq_ctrl
[UP2AC(skb
->priority
) + 1] = 0;
851 ieee
->seq_ctrl
[UP2AC(skb
->priority
) + 1]++;
853 if (ieee
->seq_ctrl
[0] == 0xFFF)
854 ieee
->seq_ctrl
[0] = 0;
859 if (unlikely(skb
->len
< sizeof(struct rtllib_hdr_3addr
))) {
860 netdev_warn(ieee
->dev
, "skb too small (%d).\n",
865 txb
= rtllib_alloc_txb(1, skb
->len
, GFP_ATOMIC
);
867 netdev_warn(ieee
->dev
, "Could not allocate TXB\n");
872 txb
->payload_size
= cpu_to_le16(skb
->len
);
873 memcpy(skb_put(txb
->fragments
[0], skb
->len
), skb
->data
,
879 struct cb_desc
*tcb_desc
= (struct cb_desc
*)
880 (txb
->fragments
[0]->cb
+ MAX_DEV_ADDR_SIZE
);
881 tcb_desc
->bTxEnableFwCalcDur
= 1;
882 tcb_desc
->priority
= skb
->priority
;
884 if (ether_type
== ETH_P_PAE
) {
885 if (ieee
->pHTInfo
->IOTAction
&
886 HT_IOT_ACT_WA_IOT_Broadcom
) {
887 tcb_desc
->data_rate
=
888 MgntQuery_TxRateExcludeCCKRates(ieee
);
889 tcb_desc
->bTxDisableRateFallBack
= false;
891 tcb_desc
->data_rate
= ieee
->basic_rate
;
892 tcb_desc
->bTxDisableRateFallBack
= 1;
896 tcb_desc
->RATRIndex
= 7;
897 tcb_desc
->bTxUseDriverAssingedRate
= 1;
899 if (is_multicast_ether_addr(header
.addr1
))
900 tcb_desc
->bMulticast
= 1;
901 if (is_broadcast_ether_addr(header
.addr1
))
902 tcb_desc
->bBroadcast
= 1;
903 rtllib_txrate_selectmode(ieee
, tcb_desc
);
904 if (tcb_desc
->bMulticast
|| tcb_desc
->bBroadcast
)
905 tcb_desc
->data_rate
= ieee
->basic_rate
;
907 tcb_desc
->data_rate
= CURRENT_RATE(ieee
->mode
,
908 ieee
->rate
, ieee
->HTCurrentOperaRate
);
911 if (ieee
->pHTInfo
->IOTAction
&
912 HT_IOT_ACT_WA_IOT_Broadcom
) {
913 tcb_desc
->data_rate
=
914 MgntQuery_TxRateExcludeCCKRates(ieee
);
915 tcb_desc
->bTxDisableRateFallBack
= false;
917 tcb_desc
->data_rate
= MGN_1M
;
918 tcb_desc
->bTxDisableRateFallBack
= 1;
922 tcb_desc
->RATRIndex
= 7;
923 tcb_desc
->bTxUseDriverAssingedRate
= 1;
927 rtllib_qurey_ShortPreambleMode(ieee
, tcb_desc
);
928 rtllib_tx_query_agg_cap(ieee
, txb
->fragments
[0],
930 rtllib_query_HTCapShortGI(ieee
, tcb_desc
);
931 rtllib_query_BandwidthMode(ieee
, tcb_desc
);
932 rtllib_query_protectionmode(ieee
, tcb_desc
,
936 spin_unlock_irqrestore(&ieee
->lock
, flags
);
937 dev_kfree_skb_any(skb
);
939 if (ieee
->softmac_features
& IEEE_SOFTMAC_TX_QUEUE
) {
940 dev
->stats
.tx_packets
++;
941 dev
->stats
.tx_bytes
+= le16_to_cpu(txb
->payload_size
);
942 rtllib_softmac_xmit(txb
, ieee
);
944 if ((*ieee
->hard_start_xmit
)(txb
, dev
) == 0) {
946 stats
->tx_bytes
+= le16_to_cpu(txb
->payload_size
);
949 rtllib_txb_free(txb
);
956 spin_unlock_irqrestore(&ieee
->lock
, flags
);
957 netif_stop_queue(dev
);
962 int rtllib_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
964 memset(skb
->cb
, 0, sizeof(skb
->cb
));
965 return rtllib_xmit_inter(skb
, dev
);
967 EXPORT_SYMBOL(rtllib_xmit
);