/* drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c */
/******************************************************************************
 *
 * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 ******************************************************************************/
#define _RTL8188E_XMIT_C_
#include <osdep_service.h>
#include <drv_types.h>
#include <mon.h>
#include <wifi.h>
#include <osdep_intf.h>
#include <usb_ops_linux.h>
#include <rtl8188e_hal.h>

s32 rtw_hal_init_xmit_priv(struct adapter *adapt)
{
	struct xmit_priv *pxmitpriv = &adapt->xmitpriv;

	tasklet_init(&pxmitpriv->xmit_tasklet,
		     (void (*)(unsigned long))rtl8188eu_xmit_tasklet,
		     (unsigned long)adapt);
	return _SUCCESS;
}

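/*
 * Returns 1 when the frame plus its TX descriptor is an exact multiple of
 * the USB bulk-out size, 0 otherwise.  update_txdesc() uses the result to
 * decide whether the 8-byte PACKET_OFFSET_SZ padding reserved in front of
 * the descriptor is kept or dropped ("pulled").
 */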
static u8 urb_zero_packet_chk(struct adapter *adapt, int sz)
{
	u8 set_tx_desc_offset;
	struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);

	set_tx_desc_offset = (((sz + TXDESC_SIZE) % haldata->UsbBulkOutSize) == 0) ? 1 : 0;

	return set_tx_desc_offset;
}

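/*
 * XOR the first 32 bytes of the descriptor together as 16-bit words and
 * store the result in the low half of txdw7.  The USB interface drops
 * descriptors whose checksum does not match.
 */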
static void rtl8188eu_cal_txdesc_chksum(struct tx_desc *ptxdesc)
{
	u16 *usptr = (u16 *)ptxdesc;
	u32 count = 16;		/* (32 bytes / 2 bytes per XOR) => 16 times */
	u32 index;
	u16 checksum = 0;

	/* Clear first */
	ptxdesc->txdw7 &= cpu_to_le32(0xffff0000);

	for (index = 0; index < count; index++)
		checksum = checksum ^ le16_to_cpu(*(__le16 *)(usptr + index));
	ptxdesc->txdw7 |= cpu_to_le32(0x0000ffff & checksum);
}

/*
 * Description: On a normal chip we must hand some packets to the hardware
 * for the firmware to use while in FW LPS mode.  This function fills the TX
 * descriptor of those packets so the firmware can tell the hardware to send
 * them directly.
 */
void rtl8188e_fill_fake_txdesc(struct adapter *adapt, u8 *desc, u32 BufferLen, u8 ispspoll, u8 is_btqosnull)
{
	struct tx_desc *ptxdesc;

	/* Clear all status */
	ptxdesc = (struct tx_desc *)desc;
	memset(desc, 0, TXDESC_SIZE);

	/* offset 0 */
	ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG); /* own, bFirstSeg, bLastSeg */

	ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE + OFFSET_SZ) << OFFSET_SHT) & 0x00ff0000); /* 32 bytes for TX Desc */

	ptxdesc->txdw0 |= cpu_to_le32(BufferLen & 0x0000ffff); /* Buffer size + command header */

	/* offset 4 */
	ptxdesc->txdw1 |= cpu_to_le32((QSLT_MGNT << QSEL_SHT) & 0x00001f00); /* Fixed queue of Mgnt queue */

	/* Set NAVUSEHDR to prevent the hardware from changing the PS-Poll AID field to a wrong value. */
	if (ispspoll) {
		ptxdesc->txdw1 |= cpu_to_le32(NAVUSEHDR);
	} else {
		ptxdesc->txdw4 |= cpu_to_le32(BIT(7)); /* Hw set sequence number */
		ptxdesc->txdw3 |= cpu_to_le32((8 << 28)); /* set bit3 to 1. Suggested by TimChen. 2009.12.29. */
	}

	if (is_btqosnull)
		ptxdesc->txdw2 |= cpu_to_le32(BIT(23)); /* BT NULL */

	/* offset 16 */
	ptxdesc->txdw4 |= cpu_to_le32(BIT(8)); /* driver uses rate */

	/* The USB interface drops the packet if the descriptor checksum is not correct. */
	/* This checksum lets the hardware recover from bulk-out errors (e.g. cancelled URB, bulk-out error). */
	rtl8188eu_cal_txdesc_chksum(ptxdesc);
}

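/*
 * Encode the pairwise cipher in the SEC_TYPE field of txdw1 and set the
 * A-MPDU density field of txdw2 when hardware encryption is used; frames
 * encrypted in software leave the descriptor untouched.
 */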
static void fill_txdesc_sectype(struct pkt_attrib *pattrib, struct tx_desc *ptxdesc)
{
	if ((pattrib->encrypt > 0) && !pattrib->bswenc) {
		switch (pattrib->encrypt) {
		/* SEC_TYPE : 0:NO_ENC, 1:WEP40/TKIP, 2:WAPI, 3:AES */
		case _WEP40_:
		case _WEP104_:
			ptxdesc->txdw1 |= cpu_to_le32((0x01 << SEC_TYPE_SHT) & 0x00c00000);
			ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
			break;
		case _TKIP_:
		case _TKIP_WTMIC_:
			ptxdesc->txdw1 |= cpu_to_le32((0x01 << SEC_TYPE_SHT) & 0x00c00000);
			ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
			break;
		case _AES_:
			ptxdesc->txdw1 |= cpu_to_le32((0x03 << SEC_TYPE_SHT) & 0x00c00000);
			ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
			break;
		case _NO_PRIVACY_:
		default:
			break;
		}
	}
}

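/*
 * Set the RTS/CTS (virtual carrier sense) protection bits in txdw4 according
 * to the vcs_mode chosen for this frame; for HT frames also set the RTS
 * bandwidth and subcarrier selection.
 */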
static void fill_txdesc_vcs(struct pkt_attrib *pattrib, __le32 *pdw)
{
	switch (pattrib->vcs_mode) {
	case RTS_CTS:
		*pdw |= cpu_to_le32(RTS_EN);
		break;
	case CTS_TO_SELF:
		*pdw |= cpu_to_le32(CTS_2_SELF);
		break;
	case NONE_VCS:
	default:
		break;
	}
	if (pattrib->vcs_mode) {
		*pdw |= cpu_to_le32(HW_RTS_EN);
		/* Set RTS BW */
		if (pattrib->ht_en) {
			*pdw |= (pattrib->bwmode & HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(27)) : 0;

			if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
				*pdw |= cpu_to_le32((0x01 << 28) & 0x30000000);
			else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
				*pdw |= cpu_to_le32((0x02 << 28) & 0x30000000);
			else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
				*pdw |= 0;
			else
				*pdw |= cpu_to_le32((0x03 << 28) & 0x30000000);
		}
	}
}

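/*
 * Set the data bandwidth (40 MHz) and data subcarrier (DATA_SC) bits in
 * txdw4 for HT frames, based on the prime channel offset.
 */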
static void fill_txdesc_phy(struct pkt_attrib *pattrib, __le32 *pdw)
{
	if (pattrib->ht_en) {
		*pdw |= (pattrib->bwmode & HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(25)) : 0;

		if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
			*pdw |= cpu_to_le32((0x01 << DATA_SC_SHT) & 0x003f0000);
		else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
			*pdw |= cpu_to_le32((0x02 << DATA_SC_SHT) & 0x003f0000);
		else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
			*pdw |= 0;
		else
			*pdw |= cpu_to_le32((0x03 << DATA_SC_SHT) & 0x003f0000);
	}
}

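/*
 * Fill in the 32-byte TX descriptor at pmem for one frame: packet size and
 * offset, queue selection, rate control, security, aggregation and sequence
 * number handling.  Returns 1 when the descriptor has been shifted forward
 * by PACKET_OFFSET_SZ ("pulled"), 0 otherwise.
 */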
static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bagg_pkt)
{
	int pull = 0;
	uint qsel;
	u8 data_rate, pwr_status, offset;
	struct adapter *adapt = pxmitframe->padapter;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
	struct tx_desc *ptxdesc = (struct tx_desc *)pmem;
	struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	int bmcst = IS_MCAST(pattrib->ra);

	if (adapt->registrypriv.mp_mode == 0) {
		if ((!bagg_pkt) && (urb_zero_packet_chk(adapt, sz) == 0)) {
			ptxdesc = (struct tx_desc *)(pmem + PACKET_OFFSET_SZ);
			pull = 1;
		}
	}

	memset(ptxdesc, 0, sizeof(struct tx_desc));

	/* 4 offset 0 */
	ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);
	ptxdesc->txdw0 |= cpu_to_le32(sz & 0x0000ffff); /* update TXPKTSIZE */

	offset = TXDESC_SIZE + OFFSET_SZ;

	ptxdesc->txdw0 |= cpu_to_le32((offset << OFFSET_SHT) & 0x00ff0000); /* 32 bytes for TX Desc */

	if (bmcst)
		ptxdesc->txdw0 |= cpu_to_le32(BMC);

	if (adapt->registrypriv.mp_mode == 0) {
		if (!bagg_pkt) {
			if ((pull) && (pxmitframe->pkt_offset > 0))
				pxmitframe->pkt_offset = pxmitframe->pkt_offset - 1;
		}
	}

	/* pkt_offset, unit: 8 bytes of padding */
	if (pxmitframe->pkt_offset > 0)
		ptxdesc->txdw1 |= cpu_to_le32((pxmitframe->pkt_offset << 26) & 0x7c000000);

	/* driver uses rate */
	ptxdesc->txdw4 |= cpu_to_le32(USERATE); /* rate control always by driver */

	if ((pxmitframe->frame_tag & 0x0f) == DATA_FRAMETAG) {
		/* offset 4 */
		ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id & 0x3F);

		qsel = (uint)(pattrib->qsel & 0x0000001f);
		ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);

		ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000F0000);

		fill_txdesc_sectype(pattrib, ptxdesc);

		if (pattrib->ampdu_en) {
			ptxdesc->txdw2 |= cpu_to_le32(AGG_EN); /* AGG EN */
			ptxdesc->txdw6 = cpu_to_le32(0x6666f800);
		} else {
			ptxdesc->txdw2 |= cpu_to_le32(AGG_BK); /* AGG BK */
		}

		/* offset 8 */

		/* offset 12 */
		ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum << SEQ_SHT) & 0x0FFF0000);

		/* offset 16, offset 20 */
		if (pattrib->qos_en)
			ptxdesc->txdw4 |= cpu_to_le32(QOS); /* QoS */

		/* offset 20 */
		if (pxmitframe->agg_num > 1)
			ptxdesc->txdw5 |= cpu_to_le32((pxmitframe->agg_num << USB_TXAGG_NUM_SHT) & 0xFF000000);

		if ((pattrib->ether_type != 0x888e) &&
		    (pattrib->ether_type != 0x0806) &&
		    (pattrib->ether_type != 0x88b4) &&
		    (pattrib->dhcp_pkt != 1)) {
			/* Data packet that is not EAP, ARP or DHCP */

			fill_txdesc_vcs(pattrib, &ptxdesc->txdw4);
			fill_txdesc_phy(pattrib, &ptxdesc->txdw4);

			ptxdesc->txdw4 |= cpu_to_le32(0x00000008); /* RTS Rate=24M */
			ptxdesc->txdw5 |= cpu_to_le32(0x0001ff00); /* DATA/RTS Rate FB LMT */

			if (pattrib->ht_en) {
				if (ODM_RA_GetShortGI_8188E(&haldata->odmpriv, pattrib->mac_id))
					ptxdesc->txdw5 |= cpu_to_le32(SGI); /* SGI */
			}
			data_rate = ODM_RA_GetDecisionRate_8188E(&haldata->odmpriv, pattrib->mac_id);
			ptxdesc->txdw5 |= cpu_to_le32(data_rate & 0x3F);
			pwr_status = ODM_RA_GetHwPwrStatus_8188E(&haldata->odmpriv, pattrib->mac_id);
			ptxdesc->txdw4 |= cpu_to_le32((pwr_status & 0x7) << PWR_STATUS_SHT);
		} else {
			/* EAP, ARP and DHCP packets: */
			/* use the 1M data rate to send them. */
			/* This may make the handshake smoother. */
			ptxdesc->txdw2 |= cpu_to_le32(AGG_BK); /* AGG BK */
			if (pmlmeinfo->preamble_mode == PREAMBLE_SHORT)
				ptxdesc->txdw4 |= cpu_to_le32(BIT(24)); /* DATA_SHORT */
			ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
		}
	} else if ((pxmitframe->frame_tag & 0x0f) == MGNT_FRAMETAG) {
		/* offset 4 */
		ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id & 0x3f);

		qsel = (uint)(pattrib->qsel & 0x0000001f);
		ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);

		ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000f0000);

		/* offset 8 */
		/* CCX-TXRPT ack for xmit mgmt frames. */
		if (pxmitframe->ack_report)
			ptxdesc->txdw2 |= cpu_to_le32(BIT(19));

		/* offset 12 */
		ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum << SEQ_SHT) & 0x0FFF0000);

		/* offset 20 */
		ptxdesc->txdw5 |= cpu_to_le32(RTY_LMT_EN); /* retry limit enable */
		if (pattrib->retry_ctrl)
			ptxdesc->txdw5 |= cpu_to_le32(0x00180000); /* retry limit = 6 */
		else
			ptxdesc->txdw5 |= cpu_to_le32(0x00300000); /* retry limit = 12 */

		ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
	} else if ((pxmitframe->frame_tag & 0x0f) == TXAGG_FRAMETAG) {
		DBG_88E("pxmitframe->frame_tag == TXAGG_FRAMETAG\n");
	} else {
		DBG_88E("pxmitframe->frame_tag = %d\n", pxmitframe->frame_tag);

		/* offset 4 */
		ptxdesc->txdw1 |= cpu_to_le32((4) & 0x3f); /* CAM_ID(MAC_ID) */

		ptxdesc->txdw1 |= cpu_to_le32((6 << RATE_ID_SHT) & 0x000f0000); /* raid */

		/* offset 8 */

		/* offset 12 */
		ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum << SEQ_SHT) & 0x0fff0000);

		/* offset 20 */
		ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
	}

	/* 2009.11.05. tynli_test. Suggested by SD4 Filen for FW LPS. */
	/* (1) The sequence number of each non-QoS / broadcast / multicast / */
	/* mgnt frame should be controlled by hardware, because the firmware also sends null data */
	/* which we cannot control when FW LPS is enabled. */
	/* --> enable hardware sequence numbers for non-QoS data by default. 2010.06.23. by tynli. */
	/* (2) Enable HW SEQ control for beacon packets, because we use the hardware beacon. */
	/* (3) Use HW QoS SEQ to control the seq num of Ext port non-QoS packets. */
	/* 2010.06.23. Added by tynli. */
	if (!pattrib->qos_en) {
		ptxdesc->txdw3 |= cpu_to_le32(EN_HWSEQ); /* Hw set sequence number */
		ptxdesc->txdw4 |= cpu_to_le32(HW_SSN);   /* Hw set sequence number */
	}

	rtl88eu_dm_set_tx_ant_by_tx_info(&haldata->odmpriv, pmem,
					 pattrib->mac_id);

	rtl8188eu_cal_txdesc_chksum(ptxdesc);
	_dbg_dump_tx_info(adapt, pxmitframe->frame_tag, ptxdesc);
	return pull;
}

/* for non-aggregated data frames or management frames */
static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	s32 ret = _SUCCESS;
	s32 inner_ret = _SUCCESS;
	int t, sz, w_sz, pull = 0;
	u8 *mem_addr;
	u32 ff_hwaddr;
	struct xmit_buf *pxmitbuf = pxmitframe->pxmitbuf;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
	struct security_priv *psecuritypriv = &adapt->securitypriv;

	if ((pxmitframe->frame_tag == DATA_FRAMETAG) &&
	    (pxmitframe->attrib.ether_type != 0x0806) &&
	    (pxmitframe->attrib.ether_type != 0x888e) &&
	    (pxmitframe->attrib.ether_type != 0x88b4) &&
	    (pxmitframe->attrib.dhcp_pkt != 1))
		rtw_issue_addbareq_cmd(adapt, pxmitframe);
	mem_addr = pxmitframe->buf_addr;

	RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_dump_xframe()\n"));

	for (t = 0; t < pattrib->nr_frags; t++) {
		if (inner_ret != _SUCCESS && ret == _SUCCESS)
			ret = _FAIL;

		if (t != (pattrib->nr_frags - 1)) {
			RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("pattrib->nr_frags=%d\n", pattrib->nr_frags));

			sz = pxmitpriv->frag_len;
			sz = sz - 4 - (psecuritypriv->sw_encrypt ? 0 : pattrib->icv_len);
		} else {
			/* no frag */
			sz = pattrib->last_txcmdsz;
		}

		pull = update_txdesc(pxmitframe, mem_addr, sz, false);

		if (pull) {
			mem_addr += PACKET_OFFSET_SZ; /* pull txdesc head */
			pxmitframe->buf_addr = mem_addr;
			w_sz = sz + TXDESC_SIZE;
		} else {
			w_sz = sz + TXDESC_SIZE + PACKET_OFFSET_SZ;
		}
		ff_hwaddr = rtw_get_ff_hwaddr(pxmitframe);

		inner_ret = usb_write_port(adapt, ff_hwaddr, w_sz, (unsigned char *)pxmitbuf);

		rtw_count_tx_stats(adapt, pxmitframe, sz);

		RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("usb_write_port, w_sz=%d\n", w_sz));

		mem_addr += w_sz;

		mem_addr = (u8 *)round_up((size_t)mem_addr, 4);
	}

	rtw_free_xmitframe(pxmitpriv, pxmitframe);

	if (ret != _SUCCESS)
		rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_UNKNOWN);

	return ret;
}

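/*
 * Estimate the buffer length one frame will occupy (header + IV + SNAP +
 * payload + software ICV, plus the TKIP MIC), ignoring fragmentation.
 * Used when packing frames into a USB TX aggregate.
 */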
static u32 xmitframe_need_length(struct xmit_frame *pxmitframe)
{
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	u32 len;

	/* fragmentation is not considered */
	len = pattrib->hdrlen + pattrib->iv_len +
	      SNAP_SIZE + sizeof(u16) +
	      pattrib->pktlen +
	      ((pattrib->bswenc) ? pattrib->icv_len : 0);

	if (pattrib->encrypt == _TKIP_)
		len += 8;

	return len;
}

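/*
 * Aggregate pending frames of the same priority and destination into one
 * xmit buffer (each with its own TX descriptor, 8-byte aligned), then push
 * the whole aggregate to the USB bulk-out endpoint in a single write.
 * Returns true when a buffer was submitted, false when nothing was sent.
 */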
s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitpriv)
{
	struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
	struct xmit_frame *pxmitframe = NULL;
	struct xmit_frame *pfirstframe = NULL;
	struct xmit_buf *pxmitbuf;

	/* aggregate variable */
	struct hw_xmit *phwxmit;
	struct sta_info *psta = NULL;
	struct tx_servq *ptxservq = NULL;

	struct list_head *xmitframe_plist = NULL, *xmitframe_phead = NULL;

	u32 pbuf;	/* next pkt address */
	u32 pbuf_tail;	/* last pkt tail */
	u32 len;	/* packet length, except TXDESC_SIZE and PKT_OFFSET */

	u32 bulksize = haldata->UsbBulkOutSize;
	u8 desc_cnt;
	u32 bulkptr;

	/* dump frame variable */
	u32 ff_hwaddr;

	RT_TRACE(_module_rtl8192c_xmit_c_, _drv_info_, ("+xmitframe_complete\n"));

	pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
	if (pxmitbuf == NULL)
		return false;

	/* 3 1. pick up first frame */
	rtw_free_xmitframe(pxmitpriv, pxmitframe);

	pxmitframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
	if (pxmitframe == NULL) {
		/* no more xmit frame, release xmit buffer */
		rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
		return false;
	}

	pxmitframe->pxmitbuf = pxmitbuf;
	pxmitframe->buf_addr = pxmitbuf->pbuf;
	pxmitbuf->priv_data = pxmitframe;

	pxmitframe->agg_num = 1; /* alloc xmitframe should assign to 1. */
	pxmitframe->pkt_offset = 1; /* first frame of aggregation, reserve offset */

	rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);

	/* always return ndis_packet after rtw_xmitframe_coalesce */
	rtw_os_xmit_complete(adapt, pxmitframe);

	/* 3 2. aggregate same priority and same DA(AP or STA) frames */
	pfirstframe = pxmitframe;
	len = xmitframe_need_length(pfirstframe) + TXDESC_SIZE + (pfirstframe->pkt_offset * PACKET_OFFSET_SZ);
	pbuf_tail = len;
	pbuf = round_up(pbuf_tail, 8);

	/* check pkt amount in one bulk */
	desc_cnt = 0;
	bulkptr = bulksize;
	if (pbuf < bulkptr) {
		desc_cnt++;
	} else {
		desc_cnt = 0;
		bulkptr = ((pbuf / bulksize) + 1) * bulksize; /* round to next bulksize */
	}

	/* dequeue same priority packet from station tx queue */
	psta = pfirstframe->attrib.psta;
	switch (pfirstframe->attrib.priority) {
	case 1:
	case 2:
		ptxservq = &(psta->sta_xmitpriv.bk_q);
		phwxmit = pxmitpriv->hwxmits + 3;
		break;
	case 4:
	case 5:
		ptxservq = &(psta->sta_xmitpriv.vi_q);
		phwxmit = pxmitpriv->hwxmits + 1;
		break;
	case 6:
	case 7:
		ptxservq = &(psta->sta_xmitpriv.vo_q);
		phwxmit = pxmitpriv->hwxmits;
		break;
	case 0:
	case 3:
	default:
		ptxservq = &(psta->sta_xmitpriv.be_q);
		phwxmit = pxmitpriv->hwxmits + 2;
		break;
	}

	spin_lock_bh(&pxmitpriv->lock);

	xmitframe_phead = get_list_head(&ptxservq->sta_pending);
	xmitframe_plist = xmitframe_phead->next;

	while (xmitframe_phead != xmitframe_plist) {
		pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
		xmitframe_plist = xmitframe_plist->next;

		pxmitframe->agg_num = 0; /* not first frame of aggregation */
		pxmitframe->pkt_offset = 0; /* not first frame of aggregation, no need to reserve offset */

		len = xmitframe_need_length(pxmitframe) + TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);

		if (round_up(pbuf + len, 8) > MAX_XMITBUF_SZ) {
			pxmitframe->agg_num = 1;
			pxmitframe->pkt_offset = 1;
			break;
		}
		list_del_init(&pxmitframe->list);
		ptxservq->qcnt--;
		phwxmit->accnt--;

		pxmitframe->buf_addr = pxmitbuf->pbuf + pbuf;

		rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
		/* always return ndis_packet after rtw_xmitframe_coalesce */
		rtw_os_xmit_complete(adapt, pxmitframe);

		/* (len - TXDESC_SIZE) == pxmitframe->attrib.last_txcmdsz */
		update_txdesc(pxmitframe, pxmitframe->buf_addr, pxmitframe->attrib.last_txcmdsz, true);

		/* don't need xmitframe any more */
		rtw_free_xmitframe(pxmitpriv, pxmitframe);

		/* handle pointer and stop condition */
		pbuf_tail = pbuf + len;
		pbuf = round_up(pbuf_tail, 8);

		pfirstframe->agg_num++;
		if (MAX_TX_AGG_PACKET_NUMBER == pfirstframe->agg_num)
			break;

		if (pbuf < bulkptr) {
			desc_cnt++;
			if (desc_cnt == haldata->UsbTxAggDescNum)
				break;
		} else {
			desc_cnt = 0;
			bulkptr = ((pbuf / bulksize) + 1) * bulksize;
		}
	} /* end while (aggregate same priority and same DA(AP or STA) frames) */

	if (list_empty(&ptxservq->sta_pending.queue))
		list_del_init(&ptxservq->tx_pending);

	spin_unlock_bh(&pxmitpriv->lock);

	if ((pfirstframe->attrib.ether_type != 0x0806) &&
	    (pfirstframe->attrib.ether_type != 0x888e) &&
	    (pfirstframe->attrib.ether_type != 0x88b4) &&
	    (pfirstframe->attrib.dhcp_pkt != 1))
		rtw_issue_addbareq_cmd(adapt, pfirstframe);

	/* 3 3. update first frame txdesc */
	if ((pbuf_tail % bulksize) == 0) {
		/* remove pkt_offset */
		pbuf_tail -= PACKET_OFFSET_SZ;
		pfirstframe->buf_addr += PACKET_OFFSET_SZ;
		pfirstframe->pkt_offset--;
	}

	update_txdesc(pfirstframe, pfirstframe->buf_addr, pfirstframe->attrib.last_txcmdsz, true);

	/* 3 4. write xmit buffer to USB FIFO */
	ff_hwaddr = rtw_get_ff_hwaddr(pfirstframe);
	usb_write_port(adapt, ff_hwaddr, pbuf_tail, (u8 *)pxmitbuf);

	/* 3 5. update statistics */
	pbuf_tail -= (pfirstframe->agg_num * TXDESC_SIZE);
	pbuf_tail -= (pfirstframe->pkt_offset * PACKET_OFFSET_SZ);

	rtw_count_tx_stats(adapt, pfirstframe, pbuf_tail);

	rtw_free_xmitframe(pxmitpriv, pfirstframe);

	return true;
}

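/*
 * Coalesce the frame into its xmit buffer and hand it straight to
 * rtw_dump_xframe(), bypassing the software tx queues.
 */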
static s32 xmitframe_direct(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	s32 res;

	res = rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
	if (res == _SUCCESS)
		rtw_dump_xframe(adapt, pxmitframe);
	else
		DBG_88E("==> %s xmitframe_coalesce failed\n", __func__);
	return res;
}

/*
 * Return
 * true		dump packet directly
 * false	enqueue packet
 */
s32 rtw_hal_xmit(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	s32 res;
	struct xmit_buf *pxmitbuf = NULL;
	struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	struct mlme_priv *pmlmepriv = &adapt->mlmepriv;

	spin_lock_bh(&pxmitpriv->lock);

	if (rtw_txframes_sta_ac_pending(adapt, pattrib) > 0)
		goto enqueue;

	if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING) == true)
		goto enqueue;

	pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
	if (!pxmitbuf)
		goto enqueue;

	spin_unlock_bh(&pxmitpriv->lock);

	pxmitframe->pxmitbuf = pxmitbuf;
	pxmitframe->buf_addr = pxmitbuf->pbuf;
	pxmitbuf->priv_data = pxmitframe;

	if (xmitframe_direct(adapt, pxmitframe) != _SUCCESS) {
		rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
		rtw_free_xmitframe(pxmitpriv, pxmitframe);
	}

	return true;

enqueue:
	res = rtw_xmitframe_enqueue(adapt, pxmitframe);
	spin_unlock_bh(&pxmitpriv->lock);

	if (res != _SUCCESS) {
		RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("pre_xmitframe: enqueue xmitframe fail\n"));
		rtw_free_xmitframe(pxmitpriv, pxmitframe);

		/* Trick, make the statistics correct */
		pxmitpriv->tx_pkts--;
		pxmitpriv->tx_drop++;
		return true;
	}

	return false;
}

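/*
 * Transmit a management frame.  A copy is also passed to the monitor
 * interface (if one is registered) via rtl88eu_mon_xmit_hook() before the
 * frame is dumped to the USB FIFO.
 */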
s32 rtw_hal_mgnt_xmit(struct adapter *adapt, struct xmit_frame *pmgntframe)
{
	struct xmit_priv *xmitpriv = &adapt->xmitpriv;

	rtl88eu_mon_xmit_hook(adapt->pmondev, pmgntframe, xmitpriv->frag_len);
	return rtw_dump_xframe(adapt, pmgntframe);
}