Merge tag 'f2fs-fixes-3.16' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk...
[deliverable/linux.git] / drivers / staging / rtl8192ee / pci.c
CommitLineData
78de2c06
LF
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called LICENSE.
16 *
17 * Contact Information:
18 * wlanfae <wlanfae@realtek.com>
19 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
20 * Hsinchu 300, Taiwan.
21 *
22 * Larry Finger <Larry.Finger@lwfinger.net>
23 *
24 *****************************************************************************/
25
26#include "core.h"
27#include "wifi.h"
28#include "pci.h"
29#include "base.h"
30#include "ps.h"
31#include "efuse.h"
32
/* PCI bridge vendor IDs, indexed by the PCI_BRIDGE_VENDOR_* enum values;
 * used to select per-vendor ASPM handling. */
static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
	INTEL_VENDOR_ID,
	ATI_VENDOR_ID,
	AMD_VENDOR_ID,
	SIS_VENDOR_ID
};
39
/* Map a mac80211 AC queue index (0..3 = VO/VI/BE/BK) to the
 * corresponding hardware TX queue number. */
static const u8 ac_to_hwq[] = {
	VO_QUEUE,
	VI_QUEUE,
	BE_QUEUE,
	BK_QUEUE
};
46
47static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw,
48 struct sk_buff *skb)
49{
50 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
51 __le16 fc = rtl_get_fc(skb);
52 u8 queue_index = skb_get_queue_mapping(skb);
53
54 if (unlikely(ieee80211_is_beacon(fc)))
55 return BEACON_QUEUE;
56 if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
57 return MGNT_QUEUE;
58 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
59 if (ieee80211_is_nullfunc(fc))
60 return HIGH_QUEUE;
61
62 return ac_to_hwq[queue_index];
63}
64
/* Update PCI dependent default settings.
 *
 * Derives the driver's ASPM / radio-off policy flags from the constants
 * stored in rtl_pci (const_pci_aspm, const_hwsw_rfoff_d3,
 * const_support_pciaspm) and records them in ppsc->reg_rfps_level and
 * ppsc->b_support_aspm.  Called from rtl_pci_init_aspm() before any
 * ASPM register is touched.
 */
static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
	u8 init_aspm;

	ppsc->reg_rfps_level = 0;
	ppsc->b_support_aspm = 0;

	/* Update PCI ASPM setting from the configured policy. */
	ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
	switch (rtlpci->const_pci_aspm) {
	case 0:
		/* No ASPM */
		break;

	case 1:
		/* ASPM dynamically enabled/disabled. */
		ppsc->reg_rfps_level |= RT_RF_LPS_LEVEL_ASPM;
		break;

	case 2:
		/* ASPM with Clock Req dynamically enabled/disabled. */
		ppsc->reg_rfps_level |= (RT_RF_LPS_LEVEL_ASPM |
					 RT_RF_OFF_LEVL_CLK_REQ);
		break;

	case 3:
		/* Always enable ASPM and Clock Req,
		 * from initialization to halt. */
		ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM);
		ppsc->reg_rfps_level |= (RT_RF_PS_LEVEL_ALWAYS_ASPM |
					 RT_RF_OFF_LEVL_CLK_REQ);
		break;

	case 4:
		/* Always enable ASPM without Clock Req,
		 * from initialization to halt. */
		ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM |
					  RT_RF_OFF_LEVL_CLK_REQ);
		ppsc->reg_rfps_level |= RT_RF_PS_LEVEL_ALWAYS_ASPM;
		break;
	}

	ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;

	/* Update radio-off setting (what to do when RF goes off / D3). */
	switch (rtlpci->const_hwsw_rfoff_d3) {
	case 1:
		if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
			ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
		break;

	case 2:
		if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
			ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
		ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
		break;

	case 3:
		ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_PCI_D3;
		break;
	}

	/* Set HW definition to determine if it supports ASPM. */
	switch (rtlpci->const_support_pciaspm) {
	case 0:{
			/* Not support ASPM. */
			bool b_support_aspm = false;
			ppsc->b_support_aspm = b_support_aspm;
			break;
	}
	case 1:{
			/* Support ASPM. */
			bool b_support_aspm = true;
			bool b_support_backdoor = true;
			ppsc->b_support_aspm = b_support_aspm;

			/*if (priv->oem_id == RT_CID_TOSHIBA &&
			   !priv->ndis_adapter.amd_l1_patch)
			   b_support_backdoor = false; */

			ppsc->b_support_backdoor = b_support_backdoor;

			break;
	}
	case 2:
		/* ASPM value set by chipset: only Intel bridges get it. */
		if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) {
			bool b_support_aspm = true;
			ppsc->b_support_aspm = b_support_aspm;
		}
		break;
	default:
		RT_TRACE(COMP_ERR, DBG_EMERG,
			 ("switch case not process\n"));
		break;
	}

	/* Toshiba ASPM quirk: the platform manages ASPM itself
	 * (config 0x80 == 0x43 on RTL8192SE), so the driver must not
	 * touch it. */
	pci_read_config_byte(rtlpci->pdev, 0x80, &init_aspm);
	if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8192SE &&
	    init_aspm == 0x43)
		ppsc->b_support_aspm = false;
}
179
180static bool _rtl_pci_platform_switch_device_pci_aspm(struct ieee80211_hw *hw,
181 u8 value)
182{
183 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
184 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
185 bool bresult = false;
186
187 if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)
188 value |= 0x40;
189
190 pci_write_config_byte(rtlpci->pdev, 0x80, value);
191
192 return bresult;
193}
194
195/*When we set 0x01 to enable clk request. Set 0x0 to disable clk req.*/
196static bool _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
197{
198 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
199 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
200 bool bresult = false;
201
202 pci_write_config_byte(rtlpci->pdev, 0x81, value);
203 bresult = true;
204
205 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
206 udelay(100);
207
208 return bresult;
209}
210
211/*Disable RTL8192SE ASPM & Disable Pci Bridge ASPM*/
212static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
213{
214 struct rtl_priv *rtlpriv = rtl_priv(hw);
215 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
216 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
217 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
218 u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
219 u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
220 u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
221 /*Retrieve original configuration settings. */
222 u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg;
223 u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter.
224 pcibridge_linkctrlreg;
225 u16 aspmlevel = 0;
226
227 if (!ppsc->b_support_aspm)
228 return;
229
230 if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
231 RT_TRACE(COMP_POWER, DBG_TRACE,
232 ("PCI(Bridge) UNKNOWN.\n"));
233
234 return;
235 }
236
237 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
238 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
239 _rtl_pci_switch_clk_req(hw, 0x0);
240 }
241
242 if (1) {
243 /*for promising device will in L0 state after an I/O. */
244 u8 tmp_u1b;
245 pci_read_config_byte(rtlpci->pdev, 0x80, &tmp_u1b);
246 }
247
248 /*Set corresponding value. */
249 aspmlevel |= BIT(0) | BIT(1);
250 linkctrl_reg &= ~aspmlevel;
251 pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1));
252
253 _rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg);
254 udelay(50);
255
256 /*4 Disable Pci Bridge ASPM */
257 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
258 pcicfg_addrport + (num4bytes << 2));
259 rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, pcibridge_linkctrlreg);
260
261 udelay(50);
262}
263
/*
 * Enable RTL8192SE ASPM and then enable PCI bridge ASPM for power
 * saving.  The sequence matters: enable the device side first, then
 * the bridge, or the system will crash (vendor note: bluescreen).
 */
static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
	u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
	u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
	u16 aspmlevel;
	u8 u_pcibridge_aspmsetting;
	u8 u_device_aspmsetting;

	if (!ppsc->b_support_aspm)
		return;

	if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
		RT_TRACE(COMP_POWER, DBG_TRACE,
			 ("PCI(Bridge) UNKNOWN.\n"));
		return;
	}

	/* Enable PCI bridge ASPM: select the dword holding the bridge's
	 * link-control register via the raw config address port. */
	rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
				     pcicfg_addrport + (num4bytes << 2));

	/* Merge the cached bridge value with the configured host
	 * ASPM setting. */
	u_pcibridge_aspmsetting =
	    pcipriv->ndis_adapter.pcibridge_linkctrlreg |
	    rtlpci->const_hostpci_aspm_setting;

	/* Intel bridges: do not enable L0s (clear bit 0). */
	if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL)
		u_pcibridge_aspmsetting &= ~BIT(0);

	rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, u_pcibridge_aspmsetting);

	RT_TRACE(COMP_INIT, DBG_LOUD,
		 ("PlatformEnableASPM(): Write reg[%x] = %x\n",
		  (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
		  u_pcibridge_aspmsetting));

	udelay(50);

	/* Get ASPM level (with/without Clock Req) and apply it to the
	 * device's cached link-control value. */
	aspmlevel = rtlpci->const_devicepci_aspm_setting;
	u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg;

	/*_rtl_pci_platform_switch_device_pci_aspm(dev,*/
	/*(priv->ndis_adapter.linkctrl_reg | ASPMLevel)); */

	u_device_aspmsetting |= aspmlevel;

	_rtl_pci_platform_switch_device_pci_aspm(hw, u_device_aspmsetting);

	/* Finally turn clock-request back on if the policy asks for it. */
	if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
		_rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level &
					     RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0);
		RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
	}
	udelay(100);
}
330
331static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
332{
333 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
334 u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
335
336 bool status = false;
337 u8 offset_e0;
338 unsigned offset_e4;
339
340 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS, pcicfg_addrport + 0xE0);
341 rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, 0xA0);
342
343 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS, pcicfg_addrport + 0xE0);
344 rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &offset_e0);
345
346 if (offset_e0 == 0xA0) {
347 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
348 pcicfg_addrport + 0xE4);
349 rtl_pci_raw_read_port_ulong(PCI_CONF_DATA, &offset_e4);
350 if (offset_e4 & BIT(23))
351 status = true;
352 }
353
354 return status;
355}
356
357static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
358 struct rtl_priv **buddy_priv)
359{
360 struct rtl_priv *rtlpriv = rtl_priv(hw);
361 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
362 bool find_buddy_priv = false;
363 struct rtl_priv *tpriv = NULL;
364 struct rtl_pci_priv *tpcipriv = NULL;
365
366 if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
367 list_for_each_entry(tpriv, &rtlpriv->glb_var->glb_priv_list,
368 list) {
369 if (tpriv == NULL)
370 break;
371
372 tpcipriv = (struct rtl_pci_priv *)tpriv->priv;
373 RT_TRACE(COMP_INIT, DBG_LOUD,
374 ("pcipriv->ndis_adapter.funcnumber %x\n",
375 pcipriv->ndis_adapter.funcnumber));
376 RT_TRACE(COMP_INIT, DBG_LOUD,
377 ("tpcipriv->ndis_adapter.funcnumber %x\n",
378 tpcipriv->ndis_adapter.funcnumber));
379
380 if ((pcipriv->ndis_adapter.busnumber ==
381 tpcipriv->ndis_adapter.busnumber) &&
382 (pcipriv->ndis_adapter.devnumber ==
383 tpcipriv->ndis_adapter.devnumber) &&
384 (pcipriv->ndis_adapter.funcnumber !=
385 tpcipriv->ndis_adapter.funcnumber)) {
386 find_buddy_priv = true;
387 break;
388 }
389 }
390 }
391
392 RT_TRACE(COMP_INIT, DBG_LOUD,
393 ("find_buddy_priv %d\n", find_buddy_priv));
394
395 if (find_buddy_priv)
396 *buddy_priv = tpriv;
397
398 return find_buddy_priv;
399}
400
401static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
402{
403 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
404 u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset;
405 u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
406 u8 linkctrl_reg;
407 u8 num4bbytes;
408
409 num4bbytes = (capabilityoffset + 0x10) / 4;
410
411 /*Read Link Control Register */
412 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
413 pcicfg_addrport + (num4bbytes << 2));
414 rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &linkctrl_reg);
415
416 pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg;
417}
418
419static void rtl_pci_parse_configuration(struct pci_dev *pdev,
420 struct ieee80211_hw *hw)
421{
422 struct rtl_priv *rtlpriv = rtl_priv(hw);
423 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
424
425 u8 tmp;
426 int pos;
427 u8 linkctrl_reg;
428
429 /*Link Control Register */
430 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
431 pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &linkctrl_reg);
432 pcipriv->ndis_adapter.linkctrl_reg = linkctrl_reg;
433
434 RT_TRACE(COMP_INIT, DBG_TRACE,
435 ("Link Control Register =%x\n",
436 pcipriv->ndis_adapter.linkctrl_reg));
437
438 pci_read_config_byte(pdev, 0x98, &tmp);
439 tmp |= BIT(4);
440 pci_write_config_byte(pdev, 0x98, tmp);
441
442 tmp = 0x17;
443 pci_write_config_byte(pdev, 0x70f, tmp);
444}
445
446static void rtl_pci_init_aspm(struct ieee80211_hw *hw)
447{
448 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
449
450 _rtl_pci_update_default_setting(hw);
451
452 if (ppsc->reg_rfps_level & RT_RF_PS_LEVEL_ALWAYS_ASPM) {
453 /*Always enable ASPM & Clock Req. */
454 rtl_pci_enable_aspm(hw);
455 RT_SET_PS_LEVEL(ppsc, RT_RF_PS_LEVEL_ALWAYS_ASPM);
456 }
457}
458
/* Wire up the register I/O operation table for the PCI bus: async
 * writes and synchronous reads, and remember the backing device. */
static void _rtl_pci_io_handler_init(struct device *dev,
				     struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->io.dev = dev;

	rtlpriv->io.write8_async = pci_write8_async;
	rtlpriv->io.write16_async = pci_write16_async;
	rtlpriv->io.write32_async = pci_write32_async;

	rtlpriv->io.read8_sync = pci_read8_sync;
	rtlpriv->io.read16_sync = pci_read16_sync;
	rtlpriv->io.read32_sync = pci_read32_sync;
}
474
/* Collect early-mode aggregation info for the frames waiting in the
 * tid's software wait queue: for each leading AMPDU-flagged frame,
 * record its on-air length (payload + FCS + ICV when encrypted) in
 * tcb_desc->empkt_len[] and count it in tcb_desc->empkt_num.  Stops at
 * the first non-AMPDU frame, the end of the queue, or the hardware's
 * early-mode limit.  Always returns true.
 */
static bool _rtl_pci_update_earlymode_info(struct ieee80211_hw *hw,
					   struct sk_buff *skb,
					   struct rtl_tcb_desc *tcb_desc,
					   u8 tid)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct sk_buff *next_skb;
	u8 additionlen = FCS_LEN;

	/* per-frame crypto overhead: open is 4, wep/tkip is 8, aes is 12 */
	if (info->control.hw_key)
		additionlen += info->control.hw_key->icv_len;

	/* The most skb num is 6 */
	tcb_desc->empkt_num = 0;
	spin_lock_bh(&rtlpriv->locks.waitq_lock);
	skb_queue_walk(&rtlpriv->mac80211.skb_waitq[tid], next_skb) {
		struct ieee80211_tx_info *next_info;

		next_info = IEEE80211_SKB_CB(next_skb);
		if (next_info->flags & IEEE80211_TX_CTL_AMPDU) {
			tcb_desc->empkt_len[tcb_desc->empkt_num] =
				next_skb->len + additionlen;
			tcb_desc->empkt_num++;
		} else {
			break;
		}

		if (skb_queue_is_last(&rtlpriv->mac80211.skb_waitq[tid],
				      next_skb))
			break;

		if (tcb_desc->empkt_num >= rtlhal->max_earlymode_num)
			break;
	}
	spin_unlock_bh(&rtlpriv->locks.waitq_lock);
	return true;
}
515
/* Drain the software TX wait queues (early mode only).
 *
 * Walks tids 7..0 and, while scanning is off, RF is on, and the
 * hardware ring has more than max_earlymode_num free slots, dequeues
 * waiting frames (under waitq_lock) and hands them to adapter_tx(),
 * attaching early-mode aggregation info for AMPDU frames.
 */
static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct sk_buff *skb = NULL;
	struct ieee80211_tx_info *info = NULL;
	int tid; /* should be int */

	if (!rtlpriv->rtlhal.b_earlymode_enable)
		return;
	/* Skip while a dual-MAC easy-concurrent switch is in progress. */
	if (rtlpriv->dm.supp_phymode_switch &&
	    (rtlpriv->easy_concurrent_ctl.bswitch_in_process ||
	     (rtlpriv->buddy_priv &&
	      rtlpriv->buddy_priv->easy_concurrent_ctl.bswitch_in_process)))
		return;
	/* we just use em for BE/BK/VI/VO */
	for (tid = 7; tid >= 0; tid--) {
		u8 hw_queue = ac_to_hwq[rtl92e_tid_to_ac(hw, tid)];
		struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
		while (!mac->act_scanning &&
		       rtlpriv->psc.rfpwr_state == ERFON) {
			struct rtl_tcb_desc tcb_desc;
			memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));

			/* Dequeue only while there is enough ring room. */
			spin_lock_bh(&rtlpriv->locks.waitq_lock);
			if (!skb_queue_empty(&mac->skb_waitq[tid]) &&
			    (ring->entries - skb_queue_len(&ring->queue) >
			     rtlhal->max_earlymode_num)) {
				skb = skb_dequeue(&mac->skb_waitq[tid]);
			} else {
				spin_unlock_bh(&rtlpriv->locks.waitq_lock);
				break;
			}
			spin_unlock_bh(&rtlpriv->locks.waitq_lock);

			/* Some macaddr can't do early mode. like
			 * multicast/broadcast/no_qos data */
			info = IEEE80211_SKB_CB(skb);
			if (info->flags & IEEE80211_TX_CTL_AMPDU)
				_rtl_pci_update_earlymode_info(hw, skb,
							       &tcb_desc, tid);

			rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
		}
	}
}
565
/* TX-complete handler for one hardware queue.
 *
 * Reclaims completed descriptors: unmaps the DMA buffer, strips the
 * early-mode header, reports TX status to mac80211 (or just frees
 * command/SMPS frames), updates per-tid counters, and wakes the
 * mac80211 queue when ring space frees up.  Finally may kick the
 * device out of LPS when traffic is high.
 */
static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];

	while (skb_queue_len(&ring->queue)) {
		struct sk_buff *skb;
		struct ieee80211_tx_info *info;
		__le16 fc;
		u8 tid;
		u8 *entry;

		/* New trx flow uses buffer descriptors, old flow uses
		 * plain tx descriptors. */
		if (rtlpriv->use_new_trx_flow)
			entry = (u8 *)(&ring->buffer_desc[ring->idx]);
		else
			entry = (u8 *)(&ring->desc[ring->idx]);

		/* Stop as soon as the hardware still owns the head. */
		if (rtlpriv->cfg->ops->is_tx_desc_closed &&
		    !rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx))
			return;

		ring->idx = (ring->idx + 1) % ring->entries;

		skb = __skb_dequeue(&ring->queue);

		pci_unmap_single(rtlpci->pdev,
				 rtlpriv->cfg->ops->
				 get_desc((u8 *)entry, true,
					  HW_DESC_TXBUFF_ADDR),
				 skb->len, PCI_DMA_TODEVICE);

		/* remove early mode header */
		if (rtlpriv->rtlhal.b_earlymode_enable)
			skb_pull(skb, EM_HDR_LEN);

		RT_TRACE((COMP_INTR | COMP_SEND), DBG_TRACE,
			 ("new ring->idx:%d, free: skb_queue_len:%d, free: seq:%d\n",
			  ring->idx,
			  skb_queue_len(&ring->queue),
			  *(u16 *)(skb->data + 22)));

		/* Command frames get no mac80211 status report. */
		if (prio == TXCMD_QUEUE) {
			dev_kfree_skb(skb);
			goto tx_status_ok;
		}

		/* for sw LPS, just after NULL skb send out, we can
		 * sure AP knows that we are sleeping, our we should not let
		 * rf to sleep
		 */
		fc = rtl_get_fc(skb);
		if (ieee80211_is_nullfunc(fc)) {
			if (ieee80211_has_pm(fc)) {
				rtlpriv->mac80211.offchan_deley = true;
				rtlpriv->psc.state_inap = 1;
			} else {
				rtlpriv->psc.state_inap = 0;
			}
		}
		/* SMPS action frames are consumed silently. */
		if (ieee80211_is_action(fc)) {
			struct ieee80211_mgmt_compat *action_frame =
				(struct ieee80211_mgmt_compat *)skb->data;
			if (action_frame->u.action.u.ht_smps.action ==
			    WLAN_HT_ACTION_SMPS) {
				dev_kfree_skb(skb);
				goto tx_status_ok;
			}
		}

		/* update tid tx pkt num */
		tid = rtl_get_tid(skb);
		if (tid <= 7)
			rtlpriv->link_info.tidtx_inperiod[tid]++;

		info = IEEE80211_SKB_CB(skb);
		ieee80211_tx_info_clear_status(info);

		info->flags |= IEEE80211_TX_STAT_ACK;
		/*info->status.rates[0].count = 1; */

		ieee80211_tx_status_irqsafe(hw, skb);

		/* Wake the mac80211 queue once the ring has room again. */
		if ((ring->entries - skb_queue_len(&ring->queue)) == 2) {
			RT_TRACE(COMP_ERR, DBG_LOUD,
				 ("more desc left, wake skb_queue@%d,ring->idx = %d, skb_queue_len = 0x%d\n",
				  prio, ring->idx,
				  skb_queue_len(&ring->queue)));

			ieee80211_wake_queue(hw, skb_get_queue_mapping
					     (skb));
		}
tx_status_ok:
		skb = NULL;
	}

	/* Heavy traffic: leave low-power state. */
	if (((rtlpriv->link_info.num_rx_inperiod +
	      rtlpriv->link_info.num_tx_inperiod) > 8) ||
	    (rtlpriv->link_info.num_rx_inperiod > 2)) {
		rtl92e_lps_leave(hw);
	}
}
669
670static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
671 u8 *entry, int rxring_idx, int desc_idx)
672{
673 struct rtl_priv *rtlpriv = rtl_priv(hw);
674 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
675 struct sk_buff *skb;
676 u32 bufferaddress;
677 u8 tmp_one = 1;
678
679 skb = dev_alloc_skb(rtlpci->rxbuffersize);
680 if (!skb)
681 return 0;
682 rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
683
684 /* just set skb->cb to mapping addr
685 * for pci_unmap_single use
686 */
687 *((dma_addr_t *)skb->cb) = pci_map_single(rtlpci->pdev,
688 skb_tail_pointer(skb), rtlpci->rxbuffersize,
689 PCI_DMA_FROMDEVICE);
690 bufferaddress = *((dma_addr_t *)skb->cb);
691 if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
692 return 0;
693 if (rtlpriv->use_new_trx_flow) {
694 rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
695 HW_DESC_RX_PREPARE,
696 (u8 *)&bufferaddress);
697 } else {
698 rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
699 HW_DESC_RXBUFF_ADDR,
700 (u8 *)&bufferaddress);
701 rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
702 HW_DESC_RXPKT_LEN,
703 (u8 *)&rtlpci->rxbuffersize);
704 rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
705 HW_DESC_RXOWN,
706 (u8 *)&tmp_one);
707 }
708 return 1;
709}
710
711/* inorder to receive 8K AMSDU we have set skb to
712 * 9100bytes in init rx ring, but if this packet is
713 * not a AMSDU, this so big packet will be sent to
714 * TCP/IP directly, this cause big packet ping fail
715 * like: "ping -s 65507", so here we will realloc skb
716 * based on the true size of packet, I think mac80211
717 * do it will be better, but now mac80211 haven't */
718
719/* but some platform will fail when alloc skb sometimes.
720 * in this condition, we will send the old skb to
721 * mac80211 directly, this will not cause any other
722 * issues, but only be losted by TCP/IP */
723static void _rtl_pci_rx_to_mac80211(struct ieee80211_hw *hw,
724 struct sk_buff *skb,
725 struct ieee80211_rx_status rx_status)
726{
727 if (unlikely(!rtl92e_action_proc(hw, skb, false))) {
728 dev_kfree_skb_any(skb);
729 } else {
730 struct sk_buff *uskb = NULL;
731 u8 *pdata;
732
733 uskb = dev_alloc_skb(skb->len + 128);
734 if (likely(uskb)) {
735 memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
736 sizeof(rx_status));
737 pdata = (u8 *)skb_put(uskb, skb->len);
738 memcpy(pdata, skb->data, skb->len);
739 dev_kfree_skb_any(skb);
740
741 ieee80211_rx_irqsafe(hw, uskb);
742 } else {
743 ieee80211_rx_irqsafe(hw, skb);
744 }
745 }
746}
747
748/*hsisr interrupt handler*/
749static void _rtl_pci_hs_interrupt(struct ieee80211_hw *hw)
750{
751 struct rtl_priv *rtlpriv = rtl_priv(hw);
752 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
753
754 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR],
755 rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR]) |
756 rtlpci->sys_irq_mask);
757}
758
/* Post-process one received frame: update unicast statistics and LED,
 * feed the various housekeeping hooks (beacon stats, P2P, software LPS,
 * peer recognition), then either drop AP-mode 2.4G beacon/probe-resp
 * frames or deliver the frame to mac80211.
 */
static void _rtl_receive_one(struct ieee80211_hw *hw, struct sk_buff *skb,
			     struct ieee80211_rx_status rx_status)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
	__le16 fc = rtl_get_fc(skb);
	bool unicast = false;

	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

	if (is_broadcast_ether_addr(hdr->addr1)) {
		;/*TODO*/
	} else if (is_multicast_ether_addr(hdr->addr1)) {
		;/*TODO*/
	} else {
		unicast = true;
		rtlpriv->stats.rxbytesunicast += skb->len;
	}

	rtl92e_is_special_data(hw, skb, false);
	if (ieee80211_is_data(fc)) {
		rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

		if (unicast)
			rtlpriv->link_info.num_rx_inperiod++;
	}

	/* static bcn for roaming */
	rtl92e_beacon_statistic(hw, skb);
	rtl92e_p2p_info(hw, (void *)skb->data, skb->len);

	/* for sw lps */
	rtl92e_swlps_beacon(hw, (void *)skb->data, skb->len);
	rtl92e_recognize_peer(hw, (void *)skb->data, skb->len);
	/* In AP mode on 2.4G, beacons/probe responses are not
	 * forwarded to mac80211. */
	if ((rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP) &&
	    (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) &&
	    (ieee80211_is_beacon(fc) ||
	     ieee80211_is_probe_resp(fc)))
		dev_kfree_skb_any(skb);
	else
		_rtl_pci_rx_to_mac80211(hw, skb, rx_status);
}
801
802static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
803{
804 struct rtl_priv *rtlpriv = rtl_priv(hw);
805 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
806 struct ieee80211_rx_status rx_status = { 0 };
807 int rxring_idx = RTL_PCI_RX_MPDU_QUEUE;
808 unsigned int count = rtlpci->rxringcount;
809 u8 hw_queue = 0;
810 unsigned int rx_remained_cnt;
811 u8 own;
812 u8 tmp_one;
813 static int err_count;
814 struct rtl_stats stats = {
815 .signal = 0,
816 .rate = 0,
817 };
818
819 /*RX NORMAL PKT */
820 while (count--) {
821 struct ieee80211_hdr *hdr;
822 __le16 fc;
823 u16 len;
824 /*rx buffer descriptor */
825 struct rtl_rx_buffer_desc *buffer_desc = NULL;
826 /*if use new trx flow, it means wifi info */
827 struct rtl_rx_desc *pdesc = NULL;
828 /*rx pkt */
829 struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[
830 rtlpci->rx_ring[rxring_idx].idx];
831
832 if (rtlpriv->use_new_trx_flow) {
833 rx_remained_cnt =
834 rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw,
835 hw_queue);
836 if (rx_remained_cnt < 1)
837 return;
838
839 } else { /* rx descriptor */
840 pdesc = &rtlpci->rx_ring[rxring_idx].desc[
841 rtlpci->rx_ring[rxring_idx].idx];
842
843 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *)pdesc,
844 false,
845 HW_DESC_OWN);
846 if (own) /* wait data to be filled by hardware */
847 return;
848 }
849
850 /* If we get here, the data is filled already
851 * Attention !!!
852 * We can NOT access 'skb' before 'pci_unmap_single'
853 */
854 pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb),
855 rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
856
857 if (rtlpriv->use_new_trx_flow) {
858 buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc[
859 rtlpci->rx_ring[rxring_idx].idx];
860 /*means rx wifi info*/
861 pdesc = (struct rtl_rx_desc *)skb->data;
862 }
863 memset(&rx_status , 0 , sizeof(rx_status));
864 rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
865 &rx_status, (u8 *)pdesc, skb);
866
867 if (rtlpriv->use_new_trx_flow)
868 rtlpriv->cfg->ops->rx_check_dma_ok(hw,
869 (u8 *)buffer_desc,
870 hw_queue);
871 len = rtlpriv->cfg->ops->get_desc((u8 *)pdesc, false,
872 HW_DESC_RXPKT_LEN);
873
874 if (skb->end - skb->tail > len) {
875 skb_put(skb, len);
876 if (rtlpriv->use_new_trx_flow)
877 skb_reserve(skb, stats.rx_drvinfo_size +
878 stats.rx_bufshift + 24);
879 else
880 skb_reserve(skb, stats.rx_drvinfo_size +
881 stats.rx_bufshift);
882
883 } else {
884 if (err_count++ < 10) {
885 pr_info("skb->end (%d) - skb->tail (%d) > len (%d)\n",
886 skb->end, skb->tail, len);
887 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_EMERG,
888 "RX desc\n",
889 (u8 *)pdesc, 32);
890 }
891 break;
892 }
893
894 /* handle command packet here */
895 if (rtlpriv->cfg->ops->rx_command_packet(hw, stats, skb)) {
896 dev_kfree_skb_any(skb);
897 goto end;
898 }
899
900 /* NOTICE This can not be use for mac80211,
901 *this is done in mac80211 code,
902 *if you done here sec DHCP will fail
903 *skb_trim(skb, skb->len - 4);
904 */
905
906 hdr = rtl_get_hdr(skb);
907 fc = rtl_get_fc(skb);
908
909 if (!stats.b_crc && !stats.b_hwerror)
910 _rtl_receive_one(hw, skb, rx_status);
911 else
912 dev_kfree_skb_any(skb);
913 if (rtlpriv->use_new_trx_flow) {
914 rtlpci->rx_ring[hw_queue].next_rx_rp += 1;
915 rtlpci->rx_ring[hw_queue].next_rx_rp %=
916 RTL_PCI_MAX_RX_COUNT;
917
918
919 rx_remained_cnt--;
920 rtl_write_word(rtlpriv, 0x3B4,
921 rtlpci->rx_ring[hw_queue].next_rx_rp);
922 }
923 if (((rtlpriv->link_info.num_rx_inperiod +
924 rtlpriv->link_info.num_tx_inperiod) > 8) ||
925 (rtlpriv->link_info.num_rx_inperiod > 2)) {
926 rtl92e_lps_leave(hw);
927 }
928end:
929 if (rtlpriv->use_new_trx_flow) {
930 _rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc,
931 rxring_idx,
932 rtlpci->rx_ring[rxring_idx].idx);
933 } else {
934 _rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx,
935 rtlpci->rx_ring[rxring_idx].idx);
936
937 if (rtlpci->rx_ring[rxring_idx].idx ==
938 rtlpci->rxringcount - 1)
939 rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc,
940 false,
941 HW_DESC_RXERO,
942 (u8 *)&tmp_one);
943 }
944 rtlpci->rx_ring[rxring_idx].idx =
945 (rtlpci->rx_ring[rxring_idx].idx + 1) %
946 rtlpci->rxringcount;
947 }
948}
949
/* Top-half PCI interrupt handler.
 *
 * Masks HIMR/HIMRE, reads the interrupt status, dispatches to the
 * beacon tasklet, the per-queue TX reclaim handler, the RX handler,
 * the firmware event workqueue and the HSISR handler as indicated,
 * then restores the interrupt masks.  Always returns IRQ_HANDLED
 * (shared-IRQ spurious cases are filtered by the inta check).
 */
static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
{
	struct ieee80211_hw *hw = dev_id;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	unsigned long flags;
	u32 inta = 0;
	u32 intb = 0;

	if (rtlpci->irq_enabled == 0)
		return IRQ_HANDLED;

	/* Mask all interrupt sources while handling. */
	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
	rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMR], 0x0);
	rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMRE], 0x0);

	/*read ISR: 4/8bytes */
	rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);

	/*Shared IRQ or HW disappared */
	if (!inta || inta == 0xffff)
		goto done;
	/*<1> beacon related */
	if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK])
		RT_TRACE(COMP_INTR, DBG_TRACE, ("beacon ok interrupt!\n"));

	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER]))
		RT_TRACE(COMP_INTR, DBG_TRACE, ("beacon err interrupt!\n"));

	if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK])
		RT_TRACE(COMP_INTR, DBG_TRACE, ("beacon interrupt!\n"));

	if (inta & rtlpriv->cfg->maps[RTL_IMR_BcnInt]) {
		RT_TRACE(COMP_INTR, DBG_TRACE,
			 ("prepare beacon for interrupt!\n"));
		tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
	}

	/*<2> tx related */
	if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
		RT_TRACE(COMP_ERR, DBG_TRACE, ("IMR_TXFOVW!\n"));

	if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
		RT_TRACE(COMP_INTR, DBG_TRACE, ("Manage ok interrupt!\n"));
		_rtl_pci_tx_isr(hw, MGNT_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
		RT_TRACE(COMP_INTR, DBG_TRACE, ("HIGH_QUEUE ok interrupt!\n"));
		_rtl_pci_tx_isr(hw, HIGH_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
		rtlpriv->link_info.num_tx_inperiod++;

		RT_TRACE(COMP_INTR, DBG_TRACE, ("BK Tx OK interrupt!\n"));
		_rtl_pci_tx_isr(hw, BK_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
		rtlpriv->link_info.num_tx_inperiod++;

		RT_TRACE(COMP_INTR, DBG_TRACE, ("BE TX OK interrupt!\n"));
		_rtl_pci_tx_isr(hw, BE_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
		rtlpriv->link_info.num_tx_inperiod++;

		RT_TRACE(COMP_INTR, DBG_TRACE, ("VI TX OK interrupt!\n"));
		_rtl_pci_tx_isr(hw, VI_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
		rtlpriv->link_info.num_tx_inperiod++;

		RT_TRACE(COMP_INTR, DBG_TRACE, ("Vo TX OK interrupt!\n"));
		_rtl_pci_tx_isr(hw, VO_QUEUE);
	}

	/* The command queue exists only on RTL8192SE. */
	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
		if (inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
			rtlpriv->link_info.num_tx_inperiod++;

			RT_TRACE(COMP_INTR, DBG_TRACE,
				 ("CMD TX OK interrupt!\n"));
			_rtl_pci_tx_isr(hw, TXCMD_QUEUE);
		}
	}

	/*<3> rx related */
	if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
		RT_TRACE(COMP_INTR, DBG_TRACE, ("Rx ok interrupt!\n"));
		_rtl_pci_rx_interrupt(hw);
	}

	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
		RT_TRACE(COMP_ERR, DBG_WARNING,
			 ("rx descriptor unavailable!\n"));
		_rtl_pci_rx_interrupt(hw);
	}

	if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
		RT_TRACE(COMP_ERR, DBG_WARNING, ("rx overflow !\n"));
		_rtl_pci_rx_interrupt(hw);
	}

	/*<4> fw related*/
	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) {
		if (inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
			RT_TRACE(COMP_INTR, DBG_TRACE,
				 ("firmware interrupt!\n"));
			queue_delayed_work(rtlpriv->works.rtl_wq,
					   &rtlpriv->works.fwevt_wq, 0);
		}
	}

	/*<5> hsisr related*/
	/* Only 8188EE & 8723BE Supported.
	 * If Other ICs Come in, System will corrupt,
	 * because maps[RTL_IMR_HSISR_IND] & maps[MAC_HSISR]
	 * are not initialized*/
	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE ||
	    rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
		if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) {
			RT_TRACE(COMP_INTR, DBG_TRACE,
				 ("hsisr interrupt!\n"));
			_rtl_pci_hs_interrupt(hw);
		}
	}

	/* Early mode defers the waitq flush to the bottom half. */
	if (rtlpriv->rtlhal.b_earlymode_enable)
		tasklet_schedule(&rtlpriv->works.irq_tasklet);

done:
	/* Restore the interrupt masks. */
	rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMR],
			rtlpci->irq_mask[0]);
	rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMRE],
			rtlpci->irq_mask[1]);
	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
	return IRQ_HANDLED;
}
1094
/* Interrupt bottom-half tasklet: delegates to _rtl_pci_tx_chk_waitq(),
 * which services the driver's tx wait queues outside hard-irq context.
 * Scheduled from the ISR when early mode is enabled. */
static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
{
	_rtl_pci_tx_chk_waitq(hw);
}
1099
/* Tasklet that refreshes the beacon frame in the beacon tx ring.
 *
 * Drops the previously queued beacon skb (the beacon ring only ever uses
 * descriptor 0), fetches a fresh beacon from mac80211, fills the tx
 * descriptor (and, for new-trx-flow chips, the buffer descriptor) and
 * hands descriptor 0 back to the hardware via HW_DESC_OWN.
 */
static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl8192_tx_ring *ring = NULL;
	struct ieee80211_hdr *hdr = NULL;
	struct ieee80211_tx_info *info = NULL;
	struct sk_buff *pskb = NULL;
	struct rtl_tx_desc *pdesc = NULL;
	struct rtl_tcb_desc tcb_desc;
	/*This is for new trx flow*/
	struct rtl_tx_buffer_desc *pbuffer_desc = NULL;
	u8 temp_one = 1;

	memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
	ring = &rtlpci->tx_ring[BEACON_QUEUE];
	/* Discard the beacon queued on the previous beacon interval. */
	pskb = __skb_dequeue(&ring->queue);
	if (pskb)
		kfree_skb(pskb);

	/*NB: the beacon data buffer must be 32-bit aligned. */
	pskb = ieee80211_beacon_get(hw, mac->vif);
	if (pskb == NULL)
		return;
	hdr = rtl_get_hdr(pskb);
	info = IEEE80211_SKB_CB(pskb);
	pdesc = &ring->desc[0];
	if (rtlpriv->use_new_trx_flow)
		pbuffer_desc = &ring->buffer_desc[0];

	rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
					(u8 *)pbuffer_desc, info, NULL, pskb,
					BEACON_QUEUE, &tcb_desc);

	__skb_queue_tail(&ring->queue, pskb);

	/* Give descriptor 0 back to the hardware; it will transmit the
	 * beacon on the next TBTT. */
	rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN,
				    (u8 *)&temp_one);

	return;
}
1142
1143static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
1144{
1145 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1146 struct rtl_priv *rtlpriv = rtl_priv(hw);
1147 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1148 u8 i;
1149 u16 desc_num;
1150
1151 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE)
1152 desc_num = TX_DESC_NUM_92E;
1153 else
1154 desc_num = RT_TXDESC_NUM;
1155
1156 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
1157 rtlpci->txringcount[i] = desc_num;
1158 /*
1159 *we just alloc 2 desc for beacon queue,
1160 *because we just need first desc in hw beacon.
1161 */
1162 rtlpci->txringcount[BEACON_QUEUE] = 2;
1163
1164 /*
1165 *BE queue need more descriptor for performance
1166 *consideration or, No more tx desc will happen,
1167 *and may cause mac80211 mem leakage.
1168 */
1169 if (!rtl_priv(hw)->use_new_trx_flow)
1170 rtlpci->txringcount[BE_QUEUE] = RT_TXDESC_NUM_BE_QUEUE;
1171
1172 rtlpci->rxbuffersize = 9100; /*2048/1024; */
1173 rtlpci->rxringcount = RTL_PCI_MAX_RX_COUNT; /*64; */
1174}
1175
1176static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
1177 struct pci_dev *pdev)
1178{
1179 struct rtl_priv *rtlpriv = rtl_priv(hw);
1180 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1181 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1182 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1183
1184 rtlpriv->rtlhal.up_first_time = true;
1185 rtlpriv->rtlhal.being_init_adapter = false;
1186
1187 rtlhal->hw = hw;
1188 rtlpci->pdev = pdev;
1189
1190 /*Tx/Rx related var */
1191 _rtl_pci_init_trx_var(hw);
1192
1193 /*IBSS*/ mac->beacon_interval = 100;
1194
1195 /*AMPDU*/
1196 mac->min_space_cfg = 0;
1197 mac->max_mss_density = 0;
1198 /*set sane AMPDU defaults */
1199 mac->current_ampdu_density = 7;
1200 mac->current_ampdu_factor = 3;
1201
1202 /*QOS*/
1203 rtlpci->acm_method = eAcmWay2_SW;
1204
1205 /*task */
1206 tasklet_init(&rtlpriv->works.irq_tasklet,
1207 (void (*)(unsigned long))_rtl_pci_irq_tasklet,
1208 (unsigned long)hw);
1209 tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
1210 (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
1211 (unsigned long)hw);
1212}
1213
1214static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
1215 unsigned int prio, unsigned int entries)
1216{
1217 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1218 struct rtl_priv *rtlpriv = rtl_priv(hw);
1219 struct rtl_tx_buffer_desc *buffer_desc;
1220 struct rtl_tx_desc *desc;
1221 dma_addr_t buffer_desc_dma, desc_dma;
1222 u32 nextdescaddress;
1223 int i;
1224
1225 /* alloc tx buffer desc for new trx flow*/
1226 if (rtlpriv->use_new_trx_flow) {
1227 buffer_desc = pci_alloc_consistent(rtlpci->pdev,
1228 sizeof(*buffer_desc) * entries,
1229 &buffer_desc_dma);
1230
1231 if (!buffer_desc || (unsigned long)buffer_desc & 0xFF) {
1232 RT_TRACE(COMP_ERR, DBG_EMERG,
1233 ("Cannot allocate TX ring (prio = %d)\n",
1234 prio));
1235 return -ENOMEM;
1236 }
1237
1238 memset(buffer_desc, 0, sizeof(*buffer_desc) * entries);
1239 rtlpci->tx_ring[prio].buffer_desc = buffer_desc;
1240 rtlpci->tx_ring[prio].buffer_desc_dma = buffer_desc_dma;
1241
1242 rtlpci->tx_ring[prio].cur_tx_rp = 0;
1243 rtlpci->tx_ring[prio].cur_tx_wp = 0;
1244 rtlpci->tx_ring[prio].avl_desc = entries;
1245 }
1246
1247 /* alloc dma for this ring */
1248 desc = pci_alloc_consistent(rtlpci->pdev,
1249 sizeof(*desc) * entries, &desc_dma);
1250
1251 if (!desc || (unsigned long)desc & 0xFF) {
1252 RT_TRACE(COMP_ERR, DBG_EMERG,
1253 ("Cannot allocate TX ring (prio = %d)\n", prio));
1254 return -ENOMEM;
1255 }
1256
1257 memset(desc, 0, sizeof(*desc) * entries);
1258 rtlpci->tx_ring[prio].desc = desc;
1259 rtlpci->tx_ring[prio].dma = desc_dma;
1260
1261 rtlpci->tx_ring[prio].idx = 0;
1262 rtlpci->tx_ring[prio].entries = entries;
1263 skb_queue_head_init(&rtlpci->tx_ring[prio].queue);
1264 RT_TRACE(COMP_INIT, DBG_LOUD,
1265 ("queue:%d, ring_addr:%p\n", prio, desc));
1266
1267 /* init every desc in this ring */
1268 if (!rtlpriv->use_new_trx_flow) {
1269 for (i = 0; i < entries; i++) {
1270 nextdescaddress = (u32) desc_dma +
1271 ((i + 1) % entries) *
1272 sizeof(*desc);
1273
1274 rtlpriv->cfg->ops->set_desc(hw, (u8 *)&(desc[i]),
1275 true,
1276 HW_DESC_TX_NEXTDESC_ADDR,
1277 (u8 *)&nextdescaddress);
1278 }
1279 }
1280 return 0;
1281}
1282
1283static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
1284{
1285 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1286 struct rtl_priv *rtlpriv = rtl_priv(hw);
1287 int i;
1288
1289 if (rtlpriv->use_new_trx_flow) {
1290 struct rtl_rx_buffer_desc *entry = NULL;
1291 /* alloc dma for this ring */
1292 rtlpci->rx_ring[rxring_idx].buffer_desc =
1293 pci_alloc_consistent(rtlpci->pdev,
1294 sizeof(*rtlpci->rx_ring[rxring_idx].
1295 buffer_desc) *
1296 rtlpci->rxringcount,
1297 &rtlpci->rx_ring[rxring_idx].dma);
1298 if (!rtlpci->rx_ring[rxring_idx].buffer_desc ||
1299 (unsigned long)rtlpci->rx_ring[rxring_idx].buffer_desc & 0xFF) {
1300 RT_TRACE(COMP_ERR, DBG_EMERG,
1301 ("Cannot allocate RX ring\n"));
1302 return -ENOMEM;
1303 }
1304
1305 memset(rtlpci->rx_ring[rxring_idx].buffer_desc, 0,
1306 sizeof(*rtlpci->rx_ring[rxring_idx].buffer_desc) *
1307 rtlpci->rxringcount);
1308
1309 /* init every desc in this ring */
1310 rtlpci->rx_ring[rxring_idx].idx = 0;
1311
1312 for (i = 0; i < rtlpci->rxringcount; i++) {
1313 entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i];
1314 if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
1315 rxring_idx, i))
1316 return -ENOMEM;
1317 }
1318 } else {
1319 struct rtl_rx_desc *entry = NULL;
1320 u8 tmp_one = 1;
1321 /* alloc dma for this ring */
1322 rtlpci->rx_ring[rxring_idx].desc =
1323 pci_alloc_consistent(rtlpci->pdev,
1324 sizeof(*rtlpci->rx_ring[rxring_idx].
1325 desc) * rtlpci->rxringcount,
1326 &rtlpci->rx_ring[rxring_idx].dma);
1327 if (!rtlpci->rx_ring[rxring_idx].desc ||
1328 (unsigned long)rtlpci->rx_ring[rxring_idx].desc & 0xFF) {
1329 RT_TRACE(COMP_ERR, DBG_EMERG,
1330 ("Cannot allocate RX ring\n"));
1331 return -ENOMEM;
1332 }
1333 memset(rtlpci->rx_ring[rxring_idx].desc, 0,
1334 sizeof(*rtlpci->rx_ring[rxring_idx].desc) *
1335 rtlpci->rxringcount);
1336
1337 /* init every desc in this ring */
1338 rtlpci->rx_ring[rxring_idx].idx = 0;
1339 for (i = 0; i < rtlpci->rxringcount; i++) {
1340 entry = &rtlpci->rx_ring[rxring_idx].desc[i];
1341 if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
1342 rxring_idx, i))
1343 return -ENOMEM;
1344 }
1345 rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
1346 HW_DESC_RXERO, (u8 *) &tmp_one);
1347 }
1348 return 0;
1349}
1350
1351static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
1352 unsigned int prio)
1353{
1354 struct rtl_priv *rtlpriv = rtl_priv(hw);
1355 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1356 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
1357
1358 /* free every desc in this ring */
1359 while (skb_queue_len(&ring->queue)) {
1360 struct sk_buff *skb = __skb_dequeue(&ring->queue);
1361 u8 *entry;
1362
1363 if (rtlpriv->use_new_trx_flow)
1364 entry = (u8 *)(&ring->buffer_desc[ring->idx]);
1365 else
1366 entry = (u8 *)(&ring->desc[ring->idx]);
1367
1368 pci_unmap_single(rtlpci->pdev,
1369 rtlpriv->cfg->ops->get_desc((u8 *)entry, true,
1370 HW_DESC_TXBUFF_ADDR),
1371 skb->len, PCI_DMA_TODEVICE);
1372 kfree_skb(skb);
1373 ring->idx = (ring->idx + 1) % ring->entries;
1374 }
1375
1376 /* free dma of this ring */
1377 pci_free_consistent(rtlpci->pdev,
1378 sizeof(*ring->desc) * ring->entries,
1379 ring->desc, ring->dma);
1380 ring->desc = NULL;
1381 if (rtlpriv->use_new_trx_flow) {
1382 pci_free_consistent(rtlpci->pdev,
1383 sizeof(*ring->buffer_desc) * ring->entries,
1384 ring->buffer_desc, ring->buffer_desc_dma);
1385 ring->buffer_desc = NULL;
1386 }
1387}
1388
1389static void _rtl_pci_free_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
1390{
1391 struct rtl_priv *rtlpriv = rtl_priv(hw);
1392 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1393 int i;
1394
1395 /* free every desc in this ring */
1396 for (i = 0; i < rtlpci->rxringcount; i++) {
1397 struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[i];
1398
1399 if (!skb)
1400 continue;
1401
1402 pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb),
1403 rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
1404 kfree_skb(skb);
1405 }
1406
1407 /* free dma of this ring */
1408 if (rtlpriv->use_new_trx_flow) {
1409 pci_free_consistent(rtlpci->pdev,
1410 sizeof(*rtlpci->rx_ring[rxring_idx].
1411 buffer_desc) * rtlpci->rxringcount,
1412 rtlpci->rx_ring[rxring_idx].buffer_desc,
1413 rtlpci->rx_ring[rxring_idx].dma);
1414 rtlpci->rx_ring[rxring_idx].buffer_desc = NULL;
1415 } else {
1416 pci_free_consistent(rtlpci->pdev,
1417 sizeof(*rtlpci->rx_ring[rxring_idx].desc) *
1418 rtlpci->rxringcount,
1419 rtlpci->rx_ring[rxring_idx].desc,
1420 rtlpci->rx_ring[rxring_idx].dma);
1421 rtlpci->rx_ring[rxring_idx].desc = NULL;
1422 }
1423}
1424
1425static int _rtl_pci_init_trx_ring(struct ieee80211_hw *hw)
1426{
1427 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1428 int ret;
1429 int i, rxring_idx;
1430
1431 /* rxring_idx 0:RX_MPDU_QUEUE
1432 * rxring_idx 1:RX_CMD_QUEUE */
1433 for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) {
1434 ret = _rtl_pci_init_rx_ring(hw, rxring_idx);
1435 if (ret)
1436 return ret;
1437 }
1438
1439 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
1440 ret = _rtl_pci_init_tx_ring(hw, i,
1441 rtlpci->txringcount[i]);
1442 if (ret)
1443 goto err_free_rings;
1444 }
1445
1446 return 0;
1447
1448err_free_rings:
1449 for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++)
1450 _rtl_pci_free_rx_ring(hw, rxring_idx);
1451
1452 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
1453 if (rtlpci->tx_ring[i].desc ||
1454 rtlpci->tx_ring[i].buffer_desc)
1455 _rtl_pci_free_tx_ring(hw, i);
1456
1457 return 1;
1458}
1459
1460static int _rtl_pci_deinit_trx_ring(struct ieee80211_hw *hw)
1461{
1462 u32 i, rxring_idx;
1463
1464 /*free rx rings */
1465 for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++)
1466 _rtl_pci_free_rx_ring(hw, rxring_idx);
1467
1468 /*free tx rings */
1469 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
1470 _rtl_pci_free_tx_ring(hw, i);
1471
1472 return 0;
1473}
1474
/* Reset all rx and tx rings to their post-init state after a hardware
 * reset.
 *
 * Old trx flow: hand every rx descriptor back to the hardware
 * (HW_DESC_RXOWN) and reset the driver's rx index. New trx flow: only
 * the index is reset (no per-descriptor ownership bit to restore).
 * Every tx frame still pending on any ring is unmapped and dropped
 * under irq_th_lock, and each tx ring index is forced back to 0.
 *
 * Always returns 0.
 */
int rtl92e_pci_reset_trx_ring(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	int i, rxring_idx;
	unsigned long flags;
	u8 tmp_one = 1;
	/* rxring_idx 0:RX_MPDU_QUEUE */
	/* rxring_idx 1:RX_CMD_QUEUE */
	for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) {
		/* force the rx_ring[RX_MPDU_QUEUE]
		 * RX_CMD_QUEUE].idx to the first one
		 * If using the new trx flow, do nothing
		 */
		if (!rtlpriv->use_new_trx_flow &&
		    rtlpci->rx_ring[rxring_idx].desc) {
			struct rtl_rx_desc *entry = NULL;

			/* give every descriptor back to the hardware */
			for (i = 0; i < rtlpci->rxringcount; i++) {
				entry = &rtlpci->rx_ring[rxring_idx].desc[i];
				rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry,
							    false,
							    HW_DESC_RXOWN,
							    &tmp_one);
			}
		}
		rtlpci->rx_ring[rxring_idx].idx = 0;
	}

	/* after reset, release previous pending packet,
	 * and force the  tx idx to the first one
	 */
	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
	for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
		if (rtlpci->tx_ring[i].desc ||
		    rtlpci->tx_ring[i].buffer_desc) {
			struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i];

			while (skb_queue_len(&ring->queue)) {
				struct sk_buff *skb =
					__skb_dequeue(&ring->queue);
				u8 *entry;

				if (rtlpriv->use_new_trx_flow)
					entry = (u8 *)(&ring->buffer_desc
						[ring->idx]);
				else
					entry = (u8 *)(&ring->desc[ring->idx]);

				/* unmap the buffer before dropping it */
				pci_unmap_single(rtlpci->pdev,
						 rtlpriv->cfg->ops->get_desc(
						 (u8 *)entry, true,
						 HW_DESC_TXBUFF_ADDR),
						 skb->len, PCI_DMA_TODEVICE);
				kfree_skb(skb);
				ring->idx = (ring->idx + 1) % ring->entries;
			}
			ring->idx = 0;
		}
	}

	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);

	return 0;
}
1540
1541static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
1542 struct ieee80211_sta *sta,
1543 struct sk_buff *skb)
1544{
1545 struct rtl_priv *rtlpriv = rtl_priv(hw);
1546 struct rtl_sta_info *sta_entry = NULL;
1547 u8 tid = rtl_get_tid(skb);
1548 __le16 fc = rtl_get_fc(skb);
1549
1550 if (!sta)
1551 return false;
1552 sta_entry = (struct rtl_sta_info *)sta->drv_priv;
1553
1554 if (!rtlpriv->rtlhal.b_earlymode_enable)
1555 return false;
1556 if (ieee80211_is_nullfunc(fc))
1557 return false;
1558 if (ieee80211_is_qos_nullfunc(fc))
1559 return false;
1560 if (ieee80211_is_pspoll(fc))
1561 return false;
1562
1563 if (sta_entry->tids[tid].agg.agg_state != RTL_AGG_OPERATIONAL)
1564 return false;
1565 if (_rtl_mac_to_hwqueue(hw, skb) > VO_QUEUE)
1566 return false;
1567 if (tid > 7)
1568 return false;
1569 /* maybe every tid should be checked */
1570 if (!rtlpriv->link_info.higher_busytxtraffic[tid])
1571 return false;
1572
1573 spin_lock_bh(&rtlpriv->locks.waitq_lock);
1574 skb_queue_tail(&rtlpriv->mac80211.skb_waitq[tid], skb);
1575 spin_unlock_bh(&rtlpriv->locks.waitq_lock);
1576
1577 return true;
1578}
1579
/* Transmit one frame on the PCI interface.
 *
 * Fills a tx descriptor on the hardware queue selected by
 * _rtl_mac_to_hwqueue(), queues the skb on that ring and hands the
 * descriptor to the hardware. Byte counters and the sw-PS PM bit are
 * updated on the way. Returns 0 on success; returns skb->len when the
 * old-flow ring has no free descriptor (frame not consumed).
 */
static int rtl_pci_tx(struct ieee80211_hw *hw,
		      struct ieee80211_sta *sta,
		      struct sk_buff *skb,
		      struct rtl_tcb_desc *ptcb_desc)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_sta_info *sta_entry = NULL;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct rtl8192_tx_ring *ring;
	struct rtl_tx_desc *pdesc;
	struct rtl_tx_buffer_desc *ptx_bd_desc = NULL;
	u16 idx;
	u8 own;
	u8 temp_one = 1;
	u8 hw_queue = _rtl_mac_to_hwqueue(hw, skb);
	unsigned long flags;
	struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
	__le16 fc = rtl_get_fc(skb);
	u8 *pda_addr = hdr->addr1;
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	/*ssn */
	u8 tid = 0;
	u16 seq_number = 0;

	if (ieee80211_is_mgmt(fc))
		rtl92e_tx_mgmt_proc(hw, skb);

	/* with software PS, set the PM bit on outgoing data frames */
	if (rtlpriv->psc.sw_ps_enabled) {
		if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) &&
		    !ieee80211_has_pm(fc))
			hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
	}

	rtl92e_action_proc(hw, skb, true);

	/* per-destination-type byte accounting */
	if (is_multicast_ether_addr(pda_addr))
		rtlpriv->stats.txbytesmulticast += skb->len;
	else if (is_broadcast_ether_addr(pda_addr))
		rtlpriv->stats.txbytesbroadcast += skb->len;
	else
		rtlpriv->stats.txbytesunicast += skb->len;

	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
	ring = &rtlpci->tx_ring[hw_queue];
	if (hw_queue != BEACON_QUEUE) {
		/* new flow keeps an explicit write pointer; the old flow
		 * derives the slot from idx + queue length */
		if (rtlpriv->use_new_trx_flow)
			idx = ring->cur_tx_wp;
		else
			idx = (ring->idx + skb_queue_len(&ring->queue)) %
			      ring->entries;
	} else {
		idx = 0;	/* beacon ring only ever uses slot 0 */
	}

	pdesc = &ring->desc[idx];

	if (rtlpriv->use_new_trx_flow) {
		ptx_bd_desc = &ring->buffer_desc[idx];
	} else {
		/* old flow: descriptor still owned by hw means no room */
		own = (u8) rtlpriv->cfg->ops->get_desc((u8 *)pdesc,
						       true, HW_DESC_OWN);

		if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
			RT_TRACE(COMP_ERR, DBG_WARNING,
				 ("No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
				  hw_queue, ring->idx, idx,
				  skb_queue_len(&ring->queue)));

			spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
					       flags);
			return skb->len;
		}
	}

	/* track the next sequence number per TID for QoS data */
	if (ieee80211_is_data_qos(fc)) {
		tid = rtl_get_tid(skb);
		if (sta) {
			sta_entry = (struct rtl_sta_info *)sta->drv_priv;
			seq_number = (le16_to_cpu(hdr->seq_ctrl) &
				      IEEE80211_SCTL_SEQ) >> 4;
			seq_number += 1;

			if (!ieee80211_has_morefrags(hdr->frame_control))
				sta_entry->tids[tid].seq_number = seq_number;
		}
	}

	if (ieee80211_is_data(fc))
		rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);

	rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
					(u8 *)ptx_bd_desc, info, sta, skb,
					hw_queue, ptcb_desc);

	__skb_queue_tail(&ring->queue, skb);
	/* hand the descriptor to the hardware; note the new flow passes
	 * the queue index here — presumably the hal's set_desc uses it to
	 * advance the hw write pointer (TODO confirm in hal set_desc) */
	if (rtlpriv->use_new_trx_flow) {
		rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true,
					    HW_DESC_OWN, (u8 *)&hw_queue);
	} else {
		rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true,
					    HW_DESC_OWN, (u8 *)&temp_one);
	}

	/* keep 2 slots of headroom; throttle mac80211 when nearly full */
	if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
	    hw_queue != BEACON_QUEUE) {
		RT_TRACE(COMP_ERR, DBG_LOUD,
			 ("less desc left, stop skb_queue@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
			  hw_queue, ring->idx, idx,
			  skb_queue_len(&ring->queue)));

		ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
	}

	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);

	if (rtlpriv->cfg->ops->tx_polling)
		rtlpriv->cfg->ops->tx_polling(hw, hw_queue);

	return 0;
}
/* mac80211 flush callback: busy-wait (5 ms steps, up to ~1 s total)
 * until the tx rings selected by the @queues bitmap have drained.
 *
 * Beacon and command queues are skipped; the wait is abandoned early if
 * the RF is off, the hal has stopped, or a scan is being skipped.
 * @drop is accepted but not acted on (frames are never force-dropped).
 */
static void rtl_pci_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	u16 i = 0;
	int queue_id;
	struct rtl8192_tx_ring *ring;

	if (mac->skip_scan)
		return;

	/* walk queues from highest to lowest; only advance queue_id once
	 * the current queue is empty (or excluded) */
	for (queue_id = RTL_PCI_MAX_TX_QUEUE_COUNT - 1; queue_id >= 0;) {
		u32 queue_len;
		if (((queues >> queue_id) & 0x1) == 0) {
			queue_id--;
			continue;
		}
		ring = &pcipriv->dev.tx_ring[queue_id];
		queue_len = skb_queue_len(&ring->queue);
		if (queue_len == 0 || queue_id == BEACON_QUEUE ||
		    queue_id == TXCMD_QUEUE) {
			queue_id--;
			continue;
		} else {
			/* still draining: sleep and re-check this queue */
			msleep(5);
			i++;
		}

		/* we just wait 1s for all queues */
		if (rtlpriv->psc.rfpwr_state == ERFOFF ||
		    is_hal_stop(rtlhal) || i >= 200)
			return;
	}
}
1736
/* Final PCI-layer teardown: free the trx rings, quiesce the interrupt
 * path and destroy the driver workqueue.
 *
 * NOTE(review): the rings are freed before synchronize_irq(); this is
 * only safe if interrupts were already disabled by rtl_pci_stop() —
 * confirm the caller always stops the device first.
 */
static void rtl_pci_deinit(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	_rtl_pci_deinit_trx_ring(hw);

	/* wait for any in-flight handler, then kill the bottom half */
	synchronize_irq(rtlpci->pdev->irq);
	tasklet_kill(&rtlpriv->works.irq_tasklet);

	flush_workqueue(rtlpriv->works.rtl_wq);
	destroy_workqueue(rtlpriv->works.rtl_wq);
}
1750
/* Initialise the PCI-layer software state and allocate the trx rings.
 *
 * Returns the (nonzero) error from _rtl_pci_init_trx_ring() on failure.
 * NOTE(review): returns 1 — not 0 — on success; callers appear to use a
 * nonstandard "truthy means ok" convention here, so do not "fix" this
 * without auditing every caller.
 */
static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	int err;

	_rtl_pci_init_struct(hw, pdev);

	err = _rtl_pci_init_trx_ring(hw);
	if (err) {
		RT_TRACE(COMP_ERR, DBG_EMERG,
			 ("tx ring initialization failed"));
		return err;
	}

	return 1;
}
1767
/* Bring the adapter up: reset the trx rings, initialise BT-coex state
 * (when present), run the hal's hw_init, then enable interrupts and rx.
 *
 * Returns 0 on success or the error from the hal hw_init op.
 */
static int rtl_pci_start(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	int err = 0;

	RT_TRACE(COMP_INIT, DBG_DMESG, (" rtl_pci_start\n"));
	rtl92e_pci_reset_trx_ring(hw);

	rtlpriv->rtlhal.driver_is_goingto_unload = false;
	if (rtlpriv->cfg->ops->get_btc_status()) {
		rtlpriv->btcoexist.btc_ops->btc_init_variables(rtlpriv);
		rtlpriv->btcoexist.btc_ops->btc_init_hal_vars(rtlpriv);
	}

	err = rtlpriv->cfg->ops->hw_init(hw);
	if (err) {
		RT_TRACE(COMP_INIT, DBG_DMESG,
			 ("Failed to config hardware err %x!\n" , err));
		return err;
	}

	rtlpriv->cfg->ops->enable_interrupt(hw);
	RT_TRACE(COMP_INIT, DBG_LOUD, ("enable_interrupt OK\n"));

	rtl92e_init_rx_config(hw);

	/*should after adapter start and interrupt enable. */
	set_hal_start(rtlhal);

	RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);

	rtlpriv->rtlhal.up_first_time = false;

	RT_TRACE(COMP_INIT, DBG_DMESG, ("rtl_pci_start OK\n"));
	return 0;
}
1806
/* Bring the adapter down: notify BT-coex, mark the hal stopped, disable
 * interrupts, then disable the hardware once any in-flight RF state
 * change has completed (polled for up to ~100 ms under rf_ps_lock).
 */
static void rtl_pci_stop(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	u8 RFInProgressTimeOut = 0;

	if (rtlpriv->cfg->ops->get_btc_status())
		rtlpriv->btcoexist.btc_ops->btc_halt_notify();

	/*
	 *should before disable interrrupt&adapter
	 *and will do it immediately.
	 */
	set_hal_stop(rtlhal);

	rtlpriv->cfg->ops->disable_interrupt(hw);

	/* wait (dropping the lock while sleeping) for a concurrent RF
	 * change to finish; give up after ~100 ms and proceed anyway */
	spin_lock(&rtlpriv->locks.rf_ps_lock);
	while (ppsc->rfchange_inprogress) {
		spin_unlock(&rtlpriv->locks.rf_ps_lock);
		if (RFInProgressTimeOut > 100) {
			spin_lock(&rtlpriv->locks.rf_ps_lock);
			break;
		}
		mdelay(1);
		RFInProgressTimeOut++;
		spin_lock(&rtlpriv->locks.rf_ps_lock);
	}
	/* claim the RF-change slot for ourselves during hw_disable */
	ppsc->rfchange_inprogress = true;
	spin_unlock(&rtlpriv->locks.rf_ps_lock);

	rtlpriv->rtlhal.driver_is_goingto_unload = true;
	rtlpriv->cfg->ops->hw_disable(hw);
	rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);

	spin_lock(&rtlpriv->locks.rf_ps_lock);
	ppsc->rfchange_inprogress = false;
	spin_unlock(&rtlpriv->locks.rf_ps_lock);

	rtl_pci_enable_aspm(hw);
}
1849
1850static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1851 struct ieee80211_hw *hw)
1852{
1853 struct rtl_priv *rtlpriv = rtl_priv(hw);
1854 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1855 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1856 struct pci_dev *bridge_pdev = pdev->bus->self;
1857 u16 venderid;
1858 u16 deviceid;
1859 u8 revisionid;
1860 u16 irqline;
1861 u8 tmp;
1862
1863 venderid = pdev->vendor;
1864 deviceid = pdev->device;
1865 pci_read_config_byte(pdev, 0x8, &revisionid);
1866 pci_read_config_word(pdev, 0x3C, &irqline);
1867
1868 if (deviceid == RTL_PCI_8192_DID ||
1869 deviceid == RTL_PCI_0044_DID ||
1870 deviceid == RTL_PCI_0047_DID ||
1871 deviceid == RTL_PCI_8192SE_DID ||
1872 deviceid == RTL_PCI_8174_DID ||
1873 deviceid == RTL_PCI_8173_DID ||
1874 deviceid == RTL_PCI_8172_DID ||
1875 deviceid == RTL_PCI_8171_DID) {
1876 switch (revisionid) {
1877 case RTL_PCI_REVISION_ID_8192PCIE:
1878 RT_TRACE(COMP_INIT, DBG_DMESG,
1879 ("8192E is found but not supported now-vid/did=%x/%x\n",
1880 venderid, deviceid));
1881 rtlhal->hw_type = HARDWARE_TYPE_RTL8192E;
1882 return false;
1883 break;
1884 case RTL_PCI_REVISION_ID_8192SE:
1885 RT_TRACE(COMP_INIT, DBG_DMESG,
1886 ("8192SE is found - vid/did=%x/%x\n",
1887 venderid, deviceid));
1888 rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
1889 break;
1890 default:
1891 RT_TRACE(COMP_ERR, DBG_WARNING,
1892 ("Err: Unknown device - vid/did=%x/%x\n",
1893 venderid, deviceid));
1894 rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
1895 break;
1896 }
1897 } else if (deviceid == RTL_PCI_8723AE_DID) {
1898 rtlhal->hw_type = HARDWARE_TYPE_RTL8723AE;
1899 RT_TRACE(COMP_INIT, DBG_DMESG,
1900 ("8723AE PCI-E is found - vid/did=%x/%x\n",
1901 venderid, deviceid));
1902 } else if (deviceid == RTL_PCI_8192CET_DID ||
1903 deviceid == RTL_PCI_8192CE_DID ||
1904 deviceid == RTL_PCI_8191CE_DID ||
1905 deviceid == RTL_PCI_8188CE_DID) {
1906 rtlhal->hw_type = HARDWARE_TYPE_RTL8192CE;
1907 RT_TRACE(COMP_INIT, DBG_DMESG,
1908 ("8192C PCI-E is found - vid/did=%x/%x\n",
1909 venderid, deviceid));
1910 } else if (deviceid == RTL_PCI_8192DE_DID ||
1911 deviceid == RTL_PCI_8192DE_DID2) {
1912 rtlhal->hw_type = HARDWARE_TYPE_RTL8192DE;
1913 RT_TRACE(COMP_INIT, DBG_DMESG,
1914 ("8192D PCI-E is found - vid/did=%x/%x\n",
1915 venderid, deviceid));
1916 } else if (deviceid == RTL_PCI_8188EE_DID) {
1917 rtlhal->hw_type = HARDWARE_TYPE_RTL8188EE;
1918 RT_TRACE(COMP_INIT , DBG_LOUD,
1919 ("Find adapter, Hardware type is 8188EE\n"));
1920 } else if (deviceid == RTL_PCI_8723BE_DID) {
1921 rtlhal->hw_type = HARDWARE_TYPE_RTL8723BE;
1922 RT_TRACE(COMP_INIT , DBG_LOUD,
1923 ("Find adapter, Hardware type is 8723BE\n"));
1924 } else if (deviceid == RTL_PCI_8192EE_DID) {
1925 rtlhal->hw_type = HARDWARE_TYPE_RTL8192EE;
1926 RT_TRACE(COMP_INIT , DBG_LOUD,
1927 ("Find adapter, Hardware type is 8192EE\n"));
1928 } else if (deviceid == RTL_PCI_8821AE_DID) {
1929 rtlhal->hw_type = HARDWARE_TYPE_RTL8821AE;
1930 RT_TRACE(COMP_INIT , DBG_LOUD,
1931 ("Find adapter, Hardware type is 8821AE\n"));
1932 } else if (deviceid == RTL_PCI_8812AE_DID) {
1933 rtlhal->hw_type = HARDWARE_TYPE_RTL8812AE;
1934 RT_TRACE(COMP_INIT , DBG_LOUD,
1935 ("Find adapter, Hardware type is 8812AE\n"));
1936 } else {
1937 RT_TRACE(COMP_ERR, DBG_WARNING,
1938 ("Err: Unknown device - vid/did=%x/%x\n",
1939 venderid, deviceid));
1940
1941 rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE;
1942 }
1943
1944 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) {
1945 if (revisionid == 0 || revisionid == 1) {
1946 if (revisionid == 0) {
1947 RT_TRACE(COMP_INIT, DBG_LOUD,
1948 ("Find 92DE MAC0.\n"));
1949 rtlhal->interfaceindex = 0;
1950 } else if (revisionid == 1) {
1951 RT_TRACE(COMP_INIT, DBG_LOUD,
1952 ("Find 92DE MAC1.\n"));
1953 rtlhal->interfaceindex = 1;
1954 }
1955 } else {
1956 RT_TRACE(COMP_INIT, DBG_LOUD,
1957 ("Unknown device - VendorID/DeviceID=%x/%x, Revision=%x\n",
1958 venderid, deviceid, revisionid));
1959 rtlhal->interfaceindex = 0;
1960 }
1961 }
1962
1963 /* 92ee use new trx flow */
1964 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE)
1965 rtlpriv->use_new_trx_flow = true;
1966 else
1967 rtlpriv->use_new_trx_flow = false;
1968
1969 /*find bus info */
1970 pcipriv->ndis_adapter.busnumber = pdev->bus->number;
1971 pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn);
1972 pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn);
1973
1974 /*find bridge info */
1975 pcipriv->ndis_adapter.pcibridge_vendor = PCI_BRIDGE_VENDOR_UNKNOWN;
1976 /* some ARM have no bridge_pdev and will crash here
1977 * so we should check if bridge_pdev is NULL */
1978 if (bridge_pdev) {
1979 pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
1980 for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
1981 if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
1982 pcipriv->ndis_adapter.pcibridge_vendor = tmp;
1983 RT_TRACE(COMP_INIT, DBG_DMESG,
1984 ("Pci Bridge Vendor is found index: %d\n",
1985 tmp));
1986 break;
1987 }
1988 }
1989 }
1990
1991 if (pcipriv->ndis_adapter.pcibridge_vendor !=
1992 PCI_BRIDGE_VENDOR_UNKNOWN) {
1993 pcipriv->ndis_adapter.pcibridge_busnum =
1994 bridge_pdev->bus->number;
1995 pcipriv->ndis_adapter.pcibridge_devnum =
1996 PCI_SLOT(bridge_pdev->devfn);
1997 pcipriv->ndis_adapter.pcibridge_funcnum =
1998 PCI_FUNC(bridge_pdev->devfn);
1999 pcipriv->ndis_adapter.pcicfg_addrport =
2000 (pcipriv->ndis_adapter.pcibridge_busnum << 16) |
2001 (pcipriv->ndis_adapter.pcibridge_devnum << 11) |
2002 (pcipriv->ndis_adapter.pcibridge_funcnum << 8) | (1 << 31);
2003 pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
2004 pci_pcie_cap(bridge_pdev);
2005 pcipriv->ndis_adapter.num4bytes =
2006 (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4;
2007
2008 rtl_pci_get_linkcontrol_field(hw);
2009
2010 if (pcipriv->ndis_adapter.pcibridge_vendor ==
2011 PCI_BRIDGE_VENDOR_AMD) {
2012 pcipriv->ndis_adapter.amd_l1_patch =
2013 rtl_pci_get_amd_l1_patch(hw);
2014 }
2015 }
2016
2017 RT_TRACE(COMP_INIT, DBG_DMESG,
2018 ("pcidev busnumber:devnumber:funcnumber:vendor:link_ctl %d:%d:%d:%x:%x\n",
2019 pcipriv->ndis_adapter.busnumber,
2020 pcipriv->ndis_adapter.devnumber,
2021 pcipriv->ndis_adapter.funcnumber,
2022 pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg));
2023
2024 RT_TRACE(COMP_INIT, DBG_DMESG,
2025 ("pci_bridge busnumber:devnumber:funcnumber:vendor:pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
2026 pcipriv->ndis_adapter.pcibridge_busnum,
2027 pcipriv->ndis_adapter.pcibridge_devnum,
2028 pcipriv->ndis_adapter.pcibridge_funcnum,
2029 pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
2030 pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
2031 pcipriv->ndis_adapter.pcibridge_linkctrlreg,
2032 pcipriv->ndis_adapter.amd_l1_patch));
2033
2034 rtl_pci_parse_configuration(pdev, hw);
2035 list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list);
2036 return true;
2037}
2038
2039static int rtl_pci_intr_mode_msi(struct ieee80211_hw *hw)
2040{
2041 struct rtl_priv *rtlpriv = rtl_priv(hw);
2042 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
2043 struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
2044 int ret;
2045 ret = pci_enable_msi(rtlpci->pdev);
2046 if (ret < 0)
2047 return ret;
2048
2049 ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
2050 IRQF_SHARED, KBUILD_MODNAME, hw);
2051 if (ret < 0) {
2052 pci_disable_msi(rtlpci->pdev);
2053 return ret;
2054 }
2055
2056 rtlpci->using_msi = true;
2057
2058 RT_TRACE(COMP_INIT|COMP_INTR, DBG_DMESG, ("MSI Interrupt Mode!\n"));
2059 return 0;
2060}
2061
2062static int rtl_pci_intr_mode_legacy(struct ieee80211_hw *hw)
2063{
2064 struct rtl_priv *rtlpriv = rtl_priv(hw);
2065 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
2066 struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
2067 int ret;
2068
2069 ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
2070 IRQF_SHARED, KBUILD_MODNAME, hw);
2071 if (ret < 0)
2072 return ret;
2073
2074 rtlpci->using_msi = false;
2075 RT_TRACE(COMP_INIT|COMP_INTR, DBG_DMESG,
2076 ("Pin-based Interrupt Mode!\n"));
2077 return 0;
2078}
2079
2080static int rtl_pci_intr_mode_decide(struct ieee80211_hw *hw)
2081{
2082 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
2083 struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
2084 int ret;
2085 if (rtlpci->msi_support) {
2086 ret = rtl_pci_intr_mode_msi(hw);
2087 if (ret < 0)
2088 ret = rtl_pci_intr_mode_legacy(hw);
2089 } else {
2090 ret = rtl_pci_intr_mode_legacy(hw);
2091 }
2092 return ret;
2093}
2094
/* Cached pointer to the ieee80211_hw set up during probe; other
 * modules retrieve it through rtl_pci_get_hw_pointer(). */
static struct ieee80211_hw *hw_export;
2098
2099int stg_rtl_pci_probe(struct pci_dev *pdev,
2100 const struct pci_device_id *id)
2101{
2102 struct ieee80211_hw *hw = NULL;
2103 struct rtl_priv *rtlpriv = NULL;
2104 struct rtl_pci_priv *pcipriv = NULL;
2105 struct rtl_pci *rtlpci;
2106 unsigned long pmem_start, pmem_len, pmem_flags;
2107 int err;
2108
2109 err = pci_enable_device(pdev);
2110 if (err) {
2111 RT_ASSERT(false,
2112 ("%s : Cannot enable new PCI device\n",
2113 pci_name(pdev)));
2114 return err;
2115 }
2116
2117 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
2118 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2119 RT_ASSERT(false,
2120 ("Unable to obtain 32bit DMA for consistent allocations\n"));
2121 pci_disable_device(pdev);
2122 return -ENOMEM;
2123 }
2124 }
2125
2126 pci_set_master(pdev);
2127
2128 hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) +
2129 sizeof(struct rtl_priv), &rtl92e_ops);
2130 if (!hw) {
2131 RT_ASSERT(false,
2132 ("%s : ieee80211 alloc failed\n", pci_name(pdev)));
2133 err = -ENOMEM;
2134 goto fail1;
2135 }
2136 hw_export = hw;
2137
2138 SET_IEEE80211_DEV(hw, &pdev->dev);
2139 pci_set_drvdata(pdev, hw);
2140
2141 rtlpriv = hw->priv;
2142 pcipriv = (void *)rtlpriv->priv;
2143 pcipriv->dev.pdev = pdev;
2144
2145 /* init cfg & intf_ops */
2146 rtlpriv->rtlhal.interface = INTF_PCI;
2147 rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
2148 rtlpriv->intf_ops = &rtl92e_pci_ops;
2149 rtlpriv->glb_var = &global_var;
2150
2151 /*
2152 *init dbgp flags before all
2153 *other functions, because we will
2154 *use it in other funtions like
2155 *RT_TRACE/RT_PRINT/RTL_PRINT_DATA
2156 *you can not use these macro
2157 *before this
2158 */
2159 rtl92e_dbgp_flag_init(hw);
2160
2161 /* MEM map */
2162 err = pci_request_regions(pdev, KBUILD_MODNAME);
2163 if (err) {
2164 RT_ASSERT(false, ("Can't obtain PCI resources\n"));
2165 return err;
2166 }
2167
2168 pmem_start = pci_resource_start(pdev, rtlpriv->cfg->bar_id);
2169 pmem_len = pci_resource_len(pdev, rtlpriv->cfg->bar_id);
2170 pmem_flags = pci_resource_flags(pdev, rtlpriv->cfg->bar_id);
2171
2172 /*shared mem start */
2173 rtlpriv->io.pci_mem_start =
2174 (unsigned long)pci_iomap(pdev,
2175 rtlpriv->cfg->bar_id, pmem_len);
2176 if (rtlpriv->io.pci_mem_start == 0) {
2177 RT_ASSERT(false, ("Can't map PCI mem\n"));
2178 goto fail2;
2179 }
2180
2181 RT_TRACE(COMP_INIT, DBG_DMESG,
2182 ("mem mapped space: start: 0x%08lx len:%08lx flags:%08lx, after map:0x%08lx\n",
2183 pmem_start, pmem_len, pmem_flags,
2184 rtlpriv->io.pci_mem_start));
2185
2186 /* Disable Clk Request */
2187 pci_write_config_byte(pdev, 0x81, 0);
2188 /* leave D3 mode */
2189 pci_write_config_byte(pdev, 0x44, 0);
2190 pci_write_config_byte(pdev, 0x04, 0x06);
2191 pci_write_config_byte(pdev, 0x04, 0x07);
2192
2193 /* The next statement is needed when built as single module */
2194 rtl_core_module_init();
2195
2196 /* find adapter */
2197 /* if chip not support, will return false */
2198 if (!_rtl_pci_find_adapter(pdev, hw))
2199 goto fail3;
2200
2201 /* Init IO handler */
2202 _rtl_pci_io_handler_init(&pdev->dev, hw);
2203
2204 /*like read eeprom and so on */
2205 rtlpriv->cfg->ops->read_eeprom_info(hw);
2206
2207 if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
2208 RT_TRACE(COMP_ERR, DBG_EMERG, ("Can't init_sw_vars.\n"));
2209 goto fail3;
2210 }
2211
2212 rtlpriv->cfg->ops->init_sw_leds(hw);
2213
2214 /*aspm */
2215 rtl_pci_init_aspm(hw);
2216
2217 /* Init mac80211 sw */
2218 err = rtl92e_init_core(hw);
2219 if (err) {
2220 RT_TRACE(COMP_ERR, DBG_EMERG,
2221 ("Can't allocate sw for mac80211.\n"));
2222 goto fail3;
2223 }
2224
2225 /* Init PCI sw */
2226 err = !rtl_pci_init(hw, pdev);
2227 if (err) {
2228 RT_TRACE(COMP_ERR, DBG_EMERG, ("Failed to init PCI.\n"));
2229 goto fail3;
2230 }
2231
2232 err = ieee80211_register_hw(hw);
2233 if (err) {
2234 RT_TRACE(COMP_ERR, DBG_EMERG,
2235 ("Can't register mac80211 hw.\n"));
2236 goto fail3;
2237 } else {
2238 rtlpriv->mac80211.mac80211_registered = 1;
2239 }
2240 /* the wiphy must have been registed to
2241 * cfg80211 prior to regulatory_hint */
2242 if (regulatory_hint(hw->wiphy, rtlpriv->regd.alpha2))
2243 RT_TRACE(COMP_ERR, DBG_WARNING, ("regulatory_hint fail\n"));
2244
2245 /* add for prov */
2246 rtl_proc_add_one(hw);
2247
2248 /*init rfkill */
2249 rtl92e_init_rfkill(hw);
2250
2251 rtlpci = rtl_pcidev(pcipriv);
2252 err = rtl_pci_intr_mode_decide(hw);
2253 if (err) {
2254 RT_TRACE(COMP_INIT, DBG_DMESG,
2255 ("%s: failed to register IRQ handler\n",
2256 wiphy_name(hw->wiphy)));
2257 goto fail3;
2258 } else {
2259 rtlpci->irq_alloc = 1;
2260 }
2261
2262 set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
2263 return 0;
2264
2265fail3:
2266 pci_set_drvdata(pdev, NULL);
2267 rtl92e_deinit_core(hw);
2268 ieee80211_free_hw(hw);
2269
2270 if (rtlpriv->io.pci_mem_start != 0)
2271 pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
2272
2273fail2:
2274 pci_release_regions(pdev);
2275
2276fail1:
2277
2278 pci_disable_device(pdev);
2279
2280 return -ENODEV;
2281}
2282EXPORT_SYMBOL(stg_rtl_pci_probe);
2283
/*
 * Return the ieee80211_hw pointer cached in hw_export during probe, so
 * other modules can reach the hw without a direct reference.  May be
 * NULL if no device has been probed yet.
 */
struct ieee80211_hw *rtl_pci_get_hw_pointer(void)
{
	return hw_export;
}
EXPORT_SYMBOL(rtl_pci_get_hw_pointer);
2289
/*
 * stg_rtl_pci_disconnect - PCI remove callback: tear down one device.
 *
 * Unwinds what stg_rtl_pci_probe() set up: mac80211 unregistration (or
 * a manual stop if registration never happened), rfkill, PCI rings,
 * core state, the IRQ/MSI, the MMIO mapping, and finally the hw
 * allocation itself.  The statement order is significant.
 */
void stg_rtl_pci_disconnect(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
	struct rtl_mac *rtlmac = rtl_mac(rtlpriv);

	clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);

	/* add for prov */
	rtl_proc_remove_one(hw);

	/*ieee80211_unregister_hw will call ops_stop */
	if (rtlmac->mac80211_registered == 1) {
		ieee80211_unregister_hw(hw);
		rtlmac->mac80211_registered = 0;
	} else {
		/* never registered with mac80211, so stop the deferred
		 * work and the adapter ourselves */
		rtl92e_deinit_deferred_work(hw);
		rtlpriv->intf_ops->adapter_stop(hw);
	}

	/*deinit rfkill */
	rtl92e_deinit_rfkill(hw);

	rtl_pci_deinit(hw);
	rtl92e_deinit_core(hw);
	rtlpriv->cfg->ops->deinit_sw_vars(hw);

	/* wait for any in-flight interrupt handler before freeing the IRQ */
	if (rtlpci->irq_alloc) {
		synchronize_irq(rtlpci->pdev->irq);
		free_irq(rtlpci->pdev->irq, hw);
		rtlpci->irq_alloc = 0;
	}

	if (rtlpci->using_msi)
		pci_disable_msi(rtlpci->pdev);

	/* drop this adapter from the global private list added at probe */
	list_del(&rtlpriv->list);
	if (rtlpriv->io.pci_mem_start != 0) {
		pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
		pci_release_regions(pdev);
	}

	pci_disable_device(pdev);

	/* NOTE(review): ASPM is reconfigured after pci_disable_device();
	 * confirm this ordering is intentional */
	rtl_pci_disable_aspm(hw);

	pci_set_drvdata(pdev, NULL);

	ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(stg_rtl_pci_disconnect);
2343
2344/***************************************
2345kernel pci power state define:
2346PCI_D0 ((pci_power_t __force) 0)
2347PCI_D1 ((pci_power_t __force) 1)
2348PCI_D2 ((pci_power_t __force) 2)
2349PCI_D3hot ((pci_power_t __force) 3)
2350PCI_D3cold ((pci_power_t __force) 4)
2351PCI_UNKNOWN ((pci_power_t __force) 5)
2352
2353This function is called when system
2354goes into suspend state mac80211 will
2355call rtl_mac_stop() from the mac80211
2356suspend function first, So there is
2357no need to call hw_disable here.
2358****************************************/
2359int stg_rtl_pci_suspend(struct device *dev)
2360{
2361 struct pci_dev *pdev = to_pci_dev(dev);
2362 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
2363 struct rtl_priv *rtlpriv = rtl_priv(hw);
2364
2365 rtlpriv->cfg->ops->hw_suspend(hw);
2366 rtl92e_deinit_rfkill(hw);
2367
2368 return 0;
2369}
2370EXPORT_SYMBOL(stg_rtl_pci_suspend);
2371
2372int stg_rtl_pci_resume(struct device *dev)
2373{
2374 struct pci_dev *pdev = to_pci_dev(dev);
2375 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
2376 struct rtl_priv *rtlpriv = rtl_priv(hw);
2377
2378 rtlpriv->cfg->ops->hw_resume(hw);
2379 rtl92e_init_rfkill(hw);
2380
2381 return 0;
2382}
2383EXPORT_SYMBOL(stg_rtl_pci_resume);
2384
/*
 * PCI transport operations handed to the bus-agnostic core: start/stop,
 * tx paths, ring reset and ASPM control are all serviced by the PCI
 * implementations in this file.
 */
struct rtl_intf_ops rtl92e_pci_ops = {
	.read92e_efuse_byte = read92e_efuse_byte,
	.adapter_start = rtl_pci_start,
	.adapter_stop = rtl_pci_stop,
	.check_buddy_priv = rtl_pci_check_buddy_priv,
	.adapter_tx = rtl_pci_tx,
	.flush = rtl_pci_flush,
	.reset_trx_ring = rtl92e_pci_reset_trx_ring,
	.waitq_insert = rtl_pci_tx_chk_waitq_insert,

	.disable_aspm = rtl_pci_disable_aspm,
	.enable_aspm = rtl_pci_enable_aspm,
};
This page took 0.13381 seconds and 5 git commands to generate.