Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * forcedeth: Ethernet driver for NVIDIA nForce media access controllers. | |
3 | * | |
4 | * Note: This driver is a cleanroom reimplementation based on reverse | |
5 | * engineered documentation written by Carl-Daniel Hailfinger | |
87046e50 | 6 | * and Andrew de Quincey. |
1da177e4 LT |
7 | * |
8 | * NVIDIA, nForce and other NVIDIA marks are trademarks or registered | |
9 | * trademarks of NVIDIA Corporation in the United States and other | |
10 | * countries. | |
11 | * | |
1836098f | 12 | * Copyright (C) 2003,4,5 Manfred Spraul |
1da177e4 LT |
13 | * Copyright (C) 2004 Andrew de Quincey (wol support) |
14 | * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane | |
15 | * IRQ rate fixes, bigendian fixes, cleanups, verification) | |
87046e50 | 16 | * Copyright (c) 2004,5,6 NVIDIA Corporation |
1da177e4 LT |
17 | * |
18 | * This program is free software; you can redistribute it and/or modify | |
19 | * it under the terms of the GNU General Public License as published by | |
20 | * the Free Software Foundation; either version 2 of the License, or | |
21 | * (at your option) any later version. | |
22 | * | |
23 | * This program is distributed in the hope that it will be useful, | |
24 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
25 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
26 | * GNU General Public License for more details. | |
27 | * | |
28 | * You should have received a copy of the GNU General Public License | |
29 | * along with this program; if not, write to the Free Software | |
30 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
31 | * | |
32 | * Changelog: | |
33 | * 0.01: 05 Oct 2003: First release that compiles without warnings. | |
34 | * 0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs. | |
35 | * Check all PCI BARs for the register window. | |
36 | * udelay added to mii_rw. | |
37 | * 0.03: 06 Oct 2003: Initialize dev->irq. | |
38 | * 0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks. | |
39 | * 0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout. | |
40 | * 0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated, | |
41 | * irq mask updated | |
42 | * 0.07: 14 Oct 2003: Further irq mask updates. | |
43 | * 0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill | |
44 | * added into irq handler, NULL check for drain_ring. | |
45 | * 0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the | |
46 | * requested interrupt sources. | |
47 | * 0.10: 20 Oct 2003: First cleanup for release. | |
48 | * 0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased. | |
49 | * MAC Address init fix, set_multicast cleanup. | |
50 | * 0.12: 23 Oct 2003: Cleanups for release. | |
51 | * 0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10. | |
52 | * Set link speed correctly. start rx before starting | |
53 | * tx (nv_start_rx sets the link speed). | |
54 | * 0.14: 25 Oct 2003: Nic dependant irq mask. | |
55 | * 0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during | |
56 | * open. | |
57 | * 0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size | |
58 | * increased to 1628 bytes. | |
59 | * 0.17: 16 Nov 2003: undo rx buffer size increase. Substract 1 from | |
60 | * the tx length. | |
61 | * 0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats | |
62 | * 0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac | |
63 | * addresses, really stop rx if already running | |
64 | * in nv_start_rx, clean up a bit. | |
65 | * 0.20: 07 Dec 2003: alloc fixes | |
66 | * 0.21: 12 Jan 2004: additional alloc fix, nic polling fix. | |
67 | * 0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup | |
68 | * on close. | |
69 | * 0.23: 26 Jan 2004: various small cleanups | |
70 | * 0.24: 27 Feb 2004: make driver even less anonymous in backtraces | |
71 | * 0.25: 09 Mar 2004: wol support | |
72 | * 0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes | |
73 | * 0.27: 19 Jun 2004: Gigabit support, new descriptor rings, | |
74 | * added CK804/MCP04 device IDs, code fixes | |
75 | * for registers, link status and other minor fixes. | |
76 | * 0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe | |
77 | * 0.29: 31 Aug 2004: Add backup timer for link change notification. | |
78 | * 0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset | |
79 | * into nv_close, otherwise reenabling for wol can | |
80 | * cause DMA to kfree'd memory. | |
81 | * 0.31: 14 Nov 2004: ethtool support for getting/setting link | |
4ea7f299 | 82 | * capabilities. |
22c6d143 | 83 | * 0.32: 16 Apr 2005: RX_ERROR4 handling added. |
8f767fc8 MS |
84 | * 0.33: 16 May 2005: Support for MCP51 added. |
85 | * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics. | |
f49d16ef | 86 | * 0.35: 26 Jun 2005: Support for MCP55 added. |
dc8216c1 MS |
87 | * 0.36: 28 Jun 2005: Add jumbo frame support. |
88 | * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list | |
c2dba06d MS |
89 | * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of |
90 | * per-packet flags. | |
4ea7f299 AA |
91 | * 0.39: 18 Jul 2005: Add 64bit descriptor support. |
92 | * 0.40: 19 Jul 2005: Add support for mac address change. | |
93 | * 0.41: 30 Jul 2005: Write back original MAC in nv_close instead | |
b3df9f81 | 94 | * of nv_remove |
4ea7f299 | 95 | * 0.42: 06 Aug 2005: Fix lack of link speed initialization |
1b1b3c9b | 96 | * in the second (and later) nv_open call |
4ea7f299 AA |
97 | * 0.43: 10 Aug 2005: Add support for tx checksum. |
98 | * 0.44: 20 Aug 2005: Add support for scatter gather and segmentation. | |
99 | * 0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check | |
a971c324 | 100 | * 0.46: 20 Oct 2005: Add irq optimization modes. |
7a33e45a | 101 | * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan. |
1836098f | 102 | * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single |
fa45459e | 103 | * 0.49: 10 Dec 2005: Fix tso for large buffers. |
ee407b02 | 104 | * 0.50: 20 Jan 2006: Add 8021pq tagging support. |
0832b25a | 105 | * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings. |
d33a73c8 | 106 | * 0.52: 20 Jan 2006: Add MSI/MSIX support. |
86a0f043 | 107 | * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset. |
84b3932b | 108 | * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup. |
eb91f61b | 109 | * 0.55: 22 Mar 2006: Add flow control (pause frame). |
ebe611a4 | 110 | * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support. |
5070d340 | 111 | * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections. |
7e680c22 | 112 | * 0.58: 30 Oct 2006: Added support for sideband management unit. |
c5cf9101 | 113 | * 0.59: 30 Oct 2006: Added support for recoverable error. |
21828163 | 114 | * 0.60: 20 Jan 2007: Code optimizations for rings, rx & tx data paths, and stats. |
1da177e4 LT |
115 | * |
116 | * Known bugs: | |
117 | * We suspect that on some hardware no TX done interrupts are generated. | |
118 | * This means recovery from netif_stop_queue only happens if the hw timer | |
119 | * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT) | |
120 | * and the timer is active in the IRQMask, or if a rx packet arrives by chance. | |
121 | * If your hardware reliably generates tx done interrupts, then you can remove | |
122 | * DEV_NEED_TIMERIRQ from the driver_data flags. | |
123 | * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few | |
124 | * superfluous timer interrupts from the nic. | |
125 | */ | |
e27cdba5 SH |
126 | #ifdef CONFIG_FORCEDETH_NAPI |
127 | #define DRIVERNAPI "-NAPI" | |
128 | #else | |
129 | #define DRIVERNAPI | |
130 | #endif | |
21828163 | 131 | #define FORCEDETH_VERSION "0.60" |
1da177e4 LT |
132 | #define DRV_NAME "forcedeth" |
133 | ||
134 | #include <linux/module.h> | |
135 | #include <linux/types.h> | |
136 | #include <linux/pci.h> | |
137 | #include <linux/interrupt.h> | |
138 | #include <linux/netdevice.h> | |
139 | #include <linux/etherdevice.h> | |
140 | #include <linux/delay.h> | |
141 | #include <linux/spinlock.h> | |
142 | #include <linux/ethtool.h> | |
143 | #include <linux/timer.h> | |
144 | #include <linux/skbuff.h> | |
145 | #include <linux/mii.h> | |
146 | #include <linux/random.h> | |
147 | #include <linux/init.h> | |
22c6d143 | 148 | #include <linux/if_vlan.h> |
910638ae | 149 | #include <linux/dma-mapping.h> |
1da177e4 LT |
150 | |
151 | #include <asm/irq.h> | |
152 | #include <asm/io.h> | |
153 | #include <asm/uaccess.h> | |
154 | #include <asm/system.h> | |
155 | ||
156 | #if 0 | |
157 | #define dprintk printk | |
158 | #else | |
159 | #define dprintk(x...) do { } while (0) | |
160 | #endif | |
161 | ||
bea3348e SH |
162 | #define TX_WORK_PER_LOOP 64 |
163 | #define RX_WORK_PER_LOOP 64 | |
1da177e4 LT |
164 | |
165 | /* | |
166 | * Hardware access: | |
167 | */ | |
168 | ||
c2dba06d MS |
169 | #define DEV_NEED_TIMERIRQ 0x0001 /* set the timer irq flag in the irq mask */ |
170 | #define DEV_NEED_LINKTIMER 0x0002 /* poll link settings. Relies on the timer irq */ | |
171 | #define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */ | |
ee73362c | 172 | #define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */ |
8a4ae7f2 | 173 | #define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */ |
ee407b02 | 174 | #define DEV_HAS_VLAN 0x0020 /* device supports vlan tagging and striping */ |
d33a73c8 AA |
175 | #define DEV_HAS_MSI 0x0040 /* device supports MSI */ |
176 | #define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */ | |
86a0f043 | 177 | #define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */ |
eb91f61b | 178 | #define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */ |
57fff698 AA |
179 | #define DEV_HAS_STATISTICS_V1 0x0400 /* device supports hw statistics version 1 */ |
180 | #define DEV_HAS_STATISTICS_V2 0x0800 /* device supports hw statistics version 2 */ | |
181 | #define DEV_HAS_TEST_EXTENDED 0x1000 /* device supports extended diagnostic test */ | |
182 | #define DEV_HAS_MGMT_UNIT 0x2000 /* device supports management unit */ | |
ef756b3e | 183 | #define DEV_HAS_CORRECT_MACADDR 0x4000 /* device supports correct mac address order */ |
1da177e4 LT |
184 | |
185 | enum { | |
186 | NvRegIrqStatus = 0x000, | |
187 | #define NVREG_IRQSTAT_MIIEVENT 0x040 | |
c5cf9101 | 188 | #define NVREG_IRQSTAT_MASK 0x81ff |
1da177e4 LT |
189 | NvRegIrqMask = 0x004, |
190 | #define NVREG_IRQ_RX_ERROR 0x0001 | |
191 | #define NVREG_IRQ_RX 0x0002 | |
192 | #define NVREG_IRQ_RX_NOBUF 0x0004 | |
193 | #define NVREG_IRQ_TX_ERR 0x0008 | |
c2dba06d | 194 | #define NVREG_IRQ_TX_OK 0x0010 |
1da177e4 LT |
195 | #define NVREG_IRQ_TIMER 0x0020 |
196 | #define NVREG_IRQ_LINK 0x0040 | |
d33a73c8 AA |
197 | #define NVREG_IRQ_RX_FORCED 0x0080 |
198 | #define NVREG_IRQ_TX_FORCED 0x0100 | |
c5cf9101 | 199 | #define NVREG_IRQ_RECOVER_ERROR 0x8000 |
a971c324 | 200 | #define NVREG_IRQMASK_THROUGHPUT 0x00df |
096a458c | 201 | #define NVREG_IRQMASK_CPU 0x0060 |
d33a73c8 AA |
202 | #define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED) |
203 | #define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED) | |
c5cf9101 | 204 | #define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR) |
c2dba06d MS |
205 | |
206 | #define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \ | |
d33a73c8 | 207 | NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \ |
c5cf9101 | 208 | NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR)) |
1da177e4 LT |
209 | |
210 | NvRegUnknownSetupReg6 = 0x008, | |
211 | #define NVREG_UNKSETUP6_VAL 3 | |
212 | ||
213 | /* | |
214 | * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic | |
215 | * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms | |
216 | */ | |
217 | NvRegPollingInterval = 0x00c, | |
4e16ed1b | 218 | #define NVREG_POLL_DEFAULT_THROUGHPUT 970 /* backup tx cleanup if loop max reached */ |
a971c324 | 219 | #define NVREG_POLL_DEFAULT_CPU 13 |
d33a73c8 AA |
220 | NvRegMSIMap0 = 0x020, |
221 | NvRegMSIMap1 = 0x024, | |
222 | NvRegMSIIrqMask = 0x030, | |
223 | #define NVREG_MSI_VECTOR_0_ENABLED 0x01 | |
1da177e4 | 224 | NvRegMisc1 = 0x080, |
eb91f61b | 225 | #define NVREG_MISC1_PAUSE_TX 0x01 |
1da177e4 LT |
226 | #define NVREG_MISC1_HD 0x02 |
227 | #define NVREG_MISC1_FORCE 0x3b0f3c | |
228 | ||
86a0f043 AA |
229 | NvRegMacReset = 0x3c, |
230 | #define NVREG_MAC_RESET_ASSERT 0x0F3 | |
1da177e4 LT |
231 | NvRegTransmitterControl = 0x084, |
232 | #define NVREG_XMITCTL_START 0x01 | |
7e680c22 AA |
233 | #define NVREG_XMITCTL_MGMT_ST 0x40000000 |
234 | #define NVREG_XMITCTL_SYNC_MASK 0x000f0000 | |
235 | #define NVREG_XMITCTL_SYNC_NOT_READY 0x0 | |
236 | #define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000 | |
237 | #define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00 | |
238 | #define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0 | |
239 | #define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000 | |
240 | #define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000 | |
241 | #define NVREG_XMITCTL_HOST_LOADED 0x00004000 | |
f35723ec | 242 | #define NVREG_XMITCTL_TX_PATH_EN 0x01000000 |
1da177e4 LT |
243 | NvRegTransmitterStatus = 0x088, |
244 | #define NVREG_XMITSTAT_BUSY 0x01 | |
245 | ||
246 | NvRegPacketFilterFlags = 0x8c, | |
eb91f61b AA |
247 | #define NVREG_PFF_PAUSE_RX 0x08 |
248 | #define NVREG_PFF_ALWAYS 0x7F0000 | |
1da177e4 LT |
249 | #define NVREG_PFF_PROMISC 0x80 |
250 | #define NVREG_PFF_MYADDR 0x20 | |
9589c77a | 251 | #define NVREG_PFF_LOOPBACK 0x10 |
1da177e4 LT |
252 | |
253 | NvRegOffloadConfig = 0x90, | |
254 | #define NVREG_OFFLOAD_HOMEPHY 0x601 | |
255 | #define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE | |
256 | NvRegReceiverControl = 0x094, | |
257 | #define NVREG_RCVCTL_START 0x01 | |
f35723ec | 258 | #define NVREG_RCVCTL_RX_PATH_EN 0x01000000 |
1da177e4 LT |
259 | NvRegReceiverStatus = 0x98, |
260 | #define NVREG_RCVSTAT_BUSY 0x01 | |
261 | ||
262 | NvRegRandomSeed = 0x9c, | |
263 | #define NVREG_RNDSEED_MASK 0x00ff | |
264 | #define NVREG_RNDSEED_FORCE 0x7f00 | |
265 | #define NVREG_RNDSEED_FORCE2 0x2d00 | |
266 | #define NVREG_RNDSEED_FORCE3 0x7400 | |
267 | ||
9744e218 AA |
268 | NvRegTxDeferral = 0xA0, |
269 | #define NVREG_TX_DEFERRAL_DEFAULT 0x15050f | |
270 | #define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f | |
271 | #define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f | |
272 | NvRegRxDeferral = 0xA4, | |
273 | #define NVREG_RX_DEFERRAL_DEFAULT 0x16 | |
1da177e4 LT |
274 | NvRegMacAddrA = 0xA8, |
275 | NvRegMacAddrB = 0xAC, | |
276 | NvRegMulticastAddrA = 0xB0, | |
277 | #define NVREG_MCASTADDRA_FORCE 0x01 | |
278 | NvRegMulticastAddrB = 0xB4, | |
279 | NvRegMulticastMaskA = 0xB8, | |
280 | NvRegMulticastMaskB = 0xBC, | |
281 | ||
282 | NvRegPhyInterface = 0xC0, | |
283 | #define PHY_RGMII 0x10000000 | |
284 | ||
285 | NvRegTxRingPhysAddr = 0x100, | |
286 | NvRegRxRingPhysAddr = 0x104, | |
287 | NvRegRingSizes = 0x108, | |
288 | #define NVREG_RINGSZ_TXSHIFT 0 | |
289 | #define NVREG_RINGSZ_RXSHIFT 16 | |
5070d340 AA |
290 | NvRegTransmitPoll = 0x10c, |
291 | #define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000 | |
1da177e4 LT |
292 | NvRegLinkSpeed = 0x110, |
293 | #define NVREG_LINKSPEED_FORCE 0x10000 | |
294 | #define NVREG_LINKSPEED_10 1000 | |
295 | #define NVREG_LINKSPEED_100 100 | |
296 | #define NVREG_LINKSPEED_1000 50 | |
297 | #define NVREG_LINKSPEED_MASK (0xFFF) | |
298 | NvRegUnknownSetupReg5 = 0x130, | |
299 | #define NVREG_UNKSETUP5_BIT31 (1<<31) | |
95d161cb AA |
300 | NvRegTxWatermark = 0x13c, |
301 | #define NVREG_TX_WM_DESC1_DEFAULT 0x0200010 | |
302 | #define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000 | |
303 | #define NVREG_TX_WM_DESC2_3_1000 0xfe08000 | |
1da177e4 LT |
304 | NvRegTxRxControl = 0x144, |
305 | #define NVREG_TXRXCTL_KICK 0x0001 | |
306 | #define NVREG_TXRXCTL_BIT1 0x0002 | |
307 | #define NVREG_TXRXCTL_BIT2 0x0004 | |
308 | #define NVREG_TXRXCTL_IDLE 0x0008 | |
309 | #define NVREG_TXRXCTL_RESET 0x0010 | |
310 | #define NVREG_TXRXCTL_RXCHECK 0x0400 | |
8a4ae7f2 | 311 | #define NVREG_TXRXCTL_DESC_1 0 |
d2f78412 AA |
312 | #define NVREG_TXRXCTL_DESC_2 0x002100 |
313 | #define NVREG_TXRXCTL_DESC_3 0xc02200 | |
ee407b02 AA |
314 | #define NVREG_TXRXCTL_VLANSTRIP 0x00040 |
315 | #define NVREG_TXRXCTL_VLANINS 0x00080 | |
0832b25a AA |
316 | NvRegTxRingPhysAddrHigh = 0x148, |
317 | NvRegRxRingPhysAddrHigh = 0x14C, | |
eb91f61b AA |
318 | NvRegTxPauseFrame = 0x170, |
319 | #define NVREG_TX_PAUSEFRAME_DISABLE 0x1ff0080 | |
320 | #define NVREG_TX_PAUSEFRAME_ENABLE 0x0c00030 | |
1da177e4 LT |
321 | NvRegMIIStatus = 0x180, |
322 | #define NVREG_MIISTAT_ERROR 0x0001 | |
323 | #define NVREG_MIISTAT_LINKCHANGE 0x0008 | |
324 | #define NVREG_MIISTAT_MASK 0x000f | |
325 | #define NVREG_MIISTAT_MASK2 0x000f | |
7e680c22 AA |
326 | NvRegMIIMask = 0x184, |
327 | #define NVREG_MII_LINKCHANGE 0x0008 | |
1da177e4 LT |
328 | |
329 | NvRegAdapterControl = 0x188, | |
330 | #define NVREG_ADAPTCTL_START 0x02 | |
331 | #define NVREG_ADAPTCTL_LINKUP 0x04 | |
332 | #define NVREG_ADAPTCTL_PHYVALID 0x40000 | |
333 | #define NVREG_ADAPTCTL_RUNNING 0x100000 | |
334 | #define NVREG_ADAPTCTL_PHYSHIFT 24 | |
335 | NvRegMIISpeed = 0x18c, | |
336 | #define NVREG_MIISPEED_BIT8 (1<<8) | |
337 | #define NVREG_MIIDELAY 5 | |
338 | NvRegMIIControl = 0x190, | |
339 | #define NVREG_MIICTL_INUSE 0x08000 | |
340 | #define NVREG_MIICTL_WRITE 0x00400 | |
341 | #define NVREG_MIICTL_ADDRSHIFT 5 | |
342 | NvRegMIIData = 0x194, | |
343 | NvRegWakeUpFlags = 0x200, | |
344 | #define NVREG_WAKEUPFLAGS_VAL 0x7770 | |
345 | #define NVREG_WAKEUPFLAGS_BUSYSHIFT 24 | |
346 | #define NVREG_WAKEUPFLAGS_ENABLESHIFT 16 | |
347 | #define NVREG_WAKEUPFLAGS_D3SHIFT 12 | |
348 | #define NVREG_WAKEUPFLAGS_D2SHIFT 8 | |
349 | #define NVREG_WAKEUPFLAGS_D1SHIFT 4 | |
350 | #define NVREG_WAKEUPFLAGS_D0SHIFT 0 | |
351 | #define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01 | |
352 | #define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02 | |
353 | #define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04 | |
354 | #define NVREG_WAKEUPFLAGS_ENABLE 0x1111 | |
355 | ||
356 | NvRegPatternCRC = 0x204, | |
357 | NvRegPatternMask = 0x208, | |
358 | NvRegPowerCap = 0x268, | |
359 | #define NVREG_POWERCAP_D3SUPP (1<<30) | |
360 | #define NVREG_POWERCAP_D2SUPP (1<<26) | |
361 | #define NVREG_POWERCAP_D1SUPP (1<<25) | |
362 | NvRegPowerState = 0x26c, | |
363 | #define NVREG_POWERSTATE_POWEREDUP 0x8000 | |
364 | #define NVREG_POWERSTATE_VALID 0x0100 | |
365 | #define NVREG_POWERSTATE_MASK 0x0003 | |
366 | #define NVREG_POWERSTATE_D0 0x0000 | |
367 | #define NVREG_POWERSTATE_D1 0x0001 | |
368 | #define NVREG_POWERSTATE_D2 0x0002 | |
369 | #define NVREG_POWERSTATE_D3 0x0003 | |
52da3578 AA |
370 | NvRegTxCnt = 0x280, |
371 | NvRegTxZeroReXmt = 0x284, | |
372 | NvRegTxOneReXmt = 0x288, | |
373 | NvRegTxManyReXmt = 0x28c, | |
374 | NvRegTxLateCol = 0x290, | |
375 | NvRegTxUnderflow = 0x294, | |
376 | NvRegTxLossCarrier = 0x298, | |
377 | NvRegTxExcessDef = 0x29c, | |
378 | NvRegTxRetryErr = 0x2a0, | |
379 | NvRegRxFrameErr = 0x2a4, | |
380 | NvRegRxExtraByte = 0x2a8, | |
381 | NvRegRxLateCol = 0x2ac, | |
382 | NvRegRxRunt = 0x2b0, | |
383 | NvRegRxFrameTooLong = 0x2b4, | |
384 | NvRegRxOverflow = 0x2b8, | |
385 | NvRegRxFCSErr = 0x2bc, | |
386 | NvRegRxFrameAlignErr = 0x2c0, | |
387 | NvRegRxLenErr = 0x2c4, | |
388 | NvRegRxUnicast = 0x2c8, | |
389 | NvRegRxMulticast = 0x2cc, | |
390 | NvRegRxBroadcast = 0x2d0, | |
391 | NvRegTxDef = 0x2d4, | |
392 | NvRegTxFrame = 0x2d8, | |
393 | NvRegRxCnt = 0x2dc, | |
394 | NvRegTxPause = 0x2e0, | |
395 | NvRegRxPause = 0x2e4, | |
396 | NvRegRxDropFrame = 0x2e8, | |
ee407b02 AA |
397 | NvRegVlanControl = 0x300, |
398 | #define NVREG_VLANCONTROL_ENABLE 0x2000 | |
d33a73c8 AA |
399 | NvRegMSIXMap0 = 0x3e0, |
400 | NvRegMSIXMap1 = 0x3e4, | |
401 | NvRegMSIXIrqStatus = 0x3f0, | |
86a0f043 AA |
402 | |
403 | NvRegPowerState2 = 0x600, | |
404 | #define NVREG_POWERSTATE2_POWERUP_MASK 0x0F11 | |
405 | #define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001 | |
1da177e4 LT |
406 | }; |
407 | ||
408 | /* Big endian: should work, but is untested */ | |
409 | struct ring_desc { | |
a8bed49e SH |
410 | __le32 buf; |
411 | __le32 flaglen; | |
1da177e4 LT |
412 | }; |
413 | ||
ee73362c | 414 | struct ring_desc_ex { |
a8bed49e SH |
415 | __le32 bufhigh; |
416 | __le32 buflow; | |
417 | __le32 txvlan; | |
418 | __le32 flaglen; | |
ee73362c MS |
419 | }; |
420 | ||
f82a9352 | 421 | union ring_type { |
ee73362c MS |
422 | struct ring_desc* orig; |
423 | struct ring_desc_ex* ex; | |
f82a9352 | 424 | }; |
ee73362c | 425 | |
1da177e4 LT |
426 | #define FLAG_MASK_V1 0xffff0000 |
427 | #define FLAG_MASK_V2 0xffffc000 | |
428 | #define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1) | |
429 | #define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2) | |
430 | ||
431 | #define NV_TX_LASTPACKET (1<<16) | |
432 | #define NV_TX_RETRYERROR (1<<19) | |
c2dba06d | 433 | #define NV_TX_FORCED_INTERRUPT (1<<24) |
1da177e4 LT |
434 | #define NV_TX_DEFERRED (1<<26) |
435 | #define NV_TX_CARRIERLOST (1<<27) | |
436 | #define NV_TX_LATECOLLISION (1<<28) | |
437 | #define NV_TX_UNDERFLOW (1<<29) | |
438 | #define NV_TX_ERROR (1<<30) | |
439 | #define NV_TX_VALID (1<<31) | |
440 | ||
441 | #define NV_TX2_LASTPACKET (1<<29) | |
442 | #define NV_TX2_RETRYERROR (1<<18) | |
c2dba06d | 443 | #define NV_TX2_FORCED_INTERRUPT (1<<30) |
1da177e4 LT |
444 | #define NV_TX2_DEFERRED (1<<25) |
445 | #define NV_TX2_CARRIERLOST (1<<26) | |
446 | #define NV_TX2_LATECOLLISION (1<<27) | |
447 | #define NV_TX2_UNDERFLOW (1<<28) | |
448 | /* error and valid are the same for both */ | |
449 | #define NV_TX2_ERROR (1<<30) | |
450 | #define NV_TX2_VALID (1<<31) | |
ac9c1897 AA |
451 | #define NV_TX2_TSO (1<<28) |
452 | #define NV_TX2_TSO_SHIFT 14 | |
fa45459e AA |
453 | #define NV_TX2_TSO_MAX_SHIFT 14 |
454 | #define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT) | |
8a4ae7f2 MS |
455 | #define NV_TX2_CHECKSUM_L3 (1<<27) |
456 | #define NV_TX2_CHECKSUM_L4 (1<<26) | |
1da177e4 | 457 | |
ee407b02 AA |
458 | #define NV_TX3_VLAN_TAG_PRESENT (1<<18) |
459 | ||
1da177e4 LT |
460 | #define NV_RX_DESCRIPTORVALID (1<<16) |
461 | #define NV_RX_MISSEDFRAME (1<<17) | |
462 | #define NV_RX_SUBSTRACT1 (1<<18) | |
463 | #define NV_RX_ERROR1 (1<<23) | |
464 | #define NV_RX_ERROR2 (1<<24) | |
465 | #define NV_RX_ERROR3 (1<<25) | |
466 | #define NV_RX_ERROR4 (1<<26) | |
467 | #define NV_RX_CRCERR (1<<27) | |
468 | #define NV_RX_OVERFLOW (1<<28) | |
469 | #define NV_RX_FRAMINGERR (1<<29) | |
470 | #define NV_RX_ERROR (1<<30) | |
471 | #define NV_RX_AVAIL (1<<31) | |
472 | ||
473 | #define NV_RX2_CHECKSUMMASK (0x1C000000) | |
474 | #define NV_RX2_CHECKSUMOK1 (0x10000000) | |
475 | #define NV_RX2_CHECKSUMOK2 (0x14000000) | |
476 | #define NV_RX2_CHECKSUMOK3 (0x18000000) | |
477 | #define NV_RX2_DESCRIPTORVALID (1<<29) | |
478 | #define NV_RX2_SUBSTRACT1 (1<<25) | |
479 | #define NV_RX2_ERROR1 (1<<18) | |
480 | #define NV_RX2_ERROR2 (1<<19) | |
481 | #define NV_RX2_ERROR3 (1<<20) | |
482 | #define NV_RX2_ERROR4 (1<<21) | |
483 | #define NV_RX2_CRCERR (1<<22) | |
484 | #define NV_RX2_OVERFLOW (1<<23) | |
485 | #define NV_RX2_FRAMINGERR (1<<24) | |
486 | /* error and avail are the same for both */ | |
487 | #define NV_RX2_ERROR (1<<30) | |
488 | #define NV_RX2_AVAIL (1<<31) | |
489 | ||
ee407b02 AA |
490 | #define NV_RX3_VLAN_TAG_PRESENT (1<<16) |
491 | #define NV_RX3_VLAN_TAG_MASK (0x0000FFFF) | |
492 | ||
1da177e4 | 493 | /* Miscelaneous hardware related defines: */ |
86a0f043 | 494 | #define NV_PCI_REGSZ_VER1 0x270 |
57fff698 AA |
495 | #define NV_PCI_REGSZ_VER2 0x2d4 |
496 | #define NV_PCI_REGSZ_VER3 0x604 | |
1da177e4 LT |
497 | |
498 | /* various timeout delays: all in usec */ | |
499 | #define NV_TXRX_RESET_DELAY 4 | |
500 | #define NV_TXSTOP_DELAY1 10 | |
501 | #define NV_TXSTOP_DELAY1MAX 500000 | |
502 | #define NV_TXSTOP_DELAY2 100 | |
503 | #define NV_RXSTOP_DELAY1 10 | |
504 | #define NV_RXSTOP_DELAY1MAX 500000 | |
505 | #define NV_RXSTOP_DELAY2 100 | |
506 | #define NV_SETUP5_DELAY 5 | |
507 | #define NV_SETUP5_DELAYMAX 50000 | |
508 | #define NV_POWERUP_DELAY 5 | |
509 | #define NV_POWERUP_DELAYMAX 5000 | |
510 | #define NV_MIIBUSY_DELAY 50 | |
511 | #define NV_MIIPHY_DELAY 10 | |
512 | #define NV_MIIPHY_DELAYMAX 10000 | |
86a0f043 | 513 | #define NV_MAC_RESET_DELAY 64 |
1da177e4 LT |
514 | |
515 | #define NV_WAKEUPPATTERNS 5 | |
516 | #define NV_WAKEUPMASKENTRIES 4 | |
517 | ||
518 | /* General driver defaults */ | |
519 | #define NV_WATCHDOG_TIMEO (5*HZ) | |
520 | ||
eafa59f6 AA |
521 | #define RX_RING_DEFAULT 128 |
522 | #define TX_RING_DEFAULT 256 | |
523 | #define RX_RING_MIN 128 | |
524 | #define TX_RING_MIN 64 | |
525 | #define RING_MAX_DESC_VER_1 1024 | |
526 | #define RING_MAX_DESC_VER_2_3 16384 | |
1da177e4 LT |
527 | |
528 | /* rx/tx mac addr + type + vlan + align + slack*/ | |
d81c0983 MS |
529 | #define NV_RX_HEADERS (64) |
530 | /* even more slack. */ | |
531 | #define NV_RX_ALLOC_PAD (64) | |
532 | ||
533 | /* maximum mtu size */ | |
534 | #define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */ | |
535 | #define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */ | |
1da177e4 LT |
536 | |
537 | #define OOM_REFILL (1+HZ/20) | |
538 | #define POLL_WAIT (1+HZ/100) | |
539 | #define LINK_TIMEOUT (3*HZ) | |
52da3578 | 540 | #define STATS_INTERVAL (10*HZ) |
1da177e4 | 541 | |
f3b197ac | 542 | /* |
1da177e4 | 543 | * desc_ver values: |
8a4ae7f2 MS |
544 | * The nic supports three different descriptor types: |
545 | * - DESC_VER_1: Original | |
546 | * - DESC_VER_2: support for jumbo frames. | |
547 | * - DESC_VER_3: 64-bit format. | |
1da177e4 | 548 | */ |
8a4ae7f2 MS |
549 | #define DESC_VER_1 1 |
550 | #define DESC_VER_2 2 | |
551 | #define DESC_VER_3 3 | |
1da177e4 LT |
552 | |
553 | /* PHY defines */ | |
554 | #define PHY_OUI_MARVELL 0x5043 | |
555 | #define PHY_OUI_CICADA 0x03f1 | |
d215d8a2 | 556 | #define PHY_OUI_VITESSE 0x01c1 |
ba685fb2 | 557 | #define PHY_OUI_REALTEK 0x0732 |
1da177e4 LT |
558 | #define PHYID1_OUI_MASK 0x03ff |
559 | #define PHYID1_OUI_SHFT 6 | |
560 | #define PHYID2_OUI_MASK 0xfc00 | |
561 | #define PHYID2_OUI_SHFT 10 | |
edf7e5ec AA |
562 | #define PHYID2_MODEL_MASK 0x03f0 |
563 | #define PHY_MODEL_MARVELL_E3016 0x220 | |
564 | #define PHY_MARVELL_E3016_INITMASK 0x0300 | |
14a67f3c AA |
565 | #define PHY_CICADA_INIT1 0x0f000 |
566 | #define PHY_CICADA_INIT2 0x0e00 | |
567 | #define PHY_CICADA_INIT3 0x01000 | |
568 | #define PHY_CICADA_INIT4 0x0200 | |
569 | #define PHY_CICADA_INIT5 0x0004 | |
570 | #define PHY_CICADA_INIT6 0x02000 | |
d215d8a2 AA |
571 | #define PHY_VITESSE_INIT_REG1 0x1f |
572 | #define PHY_VITESSE_INIT_REG2 0x10 | |
573 | #define PHY_VITESSE_INIT_REG3 0x11 | |
574 | #define PHY_VITESSE_INIT_REG4 0x12 | |
575 | #define PHY_VITESSE_INIT_MSK1 0xc | |
576 | #define PHY_VITESSE_INIT_MSK2 0x0180 | |
577 | #define PHY_VITESSE_INIT1 0x52b5 | |
578 | #define PHY_VITESSE_INIT2 0xaf8a | |
579 | #define PHY_VITESSE_INIT3 0x8 | |
580 | #define PHY_VITESSE_INIT4 0x8f8a | |
581 | #define PHY_VITESSE_INIT5 0xaf86 | |
582 | #define PHY_VITESSE_INIT6 0x8f86 | |
583 | #define PHY_VITESSE_INIT7 0xaf82 | |
584 | #define PHY_VITESSE_INIT8 0x0100 | |
585 | #define PHY_VITESSE_INIT9 0x8f82 | |
586 | #define PHY_VITESSE_INIT10 0x0 | |
c5e3ae88 AA |
587 | #define PHY_REALTEK_INIT_REG1 0x1f |
588 | #define PHY_REALTEK_INIT_REG2 0x19 | |
589 | #define PHY_REALTEK_INIT_REG3 0x13 | |
590 | #define PHY_REALTEK_INIT1 0x0000 | |
591 | #define PHY_REALTEK_INIT2 0x8e00 | |
592 | #define PHY_REALTEK_INIT3 0x0001 | |
593 | #define PHY_REALTEK_INIT4 0xad17 | |
d215d8a2 | 594 | |
1da177e4 LT |
595 | #define PHY_GIGABIT 0x0100 |
596 | ||
597 | #define PHY_TIMEOUT 0x1 | |
598 | #define PHY_ERROR 0x2 | |
599 | ||
600 | #define PHY_100 0x1 | |
601 | #define PHY_1000 0x2 | |
602 | #define PHY_HALF 0x100 | |
603 | ||
eb91f61b AA |
604 | #define NV_PAUSEFRAME_RX_CAPABLE 0x0001 |
605 | #define NV_PAUSEFRAME_TX_CAPABLE 0x0002 | |
606 | #define NV_PAUSEFRAME_RX_ENABLE 0x0004 | |
607 | #define NV_PAUSEFRAME_TX_ENABLE 0x0008 | |
b6d0773f AA |
608 | #define NV_PAUSEFRAME_RX_REQ 0x0010 |
609 | #define NV_PAUSEFRAME_TX_REQ 0x0020 | |
610 | #define NV_PAUSEFRAME_AUTONEG 0x0040 | |
1da177e4 | 611 | |
d33a73c8 AA |
612 | /* MSI/MSI-X defines */ |
613 | #define NV_MSI_X_MAX_VECTORS 8 | |
614 | #define NV_MSI_X_VECTORS_MASK 0x000f | |
615 | #define NV_MSI_CAPABLE 0x0010 | |
616 | #define NV_MSI_X_CAPABLE 0x0020 | |
617 | #define NV_MSI_ENABLED 0x0040 | |
618 | #define NV_MSI_X_ENABLED 0x0080 | |
619 | ||
620 | #define NV_MSI_X_VECTOR_ALL 0x0 | |
621 | #define NV_MSI_X_VECTOR_RX 0x0 | |
622 | #define NV_MSI_X_VECTOR_TX 0x1 | |
623 | #define NV_MSI_X_VECTOR_OTHER 0x2 | |
1da177e4 | 624 | |
52da3578 AA |
625 | /* statistics */ |
626 | struct nv_ethtool_str { | |
627 | char name[ETH_GSTRING_LEN]; | |
628 | }; | |
629 | ||
630 | static const struct nv_ethtool_str nv_estats_str[] = { | |
631 | { "tx_bytes" }, | |
632 | { "tx_zero_rexmt" }, | |
633 | { "tx_one_rexmt" }, | |
634 | { "tx_many_rexmt" }, | |
635 | { "tx_late_collision" }, | |
636 | { "tx_fifo_errors" }, | |
637 | { "tx_carrier_errors" }, | |
638 | { "tx_excess_deferral" }, | |
639 | { "tx_retry_error" }, | |
52da3578 AA |
640 | { "rx_frame_error" }, |
641 | { "rx_extra_byte" }, | |
642 | { "rx_late_collision" }, | |
643 | { "rx_runt" }, | |
644 | { "rx_frame_too_long" }, | |
645 | { "rx_over_errors" }, | |
646 | { "rx_crc_errors" }, | |
647 | { "rx_frame_align_error" }, | |
648 | { "rx_length_error" }, | |
649 | { "rx_unicast" }, | |
650 | { "rx_multicast" }, | |
651 | { "rx_broadcast" }, | |
57fff698 AA |
652 | { "rx_packets" }, |
653 | { "rx_errors_total" }, | |
654 | { "tx_errors_total" }, | |
655 | ||
656 | /* version 2 stats */ | |
657 | { "tx_deferral" }, | |
658 | { "tx_packets" }, | |
52da3578 | 659 | { "rx_bytes" }, |
57fff698 | 660 | { "tx_pause" }, |
52da3578 | 661 | { "rx_pause" }, |
57fff698 | 662 | { "rx_drop_frame" } |
52da3578 AA |
663 | }; |
664 | ||
665 | struct nv_ethtool_stats { | |
666 | u64 tx_bytes; | |
667 | u64 tx_zero_rexmt; | |
668 | u64 tx_one_rexmt; | |
669 | u64 tx_many_rexmt; | |
670 | u64 tx_late_collision; | |
671 | u64 tx_fifo_errors; | |
672 | u64 tx_carrier_errors; | |
673 | u64 tx_excess_deferral; | |
674 | u64 tx_retry_error; | |
52da3578 AA |
675 | u64 rx_frame_error; |
676 | u64 rx_extra_byte; | |
677 | u64 rx_late_collision; | |
678 | u64 rx_runt; | |
679 | u64 rx_frame_too_long; | |
680 | u64 rx_over_errors; | |
681 | u64 rx_crc_errors; | |
682 | u64 rx_frame_align_error; | |
683 | u64 rx_length_error; | |
684 | u64 rx_unicast; | |
685 | u64 rx_multicast; | |
686 | u64 rx_broadcast; | |
57fff698 AA |
687 | u64 rx_packets; |
688 | u64 rx_errors_total; | |
689 | u64 tx_errors_total; | |
690 | ||
691 | /* version 2 stats */ | |
692 | u64 tx_deferral; | |
693 | u64 tx_packets; | |
52da3578 | 694 | u64 rx_bytes; |
57fff698 | 695 | u64 tx_pause; |
52da3578 AA |
696 | u64 rx_pause; |
697 | u64 rx_drop_frame; | |
52da3578 AA |
698 | }; |
699 | ||
57fff698 AA |
700 | #define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64)) |
701 | #define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6) | |
702 | ||
9589c77a AA |
703 | /* diagnostics */ |
704 | #define NV_TEST_COUNT_BASE 3 | |
705 | #define NV_TEST_COUNT_EXTENDED 4 | |
706 | ||
707 | static const struct nv_ethtool_str nv_etests_str[] = { | |
708 | { "link (online/offline)" }, | |
709 | { "register (offline) " }, | |
710 | { "interrupt (offline) " }, | |
711 | { "loopback (offline) " } | |
712 | }; | |
713 | ||
714 | struct register_test { | |
a8bed49e SH |
715 | __le32 reg; |
716 | __le32 mask; | |
9589c77a AA |
717 | }; |
718 | ||
719 | static const struct register_test nv_registers_test[] = { | |
720 | { NvRegUnknownSetupReg6, 0x01 }, | |
721 | { NvRegMisc1, 0x03c }, | |
722 | { NvRegOffloadConfig, 0x03ff }, | |
723 | { NvRegMulticastAddrA, 0xffffffff }, | |
95d161cb | 724 | { NvRegTxWatermark, 0x0ff }, |
9589c77a AA |
725 | { NvRegWakeUpFlags, 0x07777 }, |
726 | { 0,0 } | |
727 | }; | |
728 | ||
761fcd9e AA |
729 | struct nv_skb_map { |
730 | struct sk_buff *skb; | |
731 | dma_addr_t dma; | |
732 | unsigned int dma_len; | |
733 | }; | |
734 | ||
1da177e4 LT |
735 | /* |
736 | * SMP locking: | |
737 | * All hardware access under dev->priv->lock, except the performance | |
738 | * critical parts: | |
739 | * - rx is (pseudo-) lockless: it relies on the single-threading provided | |
740 | * by the arch code for interrupts. | |
932ff279 | 741 | * - tx setup is lockless: it relies on netif_tx_lock. Actual submission |
1da177e4 | 742 | * needs dev->priv->lock :-( |
932ff279 | 743 | * - set_multicast_list: preparation lockless, relies on netif_tx_lock. |
1da177e4 LT |
744 | */ |
745 | ||
746 | /* in dev: base, irq */ | |
747 | struct fe_priv { | |
748 | spinlock_t lock; | |
749 | ||
bea3348e SH |
750 | struct net_device *dev; |
751 | struct napi_struct napi; | |
752 | ||
1da177e4 LT |
753 | /* General data: |
754 | * Locking: spin_lock(&np->lock); */ | |
755 | struct net_device_stats stats; | |
52da3578 | 756 | struct nv_ethtool_stats estats; |
1da177e4 LT |
757 | int in_shutdown; |
758 | u32 linkspeed; | |
759 | int duplex; | |
760 | int autoneg; | |
761 | int fixed_mode; | |
762 | int phyaddr; | |
763 | int wolenabled; | |
764 | unsigned int phy_oui; | |
edf7e5ec | 765 | unsigned int phy_model; |
1da177e4 | 766 | u16 gigabit; |
9589c77a | 767 | int intr_test; |
c5cf9101 | 768 | int recover_error; |
1da177e4 LT |
769 | |
770 | /* General data: RO fields */ | |
771 | dma_addr_t ring_addr; | |
772 | struct pci_dev *pci_dev; | |
773 | u32 orig_mac[2]; | |
774 | u32 irqmask; | |
775 | u32 desc_ver; | |
8a4ae7f2 | 776 | u32 txrxctl_bits; |
ee407b02 | 777 | u32 vlanctl_bits; |
86a0f043 AA |
778 | u32 driver_data; |
779 | u32 register_size; | |
f2ad2d9b | 780 | int rx_csum; |
7e680c22 | 781 | u32 mac_in_use; |
1da177e4 LT |
782 | |
783 | void __iomem *base; | |
784 | ||
785 | /* rx specific fields. | |
786 | * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); | |
787 | */ | |
761fcd9e AA |
788 | union ring_type get_rx, put_rx, first_rx, last_rx; |
789 | struct nv_skb_map *get_rx_ctx, *put_rx_ctx; | |
790 | struct nv_skb_map *first_rx_ctx, *last_rx_ctx; | |
791 | struct nv_skb_map *rx_skb; | |
792 | ||
f82a9352 | 793 | union ring_type rx_ring; |
1da177e4 | 794 | unsigned int rx_buf_sz; |
d81c0983 | 795 | unsigned int pkt_limit; |
1da177e4 LT |
796 | struct timer_list oom_kick; |
797 | struct timer_list nic_poll; | |
52da3578 | 798 | struct timer_list stats_poll; |
d33a73c8 | 799 | u32 nic_poll_irq; |
eafa59f6 | 800 | int rx_ring_size; |
1da177e4 LT |
801 | |
802 | /* media detection workaround. | |
803 | * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); | |
804 | */ | |
805 | int need_linktimer; | |
806 | unsigned long link_timeout; | |
807 | /* | |
808 | * tx specific fields. | |
809 | */ | |
761fcd9e AA |
810 | union ring_type get_tx, put_tx, first_tx, last_tx; |
811 | struct nv_skb_map *get_tx_ctx, *put_tx_ctx; | |
812 | struct nv_skb_map *first_tx_ctx, *last_tx_ctx; | |
813 | struct nv_skb_map *tx_skb; | |
814 | ||
f82a9352 | 815 | union ring_type tx_ring; |
1da177e4 | 816 | u32 tx_flags; |
eafa59f6 | 817 | int tx_ring_size; |
aaa37d2d | 818 | int tx_stop; |
ee407b02 AA |
819 | |
820 | /* vlan fields */ | |
821 | struct vlan_group *vlangrp; | |
d33a73c8 AA |
822 | |
823 | /* msi/msi-x fields */ | |
824 | u32 msi_flags; | |
825 | struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS]; | |
eb91f61b AA |
826 | |
827 | /* flow control */ | |
828 | u32 pause_flags; | |
1da177e4 LT |
829 | }; |
830 | ||
831 | /* | |
832 | * Maximum number of loops until we assume that a bit in the irq mask | |
833 | * is stuck. Overridable with module param. | |
834 | */ | |
835 | static int max_interrupt_work = 5; | |
836 | ||
a971c324 AA |
837 | /* |
838 | * Optimization can be either throuput mode or cpu mode | |
f3b197ac | 839 | * |
a971c324 AA |
840 | * Throughput Mode: Every tx and rx packet will generate an interrupt. |
841 | * CPU Mode: Interrupts are controlled by a timer. | |
842 | */ | |
69fe3fd7 AA |
843 | enum { |
844 | NV_OPTIMIZATION_MODE_THROUGHPUT, | |
845 | NV_OPTIMIZATION_MODE_CPU | |
846 | }; | |
a971c324 AA |
847 | static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT; |
848 | ||
849 | /* | |
850 | * Poll interval for timer irq | |
851 | * | |
852 | * This interval determines how frequent an interrupt is generated. | |
853 | * The is value is determined by [(time_in_micro_secs * 100) / (2^10)] | |
854 | * Min = 0, and Max = 65535 | |
855 | */ | |
856 | static int poll_interval = -1; | |
857 | ||
d33a73c8 | 858 | /* |
69fe3fd7 | 859 | * MSI interrupts |
d33a73c8 | 860 | */ |
69fe3fd7 AA |
861 | enum { |
862 | NV_MSI_INT_DISABLED, | |
863 | NV_MSI_INT_ENABLED | |
864 | }; | |
865 | static int msi = NV_MSI_INT_ENABLED; | |
d33a73c8 AA |
866 | |
867 | /* | |
69fe3fd7 | 868 | * MSIX interrupts |
d33a73c8 | 869 | */ |
69fe3fd7 AA |
870 | enum { |
871 | NV_MSIX_INT_DISABLED, | |
872 | NV_MSIX_INT_ENABLED | |
873 | }; | |
caf96469 | 874 | static int msix = NV_MSIX_INT_DISABLED; |
69fe3fd7 AA |
875 | |
876 | /* | |
877 | * DMA 64bit | |
878 | */ | |
879 | enum { | |
880 | NV_DMA_64BIT_DISABLED, | |
881 | NV_DMA_64BIT_ENABLED | |
882 | }; | |
883 | static int dma_64bit = NV_DMA_64BIT_ENABLED; | |
d33a73c8 | 884 | |
1da177e4 LT |
885 | static inline struct fe_priv *get_nvpriv(struct net_device *dev) |
886 | { | |
887 | return netdev_priv(dev); | |
888 | } | |
889 | ||
890 | static inline u8 __iomem *get_hwbase(struct net_device *dev) | |
891 | { | |
ac9c1897 | 892 | return ((struct fe_priv *)netdev_priv(dev))->base; |
1da177e4 LT |
893 | } |
894 | ||
895 | static inline void pci_push(u8 __iomem *base) | |
896 | { | |
897 | /* force out pending posted writes */ | |
898 | readl(base); | |
899 | } | |
900 | ||
901 | static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v) | |
902 | { | |
f82a9352 | 903 | return le32_to_cpu(prd->flaglen) |
1da177e4 LT |
904 | & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2); |
905 | } | |
906 | ||
ee73362c MS |
907 | static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v) |
908 | { | |
f82a9352 | 909 | return le32_to_cpu(prd->flaglen) & LEN_MASK_V2; |
ee73362c MS |
910 | } |
911 | ||
1da177e4 LT |
912 | static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, |
913 | int delay, int delaymax, const char *msg) | |
914 | { | |
915 | u8 __iomem *base = get_hwbase(dev); | |
916 | ||
917 | pci_push(base); | |
918 | do { | |
919 | udelay(delay); | |
920 | delaymax -= delay; | |
921 | if (delaymax < 0) { | |
922 | if (msg) | |
923 | printk(msg); | |
924 | return 1; | |
925 | } | |
926 | } while ((readl(base + offset) & mask) != target); | |
927 | return 0; | |
928 | } | |
929 | ||
0832b25a AA |
930 | #define NV_SETUP_RX_RING 0x01 |
931 | #define NV_SETUP_TX_RING 0x02 | |
932 | ||
933 | static void setup_hw_rings(struct net_device *dev, int rxtx_flags) | |
934 | { | |
935 | struct fe_priv *np = get_nvpriv(dev); | |
936 | u8 __iomem *base = get_hwbase(dev); | |
937 | ||
938 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | |
939 | if (rxtx_flags & NV_SETUP_RX_RING) { | |
940 | writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr); | |
941 | } | |
942 | if (rxtx_flags & NV_SETUP_TX_RING) { | |
eafa59f6 | 943 | writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); |
0832b25a AA |
944 | } |
945 | } else { | |
946 | if (rxtx_flags & NV_SETUP_RX_RING) { | |
947 | writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr); | |
948 | writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh); | |
949 | } | |
950 | if (rxtx_flags & NV_SETUP_TX_RING) { | |
eafa59f6 AA |
951 | writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr); |
952 | writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh); | |
0832b25a AA |
953 | } |
954 | } | |
955 | } | |
956 | ||
eafa59f6 AA |
957 | static void free_rings(struct net_device *dev) |
958 | { | |
959 | struct fe_priv *np = get_nvpriv(dev); | |
960 | ||
961 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | |
f82a9352 | 962 | if (np->rx_ring.orig) |
eafa59f6 AA |
963 | pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), |
964 | np->rx_ring.orig, np->ring_addr); | |
965 | } else { | |
966 | if (np->rx_ring.ex) | |
967 | pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), | |
968 | np->rx_ring.ex, np->ring_addr); | |
969 | } | |
761fcd9e AA |
970 | if (np->rx_skb) |
971 | kfree(np->rx_skb); | |
972 | if (np->tx_skb) | |
973 | kfree(np->tx_skb); | |
eafa59f6 AA |
974 | } |
975 | ||
84b3932b AA |
976 | static int using_multi_irqs(struct net_device *dev) |
977 | { | |
978 | struct fe_priv *np = get_nvpriv(dev); | |
979 | ||
980 | if (!(np->msi_flags & NV_MSI_X_ENABLED) || | |
981 | ((np->msi_flags & NV_MSI_X_ENABLED) && | |
982 | ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) | |
983 | return 0; | |
984 | else | |
985 | return 1; | |
986 | } | |
987 | ||
988 | static void nv_enable_irq(struct net_device *dev) | |
989 | { | |
990 | struct fe_priv *np = get_nvpriv(dev); | |
991 | ||
992 | if (!using_multi_irqs(dev)) { | |
993 | if (np->msi_flags & NV_MSI_X_ENABLED) | |
994 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); | |
995 | else | |
996 | enable_irq(dev->irq); | |
997 | } else { | |
998 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | |
999 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); | |
1000 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); | |
1001 | } | |
1002 | } | |
1003 | ||
1004 | static void nv_disable_irq(struct net_device *dev) | |
1005 | { | |
1006 | struct fe_priv *np = get_nvpriv(dev); | |
1007 | ||
1008 | if (!using_multi_irqs(dev)) { | |
1009 | if (np->msi_flags & NV_MSI_X_ENABLED) | |
1010 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); | |
1011 | else | |
1012 | disable_irq(dev->irq); | |
1013 | } else { | |
1014 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | |
1015 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); | |
1016 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); | |
1017 | } | |
1018 | } | |
1019 | ||
1020 | /* In MSIX mode, a write to irqmask behaves as XOR */ | |
1021 | static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask) | |
1022 | { | |
1023 | u8 __iomem *base = get_hwbase(dev); | |
1024 | ||
1025 | writel(mask, base + NvRegIrqMask); | |
1026 | } | |
1027 | ||
1028 | static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask) | |
1029 | { | |
1030 | struct fe_priv *np = get_nvpriv(dev); | |
1031 | u8 __iomem *base = get_hwbase(dev); | |
1032 | ||
1033 | if (np->msi_flags & NV_MSI_X_ENABLED) { | |
1034 | writel(mask, base + NvRegIrqMask); | |
1035 | } else { | |
1036 | if (np->msi_flags & NV_MSI_ENABLED) | |
1037 | writel(0, base + NvRegMSIIrqMask); | |
1038 | writel(0, base + NvRegIrqMask); | |
1039 | } | |
1040 | } | |
1041 | ||
1da177e4 LT |
1042 | #define MII_READ (-1) |
1043 | /* mii_rw: read/write a register on the PHY. | |
1044 | * | |
1045 | * Caller must guarantee serialization | |
1046 | */ | |
1047 | static int mii_rw(struct net_device *dev, int addr, int miireg, int value) | |
1048 | { | |
1049 | u8 __iomem *base = get_hwbase(dev); | |
1050 | u32 reg; | |
1051 | int retval; | |
1052 | ||
1053 | writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); | |
1054 | ||
1055 | reg = readl(base + NvRegMIIControl); | |
1056 | if (reg & NVREG_MIICTL_INUSE) { | |
1057 | writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl); | |
1058 | udelay(NV_MIIBUSY_DELAY); | |
1059 | } | |
1060 | ||
1061 | reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg; | |
1062 | if (value != MII_READ) { | |
1063 | writel(value, base + NvRegMIIData); | |
1064 | reg |= NVREG_MIICTL_WRITE; | |
1065 | } | |
1066 | writel(reg, base + NvRegMIIControl); | |
1067 | ||
1068 | if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0, | |
1069 | NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) { | |
1070 | dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n", | |
1071 | dev->name, miireg, addr); | |
1072 | retval = -1; | |
1073 | } else if (value != MII_READ) { | |
1074 | /* it was a write operation - fewer failures are detectable */ | |
1075 | dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n", | |
1076 | dev->name, value, miireg, addr); | |
1077 | retval = 0; | |
1078 | } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) { | |
1079 | dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n", | |
1080 | dev->name, miireg, addr); | |
1081 | retval = -1; | |
1082 | } else { | |
1083 | retval = readl(base + NvRegMIIData); | |
1084 | dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n", | |
1085 | dev->name, miireg, addr, retval); | |
1086 | } | |
1087 | ||
1088 | return retval; | |
1089 | } | |
1090 | ||
edf7e5ec | 1091 | static int phy_reset(struct net_device *dev, u32 bmcr_setup) |
1da177e4 | 1092 | { |
ac9c1897 | 1093 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 LT |
1094 | u32 miicontrol; |
1095 | unsigned int tries = 0; | |
1096 | ||
edf7e5ec | 1097 | miicontrol = BMCR_RESET | bmcr_setup; |
1da177e4 LT |
1098 | if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) { |
1099 | return -1; | |
1100 | } | |
1101 | ||
1102 | /* wait for 500ms */ | |
1103 | msleep(500); | |
1104 | ||
1105 | /* must wait till reset is deasserted */ | |
1106 | while (miicontrol & BMCR_RESET) { | |
1107 | msleep(10); | |
1108 | miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); | |
1109 | /* FIXME: 100 tries seem excessive */ | |
1110 | if (tries++ > 100) | |
1111 | return -1; | |
1112 | } | |
1113 | return 0; | |
1114 | } | |
1115 | ||
1116 | static int phy_init(struct net_device *dev) | |
1117 | { | |
1118 | struct fe_priv *np = get_nvpriv(dev); | |
1119 | u8 __iomem *base = get_hwbase(dev); | |
1120 | u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg; | |
1121 | ||
edf7e5ec AA |
1122 | /* phy errata for E3016 phy */ |
1123 | if (np->phy_model == PHY_MODEL_MARVELL_E3016) { | |
1124 | reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); | |
1125 | reg &= ~PHY_MARVELL_E3016_INITMASK; | |
1126 | if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) { | |
1127 | printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev)); | |
1128 | return PHY_ERROR; | |
1129 | } | |
1130 | } | |
c5e3ae88 AA |
1131 | if (np->phy_oui == PHY_OUI_REALTEK) { |
1132 | if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { | |
1133 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1134 | return PHY_ERROR; | |
1135 | } | |
1136 | if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) { | |
1137 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1138 | return PHY_ERROR; | |
1139 | } | |
1140 | if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) { | |
1141 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1142 | return PHY_ERROR; | |
1143 | } | |
1144 | if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) { | |
1145 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1146 | return PHY_ERROR; | |
1147 | } | |
1148 | if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { | |
1149 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1150 | return PHY_ERROR; | |
1151 | } | |
1152 | } | |
edf7e5ec | 1153 | |
1da177e4 LT |
1154 | /* set advertise register */ |
1155 | reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | |
eb91f61b | 1156 | reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP); |
1da177e4 LT |
1157 | if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) { |
1158 | printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev)); | |
1159 | return PHY_ERROR; | |
1160 | } | |
1161 | ||
1162 | /* get phy interface type */ | |
1163 | phyinterface = readl(base + NvRegPhyInterface); | |
1164 | ||
1165 | /* see if gigabit phy */ | |
1166 | mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); | |
1167 | if (mii_status & PHY_GIGABIT) { | |
1168 | np->gigabit = PHY_GIGABIT; | |
eb91f61b | 1169 | mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); |
1da177e4 LT |
1170 | mii_control_1000 &= ~ADVERTISE_1000HALF; |
1171 | if (phyinterface & PHY_RGMII) | |
1172 | mii_control_1000 |= ADVERTISE_1000FULL; | |
1173 | else | |
1174 | mii_control_1000 &= ~ADVERTISE_1000FULL; | |
1175 | ||
eb91f61b | 1176 | if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) { |
1da177e4 LT |
1177 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); |
1178 | return PHY_ERROR; | |
1179 | } | |
1180 | } | |
1181 | else | |
1182 | np->gigabit = 0; | |
1183 | ||
edf7e5ec AA |
1184 | mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); |
1185 | mii_control |= BMCR_ANENABLE; | |
1186 | ||
1187 | /* reset the phy | |
1188 | * (certain phys need bmcr to be setup with reset) | |
1189 | */ | |
1190 | if (phy_reset(dev, mii_control)) { | |
1da177e4 LT |
1191 | printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev)); |
1192 | return PHY_ERROR; | |
1193 | } | |
1194 | ||
1195 | /* phy vendor specific configuration */ | |
1196 | if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) { | |
1197 | phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ); | |
14a67f3c AA |
1198 | phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2); |
1199 | phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4); | |
1da177e4 LT |
1200 | if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) { |
1201 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1202 | return PHY_ERROR; | |
1203 | } | |
1204 | phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); | |
14a67f3c | 1205 | phy_reserved |= PHY_CICADA_INIT5; |
1da177e4 LT |
1206 | if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) { |
1207 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1208 | return PHY_ERROR; | |
1209 | } | |
1210 | } | |
1211 | if (np->phy_oui == PHY_OUI_CICADA) { | |
1212 | phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ); | |
14a67f3c | 1213 | phy_reserved |= PHY_CICADA_INIT6; |
1da177e4 LT |
1214 | if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) { |
1215 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1216 | return PHY_ERROR; | |
1217 | } | |
1218 | } | |
d215d8a2 AA |
1219 | if (np->phy_oui == PHY_OUI_VITESSE) { |
1220 | if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) { | |
1221 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1222 | return PHY_ERROR; | |
1223 | } | |
1224 | if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) { | |
1225 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1226 | return PHY_ERROR; | |
1227 | } | |
1228 | phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); | |
1229 | if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { | |
1230 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1231 | return PHY_ERROR; | |
1232 | } | |
1233 | phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); | |
1234 | phy_reserved &= ~PHY_VITESSE_INIT_MSK1; | |
1235 | phy_reserved |= PHY_VITESSE_INIT3; | |
1236 | if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { | |
1237 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1238 | return PHY_ERROR; | |
1239 | } | |
1240 | if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) { | |
1241 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1242 | return PHY_ERROR; | |
1243 | } | |
1244 | if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) { | |
1245 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1246 | return PHY_ERROR; | |
1247 | } | |
1248 | phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); | |
1249 | phy_reserved &= ~PHY_VITESSE_INIT_MSK1; | |
1250 | phy_reserved |= PHY_VITESSE_INIT3; | |
1251 | if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { | |
1252 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1253 | return PHY_ERROR; | |
1254 | } | |
1255 | phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); | |
1256 | if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { | |
1257 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1258 | return PHY_ERROR; | |
1259 | } | |
1260 | if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) { | |
1261 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1262 | return PHY_ERROR; | |
1263 | } | |
1264 | if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) { | |
1265 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1266 | return PHY_ERROR; | |
1267 | } | |
1268 | phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); | |
1269 | if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { | |
1270 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1271 | return PHY_ERROR; | |
1272 | } | |
1273 | phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); | |
1274 | phy_reserved &= ~PHY_VITESSE_INIT_MSK2; | |
1275 | phy_reserved |= PHY_VITESSE_INIT8; | |
1276 | if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { | |
1277 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1278 | return PHY_ERROR; | |
1279 | } | |
1280 | if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) { | |
1281 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1282 | return PHY_ERROR; | |
1283 | } | |
1284 | if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) { | |
1285 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1286 | return PHY_ERROR; | |
1287 | } | |
1288 | } | |
c5e3ae88 AA |
1289 | if (np->phy_oui == PHY_OUI_REALTEK) { |
1290 | /* reset could have cleared these out, set them back */ | |
1291 | if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { | |
1292 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1293 | return PHY_ERROR; | |
1294 | } | |
1295 | if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) { | |
1296 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1297 | return PHY_ERROR; | |
1298 | } | |
1299 | if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) { | |
1300 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1301 | return PHY_ERROR; | |
1302 | } | |
1303 | if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) { | |
1304 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1305 | return PHY_ERROR; | |
1306 | } | |
1307 | if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { | |
1308 | printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); | |
1309 | return PHY_ERROR; | |
1310 | } | |
1311 | } | |
1312 | ||
eb91f61b AA |
1313 | /* some phys clear out pause advertisment on reset, set it back */ |
1314 | mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg); | |
1da177e4 LT |
1315 | |
1316 | /* restart auto negotiation */ | |
1317 | mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); | |
1318 | mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); | |
1319 | if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { | |
1320 | return PHY_ERROR; | |
1321 | } | |
1322 | ||
1323 | return 0; | |
1324 | } | |
1325 | ||
1326 | static void nv_start_rx(struct net_device *dev) | |
1327 | { | |
ac9c1897 | 1328 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 1329 | u8 __iomem *base = get_hwbase(dev); |
f35723ec | 1330 | u32 rx_ctrl = readl(base + NvRegReceiverControl); |
1da177e4 LT |
1331 | |
1332 | dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name); | |
1333 | /* Already running? Stop it. */ | |
f35723ec AA |
1334 | if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) { |
1335 | rx_ctrl &= ~NVREG_RCVCTL_START; | |
1336 | writel(rx_ctrl, base + NvRegReceiverControl); | |
1da177e4 LT |
1337 | pci_push(base); |
1338 | } | |
1339 | writel(np->linkspeed, base + NvRegLinkSpeed); | |
1340 | pci_push(base); | |
f35723ec AA |
1341 | rx_ctrl |= NVREG_RCVCTL_START; |
1342 | if (np->mac_in_use) | |
1343 | rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN; | |
1344 | writel(rx_ctrl, base + NvRegReceiverControl); | |
1da177e4 LT |
1345 | dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n", |
1346 | dev->name, np->duplex, np->linkspeed); | |
1347 | pci_push(base); | |
1348 | } | |
1349 | ||
1350 | static void nv_stop_rx(struct net_device *dev) | |
1351 | { | |
f35723ec | 1352 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 1353 | u8 __iomem *base = get_hwbase(dev); |
f35723ec | 1354 | u32 rx_ctrl = readl(base + NvRegReceiverControl); |
1da177e4 LT |
1355 | |
1356 | dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name); | |
f35723ec AA |
1357 | if (!np->mac_in_use) |
1358 | rx_ctrl &= ~NVREG_RCVCTL_START; | |
1359 | else | |
1360 | rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN; | |
1361 | writel(rx_ctrl, base + NvRegReceiverControl); | |
1da177e4 LT |
1362 | reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0, |
1363 | NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX, | |
1364 | KERN_INFO "nv_stop_rx: ReceiverStatus remained busy"); | |
1365 | ||
1366 | udelay(NV_RXSTOP_DELAY2); | |
f35723ec AA |
1367 | if (!np->mac_in_use) |
1368 | writel(0, base + NvRegLinkSpeed); | |
1da177e4 LT |
1369 | } |
1370 | ||
1371 | static void nv_start_tx(struct net_device *dev) | |
1372 | { | |
f35723ec | 1373 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 1374 | u8 __iomem *base = get_hwbase(dev); |
f35723ec | 1375 | u32 tx_ctrl = readl(base + NvRegTransmitterControl); |
1da177e4 LT |
1376 | |
1377 | dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name); | |
f35723ec AA |
1378 | tx_ctrl |= NVREG_XMITCTL_START; |
1379 | if (np->mac_in_use) | |
1380 | tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN; | |
1381 | writel(tx_ctrl, base + NvRegTransmitterControl); | |
1da177e4 LT |
1382 | pci_push(base); |
1383 | } | |
1384 | ||
1385 | static void nv_stop_tx(struct net_device *dev) | |
1386 | { | |
f35723ec | 1387 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 1388 | u8 __iomem *base = get_hwbase(dev); |
f35723ec | 1389 | u32 tx_ctrl = readl(base + NvRegTransmitterControl); |
1da177e4 LT |
1390 | |
1391 | dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name); | |
f35723ec AA |
1392 | if (!np->mac_in_use) |
1393 | tx_ctrl &= ~NVREG_XMITCTL_START; | |
1394 | else | |
1395 | tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN; | |
1396 | writel(tx_ctrl, base + NvRegTransmitterControl); | |
1da177e4 LT |
1397 | reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0, |
1398 | NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX, | |
1399 | KERN_INFO "nv_stop_tx: TransmitterStatus remained busy"); | |
1400 | ||
1401 | udelay(NV_TXSTOP_DELAY2); | |
f35723ec AA |
1402 | if (!np->mac_in_use) |
1403 | writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, | |
1404 | base + NvRegTransmitPoll); | |
1da177e4 LT |
1405 | } |
1406 | ||
1407 | static void nv_txrx_reset(struct net_device *dev) | |
1408 | { | |
ac9c1897 | 1409 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 LT |
1410 | u8 __iomem *base = get_hwbase(dev); |
1411 | ||
1412 | dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name); | |
8a4ae7f2 | 1413 | writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); |
1da177e4 LT |
1414 | pci_push(base); |
1415 | udelay(NV_TXRX_RESET_DELAY); | |
8a4ae7f2 | 1416 | writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); |
1da177e4 LT |
1417 | pci_push(base); |
1418 | } | |
1419 | ||
86a0f043 AA |
1420 | static void nv_mac_reset(struct net_device *dev) |
1421 | { | |
1422 | struct fe_priv *np = netdev_priv(dev); | |
1423 | u8 __iomem *base = get_hwbase(dev); | |
1424 | ||
1425 | dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name); | |
1426 | writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); | |
1427 | pci_push(base); | |
1428 | writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset); | |
1429 | pci_push(base); | |
1430 | udelay(NV_MAC_RESET_DELAY); | |
1431 | writel(0, base + NvRegMacReset); | |
1432 | pci_push(base); | |
1433 | udelay(NV_MAC_RESET_DELAY); | |
1434 | writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); | |
1435 | pci_push(base); | |
1436 | } | |
1437 | ||
57fff698 AA |
1438 | static void nv_get_hw_stats(struct net_device *dev) |
1439 | { | |
1440 | struct fe_priv *np = netdev_priv(dev); | |
1441 | u8 __iomem *base = get_hwbase(dev); | |
1442 | ||
1443 | np->estats.tx_bytes += readl(base + NvRegTxCnt); | |
1444 | np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); | |
1445 | np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); | |
1446 | np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt); | |
1447 | np->estats.tx_late_collision += readl(base + NvRegTxLateCol); | |
1448 | np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow); | |
1449 | np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier); | |
1450 | np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef); | |
1451 | np->estats.tx_retry_error += readl(base + NvRegTxRetryErr); | |
1452 | np->estats.rx_frame_error += readl(base + NvRegRxFrameErr); | |
1453 | np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte); | |
1454 | np->estats.rx_late_collision += readl(base + NvRegRxLateCol); | |
1455 | np->estats.rx_runt += readl(base + NvRegRxRunt); | |
1456 | np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong); | |
1457 | np->estats.rx_over_errors += readl(base + NvRegRxOverflow); | |
1458 | np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr); | |
1459 | np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr); | |
1460 | np->estats.rx_length_error += readl(base + NvRegRxLenErr); | |
1461 | np->estats.rx_unicast += readl(base + NvRegRxUnicast); | |
1462 | np->estats.rx_multicast += readl(base + NvRegRxMulticast); | |
1463 | np->estats.rx_broadcast += readl(base + NvRegRxBroadcast); | |
1464 | np->estats.rx_packets = | |
1465 | np->estats.rx_unicast + | |
1466 | np->estats.rx_multicast + | |
1467 | np->estats.rx_broadcast; | |
1468 | np->estats.rx_errors_total = | |
1469 | np->estats.rx_crc_errors + | |
1470 | np->estats.rx_over_errors + | |
1471 | np->estats.rx_frame_error + | |
1472 | (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) + | |
1473 | np->estats.rx_late_collision + | |
1474 | np->estats.rx_runt + | |
1475 | np->estats.rx_frame_too_long; | |
1476 | np->estats.tx_errors_total = | |
1477 | np->estats.tx_late_collision + | |
1478 | np->estats.tx_fifo_errors + | |
1479 | np->estats.tx_carrier_errors + | |
1480 | np->estats.tx_excess_deferral + | |
1481 | np->estats.tx_retry_error; | |
1482 | ||
1483 | if (np->driver_data & DEV_HAS_STATISTICS_V2) { | |
1484 | np->estats.tx_deferral += readl(base + NvRegTxDef); | |
1485 | np->estats.tx_packets += readl(base + NvRegTxFrame); | |
1486 | np->estats.rx_bytes += readl(base + NvRegRxCnt); | |
1487 | np->estats.tx_pause += readl(base + NvRegTxPause); | |
1488 | np->estats.rx_pause += readl(base + NvRegRxPause); | |
1489 | np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); | |
1490 | } | |
1491 | } | |
1492 | ||
1da177e4 LT |
1493 | /* |
1494 | * nv_get_stats: dev->get_stats function | |
1495 | * Get latest stats value from the nic. | |
1496 | * Called with read_lock(&dev_base_lock) held for read - | |
1497 | * only synchronized against unregister_netdevice. | |
1498 | */ | |
1499 | static struct net_device_stats *nv_get_stats(struct net_device *dev) | |
1500 | { | |
ac9c1897 | 1501 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 1502 | |
21828163 AA |
1503 | /* If the nic supports hw counters then retrieve latest values */ |
1504 | if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) { | |
1505 | nv_get_hw_stats(dev); | |
1506 | ||
1507 | /* copy to net_device stats */ | |
1508 | np->stats.tx_bytes = np->estats.tx_bytes; | |
1509 | np->stats.tx_fifo_errors = np->estats.tx_fifo_errors; | |
1510 | np->stats.tx_carrier_errors = np->estats.tx_carrier_errors; | |
1511 | np->stats.rx_crc_errors = np->estats.rx_crc_errors; | |
1512 | np->stats.rx_over_errors = np->estats.rx_over_errors; | |
1513 | np->stats.rx_errors = np->estats.rx_errors_total; | |
1514 | np->stats.tx_errors = np->estats.tx_errors_total; | |
1515 | } | |
1da177e4 LT |
1516 | return &np->stats; |
1517 | } | |
1518 | ||
1519 | /* | |
1520 | * nv_alloc_rx: fill rx ring entries. | |
1521 | * Return 1 if the allocations for the skbs failed and the | |
1522 | * rx engine is without Available descriptors | |
1523 | */ | |
1524 | static int nv_alloc_rx(struct net_device *dev) | |
1525 | { | |
ac9c1897 | 1526 | struct fe_priv *np = netdev_priv(dev); |
86b22b0d | 1527 | struct ring_desc* less_rx; |
1da177e4 | 1528 | |
86b22b0d AA |
1529 | less_rx = np->get_rx.orig; |
1530 | if (less_rx-- == np->first_rx.orig) | |
1531 | less_rx = np->last_rx.orig; | |
761fcd9e | 1532 | |
86b22b0d AA |
1533 | while (np->put_rx.orig != less_rx) { |
1534 | struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); | |
1535 | if (skb) { | |
86b22b0d | 1536 | np->put_rx_ctx->skb = skb; |
4305b541 ACM |
1537 | np->put_rx_ctx->dma = pci_map_single(np->pci_dev, |
1538 | skb->data, | |
8b5be268 | 1539 | skb_tailroom(skb), |
4305b541 | 1540 | PCI_DMA_FROMDEVICE); |
8b5be268 | 1541 | np->put_rx_ctx->dma_len = skb_tailroom(skb); |
86b22b0d AA |
1542 | np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma); |
1543 | wmb(); | |
1544 | np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); | |
b01867cb | 1545 | if (unlikely(np->put_rx.orig++ == np->last_rx.orig)) |
86b22b0d | 1546 | np->put_rx.orig = np->first_rx.orig; |
b01867cb | 1547 | if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) |
86b22b0d | 1548 | np->put_rx_ctx = np->first_rx_ctx; |
761fcd9e | 1549 | } else { |
86b22b0d | 1550 | return 1; |
761fcd9e | 1551 | } |
86b22b0d AA |
1552 | } |
1553 | return 0; | |
1554 | } | |
1555 | ||
1556 | static int nv_alloc_rx_optimized(struct net_device *dev) | |
1557 | { | |
1558 | struct fe_priv *np = netdev_priv(dev); | |
1559 | struct ring_desc_ex* less_rx; | |
1560 | ||
1561 | less_rx = np->get_rx.ex; | |
1562 | if (less_rx-- == np->first_rx.ex) | |
1563 | less_rx = np->last_rx.ex; | |
761fcd9e | 1564 | |
86b22b0d AA |
1565 | while (np->put_rx.ex != less_rx) { |
1566 | struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); | |
0d63fb32 | 1567 | if (skb) { |
761fcd9e | 1568 | np->put_rx_ctx->skb = skb; |
4305b541 ACM |
1569 | np->put_rx_ctx->dma = pci_map_single(np->pci_dev, |
1570 | skb->data, | |
8b5be268 | 1571 | skb_tailroom(skb), |
4305b541 | 1572 | PCI_DMA_FROMDEVICE); |
8b5be268 | 1573 | np->put_rx_ctx->dma_len = skb_tailroom(skb); |
86b22b0d AA |
1574 | np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32; |
1575 | np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF; | |
1576 | wmb(); | |
1577 | np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); | |
b01867cb | 1578 | if (unlikely(np->put_rx.ex++ == np->last_rx.ex)) |
86b22b0d | 1579 | np->put_rx.ex = np->first_rx.ex; |
b01867cb | 1580 | if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) |
0d63fb32 | 1581 | np->put_rx_ctx = np->first_rx_ctx; |
1da177e4 | 1582 | } else { |
0d63fb32 | 1583 | return 1; |
ee73362c | 1584 | } |
1da177e4 | 1585 | } |
1da177e4 LT |
1586 | return 0; |
1587 | } | |
1588 | ||
e27cdba5 SH |
1589 | /* If rx bufs are exhausted called after 50ms to attempt to refresh */ |
1590 | #ifdef CONFIG_FORCEDETH_NAPI | |
1591 | static void nv_do_rx_refill(unsigned long data) | |
1592 | { | |
1593 | struct net_device *dev = (struct net_device *) data; | |
bea3348e | 1594 | struct fe_priv *np = netdev_priv(dev); |
e27cdba5 SH |
1595 | |
1596 | /* Just reschedule NAPI rx processing */ | |
bea3348e | 1597 | netif_rx_schedule(dev, &np->napi); |
e27cdba5 SH |
1598 | } |
1599 | #else | |
1da177e4 LT |
1600 | static void nv_do_rx_refill(unsigned long data) |
1601 | { | |
1602 | struct net_device *dev = (struct net_device *) data; | |
ac9c1897 | 1603 | struct fe_priv *np = netdev_priv(dev); |
86b22b0d | 1604 | int retcode; |
1da177e4 | 1605 | |
84b3932b AA |
1606 | if (!using_multi_irqs(dev)) { |
1607 | if (np->msi_flags & NV_MSI_X_ENABLED) | |
1608 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); | |
1609 | else | |
1610 | disable_irq(dev->irq); | |
d33a73c8 AA |
1611 | } else { |
1612 | disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | |
1613 | } | |
86b22b0d AA |
1614 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
1615 | retcode = nv_alloc_rx(dev); | |
1616 | else | |
1617 | retcode = nv_alloc_rx_optimized(dev); | |
1618 | if (retcode) { | |
84b3932b | 1619 | spin_lock_irq(&np->lock); |
1da177e4 LT |
1620 | if (!np->in_shutdown) |
1621 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | |
84b3932b | 1622 | spin_unlock_irq(&np->lock); |
1da177e4 | 1623 | } |
84b3932b AA |
1624 | if (!using_multi_irqs(dev)) { |
1625 | if (np->msi_flags & NV_MSI_X_ENABLED) | |
1626 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); | |
1627 | else | |
1628 | enable_irq(dev->irq); | |
d33a73c8 AA |
1629 | } else { |
1630 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | |
1631 | } | |
1da177e4 | 1632 | } |
e27cdba5 | 1633 | #endif |
1da177e4 | 1634 | |
f3b197ac | 1635 | static void nv_init_rx(struct net_device *dev) |
1da177e4 | 1636 | { |
ac9c1897 | 1637 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 1638 | int i; |
761fcd9e AA |
1639 | np->get_rx = np->put_rx = np->first_rx = np->rx_ring; |
1640 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | |
1641 | np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1]; | |
1642 | else | |
1643 | np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1]; | |
1644 | np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb; | |
1645 | np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1]; | |
1da177e4 | 1646 | |
761fcd9e AA |
1647 | for (i = 0; i < np->rx_ring_size; i++) { |
1648 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | |
f82a9352 | 1649 | np->rx_ring.orig[i].flaglen = 0; |
761fcd9e AA |
1650 | np->rx_ring.orig[i].buf = 0; |
1651 | } else { | |
f82a9352 | 1652 | np->rx_ring.ex[i].flaglen = 0; |
761fcd9e AA |
1653 | np->rx_ring.ex[i].txvlan = 0; |
1654 | np->rx_ring.ex[i].bufhigh = 0; | |
1655 | np->rx_ring.ex[i].buflow = 0; | |
1656 | } | |
1657 | np->rx_skb[i].skb = NULL; | |
1658 | np->rx_skb[i].dma = 0; | |
1659 | } | |
d81c0983 MS |
1660 | } |
1661 | ||
1662 | static void nv_init_tx(struct net_device *dev) | |
1663 | { | |
ac9c1897 | 1664 | struct fe_priv *np = netdev_priv(dev); |
d81c0983 | 1665 | int i; |
761fcd9e AA |
1666 | np->get_tx = np->put_tx = np->first_tx = np->tx_ring; |
1667 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | |
1668 | np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1]; | |
1669 | else | |
1670 | np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1]; | |
1671 | np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb; | |
1672 | np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1]; | |
d81c0983 | 1673 | |
eafa59f6 | 1674 | for (i = 0; i < np->tx_ring_size; i++) { |
761fcd9e | 1675 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
f82a9352 | 1676 | np->tx_ring.orig[i].flaglen = 0; |
761fcd9e AA |
1677 | np->tx_ring.orig[i].buf = 0; |
1678 | } else { | |
f82a9352 | 1679 | np->tx_ring.ex[i].flaglen = 0; |
761fcd9e AA |
1680 | np->tx_ring.ex[i].txvlan = 0; |
1681 | np->tx_ring.ex[i].bufhigh = 0; | |
1682 | np->tx_ring.ex[i].buflow = 0; | |
1683 | } | |
1684 | np->tx_skb[i].skb = NULL; | |
1685 | np->tx_skb[i].dma = 0; | |
ac9c1897 | 1686 | } |
d81c0983 MS |
1687 | } |
1688 | ||
1689 | static int nv_init_ring(struct net_device *dev) | |
1690 | { | |
86b22b0d AA |
1691 | struct fe_priv *np = netdev_priv(dev); |
1692 | ||
d81c0983 MS |
1693 | nv_init_tx(dev); |
1694 | nv_init_rx(dev); | |
86b22b0d AA |
1695 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
1696 | return nv_alloc_rx(dev); | |
1697 | else | |
1698 | return nv_alloc_rx_optimized(dev); | |
1da177e4 LT |
1699 | } |
1700 | ||
761fcd9e | 1701 | static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb) |
ac9c1897 AA |
1702 | { |
1703 | struct fe_priv *np = netdev_priv(dev); | |
fa45459e | 1704 | |
761fcd9e AA |
1705 | if (tx_skb->dma) { |
1706 | pci_unmap_page(np->pci_dev, tx_skb->dma, | |
1707 | tx_skb->dma_len, | |
fa45459e | 1708 | PCI_DMA_TODEVICE); |
761fcd9e | 1709 | tx_skb->dma = 0; |
fa45459e | 1710 | } |
761fcd9e AA |
1711 | if (tx_skb->skb) { |
1712 | dev_kfree_skb_any(tx_skb->skb); | |
1713 | tx_skb->skb = NULL; | |
fa45459e AA |
1714 | return 1; |
1715 | } else { | |
1716 | return 0; | |
ac9c1897 | 1717 | } |
ac9c1897 AA |
1718 | } |
1719 | ||
1da177e4 LT |
1720 | static void nv_drain_tx(struct net_device *dev) |
1721 | { | |
ac9c1897 AA |
1722 | struct fe_priv *np = netdev_priv(dev); |
1723 | unsigned int i; | |
f3b197ac | 1724 | |
eafa59f6 | 1725 | for (i = 0; i < np->tx_ring_size; i++) { |
761fcd9e | 1726 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
f82a9352 | 1727 | np->tx_ring.orig[i].flaglen = 0; |
761fcd9e AA |
1728 | np->tx_ring.orig[i].buf = 0; |
1729 | } else { | |
f82a9352 | 1730 | np->tx_ring.ex[i].flaglen = 0; |
761fcd9e AA |
1731 | np->tx_ring.ex[i].txvlan = 0; |
1732 | np->tx_ring.ex[i].bufhigh = 0; | |
1733 | np->tx_ring.ex[i].buflow = 0; | |
1734 | } | |
1735 | if (nv_release_txskb(dev, &np->tx_skb[i])) | |
1da177e4 | 1736 | np->stats.tx_dropped++; |
1da177e4 LT |
1737 | } |
1738 | } | |
1739 | ||
1740 | static void nv_drain_rx(struct net_device *dev) | |
1741 | { | |
ac9c1897 | 1742 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 1743 | int i; |
761fcd9e | 1744 | |
eafa59f6 | 1745 | for (i = 0; i < np->rx_ring_size; i++) { |
761fcd9e | 1746 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
f82a9352 | 1747 | np->rx_ring.orig[i].flaglen = 0; |
761fcd9e AA |
1748 | np->rx_ring.orig[i].buf = 0; |
1749 | } else { | |
f82a9352 | 1750 | np->rx_ring.ex[i].flaglen = 0; |
761fcd9e AA |
1751 | np->rx_ring.ex[i].txvlan = 0; |
1752 | np->rx_ring.ex[i].bufhigh = 0; | |
1753 | np->rx_ring.ex[i].buflow = 0; | |
1754 | } | |
1da177e4 | 1755 | wmb(); |
761fcd9e AA |
1756 | if (np->rx_skb[i].skb) { |
1757 | pci_unmap_single(np->pci_dev, np->rx_skb[i].dma, | |
4305b541 ACM |
1758 | (skb_end_pointer(np->rx_skb[i].skb) - |
1759 | np->rx_skb[i].skb->data), | |
1760 | PCI_DMA_FROMDEVICE); | |
761fcd9e AA |
1761 | dev_kfree_skb(np->rx_skb[i].skb); |
1762 | np->rx_skb[i].skb = NULL; | |
1da177e4 LT |
1763 | } |
1764 | } | |
1765 | } | |
1766 | ||
1767 | static void drain_ring(struct net_device *dev) | |
1768 | { | |
1769 | nv_drain_tx(dev); | |
1770 | nv_drain_rx(dev); | |
1771 | } | |
1772 | ||
761fcd9e AA |
1773 | static inline u32 nv_get_empty_tx_slots(struct fe_priv *np) |
1774 | { | |
1775 | return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size)); | |
1776 | } | |
1777 | ||
1da177e4 LT |
1778 | /* |
1779 | * nv_start_xmit: dev->hard_start_xmit function | |
932ff279 | 1780 | * Called with netif_tx_lock held. |
1da177e4 LT |
1781 | */ |
1782 | static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |
1783 | { | |
ac9c1897 | 1784 | struct fe_priv *np = netdev_priv(dev); |
fa45459e | 1785 | u32 tx_flags = 0; |
ac9c1897 AA |
1786 | u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); |
1787 | unsigned int fragments = skb_shinfo(skb)->nr_frags; | |
ac9c1897 | 1788 | unsigned int i; |
fa45459e AA |
1789 | u32 offset = 0; |
1790 | u32 bcnt; | |
1791 | u32 size = skb->len-skb->data_len; | |
1792 | u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); | |
761fcd9e | 1793 | u32 empty_slots; |
86b22b0d AA |
1794 | struct ring_desc* put_tx; |
1795 | struct ring_desc* start_tx; | |
1796 | struct ring_desc* prev_tx; | |
761fcd9e | 1797 | struct nv_skb_map* prev_tx_ctx; |
fa45459e AA |
1798 | |
1799 | /* add fragments to entries count */ | |
1800 | for (i = 0; i < fragments; i++) { | |
1801 | entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + | |
1802 | ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); | |
1803 | } | |
ac9c1897 | 1804 | |
761fcd9e | 1805 | empty_slots = nv_get_empty_tx_slots(np); |
445583b8 | 1806 | if (unlikely(empty_slots <= entries)) { |
164a86e4 | 1807 | spin_lock_irq(&np->lock); |
ac9c1897 | 1808 | netif_stop_queue(dev); |
aaa37d2d | 1809 | np->tx_stop = 1; |
164a86e4 | 1810 | spin_unlock_irq(&np->lock); |
ac9c1897 AA |
1811 | return NETDEV_TX_BUSY; |
1812 | } | |
1da177e4 | 1813 | |
86b22b0d | 1814 | start_tx = put_tx = np->put_tx.orig; |
761fcd9e | 1815 | |
fa45459e AA |
1816 | /* setup the header buffer */ |
1817 | do { | |
761fcd9e AA |
1818 | prev_tx = put_tx; |
1819 | prev_tx_ctx = np->put_tx_ctx; | |
fa45459e | 1820 | bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; |
761fcd9e | 1821 | np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, |
fa45459e | 1822 | PCI_DMA_TODEVICE); |
761fcd9e | 1823 | np->put_tx_ctx->dma_len = bcnt; |
86b22b0d AA |
1824 | put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); |
1825 | put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); | |
445583b8 | 1826 | |
fa45459e AA |
1827 | tx_flags = np->tx_flags; |
1828 | offset += bcnt; | |
1829 | size -= bcnt; | |
445583b8 | 1830 | if (unlikely(put_tx++ == np->last_tx.orig)) |
86b22b0d | 1831 | put_tx = np->first_tx.orig; |
445583b8 | 1832 | if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) |
761fcd9e | 1833 | np->put_tx_ctx = np->first_tx_ctx; |
f82a9352 | 1834 | } while (size); |
fa45459e AA |
1835 | |
1836 | /* setup the fragments */ | |
1837 | for (i = 0; i < fragments; i++) { | |
1838 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
1839 | u32 size = frag->size; | |
1840 | offset = 0; | |
1841 | ||
1842 | do { | |
761fcd9e AA |
1843 | prev_tx = put_tx; |
1844 | prev_tx_ctx = np->put_tx_ctx; | |
fa45459e | 1845 | bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; |
761fcd9e AA |
1846 | np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, |
1847 | PCI_DMA_TODEVICE); | |
1848 | np->put_tx_ctx->dma_len = bcnt; | |
86b22b0d AA |
1849 | put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); |
1850 | put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); | |
445583b8 | 1851 | |
fa45459e AA |
1852 | offset += bcnt; |
1853 | size -= bcnt; | |
445583b8 | 1854 | if (unlikely(put_tx++ == np->last_tx.orig)) |
86b22b0d | 1855 | put_tx = np->first_tx.orig; |
445583b8 | 1856 | if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) |
761fcd9e | 1857 | np->put_tx_ctx = np->first_tx_ctx; |
fa45459e AA |
1858 | } while (size); |
1859 | } | |
ac9c1897 | 1860 | |
fa45459e | 1861 | /* set last fragment flag */ |
86b22b0d | 1862 | prev_tx->flaglen |= cpu_to_le32(tx_flags_extra); |
ac9c1897 | 1863 | |
761fcd9e AA |
1864 | /* save skb in this slot's context area */ |
1865 | prev_tx_ctx->skb = skb; | |
fa45459e | 1866 | |
89114afd | 1867 | if (skb_is_gso(skb)) |
7967168c | 1868 | tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); |
ac9c1897 | 1869 | else |
1d39ed56 | 1870 | tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? |
84fa7933 | 1871 | NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; |
ac9c1897 | 1872 | |
164a86e4 AA |
1873 | spin_lock_irq(&np->lock); |
1874 | ||
fa45459e | 1875 | /* set tx flags */ |
86b22b0d AA |
1876 | start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); |
1877 | np->put_tx.orig = put_tx; | |
1da177e4 | 1878 | |
164a86e4 | 1879 | spin_unlock_irq(&np->lock); |
761fcd9e AA |
1880 | |
1881 | dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n", | |
1882 | dev->name, entries, tx_flags_extra); | |
1da177e4 LT |
1883 | { |
1884 | int j; | |
1885 | for (j=0; j<64; j++) { | |
1886 | if ((j%16) == 0) | |
1887 | dprintk("\n%03x:", j); | |
1888 | dprintk(" %02x", ((unsigned char*)skb->data)[j]); | |
1889 | } | |
1890 | dprintk("\n"); | |
1891 | } | |
1892 | ||
1da177e4 | 1893 | dev->trans_start = jiffies; |
8a4ae7f2 | 1894 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); |
ac9c1897 | 1895 | return NETDEV_TX_OK; |
1da177e4 LT |
1896 | } |
1897 | ||
86b22b0d AA |
1898 | static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev) |
1899 | { | |
1900 | struct fe_priv *np = netdev_priv(dev); | |
1901 | u32 tx_flags = 0; | |
445583b8 | 1902 | u32 tx_flags_extra; |
86b22b0d AA |
1903 | unsigned int fragments = skb_shinfo(skb)->nr_frags; |
1904 | unsigned int i; | |
1905 | u32 offset = 0; | |
1906 | u32 bcnt; | |
1907 | u32 size = skb->len-skb->data_len; | |
1908 | u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); | |
1909 | u32 empty_slots; | |
86b22b0d AA |
1910 | struct ring_desc_ex* put_tx; |
1911 | struct ring_desc_ex* start_tx; | |
1912 | struct ring_desc_ex* prev_tx; | |
1913 | struct nv_skb_map* prev_tx_ctx; | |
1914 | ||
1915 | /* add fragments to entries count */ | |
1916 | for (i = 0; i < fragments; i++) { | |
1917 | entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + | |
1918 | ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); | |
1919 | } | |
1920 | ||
1921 | empty_slots = nv_get_empty_tx_slots(np); | |
445583b8 | 1922 | if (unlikely(empty_slots <= entries)) { |
86b22b0d AA |
1923 | spin_lock_irq(&np->lock); |
1924 | netif_stop_queue(dev); | |
aaa37d2d | 1925 | np->tx_stop = 1; |
86b22b0d AA |
1926 | spin_unlock_irq(&np->lock); |
1927 | return NETDEV_TX_BUSY; | |
1928 | } | |
1929 | ||
1930 | start_tx = put_tx = np->put_tx.ex; | |
1931 | ||
1932 | /* setup the header buffer */ | |
1933 | do { | |
1934 | prev_tx = put_tx; | |
1935 | prev_tx_ctx = np->put_tx_ctx; | |
1936 | bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; | |
1937 | np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, | |
1938 | PCI_DMA_TODEVICE); | |
1939 | np->put_tx_ctx->dma_len = bcnt; | |
1940 | put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32; | |
1941 | put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF; | |
1942 | put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); | |
445583b8 AA |
1943 | |
1944 | tx_flags = NV_TX2_VALID; | |
86b22b0d AA |
1945 | offset += bcnt; |
1946 | size -= bcnt; | |
445583b8 | 1947 | if (unlikely(put_tx++ == np->last_tx.ex)) |
86b22b0d | 1948 | put_tx = np->first_tx.ex; |
445583b8 | 1949 | if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) |
86b22b0d AA |
1950 | np->put_tx_ctx = np->first_tx_ctx; |
1951 | } while (size); | |
1952 | ||
1953 | /* setup the fragments */ | |
1954 | for (i = 0; i < fragments; i++) { | |
1955 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
1956 | u32 size = frag->size; | |
1957 | offset = 0; | |
1958 | ||
1959 | do { | |
1960 | prev_tx = put_tx; | |
1961 | prev_tx_ctx = np->put_tx_ctx; | |
1962 | bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; | |
1963 | np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, | |
1964 | PCI_DMA_TODEVICE); | |
1965 | np->put_tx_ctx->dma_len = bcnt; | |
86b22b0d AA |
1966 | put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32; |
1967 | put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF; | |
1968 | put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); | |
445583b8 | 1969 | |
86b22b0d AA |
1970 | offset += bcnt; |
1971 | size -= bcnt; | |
445583b8 | 1972 | if (unlikely(put_tx++ == np->last_tx.ex)) |
86b22b0d | 1973 | put_tx = np->first_tx.ex; |
445583b8 | 1974 | if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) |
86b22b0d AA |
1975 | np->put_tx_ctx = np->first_tx_ctx; |
1976 | } while (size); | |
1977 | } | |
1978 | ||
1979 | /* set last fragment flag */ | |
445583b8 | 1980 | prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET); |
86b22b0d AA |
1981 | |
1982 | /* save skb in this slot's context area */ | |
1983 | prev_tx_ctx->skb = skb; | |
1984 | ||
1985 | if (skb_is_gso(skb)) | |
1986 | tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); | |
1987 | else | |
1988 | tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? | |
1989 | NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; | |
1990 | ||
1991 | /* vlan tag */ | |
445583b8 AA |
1992 | if (likely(!np->vlangrp)) { |
1993 | start_tx->txvlan = 0; | |
1994 | } else { | |
1995 | if (vlan_tx_tag_present(skb)) | |
1996 | start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb)); | |
1997 | else | |
1998 | start_tx->txvlan = 0; | |
86b22b0d AA |
1999 | } |
2000 | ||
2001 | spin_lock_irq(&np->lock); | |
2002 | ||
2003 | /* set tx flags */ | |
86b22b0d AA |
2004 | start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); |
2005 | np->put_tx.ex = put_tx; | |
2006 | ||
2007 | spin_unlock_irq(&np->lock); | |
2008 | ||
2009 | dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n", | |
2010 | dev->name, entries, tx_flags_extra); | |
2011 | { | |
2012 | int j; | |
2013 | for (j=0; j<64; j++) { | |
2014 | if ((j%16) == 0) | |
2015 | dprintk("\n%03x:", j); | |
2016 | dprintk(" %02x", ((unsigned char*)skb->data)[j]); | |
2017 | } | |
2018 | dprintk("\n"); | |
2019 | } | |
2020 | ||
2021 | dev->trans_start = jiffies; | |
2022 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | |
86b22b0d AA |
2023 | return NETDEV_TX_OK; |
2024 | } | |
2025 | ||
1da177e4 LT |
2026 | /* |
2027 | * nv_tx_done: check for completed packets, release the skbs. | |
2028 | * | |
2029 | * Caller must own np->lock. | |
2030 | */ | |
2031 | static void nv_tx_done(struct net_device *dev) | |
2032 | { | |
ac9c1897 | 2033 | struct fe_priv *np = netdev_priv(dev); |
f82a9352 | 2034 | u32 flags; |
aaa37d2d | 2035 | struct ring_desc* orig_get_tx = np->get_tx.orig; |
1da177e4 | 2036 | |
445583b8 AA |
2037 | while ((np->get_tx.orig != np->put_tx.orig) && |
2038 | !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) { | |
1da177e4 | 2039 | |
761fcd9e AA |
2040 | dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n", |
2041 | dev->name, flags); | |
445583b8 AA |
2042 | |
2043 | pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma, | |
2044 | np->get_tx_ctx->dma_len, | |
2045 | PCI_DMA_TODEVICE); | |
2046 | np->get_tx_ctx->dma = 0; | |
2047 | ||
1da177e4 | 2048 | if (np->desc_ver == DESC_VER_1) { |
f82a9352 | 2049 | if (flags & NV_TX_LASTPACKET) { |
445583b8 | 2050 | if (flags & NV_TX_ERROR) { |
f82a9352 | 2051 | if (flags & NV_TX_UNDERFLOW) |
ac9c1897 | 2052 | np->stats.tx_fifo_errors++; |
f82a9352 | 2053 | if (flags & NV_TX_CARRIERLOST) |
ac9c1897 AA |
2054 | np->stats.tx_carrier_errors++; |
2055 | np->stats.tx_errors++; | |
2056 | } else { | |
2057 | np->stats.tx_packets++; | |
445583b8 | 2058 | np->stats.tx_bytes += np->get_tx_ctx->skb->len; |
ac9c1897 | 2059 | } |
445583b8 AA |
2060 | dev_kfree_skb_any(np->get_tx_ctx->skb); |
2061 | np->get_tx_ctx->skb = NULL; | |
1da177e4 LT |
2062 | } |
2063 | } else { | |
f82a9352 | 2064 | if (flags & NV_TX2_LASTPACKET) { |
445583b8 | 2065 | if (flags & NV_TX2_ERROR) { |
f82a9352 | 2066 | if (flags & NV_TX2_UNDERFLOW) |
ac9c1897 | 2067 | np->stats.tx_fifo_errors++; |
f82a9352 | 2068 | if (flags & NV_TX2_CARRIERLOST) |
ac9c1897 AA |
2069 | np->stats.tx_carrier_errors++; |
2070 | np->stats.tx_errors++; | |
2071 | } else { | |
2072 | np->stats.tx_packets++; | |
445583b8 | 2073 | np->stats.tx_bytes += np->get_tx_ctx->skb->len; |
f3b197ac | 2074 | } |
445583b8 AA |
2075 | dev_kfree_skb_any(np->get_tx_ctx->skb); |
2076 | np->get_tx_ctx->skb = NULL; | |
1da177e4 LT |
2077 | } |
2078 | } | |
445583b8 | 2079 | if (unlikely(np->get_tx.orig++ == np->last_tx.orig)) |
86b22b0d | 2080 | np->get_tx.orig = np->first_tx.orig; |
445583b8 | 2081 | if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) |
86b22b0d AA |
2082 | np->get_tx_ctx = np->first_tx_ctx; |
2083 | } | |
445583b8 | 2084 | if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) { |
aaa37d2d | 2085 | np->tx_stop = 0; |
86b22b0d | 2086 | netif_wake_queue(dev); |
aaa37d2d | 2087 | } |
86b22b0d AA |
2088 | } |
2089 | ||
4e16ed1b | 2090 | static void nv_tx_done_optimized(struct net_device *dev, int limit) |
86b22b0d AA |
2091 | { |
2092 | struct fe_priv *np = netdev_priv(dev); | |
2093 | u32 flags; | |
aaa37d2d | 2094 | struct ring_desc_ex* orig_get_tx = np->get_tx.ex; |
86b22b0d | 2095 | |
445583b8 | 2096 | while ((np->get_tx.ex != np->put_tx.ex) && |
4e16ed1b AA |
2097 | !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) && |
2098 | (limit-- > 0)) { | |
86b22b0d AA |
2099 | |
2100 | dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n", | |
2101 | dev->name, flags); | |
445583b8 AA |
2102 | |
2103 | pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma, | |
2104 | np->get_tx_ctx->dma_len, | |
2105 | PCI_DMA_TODEVICE); | |
2106 | np->get_tx_ctx->dma = 0; | |
2107 | ||
86b22b0d | 2108 | if (flags & NV_TX2_LASTPACKET) { |
21828163 | 2109 | if (!(flags & NV_TX2_ERROR)) |
86b22b0d | 2110 | np->stats.tx_packets++; |
445583b8 AA |
2111 | dev_kfree_skb_any(np->get_tx_ctx->skb); |
2112 | np->get_tx_ctx->skb = NULL; | |
761fcd9e | 2113 | } |
445583b8 | 2114 | if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) |
86b22b0d | 2115 | np->get_tx.ex = np->first_tx.ex; |
445583b8 | 2116 | if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) |
761fcd9e | 2117 | np->get_tx_ctx = np->first_tx_ctx; |
1da177e4 | 2118 | } |
445583b8 | 2119 | if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) { |
aaa37d2d | 2120 | np->tx_stop = 0; |
1da177e4 | 2121 | netif_wake_queue(dev); |
aaa37d2d | 2122 | } |
1da177e4 LT |
2123 | } |
2124 | ||
2125 | /* | |
2126 | * nv_tx_timeout: dev->tx_timeout function | |
932ff279 | 2127 | * Called with netif_tx_lock held. |
1da177e4 LT |
2128 | */ |
2129 | static void nv_tx_timeout(struct net_device *dev) | |
2130 | { | |
ac9c1897 | 2131 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 2132 | u8 __iomem *base = get_hwbase(dev); |
d33a73c8 AA |
2133 | u32 status; |
2134 | ||
2135 | if (np->msi_flags & NV_MSI_X_ENABLED) | |
2136 | status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; | |
2137 | else | |
2138 | status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; | |
1da177e4 | 2139 | |
d33a73c8 | 2140 | printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status); |
1da177e4 | 2141 | |
c2dba06d MS |
2142 | { |
2143 | int i; | |
2144 | ||
761fcd9e AA |
2145 | printk(KERN_INFO "%s: Ring at %lx\n", |
2146 | dev->name, (unsigned long)np->ring_addr); | |
c2dba06d | 2147 | printk(KERN_INFO "%s: Dumping tx registers\n", dev->name); |
86a0f043 | 2148 | for (i=0;i<=np->register_size;i+= 32) { |
c2dba06d MS |
2149 | printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", |
2150 | i, | |
2151 | readl(base + i + 0), readl(base + i + 4), | |
2152 | readl(base + i + 8), readl(base + i + 12), | |
2153 | readl(base + i + 16), readl(base + i + 20), | |
2154 | readl(base + i + 24), readl(base + i + 28)); | |
2155 | } | |
2156 | printk(KERN_INFO "%s: Dumping tx ring\n", dev->name); | |
eafa59f6 | 2157 | for (i=0;i<np->tx_ring_size;i+= 4) { |
ee73362c MS |
2158 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
2159 | printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", | |
f3b197ac | 2160 | i, |
f82a9352 SH |
2161 | le32_to_cpu(np->tx_ring.orig[i].buf), |
2162 | le32_to_cpu(np->tx_ring.orig[i].flaglen), | |
2163 | le32_to_cpu(np->tx_ring.orig[i+1].buf), | |
2164 | le32_to_cpu(np->tx_ring.orig[i+1].flaglen), | |
2165 | le32_to_cpu(np->tx_ring.orig[i+2].buf), | |
2166 | le32_to_cpu(np->tx_ring.orig[i+2].flaglen), | |
2167 | le32_to_cpu(np->tx_ring.orig[i+3].buf), | |
2168 | le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); | |
ee73362c MS |
2169 | } else { |
2170 | printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", | |
f3b197ac | 2171 | i, |
f82a9352 SH |
2172 | le32_to_cpu(np->tx_ring.ex[i].bufhigh), |
2173 | le32_to_cpu(np->tx_ring.ex[i].buflow), | |
2174 | le32_to_cpu(np->tx_ring.ex[i].flaglen), | |
2175 | le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), | |
2176 | le32_to_cpu(np->tx_ring.ex[i+1].buflow), | |
2177 | le32_to_cpu(np->tx_ring.ex[i+1].flaglen), | |
2178 | le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), | |
2179 | le32_to_cpu(np->tx_ring.ex[i+2].buflow), | |
2180 | le32_to_cpu(np->tx_ring.ex[i+2].flaglen), | |
2181 | le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), | |
2182 | le32_to_cpu(np->tx_ring.ex[i+3].buflow), | |
2183 | le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); | |
ee73362c | 2184 | } |
c2dba06d MS |
2185 | } |
2186 | } | |
2187 | ||
1da177e4 LT |
2188 | spin_lock_irq(&np->lock); |
2189 | ||
2190 | /* 1) stop tx engine */ | |
2191 | nv_stop_tx(dev); | |
2192 | ||
2193 | /* 2) check that the packets were not sent already: */ | |
86b22b0d AA |
2194 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
2195 | nv_tx_done(dev); | |
2196 | else | |
4e16ed1b | 2197 | nv_tx_done_optimized(dev, np->tx_ring_size); |
1da177e4 LT |
2198 | |
2199 | /* 3) if there are dead entries: clear everything */ | |
761fcd9e | 2200 | if (np->get_tx_ctx != np->put_tx_ctx) { |
1da177e4 LT |
2201 | printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); |
2202 | nv_drain_tx(dev); | |
761fcd9e | 2203 | nv_init_tx(dev); |
0832b25a | 2204 | setup_hw_rings(dev, NV_SETUP_TX_RING); |
1da177e4 LT |
2205 | } |
2206 | ||
3ba4d093 AA |
2207 | netif_wake_queue(dev); |
2208 | ||
1da177e4 LT |
2209 | /* 4) restart tx engine */ |
2210 | nv_start_tx(dev); | |
2211 | spin_unlock_irq(&np->lock); | |
2212 | } | |
2213 | ||
22c6d143 MS |
2214 | /* |
2215 | * Called when the nic notices a mismatch between the actual data len on the | |
2216 | * wire and the len indicated in the 802 header | |
2217 | */ | |
2218 | static int nv_getlen(struct net_device *dev, void *packet, int datalen) | |
2219 | { | |
2220 | int hdrlen; /* length of the 802 header */ | |
2221 | int protolen; /* length as stored in the proto field */ | |
2222 | ||
2223 | /* 1) calculate len according to header */ | |
f82a9352 | 2224 | if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) { |
22c6d143 MS |
2225 | protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto ); |
2226 | hdrlen = VLAN_HLEN; | |
2227 | } else { | |
2228 | protolen = ntohs( ((struct ethhdr *)packet)->h_proto); | |
2229 | hdrlen = ETH_HLEN; | |
2230 | } | |
2231 | dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n", | |
2232 | dev->name, datalen, protolen, hdrlen); | |
2233 | if (protolen > ETH_DATA_LEN) | |
2234 | return datalen; /* Value in proto field not a len, no checks possible */ | |
2235 | ||
2236 | protolen += hdrlen; | |
2237 | /* consistency checks: */ | |
2238 | if (datalen > ETH_ZLEN) { | |
2239 | if (datalen >= protolen) { | |
2240 | /* more data on wire than in 802 header, trim of | |
2241 | * additional data. | |
2242 | */ | |
2243 | dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", | |
2244 | dev->name, protolen); | |
2245 | return protolen; | |
2246 | } else { | |
2247 | /* less data on wire than mentioned in header. | |
2248 | * Discard the packet. | |
2249 | */ | |
2250 | dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n", | |
2251 | dev->name); | |
2252 | return -1; | |
2253 | } | |
2254 | } else { | |
2255 | /* short packet. Accept only if 802 values are also short */ | |
2256 | if (protolen > ETH_ZLEN) { | |
2257 | dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n", | |
2258 | dev->name); | |
2259 | return -1; | |
2260 | } | |
2261 | dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", | |
2262 | dev->name, datalen); | |
2263 | return datalen; | |
2264 | } | |
2265 | } | |
2266 | ||
e27cdba5 | 2267 | static int nv_rx_process(struct net_device *dev, int limit) |
1da177e4 | 2268 | { |
ac9c1897 | 2269 | struct fe_priv *np = netdev_priv(dev); |
f82a9352 | 2270 | u32 flags; |
b01867cb AA |
2271 | u32 rx_processed_cnt = 0; |
2272 | struct sk_buff *skb; | |
2273 | int len; | |
1da177e4 | 2274 | |
b01867cb AA |
2275 | while((np->get_rx.orig != np->put_rx.orig) && |
2276 | !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && | |
2277 | (rx_processed_cnt++ < limit)) { | |
1da177e4 | 2278 | |
761fcd9e AA |
2279 | dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n", |
2280 | dev->name, flags); | |
1da177e4 | 2281 | |
1da177e4 LT |
2282 | /* |
2283 | * the packet is for us - immediately tear down the pci mapping. | |
2284 | * TODO: check if a prefetch of the first cacheline improves | |
2285 | * the performance. | |
2286 | */ | |
761fcd9e AA |
2287 | pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, |
2288 | np->get_rx_ctx->dma_len, | |
1da177e4 | 2289 | PCI_DMA_FROMDEVICE); |
0d63fb32 AA |
2290 | skb = np->get_rx_ctx->skb; |
2291 | np->get_rx_ctx->skb = NULL; | |
1da177e4 LT |
2292 | |
2293 | { | |
2294 | int j; | |
f82a9352 | 2295 | dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); |
1da177e4 LT |
2296 | for (j=0; j<64; j++) { |
2297 | if ((j%16) == 0) | |
2298 | dprintk("\n%03x:", j); | |
0d63fb32 | 2299 | dprintk(" %02x", ((unsigned char*)skb->data)[j]); |
1da177e4 LT |
2300 | } |
2301 | dprintk("\n"); | |
2302 | } | |
2303 | /* look at what we actually got: */ | |
2304 | if (np->desc_ver == DESC_VER_1) { | |
b01867cb AA |
2305 | if (likely(flags & NV_RX_DESCRIPTORVALID)) { |
2306 | len = flags & LEN_MASK_V1; | |
2307 | if (unlikely(flags & NV_RX_ERROR)) { | |
2308 | if (flags & NV_RX_ERROR4) { | |
2309 | len = nv_getlen(dev, skb->data, len); | |
2310 | if (len < 0) { | |
2311 | np->stats.rx_errors++; | |
2312 | dev_kfree_skb(skb); | |
2313 | goto next_pkt; | |
2314 | } | |
2315 | } | |
2316 | /* framing errors are soft errors */ | |
2317 | else if (flags & NV_RX_FRAMINGERR) { | |
2318 | if (flags & NV_RX_SUBSTRACT1) { | |
2319 | len--; | |
2320 | } | |
2321 | } | |
2322 | /* the rest are hard errors */ | |
2323 | else { | |
2324 | if (flags & NV_RX_MISSEDFRAME) | |
2325 | np->stats.rx_missed_errors++; | |
2326 | if (flags & NV_RX_CRCERR) | |
2327 | np->stats.rx_crc_errors++; | |
2328 | if (flags & NV_RX_OVERFLOW) | |
2329 | np->stats.rx_over_errors++; | |
a971c324 | 2330 | np->stats.rx_errors++; |
0d63fb32 | 2331 | dev_kfree_skb(skb); |
a971c324 AA |
2332 | goto next_pkt; |
2333 | } | |
2334 | } | |
b01867cb | 2335 | } else { |
0d63fb32 | 2336 | dev_kfree_skb(skb); |
1da177e4 | 2337 | goto next_pkt; |
0d63fb32 | 2338 | } |
b01867cb AA |
2339 | } else { |
2340 | if (likely(flags & NV_RX2_DESCRIPTORVALID)) { | |
2341 | len = flags & LEN_MASK_V2; | |
2342 | if (unlikely(flags & NV_RX2_ERROR)) { | |
2343 | if (flags & NV_RX2_ERROR4) { | |
2344 | len = nv_getlen(dev, skb->data, len); | |
2345 | if (len < 0) { | |
2346 | np->stats.rx_errors++; | |
2347 | dev_kfree_skb(skb); | |
2348 | goto next_pkt; | |
2349 | } | |
2350 | } | |
2351 | /* framing errors are soft errors */ | |
2352 | else if (flags & NV_RX2_FRAMINGERR) { | |
2353 | if (flags & NV_RX2_SUBSTRACT1) { | |
2354 | len--; | |
2355 | } | |
2356 | } | |
2357 | /* the rest are hard errors */ | |
2358 | else { | |
2359 | if (flags & NV_RX2_CRCERR) | |
2360 | np->stats.rx_crc_errors++; | |
2361 | if (flags & NV_RX2_OVERFLOW) | |
2362 | np->stats.rx_over_errors++; | |
a971c324 | 2363 | np->stats.rx_errors++; |
0d63fb32 | 2364 | dev_kfree_skb(skb); |
a971c324 AA |
2365 | goto next_pkt; |
2366 | } | |
2367 | } | |
b01867cb | 2368 | if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ { |
0d63fb32 | 2369 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
5ed2616f | 2370 | } else { |
b01867cb AA |
2371 | if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 || |
2372 | (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) { | |
2373 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
2374 | } | |
5ed2616f | 2375 | } |
b01867cb AA |
2376 | } else { |
2377 | dev_kfree_skb(skb); | |
2378 | goto next_pkt; | |
1da177e4 LT |
2379 | } |
2380 | } | |
2381 | /* got a valid packet - forward it to the network core */ | |
1da177e4 LT |
2382 | skb_put(skb, len); |
2383 | skb->protocol = eth_type_trans(skb, dev); | |
761fcd9e AA |
2384 | dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n", |
2385 | dev->name, len, skb->protocol); | |
e27cdba5 | 2386 | #ifdef CONFIG_FORCEDETH_NAPI |
b01867cb | 2387 | netif_receive_skb(skb); |
e27cdba5 | 2388 | #else |
b01867cb | 2389 | netif_rx(skb); |
e27cdba5 | 2390 | #endif |
1da177e4 LT |
2391 | dev->last_rx = jiffies; |
2392 | np->stats.rx_packets++; | |
2393 | np->stats.rx_bytes += len; | |
2394 | next_pkt: | |
b01867cb | 2395 | if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) |
86b22b0d | 2396 | np->get_rx.orig = np->first_rx.orig; |
b01867cb | 2397 | if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) |
86b22b0d AA |
2398 | np->get_rx_ctx = np->first_rx_ctx; |
2399 | } | |
2400 | ||
b01867cb | 2401 | return rx_processed_cnt; |
86b22b0d AA |
2402 | } |
2403 | ||
2404 | static int nv_rx_process_optimized(struct net_device *dev, int limit) | |
2405 | { | |
2406 | struct fe_priv *np = netdev_priv(dev); | |
2407 | u32 flags; | |
2408 | u32 vlanflags = 0; | |
b01867cb AA |
2409 | u32 rx_processed_cnt = 0; |
2410 | struct sk_buff *skb; | |
2411 | int len; | |
86b22b0d | 2412 | |
b01867cb AA |
2413 | while((np->get_rx.ex != np->put_rx.ex) && |
2414 | !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) && | |
2415 | (rx_processed_cnt++ < limit)) { | |
86b22b0d AA |
2416 | |
2417 | dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n", | |
2418 | dev->name, flags); | |
2419 | ||
86b22b0d AA |
2420 | /* |
2421 | * the packet is for us - immediately tear down the pci mapping. | |
2422 | * TODO: check if a prefetch of the first cacheline improves | |
2423 | * the performance. | |
2424 | */ | |
2425 | pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, | |
2426 | np->get_rx_ctx->dma_len, | |
2427 | PCI_DMA_FROMDEVICE); | |
2428 | skb = np->get_rx_ctx->skb; | |
2429 | np->get_rx_ctx->skb = NULL; | |
2430 | ||
2431 | { | |
2432 | int j; | |
2433 | dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); | |
2434 | for (j=0; j<64; j++) { | |
2435 | if ((j%16) == 0) | |
2436 | dprintk("\n%03x:", j); | |
2437 | dprintk(" %02x", ((unsigned char*)skb->data)[j]); | |
2438 | } | |
2439 | dprintk("\n"); | |
761fcd9e | 2440 | } |
86b22b0d | 2441 | /* look at what we actually got: */ |
b01867cb AA |
2442 | if (likely(flags & NV_RX2_DESCRIPTORVALID)) { |
2443 | len = flags & LEN_MASK_V2; | |
2444 | if (unlikely(flags & NV_RX2_ERROR)) { | |
2445 | if (flags & NV_RX2_ERROR4) { | |
2446 | len = nv_getlen(dev, skb->data, len); | |
2447 | if (len < 0) { | |
b01867cb AA |
2448 | dev_kfree_skb(skb); |
2449 | goto next_pkt; | |
2450 | } | |
2451 | } | |
2452 | /* framing errors are soft errors */ | |
2453 | else if (flags & NV_RX2_FRAMINGERR) { | |
2454 | if (flags & NV_RX2_SUBSTRACT1) { | |
2455 | len--; | |
2456 | } | |
2457 | } | |
2458 | /* the rest are hard errors */ | |
2459 | else { | |
86b22b0d AA |
2460 | dev_kfree_skb(skb); |
2461 | goto next_pkt; | |
2462 | } | |
2463 | } | |
b01867cb AA |
2464 | |
2465 | if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ { | |
86b22b0d AA |
2466 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
2467 | } else { | |
b01867cb AA |
2468 | if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 || |
2469 | (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) { | |
2470 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
2471 | } | |
86b22b0d | 2472 | } |
b01867cb AA |
2473 | |
2474 | /* got a valid packet - forward it to the network core */ | |
2475 | skb_put(skb, len); | |
2476 | skb->protocol = eth_type_trans(skb, dev); | |
2477 | prefetch(skb->data); | |
2478 | ||
2479 | dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n", | |
2480 | dev->name, len, skb->protocol); | |
2481 | ||
2482 | if (likely(!np->vlangrp)) { | |
86b22b0d | 2483 | #ifdef CONFIG_FORCEDETH_NAPI |
b01867cb | 2484 | netif_receive_skb(skb); |
86b22b0d | 2485 | #else |
b01867cb | 2486 | netif_rx(skb); |
86b22b0d | 2487 | #endif |
b01867cb AA |
2488 | } else { |
2489 | vlanflags = le32_to_cpu(np->get_rx.ex->buflow); | |
2490 | if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) { | |
2491 | #ifdef CONFIG_FORCEDETH_NAPI | |
2492 | vlan_hwaccel_receive_skb(skb, np->vlangrp, | |
2493 | vlanflags & NV_RX3_VLAN_TAG_MASK); | |
2494 | #else | |
2495 | vlan_hwaccel_rx(skb, np->vlangrp, | |
2496 | vlanflags & NV_RX3_VLAN_TAG_MASK); | |
2497 | #endif | |
2498 | } else { | |
2499 | #ifdef CONFIG_FORCEDETH_NAPI | |
2500 | netif_receive_skb(skb); | |
2501 | #else | |
2502 | netif_rx(skb); | |
2503 | #endif | |
2504 | } | |
2505 | } | |
2506 | ||
2507 | dev->last_rx = jiffies; | |
2508 | np->stats.rx_packets++; | |
2509 | np->stats.rx_bytes += len; | |
2510 | } else { | |
2511 | dev_kfree_skb(skb); | |
2512 | } | |
86b22b0d | 2513 | next_pkt: |
b01867cb | 2514 | if (unlikely(np->get_rx.ex++ == np->last_rx.ex)) |
86b22b0d | 2515 | np->get_rx.ex = np->first_rx.ex; |
b01867cb | 2516 | if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) |
761fcd9e | 2517 | np->get_rx_ctx = np->first_rx_ctx; |
1da177e4 | 2518 | } |
e27cdba5 | 2519 | |
b01867cb | 2520 | return rx_processed_cnt; |
1da177e4 LT |
2521 | } |
2522 | ||
d81c0983 MS |
2523 | static void set_bufsize(struct net_device *dev) |
2524 | { | |
2525 | struct fe_priv *np = netdev_priv(dev); | |
2526 | ||
2527 | if (dev->mtu <= ETH_DATA_LEN) | |
2528 | np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS; | |
2529 | else | |
2530 | np->rx_buf_sz = dev->mtu + NV_RX_HEADERS; | |
2531 | } | |
2532 | ||
1da177e4 LT |
2533 | /* |
2534 | * nv_change_mtu: dev->change_mtu function | |
2535 | * Called with dev_base_lock held for read. | |
2536 | */ | |
2537 | static int nv_change_mtu(struct net_device *dev, int new_mtu) | |
2538 | { | |
ac9c1897 | 2539 | struct fe_priv *np = netdev_priv(dev); |
d81c0983 MS |
2540 | int old_mtu; |
2541 | ||
2542 | if (new_mtu < 64 || new_mtu > np->pkt_limit) | |
1da177e4 | 2543 | return -EINVAL; |
d81c0983 MS |
2544 | |
2545 | old_mtu = dev->mtu; | |
1da177e4 | 2546 | dev->mtu = new_mtu; |
d81c0983 MS |
2547 | |
2548 | /* return early if the buffer sizes will not change */ | |
2549 | if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) | |
2550 | return 0; | |
2551 | if (old_mtu == new_mtu) | |
2552 | return 0; | |
2553 | ||
2554 | /* synchronized against open : rtnl_lock() held by caller */ | |
2555 | if (netif_running(dev)) { | |
25097d4b | 2556 | u8 __iomem *base = get_hwbase(dev); |
d81c0983 MS |
2557 | /* |
2558 | * It seems that the nic preloads valid ring entries into an | |
2559 | * internal buffer. The procedure for flushing everything is | |
2560 | * guessed, there is probably a simpler approach. | |
2561 | * Changing the MTU is a rare event, it shouldn't matter. | |
2562 | */ | |
84b3932b | 2563 | nv_disable_irq(dev); |
932ff279 | 2564 | netif_tx_lock_bh(dev); |
d81c0983 MS |
2565 | spin_lock(&np->lock); |
2566 | /* stop engines */ | |
2567 | nv_stop_rx(dev); | |
2568 | nv_stop_tx(dev); | |
2569 | nv_txrx_reset(dev); | |
2570 | /* drain rx queue */ | |
2571 | nv_drain_rx(dev); | |
2572 | nv_drain_tx(dev); | |
2573 | /* reinit driver view of the rx queue */ | |
d81c0983 | 2574 | set_bufsize(dev); |
eafa59f6 | 2575 | if (nv_init_ring(dev)) { |
d81c0983 MS |
2576 | if (!np->in_shutdown) |
2577 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | |
2578 | } | |
2579 | /* reinit nic view of the rx queue */ | |
2580 | writel(np->rx_buf_sz, base + NvRegOffloadConfig); | |
0832b25a | 2581 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); |
eafa59f6 | 2582 | writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), |
d81c0983 MS |
2583 | base + NvRegRingSizes); |
2584 | pci_push(base); | |
8a4ae7f2 | 2585 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); |
d81c0983 MS |
2586 | pci_push(base); |
2587 | ||
2588 | /* restart rx engine */ | |
2589 | nv_start_rx(dev); | |
2590 | nv_start_tx(dev); | |
2591 | spin_unlock(&np->lock); | |
932ff279 | 2592 | netif_tx_unlock_bh(dev); |
84b3932b | 2593 | nv_enable_irq(dev); |
d81c0983 | 2594 | } |
1da177e4 LT |
2595 | return 0; |
2596 | } | |
2597 | ||
72b31782 MS |
2598 | static void nv_copy_mac_to_hw(struct net_device *dev) |
2599 | { | |
25097d4b | 2600 | u8 __iomem *base = get_hwbase(dev); |
72b31782 MS |
2601 | u32 mac[2]; |
2602 | ||
2603 | mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + | |
2604 | (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); | |
2605 | mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); | |
2606 | ||
2607 | writel(mac[0], base + NvRegMacAddrA); | |
2608 | writel(mac[1], base + NvRegMacAddrB); | |
2609 | } | |
2610 | ||
2611 | /* | |
2612 | * nv_set_mac_address: dev->set_mac_address function | |
2613 | * Called with rtnl_lock() held. | |
2614 | */ | |
2615 | static int nv_set_mac_address(struct net_device *dev, void *addr) | |
2616 | { | |
ac9c1897 | 2617 | struct fe_priv *np = netdev_priv(dev); |
72b31782 MS |
2618 | struct sockaddr *macaddr = (struct sockaddr*)addr; |
2619 | ||
f82a9352 | 2620 | if (!is_valid_ether_addr(macaddr->sa_data)) |
72b31782 MS |
2621 | return -EADDRNOTAVAIL; |
2622 | ||
2623 | /* synchronized against open : rtnl_lock() held by caller */ | |
2624 | memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN); | |
2625 | ||
2626 | if (netif_running(dev)) { | |
932ff279 | 2627 | netif_tx_lock_bh(dev); |
72b31782 MS |
2628 | spin_lock_irq(&np->lock); |
2629 | ||
2630 | /* stop rx engine */ | |
2631 | nv_stop_rx(dev); | |
2632 | ||
2633 | /* set mac address */ | |
2634 | nv_copy_mac_to_hw(dev); | |
2635 | ||
2636 | /* restart rx engine */ | |
2637 | nv_start_rx(dev); | |
2638 | spin_unlock_irq(&np->lock); | |
932ff279 | 2639 | netif_tx_unlock_bh(dev); |
72b31782 MS |
2640 | } else { |
2641 | nv_copy_mac_to_hw(dev); | |
2642 | } | |
2643 | return 0; | |
2644 | } | |
2645 | ||
1da177e4 LT |
2646 | /* |
2647 | * nv_set_multicast: dev->set_multicast function | |
932ff279 | 2648 | * Called with netif_tx_lock held. |
1da177e4 LT |
2649 | */ |
2650 | static void nv_set_multicast(struct net_device *dev) | |
2651 | { | |
ac9c1897 | 2652 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 LT |
2653 | u8 __iomem *base = get_hwbase(dev); |
2654 | u32 addr[2]; | |
2655 | u32 mask[2]; | |
b6d0773f | 2656 | u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX; |
1da177e4 LT |
2657 | |
2658 | memset(addr, 0, sizeof(addr)); | |
2659 | memset(mask, 0, sizeof(mask)); | |
2660 | ||
2661 | if (dev->flags & IFF_PROMISC) { | |
b6d0773f | 2662 | pff |= NVREG_PFF_PROMISC; |
1da177e4 | 2663 | } else { |
b6d0773f | 2664 | pff |= NVREG_PFF_MYADDR; |
1da177e4 LT |
2665 | |
2666 | if (dev->flags & IFF_ALLMULTI || dev->mc_list) { | |
2667 | u32 alwaysOff[2]; | |
2668 | u32 alwaysOn[2]; | |
2669 | ||
2670 | alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff; | |
2671 | if (dev->flags & IFF_ALLMULTI) { | |
2672 | alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; | |
2673 | } else { | |
2674 | struct dev_mc_list *walk; | |
2675 | ||
2676 | walk = dev->mc_list; | |
2677 | while (walk != NULL) { | |
2678 | u32 a, b; | |
2679 | a = le32_to_cpu(*(u32 *) walk->dmi_addr); | |
2680 | b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4])); | |
2681 | alwaysOn[0] &= a; | |
2682 | alwaysOff[0] &= ~a; | |
2683 | alwaysOn[1] &= b; | |
2684 | alwaysOff[1] &= ~b; | |
2685 | walk = walk->next; | |
2686 | } | |
2687 | } | |
2688 | addr[0] = alwaysOn[0]; | |
2689 | addr[1] = alwaysOn[1]; | |
2690 | mask[0] = alwaysOn[0] | alwaysOff[0]; | |
2691 | mask[1] = alwaysOn[1] | alwaysOff[1]; | |
2692 | } | |
2693 | } | |
2694 | addr[0] |= NVREG_MCASTADDRA_FORCE; | |
2695 | pff |= NVREG_PFF_ALWAYS; | |
2696 | spin_lock_irq(&np->lock); | |
2697 | nv_stop_rx(dev); | |
2698 | writel(addr[0], base + NvRegMulticastAddrA); | |
2699 | writel(addr[1], base + NvRegMulticastAddrB); | |
2700 | writel(mask[0], base + NvRegMulticastMaskA); | |
2701 | writel(mask[1], base + NvRegMulticastMaskB); | |
2702 | writel(pff, base + NvRegPacketFilterFlags); | |
2703 | dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n", | |
2704 | dev->name); | |
2705 | nv_start_rx(dev); | |
2706 | spin_unlock_irq(&np->lock); | |
2707 | } | |
2708 | ||
c7985051 | 2709 | static void nv_update_pause(struct net_device *dev, u32 pause_flags) |
b6d0773f AA |
2710 | { |
2711 | struct fe_priv *np = netdev_priv(dev); | |
2712 | u8 __iomem *base = get_hwbase(dev); | |
2713 | ||
2714 | np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); | |
2715 | ||
2716 | if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { | |
2717 | u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX; | |
2718 | if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) { | |
2719 | writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags); | |
2720 | np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | |
2721 | } else { | |
2722 | writel(pff, base + NvRegPacketFilterFlags); | |
2723 | } | |
2724 | } | |
2725 | if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { | |
2726 | u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX; | |
2727 | if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) { | |
2728 | writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame); | |
2729 | writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); | |
2730 | np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | |
2731 | } else { | |
2732 | writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); | |
2733 | writel(regmisc, base + NvRegMisc1); | |
2734 | } | |
2735 | } | |
2736 | } | |
2737 | ||
4ea7f299 AA |
2738 | /** |
2739 | * nv_update_linkspeed: Setup the MAC according to the link partner | |
2740 | * @dev: Network device to be configured | |
2741 | * | |
2742 | * The function queries the PHY and checks if there is a link partner. | |
2743 | * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is | |
2744 | * set to 10 MBit HD. | |
2745 | * | |
2746 | * The function returns 0 if there is no link partner and 1 if there is | |
2747 | * a good link partner. | |
2748 | */ | |
1da177e4 LT |
2749 | static int nv_update_linkspeed(struct net_device *dev) |
2750 | { | |
ac9c1897 | 2751 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 2752 | u8 __iomem *base = get_hwbase(dev); |
eb91f61b AA |
2753 | int adv = 0; |
2754 | int lpa = 0; | |
2755 | int adv_lpa, adv_pause, lpa_pause; | |
1da177e4 LT |
2756 | int newls = np->linkspeed; |
2757 | int newdup = np->duplex; | |
2758 | int mii_status; | |
2759 | int retval = 0; | |
9744e218 | 2760 | u32 control_1000, status_1000, phyreg, pause_flags, txreg; |
1da177e4 LT |
2761 | |
2762 | /* BMSR_LSTATUS is latched, read it twice: | |
2763 | * we want the current value. | |
2764 | */ | |
2765 | mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); | |
2766 | mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); | |
2767 | ||
2768 | if (!(mii_status & BMSR_LSTATUS)) { | |
2769 | dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n", | |
2770 | dev->name); | |
2771 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; | |
2772 | newdup = 0; | |
2773 | retval = 0; | |
2774 | goto set_speed; | |
2775 | } | |
2776 | ||
2777 | if (np->autoneg == 0) { | |
2778 | dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n", | |
2779 | dev->name, np->fixed_mode); | |
2780 | if (np->fixed_mode & LPA_100FULL) { | |
2781 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; | |
2782 | newdup = 1; | |
2783 | } else if (np->fixed_mode & LPA_100HALF) { | |
2784 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; | |
2785 | newdup = 0; | |
2786 | } else if (np->fixed_mode & LPA_10FULL) { | |
2787 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; | |
2788 | newdup = 1; | |
2789 | } else { | |
2790 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; | |
2791 | newdup = 0; | |
2792 | } | |
2793 | retval = 1; | |
2794 | goto set_speed; | |
2795 | } | |
2796 | /* check auto negotiation is complete */ | |
2797 | if (!(mii_status & BMSR_ANEGCOMPLETE)) { | |
2798 | /* still in autonegotiation - configure nic for 10 MBit HD and wait. */ | |
2799 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; | |
2800 | newdup = 0; | |
2801 | retval = 0; | |
2802 | dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name); | |
2803 | goto set_speed; | |
2804 | } | |
2805 | ||
b6d0773f AA |
2806 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); |
2807 | lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); | |
2808 | dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n", | |
2809 | dev->name, adv, lpa); | |
2810 | ||
1da177e4 LT |
2811 | retval = 1; |
2812 | if (np->gigabit == PHY_GIGABIT) { | |
eb91f61b AA |
2813 | control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); |
2814 | status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ); | |
1da177e4 LT |
2815 | |
2816 | if ((control_1000 & ADVERTISE_1000FULL) && | |
2817 | (status_1000 & LPA_1000FULL)) { | |
2818 | dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n", | |
2819 | dev->name); | |
2820 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000; | |
2821 | newdup = 1; | |
2822 | goto set_speed; | |
2823 | } | |
2824 | } | |
2825 | ||
1da177e4 | 2826 | /* FIXME: handle parallel detection properly */ |
eb91f61b AA |
2827 | adv_lpa = lpa & adv; |
2828 | if (adv_lpa & LPA_100FULL) { | |
1da177e4 LT |
2829 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; |
2830 | newdup = 1; | |
eb91f61b | 2831 | } else if (adv_lpa & LPA_100HALF) { |
1da177e4 LT |
2832 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; |
2833 | newdup = 0; | |
eb91f61b | 2834 | } else if (adv_lpa & LPA_10FULL) { |
1da177e4 LT |
2835 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; |
2836 | newdup = 1; | |
eb91f61b | 2837 | } else if (adv_lpa & LPA_10HALF) { |
1da177e4 LT |
2838 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; |
2839 | newdup = 0; | |
2840 | } else { | |
eb91f61b | 2841 | dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa); |
1da177e4 LT |
2842 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; |
2843 | newdup = 0; | |
2844 | } | |
2845 | ||
2846 | set_speed: | |
2847 | if (np->duplex == newdup && np->linkspeed == newls) | |
2848 | return retval; | |
2849 | ||
2850 | dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n", | |
2851 | dev->name, np->linkspeed, np->duplex, newls, newdup); | |
2852 | ||
2853 | np->duplex = newdup; | |
2854 | np->linkspeed = newls; | |
2855 | ||
2856 | if (np->gigabit == PHY_GIGABIT) { | |
2857 | phyreg = readl(base + NvRegRandomSeed); | |
2858 | phyreg &= ~(0x3FF00); | |
2859 | if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) | |
2860 | phyreg |= NVREG_RNDSEED_FORCE3; | |
2861 | else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100) | |
2862 | phyreg |= NVREG_RNDSEED_FORCE2; | |
2863 | else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) | |
2864 | phyreg |= NVREG_RNDSEED_FORCE; | |
2865 | writel(phyreg, base + NvRegRandomSeed); | |
2866 | } | |
2867 | ||
2868 | phyreg = readl(base + NvRegPhyInterface); | |
2869 | phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); | |
2870 | if (np->duplex == 0) | |
2871 | phyreg |= PHY_HALF; | |
2872 | if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) | |
2873 | phyreg |= PHY_100; | |
2874 | else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) | |
2875 | phyreg |= PHY_1000; | |
2876 | writel(phyreg, base + NvRegPhyInterface); | |
2877 | ||
9744e218 AA |
2878 | if (phyreg & PHY_RGMII) { |
2879 | if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) | |
2880 | txreg = NVREG_TX_DEFERRAL_RGMII_1000; | |
2881 | else | |
2882 | txreg = NVREG_TX_DEFERRAL_RGMII_10_100; | |
2883 | } else { | |
2884 | txreg = NVREG_TX_DEFERRAL_DEFAULT; | |
2885 | } | |
2886 | writel(txreg, base + NvRegTxDeferral); | |
2887 | ||
95d161cb AA |
2888 | if (np->desc_ver == DESC_VER_1) { |
2889 | txreg = NVREG_TX_WM_DESC1_DEFAULT; | |
2890 | } else { | |
2891 | if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) | |
2892 | txreg = NVREG_TX_WM_DESC2_3_1000; | |
2893 | else | |
2894 | txreg = NVREG_TX_WM_DESC2_3_DEFAULT; | |
2895 | } | |
2896 | writel(txreg, base + NvRegTxWatermark); | |
2897 | ||
1da177e4 LT |
2898 | writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), |
2899 | base + NvRegMisc1); | |
2900 | pci_push(base); | |
2901 | writel(np->linkspeed, base + NvRegLinkSpeed); | |
2902 | pci_push(base); | |
2903 | ||
b6d0773f AA |
2904 | pause_flags = 0; |
2905 | /* setup pause frame */ | |
eb91f61b | 2906 | if (np->duplex != 0) { |
b6d0773f AA |
2907 | if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { |
2908 | adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM); | |
2909 | lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM); | |
2910 | ||
2911 | switch (adv_pause) { | |
f82a9352 | 2912 | case ADVERTISE_PAUSE_CAP: |
b6d0773f AA |
2913 | if (lpa_pause & LPA_PAUSE_CAP) { |
2914 | pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | |
2915 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) | |
2916 | pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | |
2917 | } | |
2918 | break; | |
f82a9352 | 2919 | case ADVERTISE_PAUSE_ASYM: |
b6d0773f AA |
2920 | if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM)) |
2921 | { | |
2922 | pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | |
2923 | } | |
2924 | break; | |
f82a9352 | 2925 | case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM: |
b6d0773f AA |
2926 | if (lpa_pause & LPA_PAUSE_CAP) |
2927 | { | |
2928 | pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | |
2929 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) | |
2930 | pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | |
2931 | } | |
2932 | if (lpa_pause == LPA_PAUSE_ASYM) | |
2933 | { | |
2934 | pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | |
2935 | } | |
2936 | break; | |
f3b197ac | 2937 | } |
eb91f61b | 2938 | } else { |
b6d0773f | 2939 | pause_flags = np->pause_flags; |
eb91f61b AA |
2940 | } |
2941 | } | |
b6d0773f | 2942 | nv_update_pause(dev, pause_flags); |
eb91f61b | 2943 | |
1da177e4 LT |
2944 | return retval; |
2945 | } | |
2946 | ||
2947 | static void nv_linkchange(struct net_device *dev) | |
2948 | { | |
2949 | if (nv_update_linkspeed(dev)) { | |
4ea7f299 | 2950 | if (!netif_carrier_ok(dev)) { |
1da177e4 LT |
2951 | netif_carrier_on(dev); |
2952 | printk(KERN_INFO "%s: link up.\n", dev->name); | |
4ea7f299 | 2953 | nv_start_rx(dev); |
1da177e4 | 2954 | } |
1da177e4 LT |
2955 | } else { |
2956 | if (netif_carrier_ok(dev)) { | |
2957 | netif_carrier_off(dev); | |
2958 | printk(KERN_INFO "%s: link down.\n", dev->name); | |
2959 | nv_stop_rx(dev); | |
2960 | } | |
2961 | } | |
2962 | } | |
2963 | ||
2964 | static void nv_link_irq(struct net_device *dev) | |
2965 | { | |
2966 | u8 __iomem *base = get_hwbase(dev); | |
2967 | u32 miistat; | |
2968 | ||
2969 | miistat = readl(base + NvRegMIIStatus); | |
2970 | writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); | |
2971 | dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat); | |
2972 | ||
2973 | if (miistat & (NVREG_MIISTAT_LINKCHANGE)) | |
2974 | nv_linkchange(dev); | |
2975 | dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name); | |
2976 | } | |
2977 | ||
7d12e780 | 2978 | static irqreturn_t nv_nic_irq(int foo, void *data) |
1da177e4 LT |
2979 | { |
2980 | struct net_device *dev = (struct net_device *) data; | |
ac9c1897 | 2981 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 LT |
2982 | u8 __iomem *base = get_hwbase(dev); |
2983 | u32 events; | |
2984 | int i; | |
2985 | ||
2986 | dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name); | |
2987 | ||
2988 | for (i=0; ; i++) { | |
d33a73c8 AA |
2989 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) { |
2990 | events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; | |
2991 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | |
2992 | } else { | |
2993 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; | |
2994 | writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); | |
2995 | } | |
1da177e4 LT |
2996 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); |
2997 | if (!(events & np->irqmask)) | |
2998 | break; | |
2999 | ||
a971c324 AA |
3000 | spin_lock(&np->lock); |
3001 | nv_tx_done(dev); | |
3002 | spin_unlock(&np->lock); | |
f3b197ac | 3003 | |
f0734ab6 AA |
3004 | #ifdef CONFIG_FORCEDETH_NAPI |
3005 | if (events & NVREG_IRQ_RX_ALL) { | |
bea3348e | 3006 | netif_rx_schedule(dev, &np->napi); |
f0734ab6 AA |
3007 | |
3008 | /* Disable furthur receive irq's */ | |
3009 | spin_lock(&np->lock); | |
3010 | np->irqmask &= ~NVREG_IRQ_RX_ALL; | |
3011 | ||
3012 | if (np->msi_flags & NV_MSI_X_ENABLED) | |
3013 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); | |
3014 | else | |
3015 | writel(np->irqmask, base + NvRegIrqMask); | |
3016 | spin_unlock(&np->lock); | |
3017 | } | |
3018 | #else | |
bea3348e | 3019 | if (nv_rx_process(dev, RX_WORK_PER_LOOP)) { |
f0734ab6 AA |
3020 | if (unlikely(nv_alloc_rx(dev))) { |
3021 | spin_lock(&np->lock); | |
3022 | if (!np->in_shutdown) | |
3023 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | |
3024 | spin_unlock(&np->lock); | |
3025 | } | |
3026 | } | |
3027 | #endif | |
3028 | if (unlikely(events & NVREG_IRQ_LINK)) { | |
1da177e4 LT |
3029 | spin_lock(&np->lock); |
3030 | nv_link_irq(dev); | |
3031 | spin_unlock(&np->lock); | |
3032 | } | |
f0734ab6 | 3033 | if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { |
1da177e4 LT |
3034 | spin_lock(&np->lock); |
3035 | nv_linkchange(dev); | |
3036 | spin_unlock(&np->lock); | |
3037 | np->link_timeout = jiffies + LINK_TIMEOUT; | |
3038 | } | |
f0734ab6 | 3039 | if (unlikely(events & (NVREG_IRQ_TX_ERR))) { |
1da177e4 LT |
3040 | dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", |
3041 | dev->name, events); | |
3042 | } | |
f0734ab6 | 3043 | if (unlikely(events & (NVREG_IRQ_UNKNOWN))) { |
1da177e4 LT |
3044 | printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", |
3045 | dev->name, events); | |
3046 | } | |
c5cf9101 AA |
3047 | if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) { |
3048 | spin_lock(&np->lock); | |
3049 | /* disable interrupts on the nic */ | |
3050 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) | |
3051 | writel(0, base + NvRegIrqMask); | |
3052 | else | |
3053 | writel(np->irqmask, base + NvRegIrqMask); | |
3054 | pci_push(base); | |
3055 | ||
3056 | if (!np->in_shutdown) { | |
3057 | np->nic_poll_irq = np->irqmask; | |
3058 | np->recover_error = 1; | |
3059 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | |
3060 | } | |
3061 | spin_unlock(&np->lock); | |
3062 | break; | |
3063 | } | |
f0734ab6 | 3064 | if (unlikely(i > max_interrupt_work)) { |
1da177e4 LT |
3065 | spin_lock(&np->lock); |
3066 | /* disable interrupts on the nic */ | |
d33a73c8 AA |
3067 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) |
3068 | writel(0, base + NvRegIrqMask); | |
3069 | else | |
3070 | writel(np->irqmask, base + NvRegIrqMask); | |
1da177e4 LT |
3071 | pci_push(base); |
3072 | ||
d33a73c8 AA |
3073 | if (!np->in_shutdown) { |
3074 | np->nic_poll_irq = np->irqmask; | |
1da177e4 | 3075 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); |
d33a73c8 | 3076 | } |
1da177e4 | 3077 | spin_unlock(&np->lock); |
1a2b7330 | 3078 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); |
1da177e4 LT |
3079 | break; |
3080 | } | |
3081 | ||
3082 | } | |
3083 | dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name); | |
3084 | ||
3085 | return IRQ_RETVAL(i); | |
3086 | } | |
3087 | ||
f0734ab6 AA |
3088 | /** |
3089 | * All _optimized functions are used to help increase performance | |
3090 | * (reduce CPU and increase throughput). They use descripter version 3, | |
3091 | * compiler directives, and reduce memory accesses. | |
3092 | */ | |
86b22b0d AA |
3093 | static irqreturn_t nv_nic_irq_optimized(int foo, void *data) |
3094 | { | |
3095 | struct net_device *dev = (struct net_device *) data; | |
3096 | struct fe_priv *np = netdev_priv(dev); | |
3097 | u8 __iomem *base = get_hwbase(dev); | |
3098 | u32 events; | |
3099 | int i; | |
3100 | ||
3101 | dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name); | |
3102 | ||
3103 | for (i=0; ; i++) { | |
3104 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) { | |
3105 | events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; | |
3106 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | |
3107 | } else { | |
3108 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; | |
3109 | writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); | |
3110 | } | |
86b22b0d AA |
3111 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); |
3112 | if (!(events & np->irqmask)) | |
3113 | break; | |
3114 | ||
3115 | spin_lock(&np->lock); | |
4e16ed1b | 3116 | nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); |
86b22b0d AA |
3117 | spin_unlock(&np->lock); |
3118 | ||
f0734ab6 AA |
3119 | #ifdef CONFIG_FORCEDETH_NAPI |
3120 | if (events & NVREG_IRQ_RX_ALL) { | |
bea3348e | 3121 | netif_rx_schedule(dev, &np->napi); |
f0734ab6 AA |
3122 | |
3123 | /* Disable furthur receive irq's */ | |
3124 | spin_lock(&np->lock); | |
3125 | np->irqmask &= ~NVREG_IRQ_RX_ALL; | |
3126 | ||
3127 | if (np->msi_flags & NV_MSI_X_ENABLED) | |
3128 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); | |
3129 | else | |
3130 | writel(np->irqmask, base + NvRegIrqMask); | |
3131 | spin_unlock(&np->lock); | |
3132 | } | |
3133 | #else | |
bea3348e | 3134 | if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { |
f0734ab6 AA |
3135 | if (unlikely(nv_alloc_rx_optimized(dev))) { |
3136 | spin_lock(&np->lock); | |
3137 | if (!np->in_shutdown) | |
3138 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | |
3139 | spin_unlock(&np->lock); | |
3140 | } | |
3141 | } | |
3142 | #endif | |
3143 | if (unlikely(events & NVREG_IRQ_LINK)) { | |
86b22b0d AA |
3144 | spin_lock(&np->lock); |
3145 | nv_link_irq(dev); | |
3146 | spin_unlock(&np->lock); | |
3147 | } | |
f0734ab6 | 3148 | if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { |
86b22b0d AA |
3149 | spin_lock(&np->lock); |
3150 | nv_linkchange(dev); | |
3151 | spin_unlock(&np->lock); | |
3152 | np->link_timeout = jiffies + LINK_TIMEOUT; | |
3153 | } | |
f0734ab6 | 3154 | if (unlikely(events & (NVREG_IRQ_TX_ERR))) { |
86b22b0d AA |
3155 | dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", |
3156 | dev->name, events); | |
3157 | } | |
f0734ab6 | 3158 | if (unlikely(events & (NVREG_IRQ_UNKNOWN))) { |
86b22b0d AA |
3159 | printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", |
3160 | dev->name, events); | |
3161 | } | |
3162 | if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) { | |
3163 | spin_lock(&np->lock); | |
3164 | /* disable interrupts on the nic */ | |
3165 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) | |
3166 | writel(0, base + NvRegIrqMask); | |
3167 | else | |
3168 | writel(np->irqmask, base + NvRegIrqMask); | |
3169 | pci_push(base); | |
3170 | ||
3171 | if (!np->in_shutdown) { | |
3172 | np->nic_poll_irq = np->irqmask; | |
3173 | np->recover_error = 1; | |
3174 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | |
3175 | } | |
3176 | spin_unlock(&np->lock); | |
3177 | break; | |
3178 | } | |
3179 | ||
f0734ab6 | 3180 | if (unlikely(i > max_interrupt_work)) { |
86b22b0d AA |
3181 | spin_lock(&np->lock); |
3182 | /* disable interrupts on the nic */ | |
3183 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) | |
3184 | writel(0, base + NvRegIrqMask); | |
3185 | else | |
3186 | writel(np->irqmask, base + NvRegIrqMask); | |
3187 | pci_push(base); | |
3188 | ||
3189 | if (!np->in_shutdown) { | |
3190 | np->nic_poll_irq = np->irqmask; | |
3191 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | |
3192 | } | |
86b22b0d | 3193 | spin_unlock(&np->lock); |
1a2b7330 | 3194 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); |
86b22b0d AA |
3195 | break; |
3196 | } | |
3197 | ||
3198 | } | |
3199 | dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name); | |
3200 | ||
3201 | return IRQ_RETVAL(i); | |
3202 | } | |
3203 | ||
7d12e780 | 3204 | static irqreturn_t nv_nic_irq_tx(int foo, void *data) |
d33a73c8 AA |
3205 | { |
3206 | struct net_device *dev = (struct net_device *) data; | |
3207 | struct fe_priv *np = netdev_priv(dev); | |
3208 | u8 __iomem *base = get_hwbase(dev); | |
3209 | u32 events; | |
3210 | int i; | |
0a07bc64 | 3211 | unsigned long flags; |
d33a73c8 AA |
3212 | |
3213 | dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name); | |
3214 | ||
3215 | for (i=0; ; i++) { | |
3216 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; | |
3217 | writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); | |
d33a73c8 AA |
3218 | dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); |
3219 | if (!(events & np->irqmask)) | |
3220 | break; | |
3221 | ||
0a07bc64 | 3222 | spin_lock_irqsave(&np->lock, flags); |
4e16ed1b | 3223 | nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); |
0a07bc64 | 3224 | spin_unlock_irqrestore(&np->lock, flags); |
f3b197ac | 3225 | |
f0734ab6 | 3226 | if (unlikely(events & (NVREG_IRQ_TX_ERR))) { |
d33a73c8 AA |
3227 | dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", |
3228 | dev->name, events); | |
3229 | } | |
f0734ab6 | 3230 | if (unlikely(i > max_interrupt_work)) { |
0a07bc64 | 3231 | spin_lock_irqsave(&np->lock, flags); |
d33a73c8 AA |
3232 | /* disable interrupts on the nic */ |
3233 | writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); | |
3234 | pci_push(base); | |
3235 | ||
3236 | if (!np->in_shutdown) { | |
3237 | np->nic_poll_irq |= NVREG_IRQ_TX_ALL; | |
3238 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | |
3239 | } | |
0a07bc64 | 3240 | spin_unlock_irqrestore(&np->lock, flags); |
1a2b7330 | 3241 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); |
d33a73c8 AA |
3242 | break; |
3243 | } | |
3244 | ||
3245 | } | |
3246 | dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name); | |
3247 | ||
3248 | return IRQ_RETVAL(i); | |
3249 | } | |
3250 | ||
e27cdba5 | 3251 | #ifdef CONFIG_FORCEDETH_NAPI |
bea3348e | 3252 | static int nv_napi_poll(struct napi_struct *napi, int budget) |
e27cdba5 | 3253 | { |
bea3348e SH |
3254 | struct fe_priv *np = container_of(napi, struct fe_priv, napi); |
3255 | struct net_device *dev = np->dev; | |
e27cdba5 | 3256 | u8 __iomem *base = get_hwbase(dev); |
d15e9c4d | 3257 | unsigned long flags; |
bea3348e | 3258 | int pkts, retcode; |
e27cdba5 | 3259 | |
e0379a14 | 3260 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
bea3348e | 3261 | pkts = nv_rx_process(dev, budget); |
e0379a14 AA |
3262 | retcode = nv_alloc_rx(dev); |
3263 | } else { | |
bea3348e | 3264 | pkts = nv_rx_process_optimized(dev, budget); |
e0379a14 AA |
3265 | retcode = nv_alloc_rx_optimized(dev); |
3266 | } | |
e27cdba5 | 3267 | |
e0379a14 | 3268 | if (retcode) { |
d15e9c4d | 3269 | spin_lock_irqsave(&np->lock, flags); |
e27cdba5 SH |
3270 | if (!np->in_shutdown) |
3271 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | |
d15e9c4d | 3272 | spin_unlock_irqrestore(&np->lock, flags); |
e27cdba5 SH |
3273 | } |
3274 | ||
bea3348e | 3275 | if (pkts < budget) { |
e27cdba5 | 3276 | /* re-enable receive interrupts */ |
d15e9c4d FR |
3277 | spin_lock_irqsave(&np->lock, flags); |
3278 | ||
bea3348e SH |
3279 | __netif_rx_complete(dev, napi); |
3280 | ||
e27cdba5 SH |
3281 | np->irqmask |= NVREG_IRQ_RX_ALL; |
3282 | if (np->msi_flags & NV_MSI_X_ENABLED) | |
3283 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); | |
3284 | else | |
3285 | writel(np->irqmask, base + NvRegIrqMask); | |
d15e9c4d FR |
3286 | |
3287 | spin_unlock_irqrestore(&np->lock, flags); | |
e27cdba5 | 3288 | } |
bea3348e | 3289 | return pkts; |
e27cdba5 SH |
3290 | } |
3291 | #endif | |
3292 | ||
3293 | #ifdef CONFIG_FORCEDETH_NAPI | |
7d12e780 | 3294 | static irqreturn_t nv_nic_irq_rx(int foo, void *data) |
e27cdba5 SH |
3295 | { |
3296 | struct net_device *dev = (struct net_device *) data; | |
bea3348e | 3297 | struct fe_priv *np = netdev_priv(dev); |
e27cdba5 SH |
3298 | u8 __iomem *base = get_hwbase(dev); |
3299 | u32 events; | |
3300 | ||
3301 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; | |
3302 | writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); | |
3303 | ||
3304 | if (events) { | |
bea3348e | 3305 | netif_rx_schedule(dev, &np->napi); |
e27cdba5 SH |
3306 | /* disable receive interrupts on the nic */ |
3307 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); | |
3308 | pci_push(base); | |
3309 | } | |
3310 | return IRQ_HANDLED; | |
3311 | } | |
3312 | #else | |
7d12e780 | 3313 | static irqreturn_t nv_nic_irq_rx(int foo, void *data) |
d33a73c8 AA |
3314 | { |
3315 | struct net_device *dev = (struct net_device *) data; | |
3316 | struct fe_priv *np = netdev_priv(dev); | |
3317 | u8 __iomem *base = get_hwbase(dev); | |
3318 | u32 events; | |
3319 | int i; | |
0a07bc64 | 3320 | unsigned long flags; |
d33a73c8 AA |
3321 | |
3322 | dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name); | |
3323 | ||
3324 | for (i=0; ; i++) { | |
3325 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; | |
3326 | writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); | |
d33a73c8 AA |
3327 | dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); |
3328 | if (!(events & np->irqmask)) | |
3329 | break; | |
f3b197ac | 3330 | |
bea3348e | 3331 | if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { |
f0734ab6 AA |
3332 | if (unlikely(nv_alloc_rx_optimized(dev))) { |
3333 | spin_lock_irqsave(&np->lock, flags); | |
3334 | if (!np->in_shutdown) | |
3335 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | |
3336 | spin_unlock_irqrestore(&np->lock, flags); | |
3337 | } | |
d33a73c8 | 3338 | } |
f3b197ac | 3339 | |
f0734ab6 | 3340 | if (unlikely(i > max_interrupt_work)) { |
0a07bc64 | 3341 | spin_lock_irqsave(&np->lock, flags); |
d33a73c8 AA |
3342 | /* disable interrupts on the nic */ |
3343 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); | |
3344 | pci_push(base); | |
3345 | ||
3346 | if (!np->in_shutdown) { | |
3347 | np->nic_poll_irq |= NVREG_IRQ_RX_ALL; | |
3348 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | |
3349 | } | |
0a07bc64 | 3350 | spin_unlock_irqrestore(&np->lock, flags); |
1a2b7330 | 3351 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); |
d33a73c8 AA |
3352 | break; |
3353 | } | |
d33a73c8 AA |
3354 | } |
3355 | dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name); | |
3356 | ||
3357 | return IRQ_RETVAL(i); | |
3358 | } | |
e27cdba5 | 3359 | #endif |
d33a73c8 | 3360 | |
7d12e780 | 3361 | static irqreturn_t nv_nic_irq_other(int foo, void *data) |
d33a73c8 AA |
3362 | { |
3363 | struct net_device *dev = (struct net_device *) data; | |
3364 | struct fe_priv *np = netdev_priv(dev); | |
3365 | u8 __iomem *base = get_hwbase(dev); | |
3366 | u32 events; | |
3367 | int i; | |
0a07bc64 | 3368 | unsigned long flags; |
d33a73c8 AA |
3369 | |
3370 | dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name); | |
3371 | ||
3372 | for (i=0; ; i++) { | |
3373 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; | |
3374 | writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); | |
d33a73c8 AA |
3375 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); |
3376 | if (!(events & np->irqmask)) | |
3377 | break; | |
f3b197ac | 3378 | |
4e16ed1b AA |
3379 | /* check tx in case we reached max loop limit in tx isr */ |
3380 | spin_lock_irqsave(&np->lock, flags); | |
3381 | nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); | |
3382 | spin_unlock_irqrestore(&np->lock, flags); | |
3383 | ||
d33a73c8 | 3384 | if (events & NVREG_IRQ_LINK) { |
0a07bc64 | 3385 | spin_lock_irqsave(&np->lock, flags); |
d33a73c8 | 3386 | nv_link_irq(dev); |
0a07bc64 | 3387 | spin_unlock_irqrestore(&np->lock, flags); |
d33a73c8 AA |
3388 | } |
3389 | if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { | |
0a07bc64 | 3390 | spin_lock_irqsave(&np->lock, flags); |
d33a73c8 | 3391 | nv_linkchange(dev); |
0a07bc64 | 3392 | spin_unlock_irqrestore(&np->lock, flags); |
d33a73c8 AA |
3393 | np->link_timeout = jiffies + LINK_TIMEOUT; |
3394 | } | |
c5cf9101 AA |
3395 | if (events & NVREG_IRQ_RECOVER_ERROR) { |
3396 | spin_lock_irq(&np->lock); | |
3397 | /* disable interrupts on the nic */ | |
3398 | writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); | |
3399 | pci_push(base); | |
3400 | ||
3401 | if (!np->in_shutdown) { | |
3402 | np->nic_poll_irq |= NVREG_IRQ_OTHER; | |
3403 | np->recover_error = 1; | |
3404 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | |
3405 | } | |
3406 | spin_unlock_irq(&np->lock); | |
3407 | break; | |
3408 | } | |
d33a73c8 AA |
3409 | if (events & (NVREG_IRQ_UNKNOWN)) { |
3410 | printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", | |
3411 | dev->name, events); | |
3412 | } | |
f0734ab6 | 3413 | if (unlikely(i > max_interrupt_work)) { |
0a07bc64 | 3414 | spin_lock_irqsave(&np->lock, flags); |
d33a73c8 AA |
3415 | /* disable interrupts on the nic */ |
3416 | writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); | |
3417 | pci_push(base); | |
3418 | ||
3419 | if (!np->in_shutdown) { | |
3420 | np->nic_poll_irq |= NVREG_IRQ_OTHER; | |
3421 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | |
3422 | } | |
0a07bc64 | 3423 | spin_unlock_irqrestore(&np->lock, flags); |
1a2b7330 | 3424 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); |
d33a73c8 AA |
3425 | break; |
3426 | } | |
3427 | ||
3428 | } | |
3429 | dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name); | |
3430 | ||
3431 | return IRQ_RETVAL(i); | |
3432 | } | |
3433 | ||
7d12e780 | 3434 | static irqreturn_t nv_nic_irq_test(int foo, void *data) |
9589c77a AA |
3435 | { |
3436 | struct net_device *dev = (struct net_device *) data; | |
3437 | struct fe_priv *np = netdev_priv(dev); | |
3438 | u8 __iomem *base = get_hwbase(dev); | |
3439 | u32 events; | |
3440 | ||
3441 | dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name); | |
3442 | ||
3443 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) { | |
3444 | events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; | |
3445 | writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus); | |
3446 | } else { | |
3447 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; | |
3448 | writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); | |
3449 | } | |
3450 | pci_push(base); | |
3451 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); | |
3452 | if (!(events & NVREG_IRQ_TIMER)) | |
3453 | return IRQ_RETVAL(0); | |
3454 | ||
3455 | spin_lock(&np->lock); | |
3456 | np->intr_test = 1; | |
3457 | spin_unlock(&np->lock); | |
3458 | ||
3459 | dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name); | |
3460 | ||
3461 | return IRQ_RETVAL(1); | |
3462 | } | |
3463 | ||
7a1854b7 AA |
3464 | static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) |
3465 | { | |
3466 | u8 __iomem *base = get_hwbase(dev); | |
3467 | int i; | |
3468 | u32 msixmap = 0; | |
3469 | ||
3470 | /* Each interrupt bit can be mapped to a MSIX vector (4 bits). | |
3471 | * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents | |
3472 | * the remaining 8 interrupts. | |
3473 | */ | |
3474 | for (i = 0; i < 8; i++) { | |
3475 | if ((irqmask >> i) & 0x1) { | |
3476 | msixmap |= vector << (i << 2); | |
3477 | } | |
3478 | } | |
3479 | writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); | |
3480 | ||
3481 | msixmap = 0; | |
3482 | for (i = 0; i < 8; i++) { | |
3483 | if ((irqmask >> (i + 8)) & 0x1) { | |
3484 | msixmap |= vector << (i << 2); | |
3485 | } | |
3486 | } | |
3487 | writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); | |
3488 | } | |
3489 | ||
9589c77a | 3490 | static int nv_request_irq(struct net_device *dev, int intr_test) |
7a1854b7 AA |
3491 | { |
3492 | struct fe_priv *np = get_nvpriv(dev); | |
3493 | u8 __iomem *base = get_hwbase(dev); | |
3494 | int ret = 1; | |
3495 | int i; | |
86b22b0d AA |
3496 | irqreturn_t (*handler)(int foo, void *data); |
3497 | ||
3498 | if (intr_test) { | |
3499 | handler = nv_nic_irq_test; | |
3500 | } else { | |
3501 | if (np->desc_ver == DESC_VER_3) | |
3502 | handler = nv_nic_irq_optimized; | |
3503 | else | |
3504 | handler = nv_nic_irq; | |
3505 | } | |
7a1854b7 AA |
3506 | |
3507 | if (np->msi_flags & NV_MSI_X_CAPABLE) { | |
3508 | for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { | |
3509 | np->msi_x_entry[i].entry = i; | |
3510 | } | |
3511 | if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { | |
3512 | np->msi_flags |= NV_MSI_X_ENABLED; | |
9589c77a | 3513 | if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { |
7a1854b7 | 3514 | /* Request irq for rx handling */ |
1fb9df5d | 3515 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) { |
7a1854b7 AA |
3516 | printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); |
3517 | pci_disable_msix(np->pci_dev); | |
3518 | np->msi_flags &= ~NV_MSI_X_ENABLED; | |
3519 | goto out_err; | |
3520 | } | |
3521 | /* Request irq for tx handling */ | |
1fb9df5d | 3522 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) { |
7a1854b7 AA |
3523 | printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); |
3524 | pci_disable_msix(np->pci_dev); | |
3525 | np->msi_flags &= ~NV_MSI_X_ENABLED; | |
3526 | goto out_free_rx; | |
3527 | } | |
3528 | /* Request irq for link and timer handling */ | |
1fb9df5d | 3529 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) { |
7a1854b7 AA |
3530 | printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); |
3531 | pci_disable_msix(np->pci_dev); | |
3532 | np->msi_flags &= ~NV_MSI_X_ENABLED; | |
3533 | goto out_free_tx; | |
3534 | } | |
3535 | /* map interrupts to their respective vector */ | |
3536 | writel(0, base + NvRegMSIXMap0); | |
3537 | writel(0, base + NvRegMSIXMap1); | |
3538 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); | |
3539 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); | |
3540 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); | |
3541 | } else { | |
3542 | /* Request irq for all interrupts */ | |
86b22b0d | 3543 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) { |
7a1854b7 AA |
3544 | printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); |
3545 | pci_disable_msix(np->pci_dev); | |
3546 | np->msi_flags &= ~NV_MSI_X_ENABLED; | |
3547 | goto out_err; | |
3548 | } | |
3549 | ||
3550 | /* map interrupts to vector 0 */ | |
3551 | writel(0, base + NvRegMSIXMap0); | |
3552 | writel(0, base + NvRegMSIXMap1); | |
3553 | } | |
3554 | } | |
3555 | } | |
3556 | if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { | |
3557 | if ((ret = pci_enable_msi(np->pci_dev)) == 0) { | |
3558 | np->msi_flags |= NV_MSI_ENABLED; | |
86b22b0d | 3559 | if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) { |
7a1854b7 AA |
3560 | printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); |
3561 | pci_disable_msi(np->pci_dev); | |
3562 | np->msi_flags &= ~NV_MSI_ENABLED; | |
3563 | goto out_err; | |
3564 | } | |
3565 | ||
3566 | /* map interrupts to vector 0 */ | |
3567 | writel(0, base + NvRegMSIMap0); | |
3568 | writel(0, base + NvRegMSIMap1); | |
3569 | /* enable msi vector 0 */ | |
3570 | writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); | |
3571 | } | |
3572 | } | |
3573 | if (ret != 0) { | |
86b22b0d | 3574 | if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) |
7a1854b7 | 3575 | goto out_err; |
9589c77a | 3576 | |
7a1854b7 AA |
3577 | } |
3578 | ||
3579 | return 0; | |
3580 | out_free_tx: | |
3581 | free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); | |
3582 | out_free_rx: | |
3583 | free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); | |
3584 | out_err: | |
3585 | return 1; | |
3586 | } | |
3587 | ||
3588 | static void nv_free_irq(struct net_device *dev) | |
3589 | { | |
3590 | struct fe_priv *np = get_nvpriv(dev); | |
3591 | int i; | |
3592 | ||
3593 | if (np->msi_flags & NV_MSI_X_ENABLED) { | |
3594 | for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { | |
3595 | free_irq(np->msi_x_entry[i].vector, dev); | |
3596 | } | |
3597 | pci_disable_msix(np->pci_dev); | |
3598 | np->msi_flags &= ~NV_MSI_X_ENABLED; | |
3599 | } else { | |
3600 | free_irq(np->pci_dev->irq, dev); | |
3601 | if (np->msi_flags & NV_MSI_ENABLED) { | |
3602 | pci_disable_msi(np->pci_dev); | |
3603 | np->msi_flags &= ~NV_MSI_ENABLED; | |
3604 | } | |
3605 | } | |
3606 | } | |
3607 | ||
1da177e4 LT |
3608 | static void nv_do_nic_poll(unsigned long data) |
3609 | { | |
3610 | struct net_device *dev = (struct net_device *) data; | |
ac9c1897 | 3611 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 3612 | u8 __iomem *base = get_hwbase(dev); |
d33a73c8 | 3613 | u32 mask = 0; |
1da177e4 | 3614 | |
1da177e4 | 3615 | /* |
d33a73c8 | 3616 | * First disable irq(s) and then |
1da177e4 LT |
3617 | * reenable interrupts on the nic, we have to do this before calling |
3618 | * nv_nic_irq because that may decide to do otherwise | |
3619 | */ | |
d33a73c8 | 3620 | |
84b3932b AA |
3621 | if (!using_multi_irqs(dev)) { |
3622 | if (np->msi_flags & NV_MSI_X_ENABLED) | |
8688cfce | 3623 | disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); |
84b3932b | 3624 | else |
8688cfce | 3625 | disable_irq_lockdep(dev->irq); |
d33a73c8 AA |
3626 | mask = np->irqmask; |
3627 | } else { | |
3628 | if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { | |
8688cfce | 3629 | disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); |
d33a73c8 AA |
3630 | mask |= NVREG_IRQ_RX_ALL; |
3631 | } | |
3632 | if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { | |
8688cfce | 3633 | disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); |
d33a73c8 AA |
3634 | mask |= NVREG_IRQ_TX_ALL; |
3635 | } | |
3636 | if (np->nic_poll_irq & NVREG_IRQ_OTHER) { | |
8688cfce | 3637 | disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); |
d33a73c8 AA |
3638 | mask |= NVREG_IRQ_OTHER; |
3639 | } | |
3640 | } | |
3641 | np->nic_poll_irq = 0; | |
3642 | ||
c5cf9101 AA |
3643 | if (np->recover_error) { |
3644 | np->recover_error = 0; | |
3645 | printk(KERN_INFO "forcedeth: MAC in recoverable error state\n"); | |
3646 | if (netif_running(dev)) { | |
3647 | netif_tx_lock_bh(dev); | |
3648 | spin_lock(&np->lock); | |
3649 | /* stop engines */ | |
3650 | nv_stop_rx(dev); | |
3651 | nv_stop_tx(dev); | |
3652 | nv_txrx_reset(dev); | |
3653 | /* drain rx queue */ | |
3654 | nv_drain_rx(dev); | |
3655 | nv_drain_tx(dev); | |
3656 | /* reinit driver view of the rx queue */ | |
3657 | set_bufsize(dev); | |
3658 | if (nv_init_ring(dev)) { | |
3659 | if (!np->in_shutdown) | |
3660 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | |
3661 | } | |
3662 | /* reinit nic view of the rx queue */ | |
3663 | writel(np->rx_buf_sz, base + NvRegOffloadConfig); | |
3664 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); | |
3665 | writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), | |
3666 | base + NvRegRingSizes); | |
3667 | pci_push(base); | |
3668 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | |
3669 | pci_push(base); | |
3670 | ||
3671 | /* restart rx engine */ | |
3672 | nv_start_rx(dev); | |
3673 | nv_start_tx(dev); | |
3674 | spin_unlock(&np->lock); | |
3675 | netif_tx_unlock_bh(dev); | |
3676 | } | |
3677 | } | |
3678 | ||
d33a73c8 | 3679 | /* FIXME: Do we need synchronize_irq(dev->irq) here? */ |
f3b197ac | 3680 | |
d33a73c8 | 3681 | writel(mask, base + NvRegIrqMask); |
1da177e4 | 3682 | pci_push(base); |
d33a73c8 | 3683 | |
84b3932b | 3684 | if (!using_multi_irqs(dev)) { |
fcc5f266 AA |
3685 | if (np->desc_ver == DESC_VER_3) |
3686 | nv_nic_irq_optimized(0, dev); | |
3687 | else | |
3688 | nv_nic_irq(0, dev); | |
84b3932b | 3689 | if (np->msi_flags & NV_MSI_X_ENABLED) |
8688cfce | 3690 | enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); |
84b3932b | 3691 | else |
8688cfce | 3692 | enable_irq_lockdep(dev->irq); |
d33a73c8 AA |
3693 | } else { |
3694 | if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { | |
7d12e780 | 3695 | nv_nic_irq_rx(0, dev); |
8688cfce | 3696 | enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); |
d33a73c8 AA |
3697 | } |
3698 | if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { | |
7d12e780 | 3699 | nv_nic_irq_tx(0, dev); |
8688cfce | 3700 | enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); |
d33a73c8 AA |
3701 | } |
3702 | if (np->nic_poll_irq & NVREG_IRQ_OTHER) { | |
7d12e780 | 3703 | nv_nic_irq_other(0, dev); |
8688cfce | 3704 | enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); |
d33a73c8 AA |
3705 | } |
3706 | } | |
1da177e4 LT |
3707 | } |
3708 | ||
2918c35d MS |
3709 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3710 | static void nv_poll_controller(struct net_device *dev) | |
3711 | { | |
3712 | nv_do_nic_poll((unsigned long) dev); | |
3713 | } | |
3714 | #endif | |
3715 | ||
52da3578 AA |
3716 | static void nv_do_stats_poll(unsigned long data) |
3717 | { | |
3718 | struct net_device *dev = (struct net_device *) data; | |
3719 | struct fe_priv *np = netdev_priv(dev); | |
52da3578 | 3720 | |
57fff698 | 3721 | nv_get_hw_stats(dev); |
52da3578 AA |
3722 | |
3723 | if (!np->in_shutdown) | |
3724 | mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); | |
3725 | } | |
3726 | ||
1da177e4 LT |
3727 | static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) |
3728 | { | |
ac9c1897 | 3729 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 LT |
3730 | strcpy(info->driver, "forcedeth"); |
3731 | strcpy(info->version, FORCEDETH_VERSION); | |
3732 | strcpy(info->bus_info, pci_name(np->pci_dev)); | |
3733 | } | |
3734 | ||
3735 | static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) | |
3736 | { | |
ac9c1897 | 3737 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 LT |
3738 | wolinfo->supported = WAKE_MAGIC; |
3739 | ||
3740 | spin_lock_irq(&np->lock); | |
3741 | if (np->wolenabled) | |
3742 | wolinfo->wolopts = WAKE_MAGIC; | |
3743 | spin_unlock_irq(&np->lock); | |
3744 | } | |
3745 | ||
3746 | static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) | |
3747 | { | |
ac9c1897 | 3748 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 3749 | u8 __iomem *base = get_hwbase(dev); |
c42d9df9 | 3750 | u32 flags = 0; |
1da177e4 | 3751 | |
1da177e4 | 3752 | if (wolinfo->wolopts == 0) { |
1da177e4 | 3753 | np->wolenabled = 0; |
c42d9df9 | 3754 | } else if (wolinfo->wolopts & WAKE_MAGIC) { |
1da177e4 | 3755 | np->wolenabled = 1; |
c42d9df9 AA |
3756 | flags = NVREG_WAKEUPFLAGS_ENABLE; |
3757 | } | |
3758 | if (netif_running(dev)) { | |
3759 | spin_lock_irq(&np->lock); | |
3760 | writel(flags, base + NvRegWakeUpFlags); | |
3761 | spin_unlock_irq(&np->lock); | |
1da177e4 | 3762 | } |
1da177e4 LT |
3763 | return 0; |
3764 | } | |
3765 | ||
3766 | static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |
3767 | { | |
3768 | struct fe_priv *np = netdev_priv(dev); | |
3769 | int adv; | |
3770 | ||
3771 | spin_lock_irq(&np->lock); | |
3772 | ecmd->port = PORT_MII; | |
3773 | if (!netif_running(dev)) { | |
3774 | /* We do not track link speed / duplex setting if the | |
3775 | * interface is disabled. Force a link check */ | |
f9430a01 AA |
3776 | if (nv_update_linkspeed(dev)) { |
3777 | if (!netif_carrier_ok(dev)) | |
3778 | netif_carrier_on(dev); | |
3779 | } else { | |
3780 | if (netif_carrier_ok(dev)) | |
3781 | netif_carrier_off(dev); | |
3782 | } | |
1da177e4 | 3783 | } |
f9430a01 AA |
3784 | |
3785 | if (netif_carrier_ok(dev)) { | |
3786 | switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) { | |
1da177e4 LT |
3787 | case NVREG_LINKSPEED_10: |
3788 | ecmd->speed = SPEED_10; | |
3789 | break; | |
3790 | case NVREG_LINKSPEED_100: | |
3791 | ecmd->speed = SPEED_100; | |
3792 | break; | |
3793 | case NVREG_LINKSPEED_1000: | |
3794 | ecmd->speed = SPEED_1000; | |
3795 | break; | |
f9430a01 AA |
3796 | } |
3797 | ecmd->duplex = DUPLEX_HALF; | |
3798 | if (np->duplex) | |
3799 | ecmd->duplex = DUPLEX_FULL; | |
3800 | } else { | |
3801 | ecmd->speed = -1; | |
3802 | ecmd->duplex = -1; | |
1da177e4 | 3803 | } |
1da177e4 LT |
3804 | |
3805 | ecmd->autoneg = np->autoneg; | |
3806 | ||
3807 | ecmd->advertising = ADVERTISED_MII; | |
3808 | if (np->autoneg) { | |
3809 | ecmd->advertising |= ADVERTISED_Autoneg; | |
3810 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | |
f9430a01 AA |
3811 | if (adv & ADVERTISE_10HALF) |
3812 | ecmd->advertising |= ADVERTISED_10baseT_Half; | |
3813 | if (adv & ADVERTISE_10FULL) | |
3814 | ecmd->advertising |= ADVERTISED_10baseT_Full; | |
3815 | if (adv & ADVERTISE_100HALF) | |
3816 | ecmd->advertising |= ADVERTISED_100baseT_Half; | |
3817 | if (adv & ADVERTISE_100FULL) | |
3818 | ecmd->advertising |= ADVERTISED_100baseT_Full; | |
3819 | if (np->gigabit == PHY_GIGABIT) { | |
3820 | adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); | |
3821 | if (adv & ADVERTISE_1000FULL) | |
3822 | ecmd->advertising |= ADVERTISED_1000baseT_Full; | |
3823 | } | |
1da177e4 | 3824 | } |
1da177e4 LT |
3825 | ecmd->supported = (SUPPORTED_Autoneg | |
3826 | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | | |
3827 | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | | |
3828 | SUPPORTED_MII); | |
3829 | if (np->gigabit == PHY_GIGABIT) | |
3830 | ecmd->supported |= SUPPORTED_1000baseT_Full; | |
3831 | ||
3832 | ecmd->phy_address = np->phyaddr; | |
3833 | ecmd->transceiver = XCVR_EXTERNAL; | |
3834 | ||
3835 | /* ignore maxtxpkt, maxrxpkt for now */ | |
3836 | spin_unlock_irq(&np->lock); | |
3837 | return 0; | |
3838 | } | |
3839 | ||
3840 | static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |
3841 | { | |
3842 | struct fe_priv *np = netdev_priv(dev); | |
3843 | ||
3844 | if (ecmd->port != PORT_MII) | |
3845 | return -EINVAL; | |
3846 | if (ecmd->transceiver != XCVR_EXTERNAL) | |
3847 | return -EINVAL; | |
3848 | if (ecmd->phy_address != np->phyaddr) { | |
3849 | /* TODO: support switching between multiple phys. Should be | |
3850 | * trivial, but not enabled due to lack of test hardware. */ | |
3851 | return -EINVAL; | |
3852 | } | |
3853 | if (ecmd->autoneg == AUTONEG_ENABLE) { | |
3854 | u32 mask; | |
3855 | ||
3856 | mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | | |
3857 | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; | |
3858 | if (np->gigabit == PHY_GIGABIT) | |
3859 | mask |= ADVERTISED_1000baseT_Full; | |
3860 | ||
3861 | if ((ecmd->advertising & mask) == 0) | |
3862 | return -EINVAL; | |
3863 | ||
3864 | } else if (ecmd->autoneg == AUTONEG_DISABLE) { | |
3865 | /* Note: autonegotiation disable, speed 1000 intentionally | |
3866 | * forbidden - noone should need that. */ | |
3867 | ||
3868 | if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) | |
3869 | return -EINVAL; | |
3870 | if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) | |
3871 | return -EINVAL; | |
3872 | } else { | |
3873 | return -EINVAL; | |
3874 | } | |
3875 | ||
f9430a01 AA |
3876 | netif_carrier_off(dev); |
3877 | if (netif_running(dev)) { | |
3878 | nv_disable_irq(dev); | |
58dfd9c1 | 3879 | netif_tx_lock_bh(dev); |
f9430a01 AA |
3880 | spin_lock(&np->lock); |
3881 | /* stop engines */ | |
3882 | nv_stop_rx(dev); | |
3883 | nv_stop_tx(dev); | |
3884 | spin_unlock(&np->lock); | |
58dfd9c1 | 3885 | netif_tx_unlock_bh(dev); |
f9430a01 AA |
3886 | } |
3887 | ||
1da177e4 LT |
3888 | if (ecmd->autoneg == AUTONEG_ENABLE) { |
3889 | int adv, bmcr; | |
3890 | ||
3891 | np->autoneg = 1; | |
3892 | ||
3893 | /* advertise only what has been requested */ | |
3894 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | |
eb91f61b | 3895 | adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); |
1da177e4 LT |
3896 | if (ecmd->advertising & ADVERTISED_10baseT_Half) |
3897 | adv |= ADVERTISE_10HALF; | |
3898 | if (ecmd->advertising & ADVERTISED_10baseT_Full) | |
b6d0773f | 3899 | adv |= ADVERTISE_10FULL; |
1da177e4 LT |
3900 | if (ecmd->advertising & ADVERTISED_100baseT_Half) |
3901 | adv |= ADVERTISE_100HALF; | |
3902 | if (ecmd->advertising & ADVERTISED_100baseT_Full) | |
b6d0773f AA |
3903 | adv |= ADVERTISE_100FULL; |
3904 | if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */ | |
3905 | adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; | |
3906 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) | |
3907 | adv |= ADVERTISE_PAUSE_ASYM; | |
1da177e4 LT |
3908 | mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); |
3909 | ||
3910 | if (np->gigabit == PHY_GIGABIT) { | |
eb91f61b | 3911 | adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); |
1da177e4 LT |
3912 | adv &= ~ADVERTISE_1000FULL; |
3913 | if (ecmd->advertising & ADVERTISED_1000baseT_Full) | |
3914 | adv |= ADVERTISE_1000FULL; | |
eb91f61b | 3915 | mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); |
1da177e4 LT |
3916 | } |
3917 | ||
f9430a01 AA |
3918 | if (netif_running(dev)) |
3919 | printk(KERN_INFO "%s: link down.\n", dev->name); | |
1da177e4 | 3920 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); |
edf7e5ec AA |
3921 | if (np->phy_model == PHY_MODEL_MARVELL_E3016) { |
3922 | bmcr |= BMCR_ANENABLE; | |
3923 | /* reset the phy in order for settings to stick, | |
3924 | * and cause autoneg to start */ | |
3925 | if (phy_reset(dev, bmcr)) { | |
3926 | printk(KERN_INFO "%s: phy reset failed\n", dev->name); | |
3927 | return -EINVAL; | |
3928 | } | |
3929 | } else { | |
3930 | bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); | |
3931 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); | |
3932 | } | |
1da177e4 LT |
3933 | } else { |
3934 | int adv, bmcr; | |
3935 | ||
3936 | np->autoneg = 0; | |
3937 | ||
3938 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | |
eb91f61b | 3939 | adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); |
1da177e4 LT |
3940 | if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) |
3941 | adv |= ADVERTISE_10HALF; | |
3942 | if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) | |
b6d0773f | 3943 | adv |= ADVERTISE_10FULL; |
1da177e4 LT |
3944 | if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) |
3945 | adv |= ADVERTISE_100HALF; | |
3946 | if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) | |
b6d0773f AA |
3947 | adv |= ADVERTISE_100FULL; |
3948 | np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); | |
3949 | if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisments but disable tx pause */ | |
3950 | adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; | |
3951 | np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | |
3952 | } | |
3953 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) { | |
3954 | adv |= ADVERTISE_PAUSE_ASYM; | |
3955 | np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | |
3956 | } | |
1da177e4 LT |
3957 | mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); |
3958 | np->fixed_mode = adv; | |
3959 | ||
3960 | if (np->gigabit == PHY_GIGABIT) { | |
eb91f61b | 3961 | adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); |
1da177e4 | 3962 | adv &= ~ADVERTISE_1000FULL; |
eb91f61b | 3963 | mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); |
1da177e4 LT |
3964 | } |
3965 | ||
3966 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); | |
f9430a01 AA |
3967 | bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX); |
3968 | if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL)) | |
1da177e4 | 3969 | bmcr |= BMCR_FULLDPLX; |
f9430a01 | 3970 | if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) |
1da177e4 | 3971 | bmcr |= BMCR_SPEED100; |
f9430a01 | 3972 | if (np->phy_oui == PHY_OUI_MARVELL) { |
edf7e5ec AA |
3973 | /* reset the phy in order for forced mode settings to stick */ |
3974 | if (phy_reset(dev, bmcr)) { | |
f9430a01 AA |
3975 | printk(KERN_INFO "%s: phy reset failed\n", dev->name); |
3976 | return -EINVAL; | |
3977 | } | |
edf7e5ec AA |
3978 | } else { |
3979 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); | |
3980 | if (netif_running(dev)) { | |
3981 | /* Wait a bit and then reconfigure the nic. */ | |
3982 | udelay(10); | |
3983 | nv_linkchange(dev); | |
3984 | } | |
1da177e4 LT |
3985 | } |
3986 | } | |
f9430a01 AA |
3987 | |
3988 | if (netif_running(dev)) { | |
3989 | nv_start_rx(dev); | |
3990 | nv_start_tx(dev); | |
3991 | nv_enable_irq(dev); | |
3992 | } | |
1da177e4 LT |
3993 | |
3994 | return 0; | |
3995 | } | |
3996 | ||
dc8216c1 | 3997 | #define FORCEDETH_REGS_VER 1 |
dc8216c1 MS |
3998 | |
3999 | static int nv_get_regs_len(struct net_device *dev) | |
4000 | { | |
86a0f043 AA |
4001 | struct fe_priv *np = netdev_priv(dev); |
4002 | return np->register_size; | |
dc8216c1 MS |
4003 | } |
4004 | ||
4005 | static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) | |
4006 | { | |
ac9c1897 | 4007 | struct fe_priv *np = netdev_priv(dev); |
dc8216c1 MS |
4008 | u8 __iomem *base = get_hwbase(dev); |
4009 | u32 *rbuf = buf; | |
4010 | int i; | |
4011 | ||
4012 | regs->version = FORCEDETH_REGS_VER; | |
4013 | spin_lock_irq(&np->lock); | |
86a0f043 | 4014 | for (i = 0;i <= np->register_size/sizeof(u32); i++) |
dc8216c1 MS |
4015 | rbuf[i] = readl(base + i*sizeof(u32)); |
4016 | spin_unlock_irq(&np->lock); | |
4017 | } | |
4018 | ||
4019 | static int nv_nway_reset(struct net_device *dev) | |
4020 | { | |
ac9c1897 | 4021 | struct fe_priv *np = netdev_priv(dev); |
dc8216c1 MS |
4022 | int ret; |
4023 | ||
dc8216c1 MS |
4024 | if (np->autoneg) { |
4025 | int bmcr; | |
4026 | ||
f9430a01 AA |
4027 | netif_carrier_off(dev); |
4028 | if (netif_running(dev)) { | |
4029 | nv_disable_irq(dev); | |
58dfd9c1 | 4030 | netif_tx_lock_bh(dev); |
f9430a01 AA |
4031 | spin_lock(&np->lock); |
4032 | /* stop engines */ | |
4033 | nv_stop_rx(dev); | |
4034 | nv_stop_tx(dev); | |
4035 | spin_unlock(&np->lock); | |
58dfd9c1 | 4036 | netif_tx_unlock_bh(dev); |
f9430a01 AA |
4037 | printk(KERN_INFO "%s: link down.\n", dev->name); |
4038 | } | |
4039 | ||
dc8216c1 | 4040 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); |
edf7e5ec AA |
4041 | if (np->phy_model == PHY_MODEL_MARVELL_E3016) { |
4042 | bmcr |= BMCR_ANENABLE; | |
4043 | /* reset the phy in order for settings to stick*/ | |
4044 | if (phy_reset(dev, bmcr)) { | |
4045 | printk(KERN_INFO "%s: phy reset failed\n", dev->name); | |
4046 | return -EINVAL; | |
4047 | } | |
4048 | } else { | |
4049 | bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); | |
4050 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); | |
4051 | } | |
dc8216c1 | 4052 | |
f9430a01 AA |
4053 | if (netif_running(dev)) { |
4054 | nv_start_rx(dev); | |
4055 | nv_start_tx(dev); | |
4056 | nv_enable_irq(dev); | |
4057 | } | |
dc8216c1 MS |
4058 | ret = 0; |
4059 | } else { | |
4060 | ret = -EINVAL; | |
4061 | } | |
dc8216c1 MS |
4062 | |
4063 | return ret; | |
4064 | } | |
4065 | ||
0674d594 ZA |
4066 | static int nv_set_tso(struct net_device *dev, u32 value) |
4067 | { | |
4068 | struct fe_priv *np = netdev_priv(dev); | |
4069 | ||
4070 | if ((np->driver_data & DEV_HAS_CHECKSUM)) | |
4071 | return ethtool_op_set_tso(dev, value); | |
4072 | else | |
6a78814f | 4073 | return -EOPNOTSUPP; |
0674d594 | 4074 | } |
0674d594 | 4075 | |
eafa59f6 AA |
4076 | static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) |
4077 | { | |
4078 | struct fe_priv *np = netdev_priv(dev); | |
4079 | ||
4080 | ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; | |
4081 | ring->rx_mini_max_pending = 0; | |
4082 | ring->rx_jumbo_max_pending = 0; | |
4083 | ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; | |
4084 | ||
4085 | ring->rx_pending = np->rx_ring_size; | |
4086 | ring->rx_mini_pending = 0; | |
4087 | ring->rx_jumbo_pending = 0; | |
4088 | ring->tx_pending = np->tx_ring_size; | |
4089 | } | |
4090 | ||
4091 | static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) | |
4092 | { | |
4093 | struct fe_priv *np = netdev_priv(dev); | |
4094 | u8 __iomem *base = get_hwbase(dev); | |
761fcd9e | 4095 | u8 *rxtx_ring, *rx_skbuff, *tx_skbuff; |
eafa59f6 AA |
4096 | dma_addr_t ring_addr; |
4097 | ||
4098 | if (ring->rx_pending < RX_RING_MIN || | |
4099 | ring->tx_pending < TX_RING_MIN || | |
4100 | ring->rx_mini_pending != 0 || | |
4101 | ring->rx_jumbo_pending != 0 || | |
4102 | (np->desc_ver == DESC_VER_1 && | |
4103 | (ring->rx_pending > RING_MAX_DESC_VER_1 || | |
4104 | ring->tx_pending > RING_MAX_DESC_VER_1)) || | |
4105 | (np->desc_ver != DESC_VER_1 && | |
4106 | (ring->rx_pending > RING_MAX_DESC_VER_2_3 || | |
4107 | ring->tx_pending > RING_MAX_DESC_VER_2_3))) { | |
4108 | return -EINVAL; | |
4109 | } | |
4110 | ||
4111 | /* allocate new rings */ | |
4112 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | |
4113 | rxtx_ring = pci_alloc_consistent(np->pci_dev, | |
4114 | sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), | |
4115 | &ring_addr); | |
4116 | } else { | |
4117 | rxtx_ring = pci_alloc_consistent(np->pci_dev, | |
4118 | sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), | |
4119 | &ring_addr); | |
4120 | } | |
761fcd9e AA |
4121 | rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL); |
4122 | tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL); | |
4123 | if (!rxtx_ring || !rx_skbuff || !tx_skbuff) { | |
eafa59f6 AA |
4124 | /* fall back to old rings */ |
4125 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | |
f82a9352 | 4126 | if (rxtx_ring) |
eafa59f6 AA |
4127 | pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), |
4128 | rxtx_ring, ring_addr); | |
4129 | } else { | |
4130 | if (rxtx_ring) | |
4131 | pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), | |
4132 | rxtx_ring, ring_addr); | |
4133 | } | |
4134 | if (rx_skbuff) | |
4135 | kfree(rx_skbuff); | |
eafa59f6 AA |
4136 | if (tx_skbuff) |
4137 | kfree(tx_skbuff); | |
eafa59f6 AA |
4138 | goto exit; |
4139 | } | |
4140 | ||
4141 | if (netif_running(dev)) { | |
4142 | nv_disable_irq(dev); | |
58dfd9c1 | 4143 | netif_tx_lock_bh(dev); |
eafa59f6 AA |
4144 | spin_lock(&np->lock); |
4145 | /* stop engines */ | |
4146 | nv_stop_rx(dev); | |
4147 | nv_stop_tx(dev); | |
4148 | nv_txrx_reset(dev); | |
4149 | /* drain queues */ | |
4150 | nv_drain_rx(dev); | |
4151 | nv_drain_tx(dev); | |
4152 | /* delete queues */ | |
4153 | free_rings(dev); | |
4154 | } | |
4155 | ||
4156 | /* set new values */ | |
4157 | np->rx_ring_size = ring->rx_pending; | |
4158 | np->tx_ring_size = ring->tx_pending; | |
eafa59f6 AA |
4159 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
4160 | np->rx_ring.orig = (struct ring_desc*)rxtx_ring; | |
4161 | np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; | |
4162 | } else { | |
4163 | np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; | |
4164 | np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; | |
4165 | } | |
761fcd9e AA |
4166 | np->rx_skb = (struct nv_skb_map*)rx_skbuff; |
4167 | np->tx_skb = (struct nv_skb_map*)tx_skbuff; | |
eafa59f6 AA |
4168 | np->ring_addr = ring_addr; |
4169 | ||
761fcd9e AA |
4170 | memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); |
4171 | memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); | |
eafa59f6 AA |
4172 | |
4173 | if (netif_running(dev)) { | |
4174 | /* reinit driver view of the queues */ | |
4175 | set_bufsize(dev); | |
4176 | if (nv_init_ring(dev)) { | |
4177 | if (!np->in_shutdown) | |
4178 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | |
4179 | } | |
4180 | ||
4181 | /* reinit nic view of the queues */ | |
4182 | writel(np->rx_buf_sz, base + NvRegOffloadConfig); | |
4183 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); | |
4184 | writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), | |
4185 | base + NvRegRingSizes); | |
4186 | pci_push(base); | |
4187 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | |
4188 | pci_push(base); | |
4189 | ||
4190 | /* restart engines */ | |
4191 | nv_start_rx(dev); | |
4192 | nv_start_tx(dev); | |
4193 | spin_unlock(&np->lock); | |
58dfd9c1 | 4194 | netif_tx_unlock_bh(dev); |
eafa59f6 AA |
4195 | nv_enable_irq(dev); |
4196 | } | |
4197 | return 0; | |
4198 | exit: | |
4199 | return -ENOMEM; | |
4200 | } | |
4201 | ||
b6d0773f AA |
4202 | static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) |
4203 | { | |
4204 | struct fe_priv *np = netdev_priv(dev); | |
4205 | ||
4206 | pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; | |
4207 | pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; | |
4208 | pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0; | |
4209 | } | |
4210 | ||
4211 | static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) | |
4212 | { | |
4213 | struct fe_priv *np = netdev_priv(dev); | |
4214 | int adv, bmcr; | |
4215 | ||
4216 | if ((!np->autoneg && np->duplex == 0) || | |
4217 | (np->autoneg && !pause->autoneg && np->duplex == 0)) { | |
4218 | printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n", | |
4219 | dev->name); | |
4220 | return -EINVAL; | |
4221 | } | |
4222 | if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { | |
4223 | printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name); | |
4224 | return -EINVAL; | |
4225 | } | |
4226 | ||
4227 | netif_carrier_off(dev); | |
4228 | if (netif_running(dev)) { | |
4229 | nv_disable_irq(dev); | |
58dfd9c1 | 4230 | netif_tx_lock_bh(dev); |
b6d0773f AA |
4231 | spin_lock(&np->lock); |
4232 | /* stop engines */ | |
4233 | nv_stop_rx(dev); | |
4234 | nv_stop_tx(dev); | |
4235 | spin_unlock(&np->lock); | |
58dfd9c1 | 4236 | netif_tx_unlock_bh(dev); |
b6d0773f AA |
4237 | } |
4238 | ||
4239 | np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ); | |
4240 | if (pause->rx_pause) | |
4241 | np->pause_flags |= NV_PAUSEFRAME_RX_REQ; | |
4242 | if (pause->tx_pause) | |
4243 | np->pause_flags |= NV_PAUSEFRAME_TX_REQ; | |
4244 | ||
4245 | if (np->autoneg && pause->autoneg) { | |
4246 | np->pause_flags |= NV_PAUSEFRAME_AUTONEG; | |
4247 | ||
4248 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | |
4249 | adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); | |
4250 | if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */ | |
4251 | adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; | |
4252 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) | |
4253 | adv |= ADVERTISE_PAUSE_ASYM; | |
4254 | mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); | |
4255 | ||
4256 | if (netif_running(dev)) | |
4257 | printk(KERN_INFO "%s: link down.\n", dev->name); | |
4258 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); | |
4259 | bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); | |
4260 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); | |
4261 | } else { | |
4262 | np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); | |
4263 | if (pause->rx_pause) | |
4264 | np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | |
4265 | if (pause->tx_pause) | |
4266 | np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | |
4267 | ||
4268 | if (!netif_running(dev)) | |
4269 | nv_update_linkspeed(dev); | |
4270 | else | |
4271 | nv_update_pause(dev, np->pause_flags); | |
4272 | } | |
4273 | ||
4274 | if (netif_running(dev)) { | |
4275 | nv_start_rx(dev); | |
4276 | nv_start_tx(dev); | |
4277 | nv_enable_irq(dev); | |
4278 | } | |
4279 | return 0; | |
4280 | } | |
4281 | ||
5ed2616f AA |
4282 | static u32 nv_get_rx_csum(struct net_device *dev) |
4283 | { | |
4284 | struct fe_priv *np = netdev_priv(dev); | |
f2ad2d9b | 4285 | return (np->rx_csum) != 0; |
5ed2616f AA |
4286 | } |
4287 | ||
4288 | static int nv_set_rx_csum(struct net_device *dev, u32 data) | |
4289 | { | |
4290 | struct fe_priv *np = netdev_priv(dev); | |
4291 | u8 __iomem *base = get_hwbase(dev); | |
4292 | int retcode = 0; | |
4293 | ||
4294 | if (np->driver_data & DEV_HAS_CHECKSUM) { | |
5ed2616f | 4295 | if (data) { |
f2ad2d9b | 4296 | np->rx_csum = 1; |
5ed2616f | 4297 | np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; |
5ed2616f | 4298 | } else { |
f2ad2d9b AA |
4299 | np->rx_csum = 0; |
4300 | /* vlan is dependent on rx checksum offload */ | |
4301 | if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) | |
4302 | np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; | |
5ed2616f | 4303 | } |
5ed2616f AA |
4304 | if (netif_running(dev)) { |
4305 | spin_lock_irq(&np->lock); | |
4306 | writel(np->txrxctl_bits, base + NvRegTxRxControl); | |
4307 | spin_unlock_irq(&np->lock); | |
4308 | } | |
4309 | } else { | |
4310 | return -EINVAL; | |
4311 | } | |
4312 | ||
4313 | return retcode; | |
4314 | } | |
4315 | ||
4316 | static int nv_set_tx_csum(struct net_device *dev, u32 data) | |
4317 | { | |
4318 | struct fe_priv *np = netdev_priv(dev); | |
4319 | ||
4320 | if (np->driver_data & DEV_HAS_CHECKSUM) | |
4321 | return ethtool_op_set_tx_hw_csum(dev, data); | |
4322 | else | |
4323 | return -EOPNOTSUPP; | |
4324 | } | |
4325 | ||
4326 | static int nv_set_sg(struct net_device *dev, u32 data) | |
4327 | { | |
4328 | struct fe_priv *np = netdev_priv(dev); | |
4329 | ||
4330 | if (np->driver_data & DEV_HAS_CHECKSUM) | |
4331 | return ethtool_op_set_sg(dev, data); | |
4332 | else | |
4333 | return -EOPNOTSUPP; | |
4334 | } | |
4335 | ||
b9f2c044 | 4336 | static int nv_get_sset_count(struct net_device *dev, int sset) |
52da3578 AA |
4337 | { |
4338 | struct fe_priv *np = netdev_priv(dev); | |
4339 | ||
b9f2c044 JG |
4340 | switch (sset) { |
4341 | case ETH_SS_TEST: | |
4342 | if (np->driver_data & DEV_HAS_TEST_EXTENDED) | |
4343 | return NV_TEST_COUNT_EXTENDED; | |
4344 | else | |
4345 | return NV_TEST_COUNT_BASE; | |
4346 | case ETH_SS_STATS: | |
4347 | if (np->driver_data & DEV_HAS_STATISTICS_V1) | |
4348 | return NV_DEV_STATISTICS_V1_COUNT; | |
4349 | else if (np->driver_data & DEV_HAS_STATISTICS_V2) | |
4350 | return NV_DEV_STATISTICS_V2_COUNT; | |
4351 | else | |
4352 | return 0; | |
4353 | default: | |
4354 | return -EOPNOTSUPP; | |
4355 | } | |
52da3578 AA |
4356 | } |
4357 | ||
4358 | static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer) | |
4359 | { | |
4360 | struct fe_priv *np = netdev_priv(dev); | |
4361 | ||
4362 | /* update stats */ | |
4363 | nv_do_stats_poll((unsigned long)dev); | |
4364 | ||
b9f2c044 | 4365 | memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); |
9589c77a AA |
4366 | } |
4367 | ||
4368 | static int nv_link_test(struct net_device *dev) | |
4369 | { | |
4370 | struct fe_priv *np = netdev_priv(dev); | |
4371 | int mii_status; | |
4372 | ||
4373 | mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); | |
4374 | mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); | |
4375 | ||
4376 | /* check phy link status */ | |
4377 | if (!(mii_status & BMSR_LSTATUS)) | |
4378 | return 0; | |
4379 | else | |
4380 | return 1; | |
4381 | } | |
4382 | ||
4383 | static int nv_register_test(struct net_device *dev) | |
4384 | { | |
4385 | u8 __iomem *base = get_hwbase(dev); | |
4386 | int i = 0; | |
4387 | u32 orig_read, new_read; | |
4388 | ||
4389 | do { | |
4390 | orig_read = readl(base + nv_registers_test[i].reg); | |
4391 | ||
4392 | /* xor with mask to toggle bits */ | |
4393 | orig_read ^= nv_registers_test[i].mask; | |
4394 | ||
4395 | writel(orig_read, base + nv_registers_test[i].reg); | |
4396 | ||
4397 | new_read = readl(base + nv_registers_test[i].reg); | |
4398 | ||
4399 | if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask)) | |
4400 | return 0; | |
4401 | ||
4402 | /* restore original value */ | |
4403 | orig_read ^= nv_registers_test[i].mask; | |
4404 | writel(orig_read, base + nv_registers_test[i].reg); | |
4405 | ||
4406 | } while (nv_registers_test[++i].reg != 0); | |
4407 | ||
4408 | return 1; | |
4409 | } | |
4410 | ||
4411 | static int nv_interrupt_test(struct net_device *dev) | |
4412 | { | |
4413 | struct fe_priv *np = netdev_priv(dev); | |
4414 | u8 __iomem *base = get_hwbase(dev); | |
4415 | int ret = 1; | |
4416 | int testcnt; | |
4417 | u32 save_msi_flags, save_poll_interval = 0; | |
4418 | ||
4419 | if (netif_running(dev)) { | |
4420 | /* free current irq */ | |
4421 | nv_free_irq(dev); | |
4422 | save_poll_interval = readl(base+NvRegPollingInterval); | |
4423 | } | |
4424 | ||
4425 | /* flag to test interrupt handler */ | |
4426 | np->intr_test = 0; | |
4427 | ||
4428 | /* setup test irq */ | |
4429 | save_msi_flags = np->msi_flags; | |
4430 | np->msi_flags &= ~NV_MSI_X_VECTORS_MASK; | |
4431 | np->msi_flags |= 0x001; /* setup 1 vector */ | |
4432 | if (nv_request_irq(dev, 1)) | |
4433 | return 0; | |
4434 | ||
4435 | /* setup timer interrupt */ | |
4436 | writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); | |
4437 | writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); | |
4438 | ||
4439 | nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER); | |
4440 | ||
4441 | /* wait for at least one interrupt */ | |
4442 | msleep(100); | |
4443 | ||
4444 | spin_lock_irq(&np->lock); | |
4445 | ||
4446 | /* flag should be set within ISR */ | |
4447 | testcnt = np->intr_test; | |
4448 | if (!testcnt) | |
4449 | ret = 2; | |
4450 | ||
4451 | nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER); | |
4452 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) | |
4453 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | |
4454 | else | |
4455 | writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); | |
4456 | ||
4457 | spin_unlock_irq(&np->lock); | |
4458 | ||
4459 | nv_free_irq(dev); | |
4460 | ||
4461 | np->msi_flags = save_msi_flags; | |
4462 | ||
4463 | if (netif_running(dev)) { | |
4464 | writel(save_poll_interval, base + NvRegPollingInterval); | |
4465 | writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); | |
4466 | /* restore original irq */ | |
4467 | if (nv_request_irq(dev, 0)) | |
4468 | return 0; | |
4469 | } | |
4470 | ||
4471 | return ret; | |
4472 | } | |
4473 | ||
4474 | static int nv_loopback_test(struct net_device *dev) | |
4475 | { | |
4476 | struct fe_priv *np = netdev_priv(dev); | |
4477 | u8 __iomem *base = get_hwbase(dev); | |
4478 | struct sk_buff *tx_skb, *rx_skb; | |
4479 | dma_addr_t test_dma_addr; | |
4480 | u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); | |
f82a9352 | 4481 | u32 flags; |
9589c77a AA |
4482 | int len, i, pkt_len; |
4483 | u8 *pkt_data; | |
4484 | u32 filter_flags = 0; | |
4485 | u32 misc1_flags = 0; | |
4486 | int ret = 1; | |
4487 | ||
4488 | if (netif_running(dev)) { | |
4489 | nv_disable_irq(dev); | |
4490 | filter_flags = readl(base + NvRegPacketFilterFlags); | |
4491 | misc1_flags = readl(base + NvRegMisc1); | |
4492 | } else { | |
4493 | nv_txrx_reset(dev); | |
4494 | } | |
4495 | ||
4496 | /* reinit driver view of the rx queue */ | |
4497 | set_bufsize(dev); | |
4498 | nv_init_ring(dev); | |
4499 | ||
4500 | /* setup hardware for loopback */ | |
4501 | writel(NVREG_MISC1_FORCE, base + NvRegMisc1); | |
4502 | writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags); | |
4503 | ||
4504 | /* reinit nic view of the rx queue */ | |
4505 | writel(np->rx_buf_sz, base + NvRegOffloadConfig); | |
4506 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); | |
4507 | writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), | |
4508 | base + NvRegRingSizes); | |
4509 | pci_push(base); | |
4510 | ||
4511 | /* restart rx engine */ | |
4512 | nv_start_rx(dev); | |
4513 | nv_start_tx(dev); | |
4514 | ||
4515 | /* setup packet for tx */ | |
4516 | pkt_len = ETH_DATA_LEN; | |
4517 | tx_skb = dev_alloc_skb(pkt_len); | |
46798c89 JJ |
4518 | if (!tx_skb) { |
4519 | printk(KERN_ERR "dev_alloc_skb() failed during loopback test" | |
4520 | " of %s\n", dev->name); | |
4521 | ret = 0; | |
4522 | goto out; | |
4523 | } | |
8b5be268 ACM |
4524 | test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, |
4525 | skb_tailroom(tx_skb), | |
4526 | PCI_DMA_FROMDEVICE); | |
9589c77a AA |
4527 | pkt_data = skb_put(tx_skb, pkt_len); |
4528 | for (i = 0; i < pkt_len; i++) | |
4529 | pkt_data[i] = (u8)(i & 0xff); | |
9589c77a AA |
4530 | |
4531 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | |
f82a9352 SH |
4532 | np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); |
4533 | np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); | |
9589c77a | 4534 | } else { |
f82a9352 SH |
4535 | np->tx_ring.ex[0].bufhigh = cpu_to_le64(test_dma_addr) >> 32; |
4536 | np->tx_ring.ex[0].buflow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF; | |
4537 | np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); | |
9589c77a AA |
4538 | } |
4539 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | |
4540 | pci_push(get_hwbase(dev)); | |
4541 | ||
4542 | msleep(500); | |
4543 | ||
4544 | /* check for rx of the packet */ | |
4545 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | |
f82a9352 | 4546 | flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); |
9589c77a AA |
4547 | len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); |
4548 | ||
4549 | } else { | |
f82a9352 | 4550 | flags = le32_to_cpu(np->rx_ring.ex[0].flaglen); |
9589c77a AA |
4551 | len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); |
4552 | } | |
4553 | ||
f82a9352 | 4554 | if (flags & NV_RX_AVAIL) { |
9589c77a AA |
4555 | ret = 0; |
4556 | } else if (np->desc_ver == DESC_VER_1) { | |
f82a9352 | 4557 | if (flags & NV_RX_ERROR) |
9589c77a AA |
4558 | ret = 0; |
4559 | } else { | |
f82a9352 | 4560 | if (flags & NV_RX2_ERROR) { |
9589c77a AA |
4561 | ret = 0; |
4562 | } | |
4563 | } | |
4564 | ||
4565 | if (ret) { | |
4566 | if (len != pkt_len) { | |
4567 | ret = 0; | |
4568 | dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n", | |
4569 | dev->name, len, pkt_len); | |
4570 | } else { | |
761fcd9e | 4571 | rx_skb = np->rx_skb[0].skb; |
9589c77a AA |
4572 | for (i = 0; i < pkt_len; i++) { |
4573 | if (rx_skb->data[i] != (u8)(i & 0xff)) { | |
4574 | ret = 0; | |
4575 | dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n", | |
4576 | dev->name, i); | |
4577 | break; | |
4578 | } | |
4579 | } | |
4580 | } | |
4581 | } else { | |
4582 | dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name); | |
4583 | } | |
4584 | ||
4585 | pci_unmap_page(np->pci_dev, test_dma_addr, | |
4305b541 | 4586 | (skb_end_pointer(tx_skb) - tx_skb->data), |
9589c77a AA |
4587 | PCI_DMA_TODEVICE); |
4588 | dev_kfree_skb_any(tx_skb); | |
46798c89 | 4589 | out: |
9589c77a AA |
4590 | /* stop engines */ |
4591 | nv_stop_rx(dev); | |
4592 | nv_stop_tx(dev); | |
4593 | nv_txrx_reset(dev); | |
4594 | /* drain rx queue */ | |
4595 | nv_drain_rx(dev); | |
4596 | nv_drain_tx(dev); | |
4597 | ||
4598 | if (netif_running(dev)) { | |
4599 | writel(misc1_flags, base + NvRegMisc1); | |
4600 | writel(filter_flags, base + NvRegPacketFilterFlags); | |
4601 | nv_enable_irq(dev); | |
4602 | } | |
4603 | ||
4604 | return ret; | |
4605 | } | |
4606 | ||
4607 | static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer) | |
4608 | { | |
4609 | struct fe_priv *np = netdev_priv(dev); | |
4610 | u8 __iomem *base = get_hwbase(dev); | |
4611 | int result; | |
b9f2c044 | 4612 | memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64)); |
9589c77a AA |
4613 | |
4614 | if (!nv_link_test(dev)) { | |
4615 | test->flags |= ETH_TEST_FL_FAILED; | |
4616 | buffer[0] = 1; | |
4617 | } | |
4618 | ||
4619 | if (test->flags & ETH_TEST_FL_OFFLINE) { | |
4620 | if (netif_running(dev)) { | |
4621 | netif_stop_queue(dev); | |
bea3348e SH |
4622 | #ifdef CONFIG_FORCEDETH_NAPI |
4623 | napi_disable(&np->napi); | |
4624 | #endif | |
58dfd9c1 | 4625 | netif_tx_lock_bh(dev); |
9589c77a AA |
4626 | spin_lock_irq(&np->lock); |
4627 | nv_disable_hw_interrupts(dev, np->irqmask); | |
4628 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) { | |
4629 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | |
4630 | } else { | |
4631 | writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); | |
4632 | } | |
4633 | /* stop engines */ | |
4634 | nv_stop_rx(dev); | |
4635 | nv_stop_tx(dev); | |
4636 | nv_txrx_reset(dev); | |
4637 | /* drain rx queue */ | |
4638 | nv_drain_rx(dev); | |
4639 | nv_drain_tx(dev); | |
4640 | spin_unlock_irq(&np->lock); | |
58dfd9c1 | 4641 | netif_tx_unlock_bh(dev); |
9589c77a AA |
4642 | } |
4643 | ||
4644 | if (!nv_register_test(dev)) { | |
4645 | test->flags |= ETH_TEST_FL_FAILED; | |
4646 | buffer[1] = 1; | |
4647 | } | |
4648 | ||
4649 | result = nv_interrupt_test(dev); | |
4650 | if (result != 1) { | |
4651 | test->flags |= ETH_TEST_FL_FAILED; | |
4652 | buffer[2] = 1; | |
4653 | } | |
4654 | if (result == 0) { | |
4655 | /* bail out */ | |
4656 | return; | |
4657 | } | |
4658 | ||
4659 | if (!nv_loopback_test(dev)) { | |
4660 | test->flags |= ETH_TEST_FL_FAILED; | |
4661 | buffer[3] = 1; | |
4662 | } | |
4663 | ||
4664 | if (netif_running(dev)) { | |
4665 | /* reinit driver view of the rx queue */ | |
4666 | set_bufsize(dev); | |
4667 | if (nv_init_ring(dev)) { | |
4668 | if (!np->in_shutdown) | |
4669 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | |
4670 | } | |
4671 | /* reinit nic view of the rx queue */ | |
4672 | writel(np->rx_buf_sz, base + NvRegOffloadConfig); | |
4673 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); | |
4674 | writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), | |
4675 | base + NvRegRingSizes); | |
4676 | pci_push(base); | |
4677 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | |
4678 | pci_push(base); | |
4679 | /* restart rx engine */ | |
4680 | nv_start_rx(dev); | |
4681 | nv_start_tx(dev); | |
4682 | netif_start_queue(dev); | |
bea3348e SH |
4683 | #ifdef CONFIG_FORCEDETH_NAPI |
4684 | napi_enable(&np->napi); | |
4685 | #endif | |
9589c77a AA |
4686 | nv_enable_hw_interrupts(dev, np->irqmask); |
4687 | } | |
4688 | } | |
4689 | } | |
4690 | ||
52da3578 AA |
4691 | static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer) |
4692 | { | |
4693 | switch (stringset) { | |
4694 | case ETH_SS_STATS: | |
b9f2c044 | 4695 | memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str)); |
52da3578 | 4696 | break; |
9589c77a | 4697 | case ETH_SS_TEST: |
b9f2c044 | 4698 | memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str)); |
9589c77a | 4699 | break; |
52da3578 AA |
4700 | } |
4701 | } | |
4702 | ||
7282d491 | 4703 | static const struct ethtool_ops ops = { |
1da177e4 LT |
4704 | .get_drvinfo = nv_get_drvinfo, |
4705 | .get_link = ethtool_op_get_link, | |
4706 | .get_wol = nv_get_wol, | |
4707 | .set_wol = nv_set_wol, | |
4708 | .get_settings = nv_get_settings, | |
4709 | .set_settings = nv_set_settings, | |
dc8216c1 MS |
4710 | .get_regs_len = nv_get_regs_len, |
4711 | .get_regs = nv_get_regs, | |
4712 | .nway_reset = nv_nway_reset, | |
6a78814f | 4713 | .set_tso = nv_set_tso, |
eafa59f6 AA |
4714 | .get_ringparam = nv_get_ringparam, |
4715 | .set_ringparam = nv_set_ringparam, | |
b6d0773f AA |
4716 | .get_pauseparam = nv_get_pauseparam, |
4717 | .set_pauseparam = nv_set_pauseparam, | |
5ed2616f AA |
4718 | .get_rx_csum = nv_get_rx_csum, |
4719 | .set_rx_csum = nv_set_rx_csum, | |
5ed2616f | 4720 | .set_tx_csum = nv_set_tx_csum, |
5ed2616f | 4721 | .set_sg = nv_set_sg, |
52da3578 | 4722 | .get_strings = nv_get_strings, |
52da3578 | 4723 | .get_ethtool_stats = nv_get_ethtool_stats, |
b9f2c044 | 4724 | .get_sset_count = nv_get_sset_count, |
9589c77a | 4725 | .self_test = nv_self_test, |
1da177e4 LT |
4726 | }; |
4727 | ||
ee407b02 AA |
4728 | static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) |
4729 | { | |
4730 | struct fe_priv *np = get_nvpriv(dev); | |
4731 | ||
4732 | spin_lock_irq(&np->lock); | |
4733 | ||
4734 | /* save vlan group */ | |
4735 | np->vlangrp = grp; | |
4736 | ||
4737 | if (grp) { | |
4738 | /* enable vlan on MAC */ | |
4739 | np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS; | |
4740 | } else { | |
4741 | /* disable vlan on MAC */ | |
4742 | np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; | |
4743 | np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; | |
4744 | } | |
4745 | ||
4746 | writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | |
4747 | ||
4748 | spin_unlock_irq(&np->lock); | |
25805dcf | 4749 | } |
ee407b02 | 4750 | |
7e680c22 AA |
4751 | /* The mgmt unit and driver use a semaphore to access the phy during init */ |
4752 | static int nv_mgmt_acquire_sema(struct net_device *dev) | |
4753 | { | |
4754 | u8 __iomem *base = get_hwbase(dev); | |
4755 | int i; | |
4756 | u32 tx_ctrl, mgmt_sema; | |
4757 | ||
4758 | for (i = 0; i < 10; i++) { | |
4759 | mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK; | |
4760 | if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE) | |
4761 | break; | |
4762 | msleep(500); | |
4763 | } | |
4764 | ||
4765 | if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE) | |
4766 | return 0; | |
4767 | ||
4768 | for (i = 0; i < 2; i++) { | |
4769 | tx_ctrl = readl(base + NvRegTransmitterControl); | |
4770 | tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ; | |
4771 | writel(tx_ctrl, base + NvRegTransmitterControl); | |
4772 | ||
4773 | /* verify that semaphore was acquired */ | |
4774 | tx_ctrl = readl(base + NvRegTransmitterControl); | |
4775 | if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) && | |
4776 | ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) | |
4777 | return 1; | |
4778 | else | |
4779 | udelay(50); | |
4780 | } | |
4781 | ||
4782 | return 0; | |
4783 | } | |
4784 | ||
1da177e4 LT |
4785 | static int nv_open(struct net_device *dev) |
4786 | { | |
ac9c1897 | 4787 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 4788 | u8 __iomem *base = get_hwbase(dev); |
d33a73c8 AA |
4789 | int ret = 1; |
4790 | int oom, i; | |
1da177e4 LT |
4791 | |
4792 | dprintk(KERN_DEBUG "nv_open: begin\n"); | |
4793 | ||
f1489653 | 4794 | /* erase previous misconfiguration */ |
86a0f043 AA |
4795 | if (np->driver_data & DEV_HAS_POWER_CNTRL) |
4796 | nv_mac_reset(dev); | |
1da177e4 LT |
4797 | writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); |
4798 | writel(0, base + NvRegMulticastAddrB); | |
4799 | writel(0, base + NvRegMulticastMaskA); | |
4800 | writel(0, base + NvRegMulticastMaskB); | |
4801 | writel(0, base + NvRegPacketFilterFlags); | |
4802 | ||
4803 | writel(0, base + NvRegTransmitterControl); | |
4804 | writel(0, base + NvRegReceiverControl); | |
4805 | ||
4806 | writel(0, base + NvRegAdapterControl); | |
4807 | ||
eb91f61b AA |
4808 | if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) |
4809 | writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); | |
4810 | ||
f1489653 | 4811 | /* initialize descriptor rings */ |
d81c0983 | 4812 | set_bufsize(dev); |
1da177e4 LT |
4813 | oom = nv_init_ring(dev); |
4814 | ||
4815 | writel(0, base + NvRegLinkSpeed); | |
5070d340 | 4816 | writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); |
1da177e4 LT |
4817 | nv_txrx_reset(dev); |
4818 | writel(0, base + NvRegUnknownSetupReg6); | |
4819 | ||
4820 | np->in_shutdown = 0; | |
4821 | ||
f1489653 | 4822 | /* give hw rings */ |
0832b25a | 4823 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); |
eafa59f6 | 4824 | writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), |
1da177e4 LT |
4825 | base + NvRegRingSizes); |
4826 | ||
1da177e4 | 4827 | writel(np->linkspeed, base + NvRegLinkSpeed); |
95d161cb AA |
4828 | if (np->desc_ver == DESC_VER_1) |
4829 | writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark); | |
4830 | else | |
4831 | writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark); | |
8a4ae7f2 | 4832 | writel(np->txrxctl_bits, base + NvRegTxRxControl); |
ee407b02 | 4833 | writel(np->vlanctl_bits, base + NvRegVlanControl); |
1da177e4 | 4834 | pci_push(base); |
8a4ae7f2 | 4835 | writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); |
1da177e4 LT |
4836 | reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, |
4837 | NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX, | |
4838 | KERN_INFO "open: SetupReg5, Bit 31 remained off\n"); | |
4839 | ||
7e680c22 | 4840 | writel(0, base + NvRegMIIMask); |
1da177e4 LT |
4841 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); |
4842 | writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); | |
4843 | ||
1da177e4 LT |
4844 | writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); |
4845 | writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); | |
4846 | writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); | |
d81c0983 | 4847 | writel(np->rx_buf_sz, base + NvRegOffloadConfig); |
1da177e4 LT |
4848 | |
4849 | writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); | |
4850 | get_random_bytes(&i, sizeof(i)); | |
4851 | writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed); | |
9744e218 AA |
4852 | writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral); |
4853 | writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral); | |
a971c324 AA |
4854 | if (poll_interval == -1) { |
4855 | if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) | |
4856 | writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); | |
4857 | else | |
4858 | writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); | |
4859 | } | |
4860 | else | |
4861 | writel(poll_interval & 0xFFFF, base + NvRegPollingInterval); | |
1da177e4 LT |
4862 | writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); |
4863 | writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, | |
4864 | base + NvRegAdapterControl); | |
4865 | writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); | |
7e680c22 | 4866 | writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask); |
c42d9df9 AA |
4867 | if (np->wolenabled) |
4868 | writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags); | |
1da177e4 LT |
4869 | |
4870 | i = readl(base + NvRegPowerState); | |
4871 | if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0) | |
4872 | writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState); | |
4873 | ||
4874 | pci_push(base); | |
4875 | udelay(10); | |
4876 | writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState); | |
4877 | ||
84b3932b | 4878 | nv_disable_hw_interrupts(dev, np->irqmask); |
1da177e4 LT |
4879 | pci_push(base); |
4880 | writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); | |
4881 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | |
4882 | pci_push(base); | |
4883 | ||
9589c77a | 4884 | if (nv_request_irq(dev, 0)) { |
84b3932b | 4885 | goto out_drain; |
d33a73c8 | 4886 | } |
1da177e4 LT |
4887 | |
4888 | /* ask for interrupts */ | |
84b3932b | 4889 | nv_enable_hw_interrupts(dev, np->irqmask); |
1da177e4 LT |
4890 | |
4891 | spin_lock_irq(&np->lock); | |
4892 | writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); | |
4893 | writel(0, base + NvRegMulticastAddrB); | |
4894 | writel(0, base + NvRegMulticastMaskA); | |
4895 | writel(0, base + NvRegMulticastMaskB); | |
4896 | writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); | |
4897 | /* One manual link speed update: Interrupts are enabled, future link | |
4898 | * speed changes cause interrupts and are handled by nv_link_irq(). | |
4899 | */ | |
4900 | { | |
4901 | u32 miistat; | |
4902 | miistat = readl(base + NvRegMIIStatus); | |
4903 | writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); | |
4904 | dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat); | |
4905 | } | |
1b1b3c9b MS |
4906 | /* set linkspeed to invalid value, thus force nv_update_linkspeed |
4907 | * to init hw */ | |
4908 | np->linkspeed = 0; | |
1da177e4 LT |
4909 | ret = nv_update_linkspeed(dev); |
4910 | nv_start_rx(dev); | |
4911 | nv_start_tx(dev); | |
4912 | netif_start_queue(dev); | |
bea3348e SH |
4913 | #ifdef CONFIG_FORCEDETH_NAPI |
4914 | napi_enable(&np->napi); | |
4915 | #endif | |
e27cdba5 | 4916 | |
1da177e4 LT |
4917 | if (ret) { |
4918 | netif_carrier_on(dev); | |
4919 | } else { | |
f7ab697d | 4920 | printk(KERN_INFO "%s: no link during initialization.\n", dev->name); |
1da177e4 LT |
4921 | netif_carrier_off(dev); |
4922 | } | |
4923 | if (oom) | |
4924 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | |
52da3578 AA |
4925 | |
4926 | /* start statistics timer */ | |
57fff698 | 4927 | if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) |
52da3578 AA |
4928 | mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); |
4929 | ||
1da177e4 LT |
4930 | spin_unlock_irq(&np->lock); |
4931 | ||
4932 | return 0; | |
4933 | out_drain: | |
4934 | drain_ring(dev); | |
4935 | return ret; | |
4936 | } | |
4937 | ||
4938 | static int nv_close(struct net_device *dev) | |
4939 | { | |
ac9c1897 | 4940 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 LT |
4941 | u8 __iomem *base; |
4942 | ||
4943 | spin_lock_irq(&np->lock); | |
4944 | np->in_shutdown = 1; | |
4945 | spin_unlock_irq(&np->lock); | |
bea3348e SH |
4946 | #ifdef CONFIG_FORCEDETH_NAPI |
4947 | napi_disable(&np->napi); | |
4948 | #endif | |
1da177e4 LT |
4949 | synchronize_irq(dev->irq); |
4950 | ||
4951 | del_timer_sync(&np->oom_kick); | |
4952 | del_timer_sync(&np->nic_poll); | |
52da3578 | 4953 | del_timer_sync(&np->stats_poll); |
1da177e4 LT |
4954 | |
4955 | netif_stop_queue(dev); | |
4956 | spin_lock_irq(&np->lock); | |
4957 | nv_stop_tx(dev); | |
4958 | nv_stop_rx(dev); | |
4959 | nv_txrx_reset(dev); | |
4960 | ||
4961 | /* disable interrupts on the nic or we will lock up */ | |
4962 | base = get_hwbase(dev); | |
84b3932b | 4963 | nv_disable_hw_interrupts(dev, np->irqmask); |
1da177e4 LT |
4964 | pci_push(base); |
4965 | dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); | |
4966 | ||
4967 | spin_unlock_irq(&np->lock); | |
4968 | ||
84b3932b | 4969 | nv_free_irq(dev); |
1da177e4 LT |
4970 | |
4971 | drain_ring(dev); | |
4972 | ||
2cc49a5c TM |
4973 | if (np->wolenabled) { |
4974 | writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); | |
1da177e4 | 4975 | nv_start_rx(dev); |
2cc49a5c | 4976 | } |
1da177e4 LT |
4977 | |
4978 | /* FIXME: power down nic */ | |
4979 | ||
4980 | return 0; | |
4981 | } | |
4982 | ||
4983 | static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) | |
4984 | { | |
4985 | struct net_device *dev; | |
4986 | struct fe_priv *np; | |
4987 | unsigned long addr; | |
4988 | u8 __iomem *base; | |
4989 | int err, i; | |
5070d340 | 4990 | u32 powerstate, txreg; |
7e680c22 AA |
4991 | u32 phystate_orig = 0, phystate; |
4992 | int phyinitialized = 0; | |
0795af57 | 4993 | DECLARE_MAC_BUF(mac); |
1da177e4 LT |
4994 | |
4995 | dev = alloc_etherdev(sizeof(struct fe_priv)); | |
4996 | err = -ENOMEM; | |
4997 | if (!dev) | |
4998 | goto out; | |
4999 | ||
ac9c1897 | 5000 | np = netdev_priv(dev); |
bea3348e | 5001 | np->dev = dev; |
1da177e4 LT |
5002 | np->pci_dev = pci_dev; |
5003 | spin_lock_init(&np->lock); | |
1da177e4 LT |
5004 | SET_NETDEV_DEV(dev, &pci_dev->dev); |
5005 | ||
5006 | init_timer(&np->oom_kick); | |
5007 | np->oom_kick.data = (unsigned long) dev; | |
5008 | np->oom_kick.function = &nv_do_rx_refill; /* timer handler */ | |
5009 | init_timer(&np->nic_poll); | |
5010 | np->nic_poll.data = (unsigned long) dev; | |
5011 | np->nic_poll.function = &nv_do_nic_poll; /* timer handler */ | |
52da3578 AA |
5012 | init_timer(&np->stats_poll); |
5013 | np->stats_poll.data = (unsigned long) dev; | |
5014 | np->stats_poll.function = &nv_do_stats_poll; /* timer handler */ | |
1da177e4 LT |
5015 | |
5016 | err = pci_enable_device(pci_dev); | |
5017 | if (err) { | |
5018 | printk(KERN_INFO "forcedeth: pci_enable_dev failed (%d) for device %s\n", | |
5019 | err, pci_name(pci_dev)); | |
5020 | goto out_free; | |
5021 | } | |
5022 | ||
5023 | pci_set_master(pci_dev); | |
5024 | ||
5025 | err = pci_request_regions(pci_dev, DRV_NAME); | |
5026 | if (err < 0) | |
5027 | goto out_disable; | |
5028 | ||
57fff698 AA |
5029 | if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2)) |
5030 | np->register_size = NV_PCI_REGSZ_VER3; | |
5031 | else if (id->driver_data & DEV_HAS_STATISTICS_V1) | |
86a0f043 AA |
5032 | np->register_size = NV_PCI_REGSZ_VER2; |
5033 | else | |
5034 | np->register_size = NV_PCI_REGSZ_VER1; | |
5035 | ||
1da177e4 LT |
5036 | err = -EINVAL; |
5037 | addr = 0; | |
5038 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | |
5039 | dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n", | |
5040 | pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i), | |
5041 | pci_resource_len(pci_dev, i), | |
5042 | pci_resource_flags(pci_dev, i)); | |
5043 | if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM && | |
86a0f043 | 5044 | pci_resource_len(pci_dev, i) >= np->register_size) { |
1da177e4 LT |
5045 | addr = pci_resource_start(pci_dev, i); |
5046 | break; | |
5047 | } | |
5048 | } | |
5049 | if (i == DEVICE_COUNT_RESOURCE) { | |
5050 | printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n", | |
5051 | pci_name(pci_dev)); | |
5052 | goto out_relreg; | |
5053 | } | |
5054 | ||
86a0f043 AA |
5055 | /* copy of driver data */ |
5056 | np->driver_data = id->driver_data; | |
5057 | ||
1da177e4 | 5058 | /* handle different descriptor versions */ |
ee73362c MS |
5059 | if (id->driver_data & DEV_HAS_HIGH_DMA) { |
5060 | /* packet format 3: supports 40-bit addressing */ | |
5061 | np->desc_ver = DESC_VER_3; | |
84b3932b | 5062 | np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; |
69fe3fd7 AA |
5063 | if (dma_64bit) { |
5064 | if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) { | |
5065 | printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", | |
5066 | pci_name(pci_dev)); | |
5067 | } else { | |
5068 | dev->features |= NETIF_F_HIGHDMA; | |
5069 | printk(KERN_INFO "forcedeth: using HIGHDMA\n"); | |
5070 | } | |
5071 | if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) { | |
5072 | printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed, using 32-bit ring buffers for device %s.\n", | |
5073 | pci_name(pci_dev)); | |
5074 | } | |
ee73362c MS |
5075 | } |
5076 | } else if (id->driver_data & DEV_HAS_LARGEDESC) { | |
5077 | /* packet format 2: supports jumbo frames */ | |
1da177e4 | 5078 | np->desc_ver = DESC_VER_2; |
8a4ae7f2 | 5079 | np->txrxctl_bits = NVREG_TXRXCTL_DESC_2; |
ee73362c MS |
5080 | } else { |
5081 | /* original packet format */ | |
5082 | np->desc_ver = DESC_VER_1; | |
8a4ae7f2 | 5083 | np->txrxctl_bits = NVREG_TXRXCTL_DESC_1; |
d81c0983 | 5084 | } |
ee73362c MS |
5085 | |
5086 | np->pkt_limit = NV_PKTLIMIT_1; | |
5087 | if (id->driver_data & DEV_HAS_LARGEDESC) | |
5088 | np->pkt_limit = NV_PKTLIMIT_2; | |
5089 | ||
8a4ae7f2 | 5090 | if (id->driver_data & DEV_HAS_CHECKSUM) { |
f2ad2d9b | 5091 | np->rx_csum = 1; |
8a4ae7f2 | 5092 | np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; |
ac9c1897 | 5093 | dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; |
fa45459e | 5094 | dev->features |= NETIF_F_TSO; |
21828163 | 5095 | } |
8a4ae7f2 | 5096 | |
ee407b02 AA |
5097 | np->vlanctl_bits = 0; |
5098 | if (id->driver_data & DEV_HAS_VLAN) { | |
5099 | np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; | |
5100 | dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX; | |
5101 | dev->vlan_rx_register = nv_vlan_rx_register; | |
ee407b02 AA |
5102 | } |
5103 | ||
d33a73c8 | 5104 | np->msi_flags = 0; |
69fe3fd7 | 5105 | if ((id->driver_data & DEV_HAS_MSI) && msi) { |
d33a73c8 AA |
5106 | np->msi_flags |= NV_MSI_CAPABLE; |
5107 | } | |
69fe3fd7 | 5108 | if ((id->driver_data & DEV_HAS_MSI_X) && msix) { |
d33a73c8 AA |
5109 | np->msi_flags |= NV_MSI_X_CAPABLE; |
5110 | } | |
5111 | ||
b6d0773f | 5112 | np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; |
eb91f61b | 5113 | if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) { |
b6d0773f | 5114 | np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ; |
eb91f61b | 5115 | } |
f3b197ac | 5116 | |
eb91f61b | 5117 | |
1da177e4 | 5118 | err = -ENOMEM; |
86a0f043 | 5119 | np->base = ioremap(addr, np->register_size); |
1da177e4 LT |
5120 | if (!np->base) |
5121 | goto out_relreg; | |
5122 | dev->base_addr = (unsigned long)np->base; | |
ee73362c | 5123 | |
1da177e4 | 5124 | dev->irq = pci_dev->irq; |
ee73362c | 5125 | |
eafa59f6 AA |
5126 | np->rx_ring_size = RX_RING_DEFAULT; |
5127 | np->tx_ring_size = TX_RING_DEFAULT; | |
eafa59f6 | 5128 | |
ee73362c MS |
5129 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
5130 | np->rx_ring.orig = pci_alloc_consistent(pci_dev, | |
eafa59f6 | 5131 | sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), |
ee73362c MS |
5132 | &np->ring_addr); |
5133 | if (!np->rx_ring.orig) | |
5134 | goto out_unmap; | |
eafa59f6 | 5135 | np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; |
ee73362c MS |
5136 | } else { |
5137 | np->rx_ring.ex = pci_alloc_consistent(pci_dev, | |
eafa59f6 | 5138 | sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), |
ee73362c MS |
5139 | &np->ring_addr); |
5140 | if (!np->rx_ring.ex) | |
5141 | goto out_unmap; | |
eafa59f6 AA |
5142 | np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; |
5143 | } | |
dd00cc48 YP |
5144 | np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); |
5145 | np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); | |
761fcd9e | 5146 | if (!np->rx_skb || !np->tx_skb) |
eafa59f6 | 5147 | goto out_freering; |
1da177e4 LT |
5148 | |
5149 | dev->open = nv_open; | |
5150 | dev->stop = nv_close; | |
86b22b0d AA |
5151 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
5152 | dev->hard_start_xmit = nv_start_xmit; | |
5153 | else | |
5154 | dev->hard_start_xmit = nv_start_xmit_optimized; | |
1da177e4 LT |
5155 | dev->get_stats = nv_get_stats; |
5156 | dev->change_mtu = nv_change_mtu; | |
72b31782 | 5157 | dev->set_mac_address = nv_set_mac_address; |
1da177e4 | 5158 | dev->set_multicast_list = nv_set_multicast; |
2918c35d MS |
5159 | #ifdef CONFIG_NET_POLL_CONTROLLER |
5160 | dev->poll_controller = nv_poll_controller; | |
e27cdba5 | 5161 | #endif |
e27cdba5 | 5162 | #ifdef CONFIG_FORCEDETH_NAPI |
bea3348e | 5163 | netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); |
2918c35d | 5164 | #endif |
1da177e4 LT |
5165 | SET_ETHTOOL_OPS(dev, &ops); |
5166 | dev->tx_timeout = nv_tx_timeout; | |
5167 | dev->watchdog_timeo = NV_WATCHDOG_TIMEO; | |
5168 | ||
5169 | pci_set_drvdata(pci_dev, dev); | |
5170 | ||
5171 | /* read the mac address */ | |
5172 | base = get_hwbase(dev); | |
5173 | np->orig_mac[0] = readl(base + NvRegMacAddrA); | |
5174 | np->orig_mac[1] = readl(base + NvRegMacAddrB); | |
5175 | ||
5070d340 AA |
5176 | /* check the workaround bit for correct mac address order */ |
5177 | txreg = readl(base + NvRegTransmitPoll); | |
ef756b3e AA |
5178 | if ((txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) || |
5179 | (id->driver_data & DEV_HAS_CORRECT_MACADDR)) { | |
5070d340 AA |
5180 | /* mac address is already in correct order */ |
5181 | dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; | |
5182 | dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; | |
5183 | dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; | |
5184 | dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; | |
5185 | dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; | |
5186 | dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; | |
5187 | } else { | |
5188 | /* need to reverse mac address to correct order */ | |
5189 | dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff; | |
5190 | dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff; | |
5191 | dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff; | |
5192 | dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; | |
5193 | dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; | |
5194 | dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; | |
5195 | /* set permanent address to be correct aswell */ | |
5196 | np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + | |
5197 | (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); | |
5198 | np->orig_mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); | |
5199 | writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); | |
5200 | } | |
c704b856 | 5201 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); |
1da177e4 | 5202 | |
c704b856 | 5203 | if (!is_valid_ether_addr(dev->perm_addr)) { |
1da177e4 LT |
5204 | /* |
5205 | * Bad mac address. At least one bios sets the mac address | |
5206 | * to 01:23:45:67:89:ab | |
5207 | */ | |
0795af57 JP |
5208 | printk(KERN_ERR "%s: Invalid Mac address detected: %s\n", |
5209 | pci_name(pci_dev), print_mac(mac, dev->dev_addr)); | |
1da177e4 LT |
5210 | printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n"); |
5211 | dev->dev_addr[0] = 0x00; | |
5212 | dev->dev_addr[1] = 0x00; | |
5213 | dev->dev_addr[2] = 0x6c; | |
5214 | get_random_bytes(&dev->dev_addr[3], 3); | |
5215 | } | |
5216 | ||
0795af57 JP |
5217 | dprintk(KERN_DEBUG "%s: MAC Address %s\n", |
5218 | pci_name(pci_dev), print_mac(mac, dev->dev_addr)); | |
1da177e4 | 5219 | |
f1489653 AA |
5220 | /* set mac address */ |
5221 | nv_copy_mac_to_hw(dev); | |
5222 | ||
1da177e4 LT |
5223 | /* disable WOL */ |
5224 | writel(0, base + NvRegWakeUpFlags); | |
5225 | np->wolenabled = 0; | |
5226 | ||
86a0f043 | 5227 | if (id->driver_data & DEV_HAS_POWER_CNTRL) { |
86a0f043 AA |
5228 | |
5229 | /* take phy and nic out of low power mode */ | |
5230 | powerstate = readl(base + NvRegPowerState2); | |
5231 | powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK; | |
5232 | if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 || | |
5233 | id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) && | |
44c10138 | 5234 | pci_dev->revision >= 0xA3) |
86a0f043 AA |
5235 | powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3; |
5236 | writel(powerstate, base + NvRegPowerState2); | |
5237 | } | |
5238 | ||
1da177e4 | 5239 | if (np->desc_ver == DESC_VER_1) { |
ac9c1897 | 5240 | np->tx_flags = NV_TX_VALID; |
1da177e4 | 5241 | } else { |
ac9c1897 | 5242 | np->tx_flags = NV_TX2_VALID; |
1da177e4 | 5243 | } |
d33a73c8 | 5244 | if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) { |
a971c324 | 5245 | np->irqmask = NVREG_IRQMASK_THROUGHPUT; |
d33a73c8 AA |
5246 | if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ |
5247 | np->msi_flags |= 0x0003; | |
5248 | } else { | |
a971c324 | 5249 | np->irqmask = NVREG_IRQMASK_CPU; |
d33a73c8 AA |
5250 | if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ |
5251 | np->msi_flags |= 0x0001; | |
5252 | } | |
a971c324 | 5253 | |
1da177e4 LT |
5254 | if (id->driver_data & DEV_NEED_TIMERIRQ) |
5255 | np->irqmask |= NVREG_IRQ_TIMER; | |
5256 | if (id->driver_data & DEV_NEED_LINKTIMER) { | |
5257 | dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev)); | |
5258 | np->need_linktimer = 1; | |
5259 | np->link_timeout = jiffies + LINK_TIMEOUT; | |
5260 | } else { | |
5261 | dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev)); | |
5262 | np->need_linktimer = 0; | |
5263 | } | |
5264 | ||
7e680c22 AA |
5265 | /* clear phy state and temporarily halt phy interrupts */ |
5266 | writel(0, base + NvRegMIIMask); | |
5267 | phystate = readl(base + NvRegAdapterControl); | |
5268 | if (phystate & NVREG_ADAPTCTL_RUNNING) { | |
5269 | phystate_orig = 1; | |
5270 | phystate &= ~NVREG_ADAPTCTL_RUNNING; | |
5271 | writel(phystate, base + NvRegAdapterControl); | |
5272 | } | |
5273 | writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); | |
5274 | ||
5275 | if (id->driver_data & DEV_HAS_MGMT_UNIT) { | |
7e680c22 | 5276 | /* management unit running on the mac? */ |
f35723ec AA |
5277 | if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) { |
5278 | np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST; | |
5279 | dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use); | |
5280 | for (i = 0; i < 5000; i++) { | |
5281 | msleep(1); | |
5282 | if (nv_mgmt_acquire_sema(dev)) { | |
5283 | /* management unit setup the phy already? */ | |
5284 | if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) == | |
5285 | NVREG_XMITCTL_SYNC_PHY_INIT) { | |
5286 | /* phy is inited by mgmt unit */ | |
5287 | phyinitialized = 1; | |
5288 | dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev)); | |
5289 | } else { | |
5290 | /* we need to init the phy */ | |
7e680c22 | 5291 | } |
f35723ec | 5292 | break; |
7e680c22 | 5293 | } |
7e680c22 AA |
5294 | } |
5295 | } | |
5296 | } | |
5297 | ||
1da177e4 | 5298 | /* find a suitable phy */ |
7a33e45a | 5299 | for (i = 1; i <= 32; i++) { |
1da177e4 | 5300 | int id1, id2; |
7a33e45a | 5301 | int phyaddr = i & 0x1F; |
1da177e4 LT |
5302 | |
5303 | spin_lock_irq(&np->lock); | |
7a33e45a | 5304 | id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ); |
1da177e4 LT |
5305 | spin_unlock_irq(&np->lock); |
5306 | if (id1 < 0 || id1 == 0xffff) | |
5307 | continue; | |
5308 | spin_lock_irq(&np->lock); | |
7a33e45a | 5309 | id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ); |
1da177e4 LT |
5310 | spin_unlock_irq(&np->lock); |
5311 | if (id2 < 0 || id2 == 0xffff) | |
5312 | continue; | |
5313 | ||
edf7e5ec | 5314 | np->phy_model = id2 & PHYID2_MODEL_MASK; |
1da177e4 LT |
5315 | id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT; |
5316 | id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT; | |
5317 | dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n", | |
7a33e45a AA |
5318 | pci_name(pci_dev), id1, id2, phyaddr); |
5319 | np->phyaddr = phyaddr; | |
1da177e4 LT |
5320 | np->phy_oui = id1 | id2; |
5321 | break; | |
5322 | } | |
7a33e45a | 5323 | if (i == 33) { |
1da177e4 | 5324 | printk(KERN_INFO "%s: open: Could not find a valid PHY.\n", |
7a33e45a | 5325 | pci_name(pci_dev)); |
eafa59f6 | 5326 | goto out_error; |
1da177e4 | 5327 | } |
f3b197ac | 5328 | |
7e680c22 AA |
5329 | if (!phyinitialized) { |
5330 | /* reset it */ | |
5331 | phy_init(dev); | |
f35723ec AA |
5332 | } else { |
5333 | /* see if it is a gigabit phy */ | |
5334 | u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); | |
5335 | if (mii_status & PHY_GIGABIT) { | |
5336 | np->gigabit = PHY_GIGABIT; | |
5337 | } | |
7e680c22 | 5338 | } |
1da177e4 LT |
5339 | |
5340 | /* set default link speed settings */ | |
5341 | np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; | |
5342 | np->duplex = 0; | |
5343 | np->autoneg = 1; | |
5344 | ||
5345 | err = register_netdev(dev); | |
5346 | if (err) { | |
5347 | printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err); | |
eafa59f6 | 5348 | goto out_error; |
1da177e4 LT |
5349 | } |
5350 | printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n", | |
5351 | dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device, | |
5352 | pci_name(pci_dev)); | |
5353 | ||
5354 | return 0; | |
5355 | ||
eafa59f6 | 5356 | out_error: |
7e680c22 AA |
5357 | if (phystate_orig) |
5358 | writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl); | |
1da177e4 | 5359 | pci_set_drvdata(pci_dev, NULL); |
eafa59f6 AA |
5360 | out_freering: |
5361 | free_rings(dev); | |
1da177e4 LT |
5362 | out_unmap: |
5363 | iounmap(get_hwbase(dev)); | |
5364 | out_relreg: | |
5365 | pci_release_regions(pci_dev); | |
5366 | out_disable: | |
5367 | pci_disable_device(pci_dev); | |
5368 | out_free: | |
5369 | free_netdev(dev); | |
5370 | out: | |
5371 | return err; | |
5372 | } | |
5373 | ||
5374 | static void __devexit nv_remove(struct pci_dev *pci_dev) | |
5375 | { | |
5376 | struct net_device *dev = pci_get_drvdata(pci_dev); | |
f1489653 AA |
5377 | struct fe_priv *np = netdev_priv(dev); |
5378 | u8 __iomem *base = get_hwbase(dev); | |
1da177e4 LT |
5379 | |
5380 | unregister_netdev(dev); | |
5381 | ||
f1489653 AA |
5382 | /* special op: write back the misordered MAC address - otherwise |
5383 | * the next nv_probe would see a wrong address. | |
5384 | */ | |
5385 | writel(np->orig_mac[0], base + NvRegMacAddrA); | |
5386 | writel(np->orig_mac[1], base + NvRegMacAddrB); | |
5387 | ||
1da177e4 | 5388 | /* free all structures */ |
eafa59f6 | 5389 | free_rings(dev); |
1da177e4 LT |
5390 | iounmap(get_hwbase(dev)); |
5391 | pci_release_regions(pci_dev); | |
5392 | pci_disable_device(pci_dev); | |
5393 | free_netdev(dev); | |
5394 | pci_set_drvdata(pci_dev, NULL); | |
5395 | } | |
5396 | ||
a189317f FR |
5397 | #ifdef CONFIG_PM |
5398 | static int nv_suspend(struct pci_dev *pdev, pm_message_t state) | |
5399 | { | |
5400 | struct net_device *dev = pci_get_drvdata(pdev); | |
5401 | struct fe_priv *np = netdev_priv(dev); | |
5402 | ||
5403 | if (!netif_running(dev)) | |
5404 | goto out; | |
5405 | ||
5406 | netif_device_detach(dev); | |
5407 | ||
5408 | // Gross. | |
5409 | nv_close(dev); | |
5410 | ||
5411 | pci_save_state(pdev); | |
5412 | pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled); | |
5413 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | |
5414 | out: | |
5415 | return 0; | |
5416 | } | |
5417 | ||
5418 | static int nv_resume(struct pci_dev *pdev) | |
5419 | { | |
5420 | struct net_device *dev = pci_get_drvdata(pdev); | |
5421 | int rc = 0; | |
5422 | ||
5423 | if (!netif_running(dev)) | |
5424 | goto out; | |
5425 | ||
5426 | netif_device_attach(dev); | |
5427 | ||
5428 | pci_set_power_state(pdev, PCI_D0); | |
5429 | pci_restore_state(pdev); | |
5430 | pci_enable_wake(pdev, PCI_D0, 0); | |
5431 | ||
5432 | rc = nv_open(dev); | |
5433 | out: | |
5434 | return rc; | |
5435 | } | |
5436 | #else | |
5437 | #define nv_suspend NULL | |
5438 | #define nv_resume NULL | |
5439 | #endif /* CONFIG_PM */ | |
5440 | ||
1da177e4 LT |
5441 | static struct pci_device_id pci_tbl[] = { |
5442 | { /* nForce Ethernet Controller */ | |
dc8216c1 | 5443 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1), |
c2dba06d | 5444 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, |
1da177e4 LT |
5445 | }, |
5446 | { /* nForce2 Ethernet Controller */ | |
dc8216c1 | 5447 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2), |
c2dba06d | 5448 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, |
1da177e4 LT |
5449 | }, |
5450 | { /* nForce3 Ethernet Controller */ | |
dc8216c1 | 5451 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3), |
c2dba06d | 5452 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, |
1da177e4 LT |
5453 | }, |
5454 | { /* nForce3 Ethernet Controller */ | |
dc8216c1 | 5455 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4), |
8a4ae7f2 | 5456 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, |
1da177e4 LT |
5457 | }, |
5458 | { /* nForce3 Ethernet Controller */ | |
dc8216c1 | 5459 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5), |
8a4ae7f2 | 5460 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, |
1da177e4 LT |
5461 | }, |
5462 | { /* nForce3 Ethernet Controller */ | |
dc8216c1 | 5463 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6), |
8a4ae7f2 | 5464 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, |
1da177e4 LT |
5465 | }, |
5466 | { /* nForce3 Ethernet Controller */ | |
dc8216c1 | 5467 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7), |
8a4ae7f2 | 5468 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, |
1da177e4 LT |
5469 | }, |
5470 | { /* CK804 Ethernet Controller */ | |
dc8216c1 | 5471 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8), |
57fff698 | 5472 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1, |
1da177e4 LT |
5473 | }, |
5474 | { /* CK804 Ethernet Controller */ | |
dc8216c1 | 5475 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9), |
57fff698 | 5476 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1, |
1da177e4 LT |
5477 | }, |
5478 | { /* MCP04 Ethernet Controller */ | |
dc8216c1 | 5479 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10), |
57fff698 | 5480 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1, |
1da177e4 LT |
5481 | }, |
5482 | { /* MCP04 Ethernet Controller */ | |
dc8216c1 | 5483 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11), |
57fff698 | 5484 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1, |
1da177e4 | 5485 | }, |
9992d4aa | 5486 | { /* MCP51 Ethernet Controller */ |
dc8216c1 | 5487 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12), |
57fff698 | 5488 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1, |
9992d4aa MS |
5489 | }, |
5490 | { /* MCP51 Ethernet Controller */ | |
dc8216c1 | 5491 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13), |
57fff698 | 5492 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1, |
9992d4aa | 5493 | }, |
f49d16ef | 5494 | { /* MCP55 Ethernet Controller */ |
dc8216c1 | 5495 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), |
57fff698 | 5496 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
f49d16ef MS |
5497 | }, |
5498 | { /* MCP55 Ethernet Controller */ | |
dc8216c1 | 5499 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), |
57fff698 | 5500 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
f49d16ef | 5501 | }, |
c99ce7ee AA |
5502 | { /* MCP61 Ethernet Controller */ |
5503 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16), | |
ef756b3e | 5504 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
c99ce7ee AA |
5505 | }, |
5506 | { /* MCP61 Ethernet Controller */ | |
5507 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17), | |
ef756b3e | 5508 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
c99ce7ee AA |
5509 | }, |
5510 | { /* MCP61 Ethernet Controller */ | |
5511 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18), | |
ef756b3e | 5512 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
c99ce7ee AA |
5513 | }, |
5514 | { /* MCP61 Ethernet Controller */ | |
5515 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19), | |
ef756b3e | 5516 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
c99ce7ee AA |
5517 | }, |
5518 | { /* MCP65 Ethernet Controller */ | |
5519 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20), | |
ef756b3e | 5520 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
c99ce7ee AA |
5521 | }, |
5522 | { /* MCP65 Ethernet Controller */ | |
5523 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21), | |
ef756b3e | 5524 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
c99ce7ee AA |
5525 | }, |
5526 | { /* MCP65 Ethernet Controller */ | |
5527 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22), | |
ef756b3e | 5528 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
c99ce7ee AA |
5529 | }, |
5530 | { /* MCP65 Ethernet Controller */ | |
5531 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23), | |
ef756b3e | 5532 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
c99ce7ee | 5533 | }, |
f4344848 AA |
5534 | { /* MCP67 Ethernet Controller */ |
5535 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24), | |
ef756b3e | 5536 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
f4344848 AA |
5537 | }, |
5538 | { /* MCP67 Ethernet Controller */ | |
5539 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25), | |
ef756b3e | 5540 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
f4344848 AA |
5541 | }, |
5542 | { /* MCP67 Ethernet Controller */ | |
5543 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26), | |
ef756b3e | 5544 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
f4344848 AA |
5545 | }, |
5546 | { /* MCP67 Ethernet Controller */ | |
5547 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27), | |
ef756b3e | 5548 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
f4344848 | 5549 | }, |
1398661b AA |
5550 | { /* MCP73 Ethernet Controller */ |
5551 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28), | |
ef756b3e | 5552 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
1398661b AA |
5553 | }, |
5554 | { /* MCP73 Ethernet Controller */ | |
5555 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29), | |
ef756b3e | 5556 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
1398661b AA |
5557 | }, |
5558 | { /* MCP73 Ethernet Controller */ | |
5559 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30), | |
ef756b3e | 5560 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
1398661b AA |
5561 | }, |
5562 | { /* MCP73 Ethernet Controller */ | |
5563 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31), | |
ef756b3e | 5564 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
1398661b | 5565 | }, |
1da177e4 LT |
5566 | {0,}, |
5567 | }; | |
5568 | ||
5569 | static struct pci_driver driver = { | |
5570 | .name = "forcedeth", | |
5571 | .id_table = pci_tbl, | |
5572 | .probe = nv_probe, | |
5573 | .remove = __devexit_p(nv_remove), | |
a189317f FR |
5574 | .suspend = nv_suspend, |
5575 | .resume = nv_resume, | |
1da177e4 LT |
5576 | }; |
5577 | ||
1da177e4 LT |
5578 | static int __init init_nic(void) |
5579 | { | |
5580 | printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION); | |
29917620 | 5581 | return pci_register_driver(&driver); |
1da177e4 LT |
5582 | } |
5583 | ||
5584 | static void __exit exit_nic(void) | |
5585 | { | |
5586 | pci_unregister_driver(&driver); | |
5587 | } | |
5588 | ||
5589 | module_param(max_interrupt_work, int, 0); | |
5590 | MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt"); | |
a971c324 AA |
5591 | module_param(optimization_mode, int, 0); |
5592 | MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer."); | |
5593 | module_param(poll_interval, int, 0); | |
5594 | MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535."); | |
69fe3fd7 AA |
5595 | module_param(msi, int, 0); |
5596 | MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0."); | |
5597 | module_param(msix, int, 0); | |
5598 | MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0."); | |
5599 | module_param(dma_64bit, int, 0); | |
5600 | MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0."); | |
1da177e4 LT |
5601 | |
5602 | MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>"); | |
5603 | MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); | |
5604 | MODULE_LICENSE("GPL"); | |
5605 | ||
5606 | MODULE_DEVICE_TABLE(pci, pci_tbl); | |
5607 | ||
5608 | module_init(init_nic); | |
5609 | module_exit(exit_nic); |