/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey. It's neither supported nor endorsed
 * by NVIDIA Corp. Use at your own risk.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * 0.01: 05 Oct 2003: First release that compiles without warnings.
 * 0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
 *			Check all PCI BARs for the register window.
 *			udelay added to mii_rw.
 * 0.03: 06 Oct 2003: Initialize dev->irq.
 * 0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
 * 0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
 * 0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
 * 0.07: 14 Oct 2003: Further irq mask updates.
 * 0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
 *			added into irq handler, NULL check for drain_ring.
 * 0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
 *			requested interrupt sources.
 * 0.10: 20 Oct 2003: First cleanup for release.
 * 0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
 *			MAC Address init fix, set_multicast cleanup.
 * 0.12: 23 Oct 2003: Cleanups for release.
 * 0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
 *			Set link speed correctly. start rx before starting
 *			tx (nv_start_rx sets the link speed).
 * 0.14: 25 Oct 2003: Nic dependent irq mask.
 * 0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
 * 0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
 *			increased to 1628 bytes.
 * 0.17: 16 Nov 2003: undo rx buffer size increase. Subtract 1 from
 * 0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
 * 0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
 *			addresses, really stop rx if already running
 *			in nv_start_rx, clean up a bit.
 * 0.20: 07 Dec 2003: alloc fixes
 * 0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
 * 0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
 * 0.23: 26 Jan 2004: various small cleanups
 * 0.24: 27 Feb 2004: make driver even less anonymous in backtraces
 * 0.25: 09 Mar 2004: wol support
 * 0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
 * 0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
 *			added CK804/MCP04 device IDs, code fixes
 *			for registers, link status and other minor fixes.
 * 0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
 * 0.29: 31 Aug 2004: Add backup timer for link change notification.
 * 0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
 *			into nv_close, otherwise reenabling for wol can
 *			cause DMA to kfree'd memory.
 * 0.31: 14 Nov 2004: ethtool support for getting/setting link
 * 0.32: 16 Apr 2005: RX_ERROR4 handling added.
 * 0.33: 16 May 2005: Support for MCP51 added.
 * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
 * 0.35: 26 Jun 2005: Support for MCP55 added.
 * 0.36: 28 Jun 2005: Add jumbo frame support.
 * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
 * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
 * 0.39: 18 Jul 2005: Add 64bit descriptor support.
 * 0.40: 19 Jul 2005: Add support for mac address change.
 * 0.41: 30 Jul 2005: Write back original MAC in nv_close instead
 * 0.42: 06 Aug 2005: Fix lack of link speed initialization
 *			in the second (and later) nv_open call
 * 0.43: 10 Aug 2005: Add support for tx checksum.
 * 0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
 * 0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
 * 0.46: 20 Oct 2005: Add irq optimization modes.
 * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
 * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
 * 0.49: 10 Dec 2005: Fix tso for large buffers.
 * 0.50: 20 Jan 2006: Add 8021pq tagging support.
 * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
 * 0.52: 20 Jan 2006: Add MSI/MSIX support.
 * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
 * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
 * 0.55: 22 Mar 2006: Add flow control (pause frame).
 *
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define FORCEDETH_VERSION		"0.55"
#define DRV_NAME			"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#if 0
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif

/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ	0x0001	/* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER	0x0002	/* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC	0x0004	/* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA	0x0008	/* device supports 64bit dma */
#define DEV_HAS_CHECKSUM	0x0010	/* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN		0x0020	/* device supports vlan tagging and stripping */
#define DEV_HAS_MSI		0x0040	/* device supports MSI */
#define DEV_HAS_MSI_X		0x0080	/* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL	0x0100	/* device supports power savings */
#define DEV_HAS_PAUSEFRAME_TX	0x0200	/* device supports tx pause frames */

enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK	0x1ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR	0x0001
#define NVREG_IRQ_RX		0x0002
#define NVREG_IRQ_RX_NOBUF	0x0004
#define NVREG_IRQ_TX_ERR	0x0008
#define NVREG_IRQ_TX_OK		0x0010
#define NVREG_IRQ_TIMER		0x0020
#define NVREG_IRQ_LINK		0x0040
#define NVREG_IRQ_RX_FORCED	0x0080
#define NVREG_IRQ_TX_FORCED	0x0100
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU		0x0040
#define NVREG_IRQ_TX_ALL	(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL	(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER		(NVREG_IRQ_TIMER|NVREG_IRQ_LINK)

#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
					NVREG_IRQ_TX_FORCED))

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL		3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	970
#define NVREG_POLL_DEFAULT_CPU	13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x3c,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegRandomSeed = 0x9c,
#define NVREG_RNDSEED_MASK	0x00ff
#define NVREG_RNDSEED_FORCE	0x7f00
#define NVREG_RNDSEED_FORCE2	0x2d00
#define NVREG_RNDSEED_FORCE3	0x7400

	NvRegUnknownSetupReg1 = 0xA0,
#define NVREG_UNKSETUP1_VAL	0x16070f
	NvRegUnknownSetupReg2 = 0xA4,
#define NVREG_UNKSETUP2_VAL	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
	NvRegMulticastMaskB = 0xBC,

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegUnknownTransmitterReg = 0x10c,
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegUnknownSetupReg3 = 0x13c,
#define NVREG_UNKSETUP3_VAL1	0x200010
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x02100
#define NVREG_TXRXCTL_DESC_3	0x02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x1ff0080
#define NVREG_TX_PAUSEFRAME_ENABLE	0x0c00030
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK		0x000f
#define NVREG_MIISTAT_MASK2		0x000f
	NvRegUnknownSetupReg4 = 0x184,
#define NVREG_UNKSETUP4_VAL	8

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegPatternCRC = 0x204,
	NvRegPatternMask = 0x208,
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F11
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
};

/* Big endian: should work, but is untested */
struct ring_desc {
	u32 PacketBuffer;
	u32 FlagLen;
};

struct ring_desc_ex {
	u32 PacketBufferHigh;
	u32 PacketBufferLow;
	u32 TxVlan;
	u32 FlagLen;
};

typedef union _ring_type {
	struct ring_desc *orig;
	struct ring_desc_ex *ex;
} ring_type;

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)

#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUMOK1	(0x10000000)
#define NV_RX2_CHECKSUMOK2	(0x14000000)
#define NV_RX2_CHECKSUMOK3	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)

/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY		10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING		128
#define TX_RING		256
/*
 * If your nic mysteriously hangs then try to reduce the limits
 * to 1/0: It might be required to set NV_TX_LASTPACKET in the
 * last valid ring entry. But this would be impossible to
 * implement - probably a disassembly error.
 */
#define TX_LIMIT_STOP	255
#define TX_LIMIT_START	254

/* rx/tx mac addr + type + vlan + align + slack */
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)

/*
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	0x0
#define DESC_VER_2	0x02100
#define DESC_VER_3	0x02200

/* PHY defines */
#define PHY_OUI_MARVELL	0x5043
#define PHY_OUI_CICADA	0x03f1
#define PHYID1_OUI_MASK	0x03ff
#define PHYID1_OUI_SHFT	6
#define PHYID2_OUI_MASK	0xfc00
#define PHYID2_OUI_SHFT	10
#define PHY_INIT1	0x0f000
#define PHY_INIT2	0x0e00
#define PHY_INIT3	0x01000
#define PHY_INIT4	0x0200
#define PHY_INIT5	0x0004
#define PHY_INIT6	0x02000
#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_100	0x1
#define PHY_1000	0x2
#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE  0x0004
#define NV_PAUSEFRAME_TX_ENABLE  0x0008

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS	8
#define NV_MSI_X_VECTORS_MASK	0x000f
#define NV_MSI_CAPABLE		0x0010
#define NV_MSI_X_CAPABLE	0x0020
#define NV_MSI_ENABLED		0x0040
#define NV_MSI_X_ENABLED	0x0080

#define NV_MSI_X_VECTOR_ALL	0x0
#define NV_MSI_X_VECTOR_RX	0x0
#define NV_MSI_X_VECTOR_TX	0x1
#define NV_MSI_X_VECTOR_OTHER	0x2

/*
 * SMP locking:
 * All hardware access under dev->priv->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
 *	needs dev->priv->lock :-(
 * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
 */

/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct net_device_stats stats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	u16 gigabit;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 driver_data;
	u32 register_size;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	ring_type rx_ring;
	unsigned int cur_rx, refill_rx;
	struct sk_buff *rx_skbuff[RX_RING];
	dma_addr_t rx_dma[RX_RING];
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	ring_type tx_ring;
	unsigned int next_tx, nic_tx;
	struct sk_buff *tx_skbuff[TX_RING];
	dma_addr_t tx_dma[TX_RING];
	unsigned int tx_dma_len[TX_RING];
	u32 tx_flags;

	/* vlan fields */
	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 5;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
#define NV_OPTIMIZATION_MODE_THROUGHPUT	0
#define NV_OPTIMIZATION_MODE_CPU	1
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
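
/*
 * Worked example for the formula above (illustrative only, not part of
 * the original driver): a 1 ms period means time_in_micro_secs = 1000,
 * so the register value is (1000 * 100) / 2^10 = 97 -- which matches the
 * NVREG_POLL_DEFAULT=97 note next to NvRegPollingInterval. Conversely,
 * NVREG_POLL_DEFAULT_THROUGHPUT (970) corresponds to roughly 10 ms, i.e.
 * the ~100 timer interrupts per second mentioned in the header comment.
 * A hypothetical helper for the conversion could look like this:
 */
static inline unsigned int nv_usecs_to_poll_units(unsigned int usecs)
{
	unsigned int val = (usecs * 100) >> 10;	/* (usecs * 100) / 2^10 */

	return val > 65535 ? 65535 : val;	/* register value is 16 bits wide */
}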

/*
 * Disable MSI interrupts
 */
static int disable_msi = 0;

/*
 * Disable MSIX interrupts
 */
static int disable_msix = 0;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->FlagLen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
}
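
/*
 * A note on the FlagLen layout (a reading of the masks above, not taken
 * from official documentation): each descriptor packs the buffer length
 * into the low bits and the status flags into the high bits, e.g. for v1
 * descriptors FlagLen = (len & LEN_MASK_V1) | flags, so the getters above
 * simply mask the flag bits away after the little-endian conversion.
 */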

static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
				int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}

#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
		}
	}
}
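
/*
 * Layout note (inferred from setup_hw_rings above, not from vendor docs):
 * rx and tx descriptors live in one contiguous DMA allocation at
 * np->ring_addr, with the RX_RING rx descriptors first and the tx ring
 * starting right after them -- hence the "+ RX_RING*sizeof(...)" offset
 * written into NvRegTxRingPhysAddr.
 */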

static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}

static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}
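
/*
 * Reading of the pair above (our interpretation, not vendor-confirmed):
 * because an irqmask write acts as XOR in MSI-X mode, disabling passes the
 * currently-enabled mask back in to toggle those bits off, while in plain
 * MSI/INTx mode the mask register is simply cleared to 0.
 */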

#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}

	return retval;
}
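
/*
 * Usage sketch (mirrors the calls in phy_reset/phy_init below): passing
 * MII_READ as the value selects a read and returns the register contents;
 * any other value performs a write:
 *
 *	bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
 *	mii_rw(dev, np->phyaddr, MII_BMCR, bmcr | BMCR_RESET);
 */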

static int phy_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	miicontrol |= BMCR_RESET;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}

static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* reset the phy */
	if (phy_reset(dev)) {
		printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
		phy_reserved |= (PHY_INIT3 | PHY_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}

static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
		writel(0, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
				dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	writel(0, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
			NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
	pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	writel(0, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
			NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	writel(0, base + NvRegUnknownTransmitterReg);
}

static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* It seems that the nic always generates interrupts and doesn't
	 * accumulate errors internally. Thus the current values in np->stats
	 * are already up to date.
	 */
	return &np->stats;
}

/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without Available descriptors
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int refill_rx = np->refill_rx;
	unsigned int nr;

	while (np->cur_rx != refill_rx) {
		struct sk_buff *skb;

		nr = refill_rx % RX_RING;
		if (np->rx_skbuff[nr] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
			if (!skb)
				break;

			skb->dev = dev;
			np->rx_skbuff[nr] = skb;
		} else {
			skb = np->rx_skbuff[nr];
		}
		np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
					skb->end-skb->data, PCI_DMA_FROMDEVICE);
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
			wmb();
			np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
		} else {
			np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
			np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
			wmb();
			np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
		}
		dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
					dev->name, refill_rx);
		refill_rx++;
	}
	np->refill_rx = refill_rx;
	if (np->cur_rx - refill_rx == RX_RING)
		return 1;
	return 0;
}

static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
	if (nv_alloc_rx(dev)) {
		spin_lock_irq(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irq(&np->lock);
	}
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
}

static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->cur_rx = RX_RING;
	np->refill_rx = 0;
	for (i = 0; i < RX_RING; i++)
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->rx_ring.orig[i].FlagLen = 0;
		else
			np->rx_ring.ex[i].FlagLen = 0;
}

static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->next_tx = np->nic_tx = 0;
	for (i = 0; i < TX_RING; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->tx_ring.orig[i].FlagLen = 0;
		else
			np->tx_ring.ex[i].FlagLen = 0;
		np->tx_skbuff[i] = NULL;
		np->tx_dma[i] = 0;
	}
}

static int nv_init_ring(struct net_device *dev)
{
	nv_init_tx(dev);
	nv_init_rx(dev);
	return nv_alloc_rx(dev);
}

static int nv_release_txskb(struct net_device *dev, unsigned int skbnr)
{
	struct fe_priv *np = netdev_priv(dev);

	dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n",
		dev->name, skbnr);

	if (np->tx_dma[skbnr]) {
		pci_unmap_page(np->pci_dev, np->tx_dma[skbnr],
			       np->tx_dma_len[skbnr],
			       PCI_DMA_TODEVICE);
		np->tx_dma[skbnr] = 0;
	}

	if (np->tx_skbuff[skbnr]) {
		dev_kfree_skb_any(np->tx_skbuff[skbnr]);
		np->tx_skbuff[skbnr] = NULL;
		return 1;
	} else {
		return 0;
	}
}

static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < TX_RING; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->tx_ring.orig[i].FlagLen = 0;
		else
			np->tx_ring.ex[i].FlagLen = 0;
		if (nv_release_txskb(dev, i))
			np->stats.tx_dropped++;
	}
}

static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	for (i = 0; i < RX_RING; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->rx_ring.orig[i].FlagLen = 0;
		else
			np->rx_ring.ex[i].FlagLen = 0;
		wmb();
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev, np->rx_dma[i],
						np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
						PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
			np->rx_skbuff[i] = NULL;
		}
	}
}

static void drain_ring(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}

/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with dev->xmit_lock held.
 */
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int nr = (np->next_tx - 1) % TX_RING;
	unsigned int start_nr = np->next_tx % TX_RING;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 tx_flags_vlan = 0;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	spin_lock_irq(&np->lock);

	if ((np->next_tx - np->nic_tx + entries - 1) > TX_LIMIT_STOP) {
		spin_unlock_irq(&np->lock);
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	/* setup the header buffer */
	do {
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		nr = (nr + 1) % TX_RING;

		np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						PCI_DMA_TODEVICE);
		np->tx_dma_len[nr] = bcnt;

		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
			np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
		} else {
			np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
			np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
			np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
		}
		tx_flags = np->tx_flags;
		offset += bcnt;
		size -= bcnt;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			nr = (nr + 1) % TX_RING;

			np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
						      PCI_DMA_TODEVICE);
			np->tx_dma_len[nr] = bcnt;

			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
				np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
				np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
			} else {
				np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
				np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
				np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
			}
			offset += bcnt;
			size -= bcnt;
		} while (size);
	}

	/* set last fragment flag */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
	} else {
		np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
	}

	np->tx_skbuff[nr] = skb;

	if (skb_shinfo(skb)->tso_size)
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);

	/* vlan tag */
	if (np->vlangrp && vlan_tx_tag_present(skb)) {
		tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb);
	}

	/* set tx flags */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
	} else {
		np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
		np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
	}

	dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
		dev->name, np->next_tx, entries, tx_flags_extra);
	{
		int j;
		for (j = 0; j < 64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	np->next_tx += entries;

	dev->trans_start = jiffies;
	spin_unlock_irq(&np->lock);
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));
	return NETDEV_TX_OK;
}
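
/*
 * Sizing example for the entries computation above (illustrative):
 * NV_TX2_TSO_MAX_SIZE is 1<<14 = 16384, so a 20000 byte linear area is
 * split into ceil(20000/16384) = 2 descriptors -- that is what
 * (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0)
 * evaluates to, and the same term is added once per fragment.
 */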

/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static void nv_tx_done(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 Flags;
	unsigned int i;
	struct sk_buff *skb;

	while (np->nic_tx != np->next_tx) {
		i = np->nic_tx % TX_RING;

		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
		else
			Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);

		dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
					dev->name, np->nic_tx, Flags);
		if (Flags & NV_TX_VALID)
			break;
		if (np->desc_ver == DESC_VER_1) {
			if (Flags & NV_TX_LASTPACKET) {
				skb = np->tx_skbuff[i];
				if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
					     NV_TX_UNDERFLOW|NV_TX_ERROR)) {
					if (Flags & NV_TX_UNDERFLOW)
						np->stats.tx_fifo_errors++;
					if (Flags & NV_TX_CARRIERLOST)
						np->stats.tx_carrier_errors++;
					np->stats.tx_errors++;
				} else {
					np->stats.tx_packets++;
					np->stats.tx_bytes += skb->len;
				}
			}
		} else {
			if (Flags & NV_TX2_LASTPACKET) {
				skb = np->tx_skbuff[i];
				if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
					     NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
					if (Flags & NV_TX2_UNDERFLOW)
						np->stats.tx_fifo_errors++;
					if (Flags & NV_TX2_CARRIERLOST)
						np->stats.tx_carrier_errors++;
					np->stats.tx_errors++;
				} else {
					np->stats.tx_packets++;
					np->stats.tx_bytes += skb->len;
				}
			}
		}
		nv_release_txskb(dev, i);
		np->nic_tx++;
	}
	if (np->next_tx - np->nic_tx < TX_LIMIT_START)
		netif_wake_queue(dev);
}
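
/*
 * Queueing hysteresis (a reading of the limits above): nv_start_xmit stops
 * the queue once more than TX_LIMIT_STOP (255) descriptors would be
 * outstanding, and nv_tx_done only wakes it again once the backlog has
 * dropped below TX_LIMIT_START (254), so the queue does not flap on every
 * single completion.
 */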

/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with dev->xmit_lock held.
 */
static void nv_tx_timeout(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 status;

	if (np->msi_flags & NV_MSI_X_ENABLED)
		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
	else
		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;

	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);

	{
		int i;

		printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n",
				dev->name, (unsigned long)np->ring_addr,
				np->next_tx, np->nic_tx);
		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
		for (i = 0; i <= np->register_size; i += 32) {
			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
					i,
					readl(base + i + 0), readl(base + i + 4),
					readl(base + i + 8), readl(base + i + 12),
					readl(base + i + 16), readl(base + i + 20),
					readl(base + i + 24), readl(base + i + 28));
		}
		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
		for (i = 0; i < TX_RING; i += 4) {
			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
				       le32_to_cpu(np->tx_ring.orig[i].FlagLen),
				       le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
				       le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
				       le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
				       le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
				       le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
				       le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
			} else {
				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
				       le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
				       le32_to_cpu(np->tx_ring.ex[i].FlagLen),
				       le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
				       le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
				       le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
				       le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
				       le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
				       le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
				       le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
				       le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
				       le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
			}
		}
	}

	spin_lock_irq(&np->lock);

	/* 1) stop tx engine */
	nv_stop_tx(dev);

	/* 2) check that the packets were not sent already: */
	nv_tx_done(dev);

	/* 3) if there are dead entries: clear everything */
	if (np->next_tx != np->nic_tx) {
		printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
		nv_drain_tx(dev);
		np->next_tx = np->nic_tx = 0;
		setup_hw_rings(dev, NV_SETUP_TX_RING);
		netif_wake_queue(dev);
	}

	/* 4) restart tx engine */
	nv_start_tx(dev);
	spin_unlock_irq(&np->lock);
}

/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
	int hdrlen;	/* length of the 802 header */
	int protolen;	/* length as stored in the proto field */

	/* 1) calculate len according to header */
	if (((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
		protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
		hdrlen = VLAN_HLEN;
	} else {
		protolen = ntohs(((struct ethhdr *)packet)->h_proto);
		hdrlen = ETH_HLEN;
	}
	dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
				dev->name, datalen, protolen, hdrlen);
	if (protolen > ETH_DATA_LEN)
		return datalen; /* Value in proto field not a len, no checks possible */

	protolen += hdrlen;
	/* consistency checks: */
	if (datalen > ETH_ZLEN) {
		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim off
			 * the additional data.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
					dev->name, protolen);
			return protolen;
		} else {
			/* less data on wire than mentioned in header.
			 * Discard the packet.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
					dev->name);
			return -1;
		}
	} else {
		/* short packet. Accept only if 802 values are also short */
		if (protolen > ETH_ZLEN) {
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
					dev->name);
			return -1;
		}
		dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
				dev->name, datalen);
		return datalen;
	}
}
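
/*
 * Worked example (illustrative): for a plain Ethernet frame with
 * h_proto = 46 and datalen = 64, protolen becomes 46 + ETH_HLEN = 60;
 * since datalen >= protolen, nv_getlen trims the frame to 60 bytes and
 * discards the padding the wire added to reach the 64 byte minimum.
 */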

static void nv_rx_process(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 Flags;
	u32 vlanflags = 0;

	for (;;) {
		struct sk_buff *skb;
		int len;
		int i;

		if (np->cur_rx - np->refill_rx >= RX_RING)
			break;	/* we scanned the whole ring - do not continue */

		i = np->cur_rx % RX_RING;
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
			len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
		} else {
			Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
			len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
			vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow);
		}

		dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
					dev->name, np->cur_rx, Flags);

		if (Flags & NV_RX_AVAIL)
			break;	/* still owned by hardware, */

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->rx_dma[i],
				np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
				PCI_DMA_FROMDEVICE);

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags);
			for (j = 0; j < 64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (np->desc_ver == DESC_VER_1) {
			if (!(Flags & NV_RX_DESCRIPTORVALID))
				goto next_pkt;

			if (Flags & NV_RX_ERROR) {
				if (Flags & NV_RX_MISSEDFRAME) {
					np->stats.rx_missed_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX_CRCERR) {
					np->stats.rx_crc_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX_OVERFLOW) {
					np->stats.rx_over_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX_ERROR4) {
					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
					if (len < 0) {
						np->stats.rx_errors++;
						goto next_pkt;
					}
				}
				/* framing errors are soft errors. */
				if (Flags & NV_RX_FRAMINGERR) {
					if (Flags & NV_RX_SUBSTRACT1) {
						len--;
					}
				}
			}
		} else {
			if (!(Flags & NV_RX2_DESCRIPTORVALID))
				goto next_pkt;

			if (Flags & NV_RX2_ERROR) {
				if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX2_CRCERR) {
					np->stats.rx_crc_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX2_OVERFLOW) {
					np->stats.rx_over_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX2_ERROR4) {
					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
					if (len < 0) {
						np->stats.rx_errors++;
						goto next_pkt;
					}
				}
				/* framing errors are soft errors */
				if (Flags & NV_RX2_FRAMINGERR) {
					if (Flags & NV_RX2_SUBSTRACT1) {
						len--;
					}
				}
			}
			Flags &= NV_RX2_CHECKSUMMASK;
			if (Flags == NV_RX2_CHECKSUMOK1 ||
			    Flags == NV_RX2_CHECKSUMOK2 ||
			    Flags == NV_RX2_CHECKSUMOK3) {
				dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
				np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
			}
		}
		/* got a valid packet - forward it to the network core */
		skb = np->rx_skbuff[i];
		np->rx_skbuff[i] = NULL;

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
					dev->name, np->cur_rx, len, skb->protocol);
		if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) {
			vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK);
		} else {
			netif_rx(skb);
		}
		dev->last_rx = jiffies;
		np->stats.rx_packets++;
		np->stats.rx_bytes += len;
next_pkt:
		np->cur_rx++;
	}
}

static void set_bufsize(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
}

/*
 * nv_change_mtu: dev->change_mtu function
 * Called with dev_base_lock held for read.
 */
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
	struct fe_priv *np = netdev_priv(dev);
	int old_mtu;

	if (new_mtu < 64 || new_mtu > np->pkt_limit)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu == new_mtu)
		return 0;

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		u8 __iomem *base = get_hwbase(dev);
		/*
		 * It seems that the nic preloads valid ring entries into an
		 * internal buffer. The procedure for flushing everything is
		 * guessed, there is probably a simpler approach.
		 * Changing the MTU is a rare event, it shouldn't matter.
		 */
		nv_disable_irq(dev);
		spin_lock_bh(&dev->xmit_lock);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		nv_txrx_reset(dev);
		/* drain rx queue */
		nv_drain_rx(dev);
		nv_drain_tx(dev);
		/* reinit driver view of the rx queue */
		set_bufsize(dev);
		nv_init_rx(dev);
		nv_init_tx(dev);
		/* alloc new rx buffers */
		if (nv_alloc_rx(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}
		/* reinit nic view of the rx queue */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
			base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart rx engine */
		nv_start_rx(dev);
		nv_start_tx(dev);
		spin_unlock(&np->lock);
		spin_unlock_bh(&dev->xmit_lock);
		nv_enable_irq(dev);
	}
	return 0;
}

static void nv_copy_mac_to_hw(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 mac[2];

	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);

	writel(mac[0], base + NvRegMacAddrA);
	writel(mac[1], base + NvRegMacAddrB);
}
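
/*
 * Packing example (illustrative): for the address 00:11:22:33:44:55 the
 * code above produces mac[0] = 0x33221100 and mac[1] = 0x00005544, i.e.
 * the first four octets land in NvRegMacAddrA and the last two in
 * NvRegMacAddrB, least significant byte first.
 */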

/*
 * nv_set_mac_address: dev->set_mac_address function
 * Called with rtnl_lock() held.
 */
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
	struct fe_priv *np = netdev_priv(dev);
	struct sockaddr *macaddr = (struct sockaddr *)addr;

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	/* synchronized against open : rtnl_lock() held by caller */
	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);

	if (netif_running(dev)) {
		spin_lock_bh(&dev->xmit_lock);
		spin_lock_irq(&np->lock);

		/* stop rx engine */
		nv_stop_rx(dev);

		/* set mac address */
		nv_copy_mac_to_hw(dev);

		/* restart rx engine */
		nv_start_rx(dev);
		spin_unlock_irq(&np->lock);
		spin_unlock_bh(&dev->xmit_lock);
	} else {
		nv_copy_mac_to_hw(dev);
	}
	return 0;
}

/*
 * nv_set_multicast: dev->set_multicast function
 * Called with dev->xmit_lock held.
 */
static void nv_set_multicast(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 addr[2];
	u32 mask[2];
	u32 pff;

	memset(addr, 0, sizeof(addr));
	memset(mask, 0, sizeof(mask));

	if (dev->flags & IFF_PROMISC) {
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
		pff = NVREG_PFF_PROMISC;
	} else {
		pff = NVREG_PFF_MYADDR;

		if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
			u32 alwaysOff[2];
			u32 alwaysOn[2];

			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
			if (dev->flags & IFF_ALLMULTI) {
				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
			} else {
				struct dev_mc_list *walk;

				walk = dev->mc_list;
				while (walk != NULL) {
					u32 a, b;
					a = le32_to_cpu(*(u32 *) walk->dmi_addr);
					b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4]));
					alwaysOn[0] &= a;
					alwaysOff[0] &= ~a;
					alwaysOn[1] &= b;
					alwaysOff[1] &= ~b;
					walk = walk->next;
				}
			}
			addr[0] = alwaysOn[0];
			addr[1] = alwaysOn[1];
			mask[0] = alwaysOn[0] | alwaysOff[0];
			mask[1] = alwaysOn[1] | alwaysOff[1];
		}
	}
	addr[0] |= NVREG_MCASTADDRA_FORCE;
	pff |= NVREG_PFF_ALWAYS;
	spin_lock_irq(&np->lock);
	nv_stop_rx(dev);
	writel(addr[0], base + NvRegMulticastAddrA);
	writel(addr[1], base + NvRegMulticastAddrB);
	writel(mask[0], base + NvRegMulticastMaskA);
	writel(mask[1], base + NvRegMulticastMaskB);
	writel(pff, base + NvRegPacketFilterFlags);
	dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
		dev->name);
	nv_start_rx(dev);
	spin_unlock_irq(&np->lock);
}

/*
 * nv_update_linkspeed: Setup the MAC according to the link partner
 * @dev: Network device to be configured
 *
 * The function queries the PHY and checks if there is a link partner.
 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
 * set to 10 MBit HD.
 *
 * The function returns 0 if there is no link partner and 1 if there is
 * a good link partner.
 */
static int nv_update_linkspeed(struct net_device *dev)
{
    struct fe_priv *np = netdev_priv(dev);
    u8 __iomem *base = get_hwbase(dev);
    int adv = 0;
    int lpa = 0;
    int adv_lpa, adv_pause, lpa_pause;
    int newls = np->linkspeed;
    int newdup = np->duplex;
    int mii_status;
    int retval = 0;
    u32 control_1000, status_1000, phyreg;

    /* BMSR_LSTATUS is latched, read it twice:
     * we want the current value.
     */
    mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
    mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

    if (!(mii_status & BMSR_LSTATUS)) {
        dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
                dev->name);
        newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
        newdup = 0;
        retval = 0;
        goto set_speed;
    }

    if (np->autoneg == 0) {
        dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
                dev->name, np->fixed_mode);
        if (np->fixed_mode & LPA_100FULL) {
            newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
            newdup = 1;
        } else if (np->fixed_mode & LPA_100HALF) {
            newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
            newdup = 0;
        } else if (np->fixed_mode & LPA_10FULL) {
            newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
            newdup = 1;
        } else {
            newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
            newdup = 0;
        }
        retval = 1;
        goto set_speed;
    }
    /* check auto negotiation is complete */
    if (!(mii_status & BMSR_ANEGCOMPLETE)) {
        /* still in autonegotiation - configure nic for 10 MBit HD and wait. */
        newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
        newdup = 0;
        retval = 0;
        dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
        goto set_speed;
    }

    retval = 1;
    if (np->gigabit == PHY_GIGABIT) {
        control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
        status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);

        if ((control_1000 & ADVERTISE_1000FULL) &&
            (status_1000 & LPA_1000FULL)) {
            dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
                dev->name);
            newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
            newdup = 1;
            goto set_speed;
        }
    }

    adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
    lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
    dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
            dev->name, adv, lpa);

    /* FIXME: handle parallel detection properly */
    adv_lpa = lpa & adv;
    if (adv_lpa & LPA_100FULL) {
        newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
        newdup = 1;
    } else if (adv_lpa & LPA_100HALF) {
        newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
        newdup = 0;
    } else if (adv_lpa & LPA_10FULL) {
        newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
        newdup = 1;
    } else if (adv_lpa & LPA_10HALF) {
        newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
        newdup = 0;
    } else {
        dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
        newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
        newdup = 0;
    }

set_speed:
    if (np->duplex == newdup && np->linkspeed == newls)
        return retval;

    dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
            dev->name, np->linkspeed, np->duplex, newls, newdup);

    np->duplex = newdup;
    np->linkspeed = newls;

    if (np->gigabit == PHY_GIGABIT) {
        phyreg = readl(base + NvRegRandomSeed);
        phyreg &= ~(0x3FF00);
        if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
            phyreg |= NVREG_RNDSEED_FORCE3;
        else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
            phyreg |= NVREG_RNDSEED_FORCE2;
        else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
            phyreg |= NVREG_RNDSEED_FORCE;
        writel(phyreg, base + NvRegRandomSeed);
    }

    phyreg = readl(base + NvRegPhyInterface);
    phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
    if (np->duplex == 0)
        phyreg |= PHY_HALF;
    if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
        phyreg |= PHY_100;
    else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
        phyreg |= PHY_1000;
    writel(phyreg, base + NvRegPhyInterface);

    writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
        base + NvRegMisc1);
    pci_push(base);
    writel(np->linkspeed, base + NvRegLinkSpeed);
    pci_push(base);

    /* setup pause frame based on advertisement and link partner */
    np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);

    if (np->duplex != 0) {
        adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
        lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

        switch (adv_pause) {
        case (ADVERTISE_PAUSE_CAP):
            if (lpa_pause & LPA_PAUSE_CAP) {
                np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE;
            }
            break;
        case (ADVERTISE_PAUSE_ASYM):
            if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM)) {
                np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
            }
            break;
        case (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM):
            if (lpa_pause & LPA_PAUSE_CAP) {
                np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE;
            }
            if (lpa_pause == LPA_PAUSE_ASYM) {
                np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
            }
            break;
        }
    }

    if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
        u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
        if (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE)
            writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
        else
            writel(pff, base + NvRegPacketFilterFlags);
    }
    if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
        u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
        if (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
            writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame);
            writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
        } else {
            writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
            writel(regmisc, base + NvRegMisc1);
        }
    }

    return retval;
}
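
/* The pause switch above follows the usual 802.3 flow-control
 * resolution: symmetric pause needs PAUSE_CAP on both sides; a
 * cap+asym local advertisement against an asym-only partner yields
 * rx-only pause, and an asym-only local advertisement against a
 * cap+asym partner yields tx-only pause. */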

static void nv_linkchange(struct net_device *dev)
{
    if (nv_update_linkspeed(dev)) {
        if (!netif_carrier_ok(dev)) {
            netif_carrier_on(dev);
            printk(KERN_INFO "%s: link up.\n", dev->name);
            nv_start_rx(dev);
        }
    } else {
        if (netif_carrier_ok(dev)) {
            netif_carrier_off(dev);
            printk(KERN_INFO "%s: link down.\n", dev->name);
            nv_stop_rx(dev);
        }
    }
}

static void nv_link_irq(struct net_device *dev)
{
    u8 __iomem *base = get_hwbase(dev);
    u32 miistat;

    miistat = readl(base + NvRegMIIStatus);
    writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
    dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);

    if (miistat & (NVREG_MIISTAT_LINKCHANGE))
        nv_linkchange(dev);
    dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
}

static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
{
    struct net_device *dev = (struct net_device *) data;
    struct fe_priv *np = netdev_priv(dev);
    u8 __iomem *base = get_hwbase(dev);
    u32 events;
    int i;

    dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);

    for (i = 0; ; i++) {
        if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
            events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
            writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
        } else {
            events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
            writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
        }
        pci_push(base);
        dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
        if (!(events & np->irqmask))
            break;

        spin_lock(&np->lock);
        nv_tx_done(dev);
        spin_unlock(&np->lock);

        nv_rx_process(dev);
        if (nv_alloc_rx(dev)) {
            spin_lock(&np->lock);
            if (!np->in_shutdown)
                mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
            spin_unlock(&np->lock);
        }

        if (events & NVREG_IRQ_LINK) {
            spin_lock(&np->lock);
            nv_link_irq(dev);
            spin_unlock(&np->lock);
        }
        if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
            spin_lock(&np->lock);
            nv_linkchange(dev);
            spin_unlock(&np->lock);
            np->link_timeout = jiffies + LINK_TIMEOUT;
        }
        if (events & (NVREG_IRQ_TX_ERR)) {
            dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
                    dev->name, events);
        }
        if (events & (NVREG_IRQ_UNKNOWN)) {
            printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
                    dev->name, events);
        }
        if (i > max_interrupt_work) {
            spin_lock(&np->lock);
            /* disable interrupts on the nic */
            if (!(np->msi_flags & NV_MSI_X_ENABLED))
                writel(0, base + NvRegIrqMask);
            else
                writel(np->irqmask, base + NvRegIrqMask);
            pci_push(base);

            if (!np->in_shutdown) {
                np->nic_poll_irq = np->irqmask;
                mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
            }
            printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
            spin_unlock(&np->lock);
            break;
        }
    }
    dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);

    return IRQ_RETVAL(i);
}
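
/* Note: the i > max_interrupt_work escape above (and in the handlers
 * below) is the driver's interrupt-storm protection: when one hardware
 * interrupt keeps reasserting, the offending sources are masked on the
 * nic and the nic_poll timer takes over, servicing and re-enabling
 * them from timer context after POLL_WAIT. */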

static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
{
    struct net_device *dev = (struct net_device *) data;
    struct fe_priv *np = netdev_priv(dev);
    u8 __iomem *base = get_hwbase(dev);
    u32 events;
    int i;

    dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);

    for (i = 0; ; i++) {
        events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
        writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
        pci_push(base);
        dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
        if (!(events & np->irqmask))
            break;

        spin_lock_irq(&np->lock);
        nv_tx_done(dev);
        spin_unlock_irq(&np->lock);

        if (events & (NVREG_IRQ_TX_ERR)) {
            dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
                    dev->name, events);
        }
        if (i > max_interrupt_work) {
            spin_lock_irq(&np->lock);
            /* disable interrupts on the nic */
            writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
            pci_push(base);

            if (!np->in_shutdown) {
                np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
                mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
            }
            printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
            spin_unlock_irq(&np->lock);
            break;
        }
    }
    dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);

    return IRQ_RETVAL(i);
}

static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
{
    struct net_device *dev = (struct net_device *) data;
    struct fe_priv *np = netdev_priv(dev);
    u8 __iomem *base = get_hwbase(dev);
    u32 events;
    int i;

    dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);

    for (i = 0; ; i++) {
        events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
        writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
        pci_push(base);
        dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
        if (!(events & np->irqmask))
            break;

        nv_rx_process(dev);
        if (nv_alloc_rx(dev)) {
            spin_lock_irq(&np->lock);
            if (!np->in_shutdown)
                mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
            spin_unlock_irq(&np->lock);
        }

        if (i > max_interrupt_work) {
            spin_lock_irq(&np->lock);
            /* disable interrupts on the nic */
            writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
            pci_push(base);

            if (!np->in_shutdown) {
                np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
                mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
            }
            printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
            spin_unlock_irq(&np->lock);
            break;
        }
    }
    dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);

    return IRQ_RETVAL(i);
}

static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
{
    struct net_device *dev = (struct net_device *) data;
    struct fe_priv *np = netdev_priv(dev);
    u8 __iomem *base = get_hwbase(dev);
    u32 events;
    int i;

    dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);

    for (i = 0; ; i++) {
        events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
        writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
        pci_push(base);
        dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
        if (!(events & np->irqmask))
            break;

        if (events & NVREG_IRQ_LINK) {
            spin_lock_irq(&np->lock);
            nv_link_irq(dev);
            spin_unlock_irq(&np->lock);
        }
        if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
            spin_lock_irq(&np->lock);
            nv_linkchange(dev);
            spin_unlock_irq(&np->lock);
            np->link_timeout = jiffies + LINK_TIMEOUT;
        }
        if (events & (NVREG_IRQ_UNKNOWN)) {
            printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
                    dev->name, events);
        }
        if (i > max_interrupt_work) {
            spin_lock_irq(&np->lock);
            /* disable interrupts on the nic */
            writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
            pci_push(base);

            if (!np->in_shutdown) {
                np->nic_poll_irq |= NVREG_IRQ_OTHER;
                mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
            }
            printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
            spin_unlock_irq(&np->lock);
            break;
        }
    }
    dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);

    return IRQ_RETVAL(i);
}

static void nv_do_nic_poll(unsigned long data)
{
    struct net_device *dev = (struct net_device *) data;
    struct fe_priv *np = netdev_priv(dev);
    u8 __iomem *base = get_hwbase(dev);
    u32 mask = 0;

    /*
     * First disable irq(s) and then
     * reenable interrupts on the nic, we have to do this before calling
     * nv_nic_irq because that may decide to do otherwise
     */

    if (!using_multi_irqs(dev)) {
        if (np->msi_flags & NV_MSI_X_ENABLED)
            disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
        else
            disable_irq(dev->irq);
        mask = np->irqmask;
    } else {
        if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
            disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
            mask |= NVREG_IRQ_RX_ALL;
        }
        if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
            disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
            mask |= NVREG_IRQ_TX_ALL;
        }
        if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
            disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
            mask |= NVREG_IRQ_OTHER;
        }
    }
    np->nic_poll_irq = 0;

    /* FIXME: Do we need synchronize_irq(dev->irq) here? */

    writel(mask, base + NvRegIrqMask);
    pci_push(base);

    if (!using_multi_irqs(dev)) {
        nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
        if (np->msi_flags & NV_MSI_X_ENABLED)
            enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
        else
            enable_irq(dev->irq);
    } else {
        if (mask & NVREG_IRQ_RX_ALL) {
            nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
            enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
        }
        if (mask & NVREG_IRQ_TX_ALL) {
            nv_nic_irq_tx((int) 0, (void *) data, (struct pt_regs *) NULL);
            enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
        }
        if (mask & NVREG_IRQ_OTHER) {
            nv_nic_irq_other((int) 0, (void *) data, (struct pt_regs *) NULL);
            enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
        }
    }
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{
    nv_do_nic_poll((unsigned long) dev);
}
#endif

static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
    struct fe_priv *np = netdev_priv(dev);
    strcpy(info->driver, "forcedeth");
    strcpy(info->version, FORCEDETH_VERSION);
    strcpy(info->bus_info, pci_name(np->pci_dev));
}

static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
    struct fe_priv *np = netdev_priv(dev);
    wolinfo->supported = WAKE_MAGIC;

    spin_lock_irq(&np->lock);
    if (np->wolenabled)
        wolinfo->wolopts = WAKE_MAGIC;
    spin_unlock_irq(&np->lock);
}

static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
    struct fe_priv *np = netdev_priv(dev);
    u8 __iomem *base = get_hwbase(dev);

    spin_lock_irq(&np->lock);
    if (wolinfo->wolopts == 0) {
        writel(0, base + NvRegWakeUpFlags);
        np->wolenabled = 0;
    }
    if (wolinfo->wolopts & WAKE_MAGIC) {
        writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
        np->wolenabled = 1;
    }
    spin_unlock_irq(&np->lock);
    return 0;
}

static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
    struct fe_priv *np = netdev_priv(dev);
    int adv;

    spin_lock_irq(&np->lock);
    ecmd->port = PORT_MII;
    if (!netif_running(dev)) {
        /* We do not track link speed / duplex setting if the
         * interface is disabled. Force a link check */
        nv_update_linkspeed(dev);
    }
    switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
    case NVREG_LINKSPEED_10:
        ecmd->speed = SPEED_10;
        break;
    case NVREG_LINKSPEED_100:
        ecmd->speed = SPEED_100;
        break;
    case NVREG_LINKSPEED_1000:
        ecmd->speed = SPEED_1000;
        break;
    }
    ecmd->duplex = DUPLEX_HALF;
    if (np->duplex)
        ecmd->duplex = DUPLEX_FULL;

    ecmd->autoneg = np->autoneg;

    ecmd->advertising = ADVERTISED_MII;
    if (np->autoneg) {
        ecmd->advertising |= ADVERTISED_Autoneg;
        adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
    } else {
        adv = np->fixed_mode;
    }
    if (adv & ADVERTISE_10HALF)
        ecmd->advertising |= ADVERTISED_10baseT_Half;
    if (adv & ADVERTISE_10FULL)
        ecmd->advertising |= ADVERTISED_10baseT_Full;
    if (adv & ADVERTISE_100HALF)
        ecmd->advertising |= ADVERTISED_100baseT_Half;
    if (adv & ADVERTISE_100FULL)
        ecmd->advertising |= ADVERTISED_100baseT_Full;
    if (np->autoneg && np->gigabit == PHY_GIGABIT) {
        adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
        if (adv & ADVERTISE_1000FULL)
            ecmd->advertising |= ADVERTISED_1000baseT_Full;
    }

    ecmd->supported = (SUPPORTED_Autoneg |
        SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
        SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
        SUPPORTED_MII);
    if (np->gigabit == PHY_GIGABIT)
        ecmd->supported |= SUPPORTED_1000baseT_Full;

    ecmd->phy_address = np->phyaddr;
    ecmd->transceiver = XCVR_EXTERNAL;

    /* ignore maxtxpkt, maxrxpkt for now */
    spin_unlock_irq(&np->lock);
    return 0;
}

static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
    struct fe_priv *np = netdev_priv(dev);

    if (ecmd->port != PORT_MII)
        return -EINVAL;
    if (ecmd->transceiver != XCVR_EXTERNAL)
        return -EINVAL;
    if (ecmd->phy_address != np->phyaddr) {
        /* TODO: support switching between multiple phys. Should be
         * trivial, but not enabled due to lack of test hardware. */
        return -EINVAL;
    }
    if (ecmd->autoneg == AUTONEG_ENABLE) {
        u32 mask;

        mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
            ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
        if (np->gigabit == PHY_GIGABIT)
            mask |= ADVERTISED_1000baseT_Full;

        if ((ecmd->advertising & mask) == 0)
            return -EINVAL;

    } else if (ecmd->autoneg == AUTONEG_DISABLE) {
        /* Note: autonegotiation disable, speed 1000 intentionally
         * forbidden - noone should need that. */

        if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
            return -EINVAL;
        if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
            return -EINVAL;
    } else {
        return -EINVAL;
    }

    spin_lock_irq(&np->lock);
    if (ecmd->autoneg == AUTONEG_ENABLE) {
        int adv, bmcr;

        np->autoneg = 1;

        /* advertise only what has been requested */
        adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
        adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
        if (ecmd->advertising & ADVERTISED_10baseT_Half)
            adv |= ADVERTISE_10HALF;
        if (ecmd->advertising & ADVERTISED_10baseT_Full)
            adv |= ADVERTISE_10FULL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        if (ecmd->advertising & ADVERTISED_100baseT_Half)
            adv |= ADVERTISE_100HALF;
        if (ecmd->advertising & ADVERTISED_100baseT_Full)
            adv |= ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

        if (np->gigabit == PHY_GIGABIT) {
            adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
            adv &= ~ADVERTISE_1000FULL;
            if (ecmd->advertising & ADVERTISED_1000baseT_Full)
                adv |= ADVERTISE_1000FULL;
            mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
        }

        bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
        bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
        mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);

    } else {
        int adv, bmcr;

        np->autoneg = 0;

        adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
        adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
        if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
            adv |= ADVERTISE_10HALF;
        if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
            adv |= ADVERTISE_10FULL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
            adv |= ADVERTISE_100HALF;
        if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
            adv |= ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
        np->fixed_mode = adv;

        if (np->gigabit == PHY_GIGABIT) {
            adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
            adv &= ~ADVERTISE_1000FULL;
            mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
        }

        bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
        bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_FULLDPLX);
        if (adv & (ADVERTISE_10FULL|ADVERTISE_100FULL))
            bmcr |= BMCR_FULLDPLX;
        if (adv & (ADVERTISE_100HALF|ADVERTISE_100FULL))
            bmcr |= BMCR_SPEED100;
        mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);

        if (netif_running(dev)) {
            /* Wait a bit and then reconfigure the nic. */
            udelay(10);
            nv_linkchange(dev);
        }
    }
    spin_unlock_irq(&np->lock);

    return 0;
}

#define FORCEDETH_REGS_VER 1

static int nv_get_regs_len(struct net_device *dev)
{
    struct fe_priv *np = netdev_priv(dev);
    return np->register_size;
}

static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
    struct fe_priv *np = netdev_priv(dev);
    u8 __iomem *base = get_hwbase(dev);
    u32 *rbuf = buf;
    int i;

    regs->version = FORCEDETH_REGS_VER;
    spin_lock_irq(&np->lock);
    for (i = 0; i <= np->register_size/sizeof(u32); i++)
        rbuf[i] = readl(base + i*sizeof(u32));
    spin_unlock_irq(&np->lock);
}

static int nv_nway_reset(struct net_device *dev)
{
    struct fe_priv *np = netdev_priv(dev);
    int ret;

    spin_lock_irq(&np->lock);
    if (np->autoneg) {
        int bmcr;

        bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
        bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
        mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);

        ret = 0;
    } else {
        ret = -EINVAL;
    }
    spin_unlock_irq(&np->lock);

    return ret;
}

#ifdef NETIF_F_TSO
static int nv_set_tso(struct net_device *dev, u32 value)
{
    struct fe_priv *np = netdev_priv(dev);

    if ((np->driver_data & DEV_HAS_CHECKSUM))
        return ethtool_op_set_tso(dev, value);
    else
        return value ? -EOPNOTSUPP : 0;
}
#endif

static struct ethtool_ops ops = {
    .get_drvinfo = nv_get_drvinfo,
    .get_link = ethtool_op_get_link,
    .get_wol = nv_get_wol,
    .set_wol = nv_set_wol,
    .get_settings = nv_get_settings,
    .set_settings = nv_set_settings,
    .get_regs_len = nv_get_regs_len,
    .get_regs = nv_get_regs,
    .nway_reset = nv_nway_reset,
    .get_perm_addr = ethtool_op_get_perm_addr,
#ifdef NETIF_F_TSO
    .get_tso = ethtool_op_get_tso,
    .set_tso = nv_set_tso
#endif
};
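
/* For reference (userspace mapping, interface names illustrative):
 * these ops back the ethtool tool - get_settings/set_settings serve
 * "ethtool eth0" / "ethtool -s eth0 ...", nway_reset serves
 * "ethtool -r eth0", get_regs serves "ethtool -d eth0", and
 * get_wol/set_wol serve the wol display and "ethtool -s eth0 wol g". */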

static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
    struct fe_priv *np = get_nvpriv(dev);

    spin_lock_irq(&np->lock);

    /* save vlan group */
    np->vlangrp = grp;

    if (grp) {
        /* enable vlan on MAC */
        np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
    } else {
        /* disable vlan on MAC */
        np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
        np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
    }

    writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);

    spin_unlock_irq(&np->lock);
}

static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
    /* nothing to do */
}

static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
{
    u8 __iomem *base = get_hwbase(dev);
    int i;
    u32 msixmap = 0;

    /* Each interrupt bit can be mapped to a MSIX vector (4 bits).
     * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
     * the remaining 8 interrupts.
     */
    for (i = 0; i < 8; i++) {
        if ((irqmask >> i) & 0x1) {
            msixmap |= vector << (i << 2);
        }
    }
    writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);

    msixmap = 0;
    for (i = 0; i < 8; i++) {
        if ((irqmask >> (i + 8)) & 0x1) {
            msixmap |= vector << (i << 2);
        }
    }
    writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
}
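
/* Worked example: mapping vector 1 onto an irqmask with bits 0 and 3
 * set gives msixmap = (1 << 0) | (1 << 12) = 0x00001001 for MSIXMap0,
 * i.e. nibble i of the register selects the vector that interrupt
 * bit i is routed to. */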

static int nv_request_irq(struct net_device *dev)
{
    struct fe_priv *np = get_nvpriv(dev);
    u8 __iomem *base = get_hwbase(dev);
    int ret = 1;
    int i;

    if (np->msi_flags & NV_MSI_X_CAPABLE) {
        for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
            np->msi_x_entry[i].entry = i;
        }
        if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
            np->msi_flags |= NV_MSI_X_ENABLED;
            if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
                /* Request irq for rx handling */
                if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
                    printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
                    pci_disable_msix(np->pci_dev);
                    np->msi_flags &= ~NV_MSI_X_ENABLED;
                    goto out_err;
                }
                /* Request irq for tx handling */
                if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
                    printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
                    pci_disable_msix(np->pci_dev);
                    np->msi_flags &= ~NV_MSI_X_ENABLED;
                    goto out_free_rx;
                }
                /* Request irq for link and timer handling */
                if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
                    printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
                    pci_disable_msix(np->pci_dev);
                    np->msi_flags &= ~NV_MSI_X_ENABLED;
                    goto out_free_tx;
                }
                /* map interrupts to their respective vector */
                writel(0, base + NvRegMSIXMap0);
                writel(0, base + NvRegMSIXMap1);
                set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
                set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
                set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
            } else {
                /* Request irq for all interrupts */
                if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
                    printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
                    pci_disable_msix(np->pci_dev);
                    np->msi_flags &= ~NV_MSI_X_ENABLED;
                    goto out_err;
                }

                /* map interrupts to vector 0 */
                writel(0, base + NvRegMSIXMap0);
                writel(0, base + NvRegMSIXMap1);
            }
        }
    }
    if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
        if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
            np->msi_flags |= NV_MSI_ENABLED;
            if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
                printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
                pci_disable_msi(np->pci_dev);
                np->msi_flags &= ~NV_MSI_ENABLED;
                goto out_err;
            }

            /* map interrupts to vector 0 */
            writel(0, base + NvRegMSIMap0);
            writel(0, base + NvRegMSIMap1);
            /* enable msi vector 0 */
            writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
        }
    }
    if (ret != 0) {
        if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
            goto out_err;
    }

    return 0;
out_free_tx:
    free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
out_free_rx:
    free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
out_err:
    return 1;
}

static void nv_free_irq(struct net_device *dev)
{
    struct fe_priv *np = get_nvpriv(dev);
    int i;

    if (np->msi_flags & NV_MSI_X_ENABLED) {
        for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
            free_irq(np->msi_x_entry[i].vector, dev);
        }
        pci_disable_msix(np->pci_dev);
        np->msi_flags &= ~NV_MSI_X_ENABLED;
    } else {
        free_irq(np->pci_dev->irq, dev);
        if (np->msi_flags & NV_MSI_ENABLED) {
            pci_disable_msi(np->pci_dev);
            np->msi_flags &= ~NV_MSI_ENABLED;
        }
    }
}

static int nv_open(struct net_device *dev)
{
    struct fe_priv *np = netdev_priv(dev);
    u8 __iomem *base = get_hwbase(dev);
    int ret = 1;
    int oom, i;

    dprintk(KERN_DEBUG "nv_open: begin\n");

    /* 1) erase previous misconfiguration */
    if (np->driver_data & DEV_HAS_POWER_CNTRL)
        nv_mac_reset(dev);
    /* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
    writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
    writel(0, base + NvRegMulticastAddrB);
    writel(0, base + NvRegMulticastMaskA);
    writel(0, base + NvRegMulticastMaskB);
    writel(0, base + NvRegPacketFilterFlags);

    writel(0, base + NvRegTransmitterControl);
    writel(0, base + NvRegReceiverControl);

    writel(0, base + NvRegAdapterControl);

    if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
        writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);

    /* 2) initialize descriptor rings */
    set_bufsize(dev);
    oom = nv_init_ring(dev);

    writel(0, base + NvRegLinkSpeed);
    writel(0, base + NvRegUnknownTransmitterReg);
    nv_txrx_reset(dev);
    writel(0, base + NvRegUnknownSetupReg6);

    np->in_shutdown = 0;

    /* 3) set mac address */
    nv_copy_mac_to_hw(dev);

    /* 4) give hw rings */
    setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
    writel(((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
        base + NvRegRingSizes);

    /* 5) continue setup */
    writel(np->linkspeed, base + NvRegLinkSpeed);
    writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
    writel(np->txrxctl_bits, base + NvRegTxRxControl);
    writel(np->vlanctl_bits, base + NvRegVlanControl);
    pci_push(base);
    writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
    reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
            NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
            KERN_INFO "open: SetupReg5, Bit 31 remained off\n");

    writel(0, base + NvRegUnknownSetupReg4);
    writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
    writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);

    /* 6) continue setup */
    writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
    writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
    writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
    writel(np->rx_buf_sz, base + NvRegOffloadConfig);

    writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
    get_random_bytes(&i, sizeof(i));
    writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
    writel(NVREG_UNKSETUP1_VAL, base + NvRegUnknownSetupReg1);
    writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2);
    if (poll_interval == -1) {
        if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
            writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
        else
            writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
    } else
        writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
    writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
    writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
            base + NvRegAdapterControl);
    writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
    writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4);
    writel(NVREG_WAKEUPFLAGS_VAL, base + NvRegWakeUpFlags);

    i = readl(base + NvRegPowerState);
    if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
        writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);

    pci_push(base);
    udelay(10);
    writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);

    nv_disable_hw_interrupts(dev, np->irqmask);
    pci_push(base);
    writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
    writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
    pci_push(base);

    if (nv_request_irq(dev)) {
        goto out_drain;
    }

    /* ask for interrupts */
    nv_enable_hw_interrupts(dev, np->irqmask);

    spin_lock_irq(&np->lock);
    writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
    writel(0, base + NvRegMulticastAddrB);
    writel(0, base + NvRegMulticastMaskA);
    writel(0, base + NvRegMulticastMaskB);
    writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
    /* One manual link speed update: Interrupts are enabled, future link
     * speed changes cause interrupts and are handled by nv_link_irq().
     */
    {
        u32 miistat;
        miistat = readl(base + NvRegMIIStatus);
        writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
        dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
    }
    /* set linkspeed to invalid value, thus force nv_update_linkspeed
     * to init hw */
    np->linkspeed = 0;
    ret = nv_update_linkspeed(dev);
    nv_start_rx(dev);
    nv_start_tx(dev);
    netif_start_queue(dev);
    if (ret) {
        netif_carrier_on(dev);
    } else {
        printk("%s: no link during initialization.\n", dev->name);
        netif_carrier_off(dev);
    }
    if (oom)
        mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
    spin_unlock_irq(&np->lock);

    return 0;
out_drain:
    drain_ring(dev);
    return ret;
}

static int nv_close(struct net_device *dev)
{
    struct fe_priv *np = netdev_priv(dev);
    u8 __iomem *base;

    spin_lock_irq(&np->lock);
    np->in_shutdown = 1;
    spin_unlock_irq(&np->lock);
    synchronize_irq(dev->irq);

    del_timer_sync(&np->oom_kick);
    del_timer_sync(&np->nic_poll);

    netif_stop_queue(dev);
    spin_lock_irq(&np->lock);
    nv_stop_tx(dev);
    nv_stop_rx(dev);
    nv_txrx_reset(dev);

    /* disable interrupts on the nic or we will lock up */
    base = get_hwbase(dev);
    nv_disable_hw_interrupts(dev, np->irqmask);
    pci_push(base);
    dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);

    spin_unlock_irq(&np->lock);

    nv_free_irq(dev);

    drain_ring(dev);

    if (np->wolenabled)
        nv_start_rx(dev);

    /* special op: write back the misordered MAC address - otherwise
     * the next nv_probe would see a wrong address.
     */
    writel(np->orig_mac[0], base + NvRegMacAddrA);
    writel(np->orig_mac[1], base + NvRegMacAddrB);

    /* FIXME: power down nic */

    return 0;
}
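
/* Byte-order note: NvRegMacAddrA/B hold the address in reversed word
 * order compared to what nv_copy_mac_to_hw writes. Illustrative read:
 * MacAddrA = 0x44332211 and MacAddrB = 0x00006655 are decoded by
 * nv_probe below as 66:55:44:33:22:11, hence the write-back of
 * orig_mac above so that a later probe decodes the same address. */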

static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
    struct net_device *dev;
    struct fe_priv *np;
    unsigned long addr;
    u8 __iomem *base;
    int err, i;

    dev = alloc_etherdev(sizeof(struct fe_priv));
    err = -ENOMEM;
    if (!dev)
        goto out;

    np = netdev_priv(dev);
    np->pci_dev = pci_dev;
    spin_lock_init(&np->lock);
    SET_MODULE_OWNER(dev);
    SET_NETDEV_DEV(dev, &pci_dev->dev);

    init_timer(&np->oom_kick);
    np->oom_kick.data = (unsigned long) dev;
    np->oom_kick.function = &nv_do_rx_refill;	/* timer handler */
    init_timer(&np->nic_poll);
    np->nic_poll.data = (unsigned long) dev;
    np->nic_poll.function = &nv_do_nic_poll;	/* timer handler */

    err = pci_enable_device(pci_dev);
    if (err) {
        printk(KERN_INFO "forcedeth: pci_enable_dev failed (%d) for device %s\n",
                err, pci_name(pci_dev));
        goto out_free;
    }

    pci_set_master(pci_dev);

    err = pci_request_regions(pci_dev, DRV_NAME);
    if (err < 0)
        goto out_disable;

    if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL))
        np->register_size = NV_PCI_REGSZ_VER2;
    else
        np->register_size = NV_PCI_REGSZ_VER1;

    err = -EINVAL;
    addr = 0;
    for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
        dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
                pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
                pci_resource_len(pci_dev, i),
                pci_resource_flags(pci_dev, i));
        if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
                pci_resource_len(pci_dev, i) >= np->register_size) {
            addr = pci_resource_start(pci_dev, i);
            break;
        }
    }
    if (i == DEVICE_COUNT_RESOURCE) {
        printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n",
                pci_name(pci_dev));
        goto out_relreg;
    }

    /* copy of driver data */
    np->driver_data = id->driver_data;

    /* handle different descriptor versions */
    if (id->driver_data & DEV_HAS_HIGH_DMA) {
        /* packet format 3: supports 40-bit addressing */
        np->desc_ver = DESC_VER_3;
        np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
        if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
            printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
                    pci_name(pci_dev));
        } else {
            dev->features |= NETIF_F_HIGHDMA;
            printk(KERN_INFO "forcedeth: using HIGHDMA\n");
        }
        if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
            printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
                    pci_name(pci_dev));
        }
    } else if (id->driver_data & DEV_HAS_LARGEDESC) {
        /* packet format 2: supports jumbo frames */
        np->desc_ver = DESC_VER_2;
        np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
    } else {
        /* original packet format */
        np->desc_ver = DESC_VER_1;
        np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
    }

    np->pkt_limit = NV_PKTLIMIT_1;
    if (id->driver_data & DEV_HAS_LARGEDESC)
        np->pkt_limit = NV_PKTLIMIT_2;

    if (id->driver_data & DEV_HAS_CHECKSUM) {
        np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
        dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
#ifdef NETIF_F_TSO
        dev->features |= NETIF_F_TSO;
#endif
    }

    np->vlanctl_bits = 0;
    if (id->driver_data & DEV_HAS_VLAN) {
        np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
        dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
        dev->vlan_rx_register = nv_vlan_rx_register;
        dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid;
    }

    np->msi_flags = 0;
    if ((id->driver_data & DEV_HAS_MSI) && !disable_msi) {
        np->msi_flags |= NV_MSI_CAPABLE;
    }
    if ((id->driver_data & DEV_HAS_MSI_X) && !disable_msix) {
        np->msi_flags |= NV_MSI_X_CAPABLE;
    }

    np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE;
    if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
        np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE;
    }

    err = -ENOMEM;
    np->base = ioremap(addr, np->register_size);
    if (!np->base)
        goto out_relreg;
    dev->base_addr = (unsigned long)np->base;

    dev->irq = pci_dev->irq;

    if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
        np->rx_ring.orig = pci_alloc_consistent(pci_dev,
                    sizeof(struct ring_desc) * (RX_RING + TX_RING),
                    &np->ring_addr);
        if (!np->rx_ring.orig)
            goto out_unmap;
        np->tx_ring.orig = &np->rx_ring.orig[RX_RING];
    } else {
        np->rx_ring.ex = pci_alloc_consistent(pci_dev,
                    sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
                    &np->ring_addr);
        if (!np->rx_ring.ex)
            goto out_unmap;
        np->tx_ring.ex = &np->rx_ring.ex[RX_RING];
    }

    dev->open = nv_open;
    dev->stop = nv_close;
    dev->hard_start_xmit = nv_start_xmit;
    dev->get_stats = nv_get_stats;
    dev->change_mtu = nv_change_mtu;
    dev->set_mac_address = nv_set_mac_address;
    dev->set_multicast_list = nv_set_multicast;
#ifdef CONFIG_NET_POLL_CONTROLLER
    dev->poll_controller = nv_poll_controller;
#endif
    SET_ETHTOOL_OPS(dev, &ops);
    dev->tx_timeout = nv_tx_timeout;
    dev->watchdog_timeo = NV_WATCHDOG_TIMEO;

    pci_set_drvdata(pci_dev, dev);

    /* read the mac address */
    base = get_hwbase(dev);
    np->orig_mac[0] = readl(base + NvRegMacAddrA);
    np->orig_mac[1] = readl(base + NvRegMacAddrB);

    dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
    dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
    dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
    dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
    dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
    dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
    memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

    if (!is_valid_ether_addr(dev->perm_addr)) {
        /*
         * Bad mac address. At least one bios sets the mac address
         * to 01:23:45:67:89:ab
         */
        printk(KERN_ERR "%s: Invalid Mac address detected: %02x:%02x:%02x:%02x:%02x:%02x\n",
            pci_name(pci_dev),
            dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
            dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
        printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n");
        dev->dev_addr[0] = 0x00;
        dev->dev_addr[1] = 0x00;
        dev->dev_addr[2] = 0x6c;
        get_random_bytes(&dev->dev_addr[3], 3);
    }

    dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev),
            dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
            dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);

    /* disable WOL */
    writel(0, base + NvRegWakeUpFlags);
    np->wolenabled = 0;

    if (id->driver_data & DEV_HAS_POWER_CNTRL) {
        u8 revision_id;
        u32 powerstate;
        pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id);

        /* take phy and nic out of low power mode */
        powerstate = readl(base + NvRegPowerState2);
        powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
        if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
             id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
            revision_id >= 0xA3)
            powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
        writel(powerstate, base + NvRegPowerState2);
    }

    if (np->desc_ver == DESC_VER_1) {
        np->tx_flags = NV_TX_VALID;
    } else {
        np->tx_flags = NV_TX2_VALID;
    }
    if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
        np->irqmask = NVREG_IRQMASK_THROUGHPUT;
        if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
            np->msi_flags |= 0x0003;
    } else {
        np->irqmask = NVREG_IRQMASK_CPU;
        if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
            np->msi_flags |= 0x0001;
    }

    if (id->driver_data & DEV_NEED_TIMERIRQ)
        np->irqmask |= NVREG_IRQ_TIMER;
    if (id->driver_data & DEV_NEED_LINKTIMER) {
        dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
        np->need_linktimer = 1;
        np->link_timeout = jiffies + LINK_TIMEOUT;
    } else {
        dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
        np->need_linktimer = 0;
    }

    /* find a suitable phy */
    for (i = 1; i <= 32; i++) {
        int id1, id2;
        int phyaddr = i & 0x1F;

        spin_lock_irq(&np->lock);
        id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
        spin_unlock_irq(&np->lock);
        if (id1 < 0 || id1 == 0xffff)
            continue;
        spin_lock_irq(&np->lock);
        id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
        spin_unlock_irq(&np->lock);
        if (id2 < 0 || id2 == 0xffff)
            continue;

        id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
        id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
        dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
            pci_name(pci_dev), id1, id2, phyaddr);
        np->phyaddr = phyaddr;
        np->phy_oui = id1 | id2;
        break;
    }
    if (i == 33) {
        printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
               pci_name(pci_dev));
        goto out_freering;
    }

    /* reset it */
    phy_init(dev);

    /* set default link speed settings */
    np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
    np->duplex = 0;
    np->autoneg = 1;

    err = register_netdev(dev);
    if (err) {
        printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
        goto out_freering;
    }
    printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
            dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
            pci_name(pci_dev));

    return 0;

out_freering:
    if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
        pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
                    np->rx_ring.orig, np->ring_addr);
    else
        pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
                    np->rx_ring.ex, np->ring_addr);
    pci_set_drvdata(pci_dev, NULL);
out_unmap:
    iounmap(get_hwbase(dev));
out_relreg:
    pci_release_regions(pci_dev);
out_disable:
    pci_disable_device(pci_dev);
out_free:
    free_netdev(dev);
out:
    return err;
}

static void __devexit nv_remove(struct pci_dev *pci_dev)
{
    struct net_device *dev = pci_get_drvdata(pci_dev);
    struct fe_priv *np = netdev_priv(dev);

    unregister_netdev(dev);

    /* free all structures */
    if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
        pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring.orig, np->ring_addr);
    else
        pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING), np->rx_ring.ex, np->ring_addr);
    iounmap(get_hwbase(dev));
    pci_release_regions(pci_dev);
    pci_disable_device(pci_dev);
    free_netdev(dev);
    pci_set_drvdata(pci_dev, NULL);
}

static struct pci_device_id pci_tbl[] = {
    {	/* nForce Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
    },
    {	/* nForce2 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
    },
    {	/* nForce3 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
    },
    {	/* nForce3 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
    },
    {	/* nForce3 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
    },
    {	/* nForce3 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
    },
    {	/* nForce3 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
    },
    {	/* CK804 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
    },
    {	/* CK804 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
    },
    {	/* MCP04 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
    },
    {	/* MCP04 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
    },
    {	/* MCP51 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
    },
    {	/* MCP51 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
    },
    {	/* MCP55 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX,
    },
    {	/* MCP55 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX,
    },
    {0,},
};

static struct pci_driver driver = {
    .name = "forcedeth",
    .id_table = pci_tbl,
    .probe = nv_probe,
    .remove = __devexit_p(nv_remove),
};

static int __init init_nic(void)
{
    printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
    return pci_module_init(&driver);
}

static void __exit exit_nic(void)
{
    pci_unregister_driver(&driver);
}

module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable MSI interrupts by setting to 1.");
module_param(disable_msix, int, 0);
MODULE_PARM_DESC(disable_msix, "Disable MSI-X interrupts by setting to 1.");

MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);
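
/* Illustrative usage (parameter values are examples, not
 * recommendations):
 *   modprobe forcedeth optimization_mode=1 poll_interval=100
 * Inverting the formula above, poll_interval=100 corresponds to
 * roughly 100 * 2^10 / 100 = 1024 microseconds between timer
 * interrupts. */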