/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 *	engineered documentation written by Carl-Daniel Hailfinger
 *	and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *	IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define FORCEDETH_VERSION		"0.64"
#define DRV_NAME			"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#if 0
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif
#define TX_WORK_PER_LOOP  64
#define RX_WORK_PER_LOOP  64

#define DEV_NEED_TIMERIRQ          0x000001  /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER         0x000002  /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC          0x000004  /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA           0x000008  /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM           0x000010  /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN               0x000020  /* device supports vlan tagging and stripping */
#define DEV_HAS_MSI                0x000040  /* device supports MSI */
#define DEV_HAS_MSI_X              0x000080  /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL        0x000100  /* device supports power savings */
#define DEV_HAS_STATISTICS_V1      0x000200  /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2      0x000600  /* device supports hw statistics version 2 */
#define DEV_HAS_STATISTICS_V3      0x000e00  /* device supports hw statistics version 3 */
#define DEV_HAS_TEST_EXTENDED      0x001000  /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT          0x002000  /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR    0x004000  /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX      0x008000  /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1   0x010000  /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2   0x020000  /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3   0x040000  /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT          0x080000  /* device needs to limit tx */
#define DEV_HAS_GEAR_MODE          0x100000  /* device supports gear mode */
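/* These capability flags are OR-ed together per board into the PCI device
 * table's driver_data field (see the DEV_NEED_TIMERIRQ note in the header
 * comment above). */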
enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK		0x83ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR		0x0001
#define NVREG_IRQ_RX			0x0002
#define NVREG_IRQ_RX_NOBUF		0x0004
#define NVREG_IRQ_TX_ERR		0x0008
#define NVREG_IRQ_TX_OK			0x0010
#define NVREG_IRQ_TIMER			0x0020
#define NVREG_IRQ_LINK			0x0040
#define NVREG_IRQ_RX_FORCED		0x0080
#define NVREG_IRQ_TX_FORCED		0x0100
#define NVREG_IRQ_RECOVER_ERROR		0x8200
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU		0x0060
#define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL		3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	65535 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU	13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
#define NVREG_XMITCTL_MGMT_ST	0x40000000
#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
#define NVREG_XMITCTL_DATA_START	0x00100000
#define NVREG_XMITCTL_DATA_READY	0x00010000
#define NVREG_XMITCTL_DATA_ERROR	0x00020000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED	0x80000000
#define NVREG_SLOTTIME_10_100_FULL	0x00007f00
#define NVREG_SLOTTIME_1000_FULL	0x0003ff00
#define NVREG_SLOTTIME_HALF		0x0000ff00
#define NVREG_SLOTTIME_DEFAULT		0x00007f00
#define NVREG_SLOTTIME_MASK		0x000000ff

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT		0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100		0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000		0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10	0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100	0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH		0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE		0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE		0xffff

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000
	NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT			0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK		0x000003ff
#define NVREG_BKOFFCTRL_SELECT			24
#define NVREG_BKOFFCTRL_GEAR			12

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x002100
#define NVREG_TXRXCTL_DESC_3	0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1	0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2	0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3	0x09f00880
	NvRegTxPauseFrameLimit = 0x174,
#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE	0x00010000
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK_RW		0x0007
#define NVREG_MIISTAT_MASK_ALL		0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE		0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegTxUnicast = 0x1a0,
	NvRegTxMulticast = 0x1a4,
	NvRegTxBroadcast = 0x1a8,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegMgmtUnitGetVersion = 0x204,
#define NVREG_MGMTUNITGETVERSION	0x01
	NvRegMgmtUnitVersion = 0x208,
#define NVREG_MGMTUNITVERSION		0x08
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegMgmtUnitControl = 0x278,
#define NVREG_MGMTUNITCONTROL_INUSE	0x20000

	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
#define NVREG_POWERSTATE2_PHY_RESET		0x0004
};
/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc* orig;
	struct ring_desc_ex* ex;
};
#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)

#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_RETRYCOUNT_MASK	(0xF<<20)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_RETRYCOUNT_MASK	(0xF<<19)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)
#define NV_RX_ERROR_MASK	(NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUM_IP	(0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)
#define NV_RX2_ERROR_MASK	(NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x2d4
#define NV_PCI_REGSZ_VER3	0x604
#define NV_PCI_REGSZ_MAX	0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY		10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4
/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		512
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)
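/* For reference: with HZ=1000, OOM_REFILL is 51 jiffies (~51 ms) and
 * POLL_WAIT is 11 jiffies (~11 ms); with HZ=100 they are 6 (~60 ms) and
 * 2 (~20 ms) respectively. */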
/*
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3

/* PHY defines */
#define PHY_OUI_MARVELL		0x5043
#define PHY_OUI_CICADA		0x03f1
#define PHY_OUI_VITESSE		0x01c1
#define PHY_OUI_REALTEK		0x0732
#define PHY_OUI_REALTEK2	0x0020
#define PHYID1_OUI_MASK	0x03ff
#define PHYID1_OUI_SHFT	6
#define PHYID2_OUI_MASK	0xfc00
#define PHYID2_OUI_SHFT	10
#define PHYID2_MODEL_MASK		0x03f0
#define PHY_MODEL_REALTEK_8211		0x0110
#define PHY_REV_MASK			0x0001
#define PHY_REV_REALTEK_8211B		0x0000
#define PHY_REV_REALTEK_8211C		0x0001
#define PHY_MODEL_REALTEK_8201		0x0200
#define PHY_MODEL_MARVELL_E3016		0x0220
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_CICADA_INIT1	0x0f000
#define PHY_CICADA_INIT2	0x0e00
#define PHY_CICADA_INIT3	0x01000
#define PHY_CICADA_INIT4	0x0200
#define PHY_CICADA_INIT5	0x0004
#define PHY_CICADA_INIT6	0x02000
#define PHY_VITESSE_INIT_REG1	0x1f
#define PHY_VITESSE_INIT_REG2	0x10
#define PHY_VITESSE_INIT_REG3	0x11
#define PHY_VITESSE_INIT_REG4	0x12
#define PHY_VITESSE_INIT_MSK1	0xc
#define PHY_VITESSE_INIT_MSK2	0x0180
#define PHY_VITESSE_INIT1	0x52b5
#define PHY_VITESSE_INIT2	0xaf8a
#define PHY_VITESSE_INIT3	0x8
#define PHY_VITESSE_INIT4	0x8f8a
#define PHY_VITESSE_INIT5	0xaf86
#define PHY_VITESSE_INIT6	0x8f86
#define PHY_VITESSE_INIT7	0xaf82
#define PHY_VITESSE_INIT8	0x0100
#define PHY_VITESSE_INIT9	0x8f82
#define PHY_VITESSE_INIT10	0x0
#define PHY_REALTEK_INIT_REG1	0x1f
#define PHY_REALTEK_INIT_REG2	0x19
#define PHY_REALTEK_INIT_REG3	0x13
#define PHY_REALTEK_INIT_REG4	0x14
#define PHY_REALTEK_INIT_REG5	0x18
#define PHY_REALTEK_INIT_REG6	0x11
#define PHY_REALTEK_INIT_REG7	0x01
#define PHY_REALTEK_INIT1	0x0000
#define PHY_REALTEK_INIT2	0x8e00
#define PHY_REALTEK_INIT3	0x0001
#define PHY_REALTEK_INIT4	0xad17
#define PHY_REALTEK_INIT5	0xfb54
#define PHY_REALTEK_INIT6	0xf5c7
#define PHY_REALTEK_INIT7	0x1000
#define PHY_REALTEK_INIT8	0x0003
#define PHY_REALTEK_INIT9	0x0008
#define PHY_REALTEK_INIT10	0x0005
#define PHY_REALTEK_INIT11	0x0200
#define PHY_REALTEK_INIT_MSK1	0x0003

#define PHY_GIGABIT	0x0100
#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE  0x0004
#define NV_PAUSEFRAME_TX_ENABLE  0x0008
#define NV_PAUSEFRAME_RX_REQ     0x0010
#define NV_PAUSEFRAME_TX_REQ     0x0020
#define NV_PAUSEFRAME_AUTONEG    0x0040
/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS  8
#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_CAPABLE	      0x0010
#define NV_MSI_X_CAPABLE      0x0020
#define NV_MSI_ENABLED	      0x0040
#define NV_MSI_X_ENABLED      0x0080

#define NV_MSI_X_VECTOR_ALL   0x0
#define NV_MSI_X_VECTOR_RX    0x0
#define NV_MSI_X_VECTOR_TX    0x1
#define NV_MSI_X_VECTOR_OTHER 0x2

#define NV_MSI_PRIV_OFFSET 0x68
#define NV_MSI_PRIV_VALUE  0xffffffff

#define NV_RESTART_TX         0x1
#define NV_RESTART_RX         0x2

#define NV_TX_LIMIT_COUNT     16

#define NV_DYNAMIC_THRESHOLD        4
#define NV_DYNAMIC_MAX_QUIET_COUNT  2048
/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" },
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" },
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" },

	/* version 3 stats */
	{ "tx_unicast" },
	{ "tx_multicast" },
	{ "tx_broadcast" }
};
struct nv_ethtool_stats {
	u64 tx_bytes;
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets;
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;

	/* version 3 stats */
	u64 tx_unicast;
	u64 tx_multicast;
	u64 tx_broadcast;
};

#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
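/* The count arithmetic mirrors the struct layout above: the V2 count drops
 * the three version-3-only counters (tx_unicast/tx_multicast/tx_broadcast)
 * and the V1 count additionally drops the six version-2-only ones. */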
/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link      (online/offline)" },
	{ "register  (offline)       " },
	{ "interrupt (offline)       " },
	{ "loopback  (offline)       " }
};

struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0,0 }
};

struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len;
	struct ring_desc_ex *first_tx_desc;
	struct nv_skb_map *next_tx_ctx;
};
/*
 * SMP locking:
 * All hardware access under netdev_priv(dev)->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *   by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *   needs netdev_priv(dev)->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */
/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct nv_ethtool_stats estats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	unsigned int phy_model;
	unsigned int phy_rev;
	u16 gigabit;
	int intr_test;
	int recover_error;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 device_id;
	u32 register_size;
	int rx_csum;
	u32 mac_in_use;
	int mgmt_version;
	int mgmt_sema;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;
	int tx_limit;
	u32 tx_pkts_in_progress;
	struct nv_skb_map *tx_change_owner;
	struct nv_skb_map *tx_end_flip;
	int tx_stop;

	/* vlan fields */
	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;

	/* power saved state */
	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];

	/* for different msi-x irq type */
	char name_rx[IFNAMSIZ + 3];       /* -rx    */
	char name_tx[IFNAMSIZ + 3];       /* -tx    */
	char name_other[IFNAMSIZ + 6];    /* -other */
};
/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 4;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU,
	NV_OPTIMIZATION_MODE_DYNAMIC
};
static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequent an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
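/* Worked example for the formula above: a 1 ms interval is 1000 us, and
 * (1000 * 100) / 2^10 = 97 (rounded down), which matches the
 * NVREG_POLL_DEFAULT=97 note in the register definitions. */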
/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {
	NV_CROSSOVER_DETECTION_DISABLED,
	NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}
static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}

static bool nv_optimized(struct fe_priv *np)
{
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;
	return true;
}
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk("%s", msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}
#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}
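/* The double shift avoids undefined behaviour: a single ">> 32" is illegal
 * in C when dma_addr_t is only 32 bits wide, while ">>31>>1" is always a
 * pair of valid, in-range shifts. */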
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}
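/* The rx and tx rings share a single DMA allocation: the tx ring starts
 * right after rx_ring_size descriptors, which is why the tx base address
 * above is ring_addr plus that offset (free_rings() below releases both
 * with one pci_free_consistent call). */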
static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skb)
		kfree(np->rx_skb);
	if (np->tx_skb)
		kfree(np->tx_skb);
}
static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}
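/* In other words, multiple interrupt lines are in use only when MSI-X is
 * enabled with more than the single vector 0x1; every other configuration
 * funnels through one irq. */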
static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}
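/* Since an MSI-X irqmask write toggles bits (see the XOR note above),
 * disabling re-writes the currently enabled mask to clear it, while the
 * non-MSI-X path can simply write 0. */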
static void nv_napi_enable(struct net_device *dev)
{
#ifdef CONFIG_FORCEDETH_NAPI
	struct fe_priv *np = get_nvpriv(dev);

	napi_enable(&np->napi);
#endif
}

static void nv_napi_disable(struct net_device *dev)
{
#ifdef CONFIG_FORCEDETH_NAPI
	struct fe_priv *np = get_nvpriv(dev);

	napi_disable(&np->napi);
#endif
}
#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}

	return retval;
}
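/* Illustrative usage (not a call made at this point in the file): a read
 * such as mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ) returns the register
 * value or -1 on error; a write like mii_rw(dev, np->phyaddr, MII_BMCR, val)
 * returns 0 on success. */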
static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}
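/* At 10 ms per retry, the deassert loop above allows roughly one extra
 * second on top of the initial 500 ms sleep before phy_reset() gives up. */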
static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;
	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211C) {
			u32 powerstate = readl(base + NvRegPowerState2);

			/* need to perform hw phy reset */
			powerstate |= NVREG_POWERSTATE2_PHY_RESET;
			writel(powerstate, base + NvRegPowerState2);
			msleep(25);

			powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
			writel(powerstate, base + NvRegPowerState2);
			msleep(25);

			reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
			reg |= PHY_REALTEK_INIT9;
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
			if (!(reg & PHY_REALTEK_INIT11)) {
				reg |= PHY_REALTEK_INIT11;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
				phy_reserved |= PHY_REALTEK_INIT7;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
		}
	}
	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}
	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	else
		np->gigabit = 0;
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8211 &&
	    np->phy_rev == PHY_REV_REALTEK_8211C) {
		/* start autoneg since we already performed hw reset above */
		mii_control |= BMCR_ANRESTART;
		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
			printk(KERN_INFO "%s: phy init failed\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else {
		/* reset the phy
		 * (certain phys need bmcr to be setup with reset)
		 */
		if (phy_reset(dev, mii_control)) {
			printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_CICADA_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_VITESSE) {
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
		phy_reserved |= PHY_VITESSE_INIT8;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			/* reset could have cleared these out, set them back */
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
				phy_reserved |= PHY_REALTEK_INIT7;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
			if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
				phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
				phy_reserved |= PHY_REALTEK_INIT3;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
		}
	}
	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation, power down phy */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE | BMCR_PDOWN);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}
static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
				dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}
static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
		  NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
		  KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}
static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}
static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
		  NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
		  KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}
static void nv_start_rxtx(struct net_device *dev)
{
	nv_start_rx(dev);
	nv_start_tx(dev);
}

static void nv_stop_rxtx(struct net_device *dev)
{
	nv_stop_rx(dev);
	nv_stop_tx(dev);
}
static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 temp1, temp2, temp3;

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);

	/* save registers since they will be cleared on reset */
	temp1 = readl(base + NvRegMacAddrA);
	temp2 = readl(base + NvRegMacAddrB);
	temp3 = readl(base + NvRegTransmitPoll);

	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);

	/* restore saved registers */
	writel(temp1, base + NvRegMacAddrA);
	writel(temp2, base + NvRegMacAddrB);
	writel(temp3, base + NvRegTransmitPoll);

	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
static void nv_get_hw_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;
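	/* rx_extra_byte events are presumably counted by the hardware's
	 * frame-alignment counter as well, which would explain why they are
	 * subtracted back out of rx_errors_total above. */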
	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
	}

	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
		np->estats.tx_unicast += readl(base + NvRegTxUnicast);
		np->estats.tx_multicast += readl(base + NvRegTxMulticast);
		np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
	}
}
/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* If the nic supports hw counters then retrieve latest values */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
		nv_get_hw_stats(dev);

		/* copy to net_device stats */
		dev->stats.tx_bytes = np->estats.tx_bytes;
		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
		dev->stats.rx_over_errors = np->estats.rx_over_errors;
		dev->stats.rx_errors = np->estats.rx_errors_total;
		dev->stats.tx_errors = np->estats.tx_errors_total;
	}

	return &dev->stats;
}
/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without Available descriptors
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc* less_rx;

	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
		less_rx = np->last_rx.orig;

	while (np->put_rx.orig != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
			wmb();
			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
				np->put_rx.orig = np->first_rx.orig;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}
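/* The ring is deliberately filled only up to one slot before get_rx (the
 * less_rx sentinel above), so the put pointer can never overrun descriptors
 * the nic has not yet returned. */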
static int nv_alloc_rx_optimized(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc_ex* less_rx;

	less_rx = np->get_rx.ex;
	if (less_rx-- == np->first_rx.ex)
		less_rx = np->last_rx.ex;

	while (np->put_rx.ex != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
			wmb();
			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
				np->put_rx.ex = np->first_rx.ex;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}
/* If rx bufs are exhausted called after 50ms to attempt to refresh */
#ifdef CONFIG_FORCEDETH_NAPI
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	/* Just reschedule NAPI rx processing */
	napi_schedule(&np->napi);
}
#else
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	int retcode;

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
	if (!nv_optimized(np))
		retcode = nv_alloc_rx(dev);
	else
		retcode = nv_alloc_rx_optimized(dev);
	if (retcode) {
		spin_lock_irq(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irq(&np->lock);
	}
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
}
#endif
static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;

	if (!nv_optimized(np))
		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
	else
		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];

	for (i = 0; i < np->rx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		np->rx_skb[i].skb = NULL;
		np->rx_skb[i].dma = 0;
	}
}
static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;

	if (!nv_optimized(np))
		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
	else
		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
	np->tx_pkts_in_progress = 0;
	np->tx_change_owner = NULL;
	np->tx_end_flip = NULL;
	np->tx_stop = 0;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		np->tx_skb[i].skb = NULL;
		np->tx_skb[i].dma = 0;
		np->tx_skb[i].dma_len = 0;
		np->tx_skb[i].first_tx_desc = NULL;
		np->tx_skb[i].next_tx_ctx = NULL;
	}
}
static int nv_init_ring(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	nv_init_tx(dev);
	nv_init_rx(dev);

	if (!nv_optimized(np))
		return nv_alloc_rx(dev);
	else
		return nv_alloc_rx_optimized(dev);
}
static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb)
{
	struct fe_priv *np = netdev_priv(dev);

	if (tx_skb->dma) {
		pci_unmap_page(np->pci_dev, tx_skb->dma,
			       tx_skb->dma_len,
			       PCI_DMA_TODEVICE);
		tx_skb->dma = 0;
	}
	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
		return 1;
	} else {
		return 0;
	}
}
static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		if (nv_release_txskb(dev, &np->tx_skb[i]))
			dev->stats.tx_dropped++;
		np->tx_skb[i].dma = 0;
		np->tx_skb[i].dma_len = 0;
		np->tx_skb[i].first_tx_desc = NULL;
		np->tx_skb[i].next_tx_ctx = NULL;
	}
	np->tx_pkts_in_progress = 0;
	np->tx_change_owner = NULL;
	np->tx_end_flip = NULL;
}
static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	for (i = 0; i < np->rx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		wmb();
		if (np->rx_skb[i].skb) {
			pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
					 (skb_end_pointer(np->rx_skb[i].skb) -
					  np->rx_skb[i].skb->data),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skb[i].skb);
			np->rx_skb[i].skb = NULL;
		}
	}
}
static void nv_drain_rxtx(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}
static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
{
	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
}
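/*
 * Worked example (illustrative numbers, not from the driver): this is
 * standard ring-buffer occupancy arithmetic. With tx_ring_size = 8 and
 * put_tx_ctx three entries ahead of get_tx_ctx, the expression yields
 * 8 - ((8 + 3) % 8) = 5 empty slots. If put_tx_ctx has wrapped so the
 * pointer difference is -5, it yields 8 - ((8 - 5) % 8) = 5 as well;
 * adding tx_ring_size before the modulo keeps the dividend non-negative.
 */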
static void nv_legacybackoff_reseed(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	u32 low;
	int tx_status = 0;

	reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
	get_random_bytes(&low, sizeof(low));
	reg |= low & NVREG_SLOTTIME_MASK;

	/* Need to stop tx before change takes effect.
	 * Caller has already gained np->lock.
	 */
	tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
	if (tx_status)
		nv_stop_tx(dev);
	nv_stop_rx(dev);
	writel(reg, base + NvRegSlotTime);
	if (tx_status)
		nv_start_tx(dev);
	nv_start_rx(dev);
}
/* Gear Backoff Seeds */
#define BACKOFF_SEEDSET_ROWS	8
#define BACKOFF_SEEDSET_LFSRS	15

/* Known Good seed sets */
static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
    {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
    {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
    {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
    {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
    {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
    {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
    {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800,  84},
    {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}};

static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
    {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
    {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
    {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}};
static void nv_gear_backoff_reseed(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
	u32 temp, seedset, combinedSeed;
	int i;

	/* Setup seed for free running LFSR */
	/* We are going to read the time stamp counter 3 times
	   and swizzle bits around to increase randomness */
	get_random_bytes(&miniseed1, sizeof(miniseed1));
	miniseed1 &= 0x0fff;
	if (miniseed1 == 0)
		miniseed1 = 0xabc;

	get_random_bytes(&miniseed2, sizeof(miniseed2));
	miniseed2 &= 0x0fff;
	if (miniseed2 == 0)
		miniseed2 = 0xabc;
	miniseed2_reversed =
		((miniseed2 & 0xF00) >> 8) |
		 (miniseed2 & 0x0F0) |
		 ((miniseed2 & 0x00F) << 8);

	get_random_bytes(&miniseed3, sizeof(miniseed3));
	miniseed3 &= 0x0fff;
	if (miniseed3 == 0)
		miniseed3 = 0xabc;
	miniseed3_reversed =
		((miniseed3 & 0xF00) >> 8) |
		 (miniseed3 & 0x0F0) |
		 ((miniseed3 & 0x00F) << 8);

	combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
		       (miniseed2 ^ miniseed3_reversed);

	/* Seeds cannot be zero */
	if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
		combinedSeed |= 0x08;
	if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
		combinedSeed |= 0x8000;

	/* No need to disable tx here */
	temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
	temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
	temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
	writel(temp, base + NvRegBackOffControl);

	/* Setup seeds for all gear LFSRs. */
	get_random_bytes(&seedset, sizeof(seedset));
	seedset = seedset % BACKOFF_SEEDSET_ROWS;
	for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
		temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
		temp |= main_seedset[seedset][i-1] & 0x3ff;
		temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
		writel(temp, base + NvRegBackOffControl);
	}
}
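/*
 * Example of the nibble swizzling above (illustrative value): for
 * miniseed2 = 0xabc, the reversal keeps the middle nibble and swaps the
 * outer ones: ((0xa00 >> 8) | 0x0b0 | (0x00c << 8)) = 0xcba. combinedSeed
 * then packs two 12-bit quantities into the low 24 bits, with the gear
 * half selected via NVREG_BKOFFCTRL_GEAR.
 */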
/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with netif_tx_lock held.
 */
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc* put_tx;
	struct ring_desc* start_tx;
	struct ring_desc* prev_tx;
	struct nv_skb_map* prev_tx_ctx;
	unsigned long flags;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	spin_lock_irqsave(&np->lock, flags);
	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irqrestore(&np->lock, flags);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	start_tx = put_tx = np->put_tx.orig;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = np->tx_flags;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.orig))
			put_tx = np->first_tx.orig;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.orig))
				put_tx = np->first_tx.orig;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}

	/* set last fragment flag  */
	prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	spin_lock_irqsave(&np->lock, flags);

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.orig = put_tx;

	spin_unlock_irqrestore(&np->lock, flags);

	dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
	{
		int j;
		for (j=0; j<64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	dev->trans_start = jiffies;
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}
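/*
 * Note on the entries calculation above (worked example with assumed
 * sizes): with NV_TX2_TSO_MAX_SIZE = (1 << NV_TX2_TSO_MAX_SHIFT), the
 * expression is a ceiling division - the shift gives the whole number of
 * maximum-sized descriptors and the mask test adds one for any remainder.
 * A linear buffer of 2.5 * NV_TX2_TSO_MAX_SIZE bytes thus consumes
 * 2 + 1 = 3 ring entries.
 */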
static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra;
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc_ex* put_tx;
	struct ring_desc_ex* start_tx;
	struct ring_desc_ex* prev_tx;
	struct nv_skb_map* prev_tx_ctx;
	struct nv_skb_map* start_tx_ctx;
	unsigned long flags;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	spin_lock_irqsave(&np->lock, flags);
	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irqrestore(&np->lock, flags);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	start_tx = put_tx = np->put_tx.ex;
	start_tx_ctx = np->put_tx_ctx;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
		put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = NV_TX2_VALID;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.ex))
			put_tx = np->first_tx.ex;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
			put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.ex))
				put_tx = np->first_tx.ex;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}

	/* set last fragment flag  */
	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	/* vlan tag */
	if (likely(!np->vlangrp)) {
		start_tx->txvlan = 0;
	} else {
		if (vlan_tx_tag_present(skb))
			start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
		else
			start_tx->txvlan = 0;
	}

	spin_lock_irqsave(&np->lock, flags);

	if (np->tx_limit) {
		/* Limit the number of outstanding tx. Setup all fragments, but
		 * do not set the VALID bit on the first descriptor. Save a pointer
		 * to that descriptor and also for next skb_map element.
		 */

		if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
			if (!np->tx_change_owner)
				np->tx_change_owner = start_tx_ctx;

			/* remove VALID bit */
			tx_flags &= ~NV_TX2_VALID;
			start_tx_ctx->first_tx_desc = start_tx;
			start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
			np->tx_end_flip = np->put_tx_ctx;
		} else {
			np->tx_pkts_in_progress++;
		}
	}

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.ex = put_tx;

	spin_unlock_irqrestore(&np->lock, flags);

	dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
	{
		int j;
		for (j=0; j<64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	dev->trans_start = jiffies;
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}
static inline void nv_tx_flip_ownership(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	np->tx_pkts_in_progress--;
	if (np->tx_change_owner) {
		np->tx_change_owner->first_tx_desc->flaglen |=
			cpu_to_le32(NV_TX2_VALID);
		np->tx_pkts_in_progress++;

		np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
		if (np->tx_change_owner == np->tx_end_flip)
			np->tx_change_owner = NULL;

		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	}
}
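/*
 * Flow sketch (restating the tx limit scheme above): once
 * NV_TX_LIMIT_COUNT packets are outstanding, nv_start_xmit_optimized
 * queues further packets with the VALID bit stripped from their first
 * descriptor. Each completion then calls nv_tx_flip_ownership(), which
 * hands exactly one deferred packet back to the hardware by restoring
 * NV_TX2_VALID and kicking the transmitter.
 */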
/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static int nv_tx_done(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int tx_work = 0;
	struct ring_desc* orig_get_tx = np->get_tx.orig;

	while ((np->get_tx.orig != np->put_tx.orig) &&
	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
	       (tx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
					dev->name, flags);

		pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
			       np->get_tx_ctx->dma_len,
			       PCI_DMA_TODEVICE);
		np->get_tx_ctx->dma = 0;

		if (np->desc_ver == DESC_VER_1) {
			if (flags & NV_TX_LASTPACKET) {
				if (flags & NV_TX_ERROR) {
					if (flags & NV_TX_UNDERFLOW)
						dev->stats.tx_fifo_errors++;
					if (flags & NV_TX_CARRIERLOST)
						dev->stats.tx_carrier_errors++;
					if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
						nv_legacybackoff_reseed(dev);
					dev->stats.tx_errors++;
				} else {
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
				}
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
				tx_work++;
			}
		} else {
			if (flags & NV_TX2_LASTPACKET) {
				if (flags & NV_TX2_ERROR) {
					if (flags & NV_TX2_UNDERFLOW)
						dev->stats.tx_fifo_errors++;
					if (flags & NV_TX2_CARRIERLOST)
						dev->stats.tx_carrier_errors++;
					if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
						nv_legacybackoff_reseed(dev);
					dev->stats.tx_errors++;
				} else {
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
				}
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
				tx_work++;
			}
		}
		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
			np->get_tx.orig = np->first_tx.orig;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
	return tx_work;
}
static int nv_tx_done_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int tx_work = 0;
	struct ring_desc_ex* orig_get_tx = np->get_tx.ex;

	while ((np->get_tx.ex != np->put_tx.ex) &&
	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
	       (tx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
					dev->name, flags);

		pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
			       np->get_tx_ctx->dma_len,
			       PCI_DMA_TODEVICE);
		np->get_tx_ctx->dma = 0;

		if (flags & NV_TX2_LASTPACKET) {
			if (!(flags & NV_TX2_ERROR))
				dev->stats.tx_packets++;
			else {
				if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
					if (np->driver_data & DEV_HAS_GEAR_MODE)
						nv_gear_backoff_reseed(dev);
					else
						nv_legacybackoff_reseed(dev);
				}
			}

			dev_kfree_skb_any(np->get_tx_ctx->skb);
			np->get_tx_ctx->skb = NULL;
			tx_work++;

			if (np->tx_limit)
				nv_tx_flip_ownership(dev);
		}
		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
			np->get_tx.ex = np->first_tx.ex;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
	return tx_work;
}
/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with netif_tx_lock held.
 */
static void nv_tx_timeout(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 status;
	union ring_type put_tx;
	int saved_tx_limit;

	if (np->msi_flags & NV_MSI_X_ENABLED)
		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
	else
		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;

	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);

	{
		int i;

		printk(KERN_INFO "%s: Ring at %lx\n",
		       dev->name, (unsigned long)np->ring_addr);
		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
		for (i=0;i<=np->register_size;i+= 32) {
			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
					i,
					readl(base + i + 0), readl(base + i + 4),
					readl(base + i + 8), readl(base + i + 12),
					readl(base + i + 16), readl(base + i + 20),
					readl(base + i + 24), readl(base + i + 28));
		}
		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
		for (i=0;i<np->tx_ring_size;i+= 4) {
			if (!nv_optimized(np)) {
				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.orig[i].buf),
				       le32_to_cpu(np->tx_ring.orig[i].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+1].buf),
				       le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+2].buf),
				       le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+3].buf),
				       le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
			} else {
				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.ex[i].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i].buflow),
				       le32_to_cpu(np->tx_ring.ex[i].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+1].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+2].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+3].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
			}
		}
	}

	spin_lock_irq(&np->lock);

	/* 1) stop tx engine */
	nv_stop_tx(dev);

	/* 2) complete any outstanding tx and do not give HW any limited tx pkts */
	saved_tx_limit = np->tx_limit;
	np->tx_limit = 0; /* prevent giving HW any limited pkts */
	np->tx_stop = 0;  /* prevent waking tx queue */
	if (!nv_optimized(np))
		nv_tx_done(dev, np->tx_ring_size);
	else
		nv_tx_done_optimized(dev, np->tx_ring_size);

	/* save current HW position */
	if (np->tx_change_owner)
		put_tx.ex = np->tx_change_owner->first_tx_desc;
	else
		put_tx = np->put_tx;

	/* 3) clear all tx state */
	nv_drain_tx(dev);
	nv_init_tx(dev);

	/* 4) restore state to current HW position */
	np->get_tx = np->put_tx = put_tx;
	np->tx_limit = saved_tx_limit;

	/* 5) restart tx engine */
	nv_start_tx(dev);
	netif_wake_queue(dev);
	spin_unlock_irq(&np->lock);
}
/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
	int hdrlen;	/* length of the 802 header */
	int protolen;	/* length as stored in the proto field */

	/* 1) calculate len according to header */
	if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
		protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
		hdrlen = VLAN_HLEN;
	} else {
		protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
		hdrlen = ETH_HLEN;
	}
	dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
				dev->name, datalen, protolen, hdrlen);
	if (protolen > ETH_DATA_LEN)
		return datalen; /* Value in proto field not a len, no checks possible */

	protolen += hdrlen;
	/* consistency checks: */
	if (datalen > ETH_ZLEN) {
		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim off
			 * additional data.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
					dev->name, protolen);
			return protolen;
		} else {
			/* less data on wire than mentioned in header.
			 * Discard the packet.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
					dev->name);
			return -1;
		}
	} else {
		/* short packet. Accept only if 802 values are also short */
		if (protolen > ETH_ZLEN) {
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
					dev->name);
			return -1;
		}
		dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
				dev->name, datalen);
		return datalen;
	}
}
static int nv_rx_process(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while((np->get_rx.orig != np->put_rx.orig) &&
	      !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
		(rx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
					dev->name, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				np->get_rx_ctx->dma_len,
				PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
			for (j=0; j<64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (np->desc_ver == DESC_VER_1) {
			if (likely(flags & NV_RX_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V1;
				if (unlikely(flags & NV_RX_ERROR)) {
					if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
						if (flags & NV_RX_SUBSTRACT1) {
							len--;
						}
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX_MISSEDFRAME)
							dev->stats.rx_missed_errors++;
						if (flags & NV_RX_CRCERR)
							dev->stats.rx_crc_errors++;
						if (flags & NV_RX_OVERFLOW)
							dev->stats.rx_over_errors++;
						dev->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		} else {
			if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V2;
				if (unlikely(flags & NV_RX2_ERROR)) {
					if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
						if (flags & NV_RX2_SUBSTRACT1) {
							len--;
						}
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX2_CRCERR)
							dev->stats.rx_crc_errors++;
						if (flags & NV_RX2_OVERFLOW)
							dev->stats.rx_over_errors++;
						dev->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
				    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
					skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		}
		/* got a valid packet - forward it to the network core */
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
					dev->name, len, skb->protocol);
#ifdef CONFIG_FORCEDETH_NAPI
		netif_receive_skb(skb);
#else
		netif_rx(skb);
#endif
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
next_pkt:
		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
			np->get_rx.orig = np->first_rx.orig;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}
	return rx_work;
}
static int nv_rx_process_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	u32 vlanflags = 0;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while((np->get_rx.ex != np->put_rx.ex) &&
	      !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
	      (rx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
					dev->name, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				np->get_rx_ctx->dma_len,
				PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
			for (j=0; j<64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
			len = flags & LEN_MASK_V2;
			if (unlikely(flags & NV_RX2_ERROR)) {
				if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
					len = nv_getlen(dev, skb->data, len);
					if (len < 0) {
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				/* framing errors are soft errors */
				else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
					if (flags & NV_RX2_SUBSTRACT1) {
						len--;
					}
				}
				/* the rest are hard errors */
				else {
					dev_kfree_skb(skb);
					goto next_pkt;
				}
			}

			if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
			    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			/* got a valid packet - forward it to the network core */
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, dev);
			prefetch(skb->data);

			dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
				dev->name, len, skb->protocol);

			if (likely(!np->vlangrp)) {
#ifdef CONFIG_FORCEDETH_NAPI
				netif_receive_skb(skb);
#else
				netif_rx(skb);
#endif
			} else {
				vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
				if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
#ifdef CONFIG_FORCEDETH_NAPI
					vlan_hwaccel_receive_skb(skb, np->vlangrp,
								 vlanflags & NV_RX3_VLAN_TAG_MASK);
#else
					vlan_hwaccel_rx(skb, np->vlangrp,
							vlanflags & NV_RX3_VLAN_TAG_MASK);
#endif
				} else {
#ifdef CONFIG_FORCEDETH_NAPI
					netif_receive_skb(skb);
#else
					netif_rx(skb);
#endif
				}
			}

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += len;
		} else {
			dev_kfree_skb(skb);
		}
next_pkt:
		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
			np->get_rx.ex = np->first_rx.ex;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}
	return rx_work;
}
static void set_bufsize(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
}
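/*
 * Example (illustrative): with dev->mtu = 1500 (= ETH_DATA_LEN) the rx
 * buffers stay at ETH_DATA_LEN + NV_RX_HEADERS; a jumbo mtu of 9000 grows
 * them to 9000 + NV_RX_HEADERS so one frame plus headers always fits in a
 * single rx buffer.
 */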
/*
 * nv_change_mtu: dev->change_mtu function
 * Called with dev_base_lock held for read.
 */
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
	struct fe_priv *np = netdev_priv(dev);
	int old_mtu;

	if (new_mtu < 64 || new_mtu > np->pkt_limit)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu == new_mtu)
		return 0;

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		u8 __iomem *base = get_hwbase(dev);
		/*
		 * It seems that the nic preloads valid ring entries into an
		 * internal buffer. The procedure for flushing everything is
		 * guessed, there is probably a simpler approach.
		 * Changing the MTU is a rare event, it shouldn't matter.
		 */
		nv_disable_irq(dev);
		nv_napi_disable(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		nv_txrx_reset(dev);
		/* drain rx queue */
		nv_drain_rxtx(dev);
		/* reinit driver view of the rx queue */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}
		/* reinit nic view of the rx queue */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart rx engine */
		nv_start_rxtx(dev);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
		nv_napi_enable(dev);
		nv_enable_irq(dev);
	}
	return 0;
}
static void nv_copy_mac_to_hw(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 mac[2];

	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);

	writel(mac[0], base + NvRegMacAddrA);
	writel(mac[1], base + NvRegMacAddrB);
}
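/*
 * Example of the little-endian packing above: for the address
 * 00:11:22:33:44:55, mac[0] = 0x33221100 and mac[1] = 0x00005544.
 */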
/*
 * nv_set_mac_address: dev->set_mac_address function
 * Called with rtnl_lock() held.
 */
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
	struct fe_priv *np = netdev_priv(dev);
	struct sockaddr *macaddr = (struct sockaddr *)addr;

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	/* synchronized against open : rtnl_lock() held by caller */
	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);

	if (netif_running(dev)) {
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock_irq(&np->lock);

		/* stop rx engine */
		nv_stop_rx(dev);

		/* set mac address */
		nv_copy_mac_to_hw(dev);

		/* restart rx engine */
		nv_start_rx(dev);
		spin_unlock_irq(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	} else {
		nv_copy_mac_to_hw(dev);
	}
	return 0;
}
/*
 * nv_set_multicast: dev->set_multicast function
 * Called with netif_tx_lock held.
 */
static void nv_set_multicast(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 addr[2];
	u32 mask[2];
	u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;

	memset(addr, 0, sizeof(addr));
	memset(mask, 0, sizeof(mask));

	if (dev->flags & IFF_PROMISC) {
		pff |= NVREG_PFF_PROMISC;
	} else {
		pff |= NVREG_PFF_MYADDR;

		if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
			u32 alwaysOff[2];
			u32 alwaysOn[2];

			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
			if (dev->flags & IFF_ALLMULTI) {
				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
			} else {
				struct dev_mc_list *walk;

				walk = dev->mc_list;
				while (walk != NULL) {
					u32 a, b;
					a = le32_to_cpu(*(__le32 *) walk->dmi_addr);
					b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4]));
					alwaysOn[0] &= a;
					alwaysOff[0] &= ~a;
					alwaysOn[1] &= b;
					alwaysOff[1] &= ~b;
					walk = walk->next;
				}
			}
			addr[0] = alwaysOn[0];
			addr[1] = alwaysOn[1];
			mask[0] = alwaysOn[0] | alwaysOff[0];
			mask[1] = alwaysOn[1] | alwaysOff[1];
		} else {
			mask[0] = NVREG_MCASTMASKA_NONE;
			mask[1] = NVREG_MCASTMASKB_NONE;
		}
	}
	addr[0] |= NVREG_MCASTADDRA_FORCE;
	pff |= NVREG_PFF_ALWAYS;
	spin_lock_irq(&np->lock);
	nv_stop_rx(dev);
	writel(addr[0], base + NvRegMulticastAddrA);
	writel(addr[1], base + NvRegMulticastAddrB);
	writel(mask[0], base + NvRegMulticastMaskA);
	writel(mask[1], base + NvRegMulticastMaskB);
	writel(pff, base + NvRegPacketFilterFlags);
	dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
		dev->name);
	nv_start_rx(dev);
	spin_unlock_irq(&np->lock);
}
static void nv_update_pause(struct net_device *dev, u32 pause_flags)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);

	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
		if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		} else {
			writel(pff, base + NvRegPacketFilterFlags);
		}
	}
	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
		if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
			u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
				/* limit the number of tx pause frames to a default of 8 */
				writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
			}
			writel(pause_enable, base + NvRegTxPauseFrame);
			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		} else {
			writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
			writel(regmisc, base + NvRegMisc1);
		}
	}
}
/**
 * nv_update_linkspeed: Setup the MAC according to the link partner
 * @dev: Network device to be configured
 *
 * The function queries the PHY and checks if there is a link partner.
 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
 * set to 10 MBit HD.
 *
 * The function returns 0 if there is no link partner and 1 if there is
 * a good link partner.
 */
static int nv_update_linkspeed(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int adv = 0;
	int lpa = 0;
	int adv_lpa, adv_pause, lpa_pause;
	int newls = np->linkspeed;
	int newdup = np->duplex;
	int mii_status;
	int retval = 0;
	u32 control_1000, status_1000, phyreg, pause_flags, txreg;
	u32 txrxFlags = 0;
	u32 phy_exp;

	/* BMSR_LSTATUS is latched, read it twice:
	 * we want the current value.
	 */
	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	if (!(mii_status & BMSR_LSTATUS)) {
		dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
				dev->name);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		goto set_speed;
	}

	if (np->autoneg == 0) {
		dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
				dev->name, np->fixed_mode);
		if (np->fixed_mode & LPA_100FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 1;
		} else if (np->fixed_mode & LPA_100HALF) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 0;
		} else if (np->fixed_mode & LPA_10FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 1;
		} else {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 0;
		}
		retval = 1;
		goto set_speed;
	}
	/* check auto negotiation is complete */
	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
		goto set_speed;
	}

	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
	dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
				dev->name, adv, lpa);

	retval = 1;
	if (np->gigabit == PHY_GIGABIT) {
		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);

		if ((control_1000 & ADVERTISE_1000FULL) &&
			(status_1000 & LPA_1000FULL)) {
			dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
				dev->name);
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
			newdup = 1;
			goto set_speed;
		}
	}

	/* FIXME: handle parallel detection properly */
	adv_lpa = lpa & adv;
	if (adv_lpa & LPA_100FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 1;
	} else if (adv_lpa & LPA_100HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 0;
	} else if (adv_lpa & LPA_10FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 1;
	} else if (adv_lpa & LPA_10HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	} else {
		dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	}

set_speed:
	if (np->duplex == newdup && np->linkspeed == newls)
		return retval;

	dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
			dev->name, np->linkspeed, np->duplex, newls, newdup);

	np->duplex = newdup;
	np->linkspeed = newls;

	/* The transmitter and receiver must be restarted for safe update */
	if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
		txrxFlags |= NV_RESTART_TX;
		nv_stop_tx(dev);
	}
	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
		txrxFlags |= NV_RESTART_RX;
		nv_stop_rx(dev);
	}

	if (np->gigabit == PHY_GIGABIT) {
		phyreg = readl(base + NvRegSlotTime);
		phyreg &= ~(0x3FF00);
		if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
		    ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
			phyreg |= NVREG_SLOTTIME_10_100_FULL;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
			phyreg |= NVREG_SLOTTIME_1000_FULL;
		writel(phyreg, base + NvRegSlotTime);
	}

	phyreg = readl(base + NvRegPhyInterface);
	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
	if (np->duplex == 0)
		phyreg |= PHY_HALF;
	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
		phyreg |= PHY_100;
	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
		phyreg |= PHY_1000;
	writel(phyreg, base + NvRegPhyInterface);

	phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
	if (phyreg & PHY_RGMII) {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
		} else {
			if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
				if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
				else
					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
			} else {
				txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
			}
		}
	} else {
		if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
			txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
		else
			txreg = NVREG_TX_DEFERRAL_DEFAULT;
	}
	writel(txreg, base + NvRegTxDeferral);

	if (np->desc_ver == DESC_VER_1) {
		txreg = NVREG_TX_WM_DESC1_DEFAULT;
	} else {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
			txreg = NVREG_TX_WM_DESC2_3_1000;
		else
			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
	}
	writel(txreg, base + NvRegTxWatermark);

	writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
		base + NvRegMisc1);
	pci_push(base);
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);

	pause_flags = 0;
	/* setup pause frame */
	if (np->duplex != 0) {
		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
			adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
			lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			switch (adv_pause) {
			case ADVERTISE_PAUSE_CAP:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				break;
			case ADVERTISE_PAUSE_ASYM:
				if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				break;
			case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				if (lpa_pause == LPA_PAUSE_ASYM)
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
				break;
			}
		} else {
			pause_flags = np->pause_flags;
		}
	}
	nv_update_pause(dev, pause_flags);

	if (txrxFlags & NV_RESTART_TX)
		nv_start_tx(dev);
	if (txrxFlags & NV_RESTART_RX)
		nv_start_rx(dev);

	return retval;
}
static void nv_linkchange(struct net_device *dev)
{
	if (nv_update_linkspeed(dev)) {
		if (!netif_carrier_ok(dev)) {
			netif_carrier_on(dev);
			printk(KERN_INFO "%s: link up.\n", dev->name);
			nv_start_rx(dev);
		}
	} else {
		if (netif_carrier_ok(dev)) {
			netif_carrier_off(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
			nv_stop_rx(dev);
		}
	}
}
static void nv_link_irq(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miistat;

	miistat = readl(base + NvRegMIIStatus);
	writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
	dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);

	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
		nv_linkchange(dev);
	dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
}
static void nv_msi_workaround(struct fe_priv *np)
{

	/* Need to toggle the msi irq mask within the ethernet device,
	 * otherwise, future interrupts will not be detected.
	 */
	if (np->msi_flags & NV_MSI_ENABLED) {
		u8 __iomem *base = np->base;

		writel(0, base + NvRegMSIIrqMask);
		writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
	}
}
static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work)
{
	struct fe_priv *np = netdev_priv(dev);

	if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) {
		if (total_work > NV_DYNAMIC_THRESHOLD) {
			/* transition to poll based interrupts */
			np->quiet_count = 0;
			if (np->irqmask != NVREG_IRQMASK_CPU) {
				np->irqmask = NVREG_IRQMASK_CPU;
				return 1;
			}
		} else {
			if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) {
				np->quiet_count++;
			} else {
				/* reached a period of low activity, switch
				   to per tx/rx packet interrupts */
				if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) {
					np->irqmask = NVREG_IRQMASK_THROUGHPUT;
					return 1;
				}
			}
		}
	}
	return 0;
}
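/*
 * Behaviour sketch of the dynamic mode (restating the code above): a
 * burst above NV_DYNAMIC_THRESHOLD work items immediately selects the
 * moderated NVREG_IRQMASK_CPU mask and resets quiet_count; only after
 * NV_DYNAMIC_MAX_QUIET_COUNT consecutive quiet interrupts does the mask
 * fall back to NVREG_IRQMASK_THROUGHPUT. A return value of 1 tells the
 * caller to rewrite NvRegIrqMask.
 */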
static irqreturn_t nv_nic_irq(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
#ifndef CONFIG_FORCEDETH_NAPI
	int total_work = 0;
	int loop_count = 0;
#endif

	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		np->events = readl(base + NvRegIrqStatus);
		writel(np->events, base + NvRegIrqStatus);
	} else {
		np->events = readl(base + NvRegMSIXIrqStatus);
		writel(np->events, base + NvRegMSIXIrqStatus);
	}
	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
	if (!(np->events & np->irqmask))
		return IRQ_NONE;

	nv_msi_workaround(np);

#ifdef CONFIG_FORCEDETH_NAPI
	napi_schedule(&np->napi);

	/* Disable further irq's
	   (msix not enabled with napi) */
	writel(0, base + NvRegIrqMask);

#else
	do
	{
		int work = 0;
		if ((work = nv_rx_process(dev, RX_WORK_PER_LOOP))) {
			if (unlikely(nv_alloc_rx(dev))) {
				spin_lock(&np->lock);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock(&np->lock);
			}
		}

		spin_lock(&np->lock);
		work += nv_tx_done(dev, TX_WORK_PER_LOOP);
		spin_unlock(&np->lock);

		if (!work)
			break;

		total_work += work;

		loop_count++;
	}
	while (loop_count < max_interrupt_work);

	if (nv_change_interrupt_mode(dev, total_work)) {
		/* setup new irq mask */
		writel(np->irqmask, base + NvRegIrqMask);
	}

	if (unlikely(np->events & NVREG_IRQ_LINK)) {
		spin_lock(&np->lock);
		nv_link_irq(dev);
		spin_unlock(&np->lock);
	}
	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
		spin_lock(&np->lock);
		nv_linkchange(dev);
		spin_unlock(&np->lock);
		np->link_timeout = jiffies + LINK_TIMEOUT;
	}
	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
		spin_lock(&np->lock);
		/* disable interrupts on the nic */
		if (!(np->msi_flags & NV_MSI_X_ENABLED))
			writel(0, base + NvRegIrqMask);
		else
			writel(np->irqmask, base + NvRegIrqMask);
		pci_push(base);

		if (!np->in_shutdown) {
			np->nic_poll_irq = np->irqmask;
			np->recover_error = 1;
			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
		}
		spin_unlock(&np->lock);
	}
#endif
	dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);

	return IRQ_HANDLED;
}
/**
 * All _optimized functions are used to help increase performance
 * (reduce CPU and increase throughput). They use descriptor version 3,
 * compiler directives, and reduce memory accesses.
 */
static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
#ifndef CONFIG_FORCEDETH_NAPI
	int total_work = 0;
	int loop_count = 0;
#endif

	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		np->events = readl(base + NvRegIrqStatus);
		writel(np->events, base + NvRegIrqStatus);
	} else {
		np->events = readl(base + NvRegMSIXIrqStatus);
		writel(np->events, base + NvRegMSIXIrqStatus);
	}
	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
	if (!(np->events & np->irqmask))
		return IRQ_NONE;

	nv_msi_workaround(np);

#ifdef CONFIG_FORCEDETH_NAPI
	napi_schedule(&np->napi);

	/* Disable further irq's
	   (msix not enabled with napi) */
	writel(0, base + NvRegIrqMask);

#else
	do
	{
		int work = 0;
		if ((work = nv_rx_process_optimized(dev, RX_WORK_PER_LOOP))) {
			if (unlikely(nv_alloc_rx_optimized(dev))) {
				spin_lock(&np->lock);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock(&np->lock);
			}
		}

		spin_lock(&np->lock);
		work += nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock(&np->lock);

		if (!work)
			break;

		total_work += work;

		loop_count++;
	}
	while (loop_count < max_interrupt_work);

	if (nv_change_interrupt_mode(dev, total_work)) {
		/* setup new irq mask */
		writel(np->irqmask, base + NvRegIrqMask);
	}

	if (unlikely(np->events & NVREG_IRQ_LINK)) {
		spin_lock(&np->lock);
		nv_link_irq(dev);
		spin_unlock(&np->lock);
	}
	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
		spin_lock(&np->lock);
		nv_linkchange(dev);
		spin_unlock(&np->lock);
		np->link_timeout = jiffies + LINK_TIMEOUT;
	}
	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
		spin_lock(&np->lock);
		/* disable interrupts on the nic */
		if (!(np->msi_flags & NV_MSI_X_ENABLED))
			writel(0, base + NvRegIrqMask);
		else
			writel(np->irqmask, base + NvRegIrqMask);
		pci_push(base);

		if (!np->in_shutdown) {
			np->nic_poll_irq = np->irqmask;
			np->recover_error = 1;
			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
		}
		spin_unlock(&np->lock);
	}

#endif
	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);

	return IRQ_HANDLED;
}
static irqreturn_t nv_nic_irq_tx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);

	for (i=0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock_irqrestore(&np->lock, flags);

		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
			break;
		}

	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);

	return IRQ_RETVAL(i);
}
#ifdef CONFIG_FORCEDETH_NAPI
static int nv_napi_poll(struct napi_struct *napi, int budget)
{
	struct fe_priv *np = container_of(napi, struct fe_priv, napi);
	struct net_device *dev = np->dev;
	u8 __iomem *base = get_hwbase(dev);
	unsigned long flags;
	int retcode;
	int tx_work, rx_work;

	if (!nv_optimized(np)) {
		spin_lock_irqsave(&np->lock, flags);
		tx_work = nv_tx_done(dev, np->tx_ring_size);
		spin_unlock_irqrestore(&np->lock, flags);

		rx_work = nv_rx_process(dev, budget);
		retcode = nv_alloc_rx(dev);
	} else {
		spin_lock_irqsave(&np->lock, flags);
		tx_work = nv_tx_done_optimized(dev, np->tx_ring_size);
		spin_unlock_irqrestore(&np->lock, flags);

		rx_work = nv_rx_process_optimized(dev, budget);
		retcode = nv_alloc_rx_optimized(dev);
	}

	if (retcode) {
		spin_lock_irqsave(&np->lock, flags);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irqrestore(&np->lock, flags);
	}

	nv_change_interrupt_mode(dev, tx_work + rx_work);

	if (unlikely(np->events & NVREG_IRQ_LINK)) {
		spin_lock_irqsave(&np->lock, flags);
		nv_link_irq(dev);
		spin_unlock_irqrestore(&np->lock, flags);
	}
	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
		spin_lock_irqsave(&np->lock, flags);
		nv_linkchange(dev);
		spin_unlock_irqrestore(&np->lock, flags);
		np->link_timeout = jiffies + LINK_TIMEOUT;
	}
	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
		spin_lock_irqsave(&np->lock, flags);
		if (!np->in_shutdown) {
			np->nic_poll_irq = np->irqmask;
			np->recover_error = 1;
			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
		}
		spin_unlock_irqrestore(&np->lock, flags);
		napi_complete(napi);
		return rx_work;
	}

	if (rx_work < budget) {
		/* re-enable interrupts
		   (msix not enabled in napi) */
		napi_complete(napi);

		writel(np->irqmask, base + NvRegIrqMask);
	}
	return rx_work;
}
#endif
static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);

	for (i=0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
			if (unlikely(nv_alloc_rx_optimized(dev))) {
				spin_lock_irqsave(&np->lock, flags);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock_irqrestore(&np->lock, flags);
			}
		}

		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);

	return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_other(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);

	for (i=0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		/* check tx in case we reached max loop limit in tx isr */
		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock_irqrestore(&np->lock, flags);

		if (events & NVREG_IRQ_LINK) {
			spin_lock_irqsave(&np->lock, flags);
			nv_link_irq(dev);
			spin_unlock_irqrestore(&np->lock, flags);
		}
		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
			spin_lock_irqsave(&np->lock, flags);
			nv_linkchange(dev);
			spin_unlock_irqrestore(&np->lock, flags);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (events & NVREG_IRQ_RECOVER_ERROR) {
			spin_lock_irq(&np->lock);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irq(&np->lock);
			break;
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
			break;
		}

	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);

	return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_test(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
	} else {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
	}
	pci_push(base);
	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
	if (!(events & NVREG_IRQ_TIMER))
		return IRQ_RETVAL(0);

	nv_msi_workaround(np);

	spin_lock(&np->lock);
	np->intr_test = 1;
	spin_unlock(&np->lock);

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);

	return IRQ_RETVAL(1);
}
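/*
 * set_msix_vector_map() below packs a 4-bit MSI-X vector id into one
 * nibble per interrupt bit. Worked example (illustrative values): with
 * vector = 2 and an irqmask whose bits 0 and 2 are set, the first loop
 * builds msixmap = (2 << 0) | (2 << 8) = 0x202, i.e. vector 2 lands in
 * nibbles 0 and 2 of NvRegMSIXMap0. Interrupt bits 8..15 of the irqmask
 * land in the corresponding nibbles of NvRegMSIXMap1.
 */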
static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 msixmap = 0;

	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
	 * the remaining 8 interrupts.
	 */
	for (i = 0; i < 8; i++) {
		if ((irqmask >> i) & 0x1) {
			msixmap |= vector << (i << 2);
		}
	}
	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);

	msixmap = 0;
	for (i = 0; i < 8; i++) {
		if ((irqmask >> (i + 8)) & 0x1) {
			msixmap |= vector << (i << 2);
		}
	}
	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
}
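/*
 * nv_request_irq() below walks a fallback chain: if the device is MSI-X
 * capable it first tries per-cause vectors (rx/tx/other) in throughput
 * mode, otherwise a single MSI-X vector; if MSI-X setup fails it falls
 * back to MSI, and finally to the legacy INTx line. Each failure path
 * disables the mode it could not complete before trying the next one.
 */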
static int nv_request_irq(struct net_device *dev, int intr_test)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int i;
	irqreturn_t (*handler)(int foo, void *data);

	if (intr_test) {
		handler = nv_nic_irq_test;
	} else {
		if (nv_optimized(np))
			handler = nv_nic_irq_optimized;
		else
			handler = nv_nic_irq;
	}

	if (np->msi_flags & NV_MSI_X_CAPABLE) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			np->msi_x_entry[i].entry = i;
		}
		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
			np->msi_flags |= NV_MSI_X_ENABLED;
			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
				/* Request irq for rx handling */
				sprintf(np->name_rx, "%s-rx", dev->name);
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
						&nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}
				/* Request irq for tx handling */
				sprintf(np->name_tx, "%s-tx", dev->name);
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
						&nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_rx;
				}
				/* Request irq for link and timer handling */
				sprintf(np->name_other, "%s-other", dev->name);
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
						&nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_tx;
				}
				/* map interrupts to their respective vector */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
			} else {
				/* Request irq for all interrupts */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}

				/* map interrupts to vector 0 */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
			}
		}
	}
	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
			np->msi_flags |= NV_MSI_ENABLED;
			dev->irq = np->pci_dev->irq;
			if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
				pci_disable_msi(np->pci_dev);
				np->msi_flags &= ~NV_MSI_ENABLED;
				dev->irq = np->pci_dev->irq;
				goto out_err;
			}

			/* map interrupts to vector 0 */
			writel(0, base + NvRegMSIMap0);
			writel(0, base + NvRegMSIMap1);
			/* enable msi vector 0 */
			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
		}
	}
	if (ret != 0) {
		if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
			goto out_err;
	}

	return 0;
out_free_tx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
out_free_rx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
out_err:
	return 1;
}
static void nv_free_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int i;

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			free_irq(np->msi_x_entry[i].vector, dev);
		}
		pci_disable_msix(np->pci_dev);
		np->msi_flags &= ~NV_MSI_X_ENABLED;
	} else {
		free_irq(np->pci_dev->irq, dev);
		if (np->msi_flags & NV_MSI_ENABLED) {
			pci_disable_msi(np->pci_dev);
			np->msi_flags &= ~NV_MSI_ENABLED;
		}
	}
}
static void nv_do_nic_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 mask = 0;

	/*
	 * First disable irq(s) and then
	 * reenable interrupts on the nic, we have to do this before calling
	 * nv_nic_irq because that may decide to do otherwise
	 */

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq_lockdep(np->pci_dev->irq);
		mask = np->irqmask;
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
			mask |= NVREG_IRQ_RX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
			mask |= NVREG_IRQ_TX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
			mask |= NVREG_IRQ_OTHER;
		}
	}
	/* disable_irq() contains synchronize_irq, thus no irq handler can run now */

	if (np->recover_error) {
		np->recover_error = 0;
		printk(KERN_INFO "%s: MAC in recoverable error state\n", dev->name);
		if (netif_running(dev)) {
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rxtx(dev);
			if (np->driver_data & DEV_HAS_POWER_CNTRL)
				nv_mac_reset(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rxtx(dev);
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
				base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);
			/* clear interrupts */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
			else
				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);

			/* restart rx engine */
			nv_start_rxtx(dev);
			spin_unlock(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
		}
	}

	writel(mask, base + NvRegIrqMask);
	pci_push(base);

	if (!using_multi_irqs(dev)) {
		np->nic_poll_irq = 0;
		if (nv_optimized(np))
			nv_nic_irq_optimized(0, dev);
		else
			nv_nic_irq(0, dev);
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq_lockdep(np->pci_dev->irq);
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
			nv_nic_irq_rx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
			nv_nic_irq_tx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
			nv_nic_irq_other(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
		}
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{
	nv_do_nic_poll((unsigned long) dev);
}
#endif
static void nv_do_stats_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	nv_get_hw_stats(dev);

	if (!np->in_shutdown)
		mod_timer(&np->stats_poll,
			round_jiffies(jiffies + STATS_INTERVAL));
}
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct fe_priv *np = netdev_priv(dev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, FORCEDETH_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}
static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	wolinfo->supported = WAKE_MAGIC;

	spin_lock_irq(&np->lock);
	if (np->wolenabled)
		wolinfo->wolopts = WAKE_MAGIC;
	spin_unlock_irq(&np->lock);
}
static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 flags = 0;

	if (wolinfo->wolopts == 0) {
		np->wolenabled = 0;
	} else if (wolinfo->wolopts & WAKE_MAGIC) {
		np->wolenabled = 1;
		flags = NVREG_WAKEUPFLAGS_ENABLE;
	}
	if (netif_running(dev)) {
		spin_lock_irq(&np->lock);
		writel(flags, base + NvRegWakeUpFlags);
		spin_unlock_irq(&np->lock);
	}
	return 0;
}
static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv;

	spin_lock_irq(&np->lock);
	ecmd->port = PORT_MII;
	if (!netif_running(dev)) {
		/* We do not track link speed / duplex setting if the
		 * interface is disabled. Force a link check */
		if (nv_update_linkspeed(dev)) {
			if (!netif_carrier_ok(dev))
				netif_carrier_on(dev);
		} else {
			if (netif_carrier_ok(dev))
				netif_carrier_off(dev);
		}
	}

	if (netif_carrier_ok(dev)) {
		switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
		case NVREG_LINKSPEED_10:
			ecmd->speed = SPEED_10;
			break;
		case NVREG_LINKSPEED_100:
			ecmd->speed = SPEED_100;
			break;
		case NVREG_LINKSPEED_1000:
			ecmd->speed = SPEED_1000;
			break;
		}
		ecmd->duplex = DUPLEX_HALF;
		if (np->duplex)
			ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = np->autoneg;

	ecmd->advertising = ADVERTISED_MII;
	if (np->autoneg) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		if (adv & ADVERTISE_10HALF)
			ecmd->advertising |= ADVERTISED_10baseT_Half;
		if (adv & ADVERTISE_10FULL)
			ecmd->advertising |= ADVERTISED_10baseT_Full;
		if (adv & ADVERTISE_100HALF)
			ecmd->advertising |= ADVERTISED_100baseT_Half;
		if (adv & ADVERTISE_100FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			if (adv & ADVERTISE_1000FULL)
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		}
	}
	ecmd->supported = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_MII);
	if (np->gigabit == PHY_GIGABIT)
		ecmd->supported |= SUPPORTED_1000baseT_Full;

	ecmd->phy_address = np->phyaddr;
	ecmd->transceiver = XCVR_EXTERNAL;

	/* ignore maxtxpkt, maxrxpkt for now */
	spin_unlock_irq(&np->lock);
	return 0;
}
static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);

	if (ecmd->port != PORT_MII)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_EXTERNAL)
		return -EINVAL;
	if (ecmd->phy_address != np->phyaddr) {
		/* TODO: support switching between multiple phys. Should be
		 * trivial, but not enabled due to lack of test hardware. */
		return -EINVAL;
	}
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		u32 mask;

		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT)
			mask |= ADVERTISED_1000baseT_Full;

		if ((ecmd->advertising & mask) == 0)
			return -EINVAL;

	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		/* Note: autonegotiation disable, speed 1000 intentionally
		 * forbidden - noone should need that. */

		if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
			return -EINVAL;
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		unsigned long flags;

		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		/* with plain spinlock lockdep complains */
		spin_lock_irqsave(&np->lock, flags);
		/* stop engines */
		/* FIXME:
		 * this can take some time, and interrupts are disabled
		 * due to spin_lock_irqsave, but let's hope no daemon
		 * is going to change the settings very often...
		 * Worst case:
		 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
		 * + some minor delays, which is up to a second approximately
		 */
		nv_stop_rxtx(dev);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	}

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		int adv, bmcr;

		np->autoneg = 1;

		/* advertise only what has been requested */
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->advertising & ADVERTISED_10baseT_Half)
			adv |= ADVERTISE_10HALF;
		if (ecmd->advertising & ADVERTISED_10baseT_Full)
			adv |= ADVERTISE_10FULL;
		if (ecmd->advertising & ADVERTISED_100baseT_Half)
			adv |= ADVERTISE_100HALF;
		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			adv |= ADVERTISE_100FULL;
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |= ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
				adv |= ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick,
			 * and cause autoneg to start */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}
	} else {
		int adv, bmcr;

		np->autoneg = 0;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_10HALF;
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_10FULL;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_100HALF;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_100FULL;
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisments but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		}
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
			adv |= ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		}
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
		np->fixed_mode = adv;

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
			bmcr |= BMCR_FULLDPLX;
		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
			bmcr |= BMCR_SPEED100;
		if (np->phy_oui == PHY_OUI_MARVELL) {
			/* reset the phy in order for forced mode settings to stick */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
			if (netif_running(dev)) {
				/* Wait a bit and then reconfigure the nic. */
				udelay(10);
				nv_linkchange(dev);
			}
		}
	}

	if (netif_running(dev)) {
		nv_start_rxtx(dev);
		nv_enable_irq(dev);
	}

	return 0;
}
#define FORCEDETH_REGS_VER	1

static int nv_get_regs_len(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return np->register_size;
}

static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 *rbuf = buf;
	int i;

	regs->version = FORCEDETH_REGS_VER;
	spin_lock_irq(&np->lock);
	for (i = 0; i <= np->register_size/sizeof(u32); i++)
		rbuf[i] = readl(base + i*sizeof(u32));
	spin_unlock_irq(&np->lock);
}
static int nv_nway_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int ret;

	if (np->autoneg) {
		int bmcr;

		netif_carrier_off(dev);
		if (netif_running(dev)) {
			nv_disable_irq(dev);
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rxtx(dev);
			spin_unlock(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick*/
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}

		if (netif_running(dev)) {
			nv_start_rxtx(dev);
			nv_enable_irq(dev);
		}
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	return ret;
}
static int nv_set_tso(struct net_device *dev, u32 value)
{
	struct fe_priv *np = netdev_priv(dev);

	if ((np->driver_data & DEV_HAS_CHECKSUM))
		return ethtool_op_set_tso(dev, value);
	else
		return -EOPNOTSUPP;
}
static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
	struct fe_priv *np = netdev_priv(dev);

	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;

	ring->rx_pending = np->rx_ring_size;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
	ring->tx_pending = np->tx_ring_size;
}
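/*
 * nv_set_ringparam() below allocates the new descriptor rings and skb
 * maps before stopping the device: if any allocation fails, the old
 * rings are left untouched and the device keeps running. Only once all
 * allocations succeed are the engines stopped, the queues drained, and
 * the new rings installed and handed to the hardware.
 */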
static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
	dma_addr_t ring_addr;

	if (ring->rx_pending < RX_RING_MIN ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_mini_pending != 0 ||
	    ring->rx_jumbo_pending != 0 ||
	    (np->desc_ver == DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
	    (np->desc_ver != DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
		return -EINVAL;
	}

	/* allocate new rings */
	if (!nv_optimized(np)) {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
					sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
					&ring_addr);
	} else {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
					sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
					&ring_addr);
	}
	rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
	tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
		/* fall back to old rings */
		if (!nv_optimized(np)) {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		} else {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		}
		if (rx_skbuff)
			kfree(rx_skbuff);
		if (tx_skbuff)
			kfree(tx_skbuff);
		goto exit;
	}

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		nv_napi_disable(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		nv_txrx_reset(dev);
		/* drain queues */
		nv_drain_rxtx(dev);
		/* delete queues */
		free_rings(dev);
	}

	/* set new values */
	np->rx_ring_size = ring->rx_pending;
	np->tx_ring_size = ring->tx_pending;

	if (!nv_optimized(np)) {
		np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = (struct nv_skb_map*)rx_skbuff;
	np->tx_skb = (struct nv_skb_map*)tx_skbuff;
	np->ring_addr = ring_addr;

	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
	memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);

	if (netif_running(dev)) {
		/* reinit driver view of the queues */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}

		/* reinit nic view of the queues */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart engines */
		nv_start_rxtx(dev);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
		nv_napi_enable(dev);
		nv_enable_irq(dev);
	}
	return 0;
exit:
	return -ENOMEM;
}
static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
{
	struct fe_priv *np = netdev_priv(dev);

	pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
	pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
	pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
}
static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv, bmcr;

	if ((!np->autoneg && np->duplex == 0) ||
	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
		printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n",
		       dev->name);
		return -EINVAL;
	}
	if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
		printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	}

	np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
	if (pause->rx_pause)
		np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
	if (pause->tx_pause)
		np->pause_flags |= NV_PAUSEFRAME_TX_REQ;

	if (np->autoneg && pause->autoneg) {
		np->pause_flags |= NV_PAUSEFRAME_AUTONEG;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |= ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
	} else {
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (pause->rx_pause)
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		if (pause->tx_pause)
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;

		if (!netif_running(dev))
			nv_update_linkspeed(dev);
		else
			nv_update_pause(dev, np->pause_flags);
	}

	if (netif_running(dev)) {
		nv_start_rxtx(dev);
		nv_enable_irq(dev);
	}
	return 0;
}
static u32 nv_get_rx_csum(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return (np->rx_csum) != 0;
}
static int nv_set_rx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int retcode = 0;

	if (np->driver_data & DEV_HAS_CHECKSUM) {
		if (data) {
			np->rx_csum = 1;
			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		} else {
			np->rx_csum = 0;
			/* vlan is dependent on rx checksum offload */
			if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
				np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
		}
		if (netif_running(dev)) {
			spin_lock_irq(&np->lock);
			writel(np->txrxctl_bits, base + NvRegTxRxControl);
			spin_unlock_irq(&np->lock);
		}
	} else {
		return -EINVAL;
	}

	return retcode;
}
static int nv_set_tx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_tx_csum(dev, data);
	else
		return -EOPNOTSUPP;
}
static int nv_set_sg(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_sg(dev, data);
	else
		return -EOPNOTSUPP;
}
static int nv_get_sset_count(struct net_device *dev, int sset)
{
	struct fe_priv *np = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_TEST:
		if (np->driver_data & DEV_HAS_TEST_EXTENDED)
			return NV_TEST_COUNT_EXTENDED;
		else
			return NV_TEST_COUNT_BASE;
	case ETH_SS_STATS:
		if (np->driver_data & DEV_HAS_STATISTICS_V3)
			return NV_DEV_STATISTICS_V3_COUNT;
		else if (np->driver_data & DEV_HAS_STATISTICS_V2)
			return NV_DEV_STATISTICS_V2_COUNT;
		else if (np->driver_data & DEV_HAS_STATISTICS_V1)
			return NV_DEV_STATISTICS_V1_COUNT;
		else
			return 0;
	default:
		return -EOPNOTSUPP;
	}
}
static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);

	/* update stats */
	nv_do_stats_poll((unsigned long)dev);

	memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
}
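/*
 * nv_link_test() below reads MII_BMSR twice: the link status bit is
 * latched-low per the MII specification, so the first read returns (and
 * clears) any stale link-down indication and the second read reflects
 * the current link state.
 */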
static int nv_link_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int mii_status;

	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	/* check phy link status */
	if (!(mii_status & BMSR_LSTATUS))
		return 0;
	else
		return 1;
}
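/*
 * nv_register_test() below exercises each register in nv_registers_test[]
 * by XOR-toggling the maskable bits, writing the value back, and reading
 * it again: if the toggled bits do not read back, the register (and thus
 * the register window) is considered broken. A second XOR restores the
 * original value before moving on.
 */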
static int nv_register_test(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	int i = 0;
	u32 orig_read, new_read;

	do {
		orig_read = readl(base + nv_registers_test[i].reg);

		/* xor with mask to toggle bits */
		orig_read ^= nv_registers_test[i].mask;

		writel(orig_read, base + nv_registers_test[i].reg);

		new_read = readl(base + nv_registers_test[i].reg);

		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
			return 0;

		/* restore original value */
		orig_read ^= nv_registers_test[i].mask;
		writel(orig_read, base + nv_registers_test[i].reg);

	} while (nv_registers_test[++i].reg != 0);

	return 1;
}
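/*
 * nv_interrupt_test() below frees the runtime irq setup, reprograms the
 * chip for a single test vector with nv_nic_irq_test() as handler, and
 * arms the timer interrupt. The handler sets np->intr_test when it sees
 * NVREG_IRQ_TIMER; after a short wait that flag decides whether
 * interrupt delivery works, and the original irq setup is restored.
 */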
static int nv_interrupt_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int testcnt;
	u32 save_msi_flags, save_poll_interval = 0;

	if (netif_running(dev)) {
		/* free current irq */
		nv_free_irq(dev);
		save_poll_interval = readl(base+NvRegPollingInterval);
	}

	/* flag to test interrupt handler */
	np->intr_test = 0;

	/* setup test irq */
	save_msi_flags = np->msi_flags;
	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
	np->msi_flags |= 0x001; /* setup 1 vector */
	if (nv_request_irq(dev, 1))
		return 0;

	/* setup timer interrupt */
	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);

	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);

	/* wait for at least one interrupt */
	msleep(100);

	spin_lock_irq(&np->lock);

	/* flag should be set within ISR */
	testcnt = np->intr_test;
	if (!testcnt)
		ret = 2;

	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
	if (!(np->msi_flags & NV_MSI_X_ENABLED))
		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	else
		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	np->msi_flags = save_msi_flags;

	if (netif_running(dev)) {
		writel(save_poll_interval, base + NvRegPollingInterval);
		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
		/* restore original irq */
		if (nv_request_irq(dev, 0))
			return 0;
	}

	return ret;
}
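/*
 * nv_loopback_test() below puts the MAC into loopback via
 * NVREG_PFF_LOOPBACK, transmits a single hand-built descriptor carrying
 * a 0x00, 0x01, ... byte pattern, and then verifies that the packet came
 * back on rx ring slot 0 with the expected length and payload.
 */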
static int nv_loopback_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	struct sk_buff *tx_skb, *rx_skb;
	dma_addr_t test_dma_addr;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	u32 flags;
	int len, i, pkt_len;
	u8 *pkt_data;
	u32 filter_flags = 0;
	u32 misc1_flags = 0;
	int ret = 1;

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		filter_flags = readl(base + NvRegPacketFilterFlags);
		misc1_flags = readl(base + NvRegMisc1);
	} else {
		nv_txrx_reset(dev);
	}

	/* reinit driver view of the rx queue */
	set_bufsize(dev);
	nv_init_ring(dev);

	/* setup hardware for loopback */
	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);

	/* reinit nic view of the rx queue */
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		base + NvRegRingSizes);
	pci_push(base);

	/* restart rx engine */
	nv_start_rxtx(dev);

	/* setup packet for tx */
	pkt_len = ETH_DATA_LEN;
	tx_skb = dev_alloc_skb(pkt_len);
	if (!tx_skb) {
		printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
			 " of %s\n", dev->name);
		ret = 0;
		goto out;
	}
	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
				       skb_tailroom(tx_skb),
				       PCI_DMA_FROMDEVICE);
	pkt_data = skb_put(tx_skb, pkt_len);
	for (i = 0; i < pkt_len; i++)
		pkt_data[i] = (u8)(i & 0xff);

	if (!nv_optimized(np)) {
		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	} else {
		np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
		np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	}
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));

	msleep(500);

	/* check for rx of the packet */
	if (!nv_optimized(np)) {
		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);

	} else {
		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
	}

	if (flags & NV_RX_AVAIL) {
		ret = 0;
	} else if (np->desc_ver == DESC_VER_1) {
		if (flags & NV_RX_ERROR)
			ret = 0;
	} else {
		if (flags & NV_RX2_ERROR) {
			ret = 0;
		}
	}

	if (ret) {
		if (len != pkt_len) {
			ret = 0;
			dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
				dev->name, len, pkt_len);
		} else {
			rx_skb = np->rx_skb[0].skb;
			for (i = 0; i < pkt_len; i++) {
				if (rx_skb->data[i] != (u8)(i & 0xff)) {
					ret = 0;
					dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
						dev->name, i);
					break;
				}
			}
		}
	} else {
		dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
	}

	pci_unmap_page(np->pci_dev, test_dma_addr,
		       (skb_end_pointer(tx_skb) - tx_skb->data),
		       PCI_DMA_TODEVICE);
	dev_kfree_skb_any(tx_skb);
 out:
	/* stop engines */
	nv_stop_rxtx(dev);
	nv_txrx_reset(dev);
	/* drain rx queue */
	nv_drain_rxtx(dev);

	if (netif_running(dev)) {
		writel(misc1_flags, base + NvRegMisc1);
		writel(filter_flags, base + NvRegPacketFilterFlags);
		nv_enable_irq(dev);
	}

	return ret;
}
static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int result;
	memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));

	if (!nv_link_test(dev)) {
		test->flags |= ETH_TEST_FL_FAILED;
		buffer[0] = 1;
	}

	if (test->flags & ETH_TEST_FL_OFFLINE) {
		if (netif_running(dev)) {
			netif_stop_queue(dev);
			nv_napi_disable(dev);
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock_irq(&np->lock);
			nv_disable_hw_interrupts(dev, np->irqmask);
			if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
			} else {
				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
			}
			/* stop engines */
			nv_stop_rxtx(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rxtx(dev);
			spin_unlock_irq(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
		}

		if (!nv_register_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[1] = 1;
		}

		result = nv_interrupt_test(dev);
		if (result != 1) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[2] = 1;
		}
		if (result == 0) {
			/* bail out */
			return;
		}

		if (!nv_loopback_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[3] = 1;
		}

		if (netif_running(dev)) {
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
				base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);
			/* restart rx engine */
			nv_start_rxtx(dev);
			netif_start_queue(dev);
			nv_napi_enable(dev);
			nv_enable_hw_interrupts(dev, np->irqmask);
		}
	}
}
static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
		break;
	case ETH_SS_TEST:
		memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
		break;
	}
}
static const struct ethtool_ops ops = {
	.get_drvinfo = nv_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_wol = nv_get_wol,
	.set_wol = nv_set_wol,
	.get_settings = nv_get_settings,
	.set_settings = nv_set_settings,
	.get_regs_len = nv_get_regs_len,
	.get_regs = nv_get_regs,
	.nway_reset = nv_nway_reset,
	.set_tso = nv_set_tso,
	.get_ringparam = nv_get_ringparam,
	.set_ringparam = nv_set_ringparam,
	.get_pauseparam = nv_get_pauseparam,
	.set_pauseparam = nv_set_pauseparam,
	.get_rx_csum = nv_get_rx_csum,
	.set_rx_csum = nv_set_rx_csum,
	.set_tx_csum = nv_set_tx_csum,
	.set_sg = nv_set_sg,
	.get_strings = nv_get_strings,
	.get_ethtool_stats = nv_get_ethtool_stats,
	.get_sset_count = nv_get_sset_count,
	.self_test = nv_self_test,
};
static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct fe_priv *np = get_nvpriv(dev);

	spin_lock_irq(&np->lock);

	/* save vlan group */
	np->vlangrp = grp;

	if (grp) {
		/* enable vlan on MAC */
		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
	} else {
		/* disable vlan on MAC */
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
	}

	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);

	spin_unlock_irq(&np->lock);
}
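/*
 * Semaphore protocol used below: the driver polls up to ten times (with
 * 500 ms sleeps) for the management unit's semaphore bits to read as
 * free, then makes two attempts to set the host semaphore bit,
 * re-reading the register to verify that the host semaphore was
 * acquired while the mgmt semaphore is still free.
 */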
/* The mgmt unit and driver use a semaphore to access the phy during init */
static int nv_mgmt_acquire_sema(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 tx_ctrl, mgmt_sema;

	for (i = 0; i < 10; i++) {
		mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
		if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
			break;
		msleep(500);
	}

	if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
		return 0;

	for (i = 0; i < 2; i++) {
		tx_ctrl = readl(base + NvRegTransmitterControl);
		tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
		writel(tx_ctrl, base + NvRegTransmitterControl);

		/* verify that semaphore was acquired */
		tx_ctrl = readl(base + NvRegTransmitterControl);
		if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
			np->mgmt_sema = 1;
			return 1;
		}
		else
			udelay(50);
	}

	return 0;
}
static void nv_mgmt_release_sema(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl;

	if (np->driver_data & DEV_HAS_MGMT_UNIT) {
		if (np->mgmt_sema) {
			tx_ctrl = readl(base + NvRegTransmitterControl);
			tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ;
			writel(tx_ctrl, base + NvRegTransmitterControl);
		}
	}
}
static int nv_mgmt_get_version(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 data_ready = readl(base + NvRegTransmitterControl);
	u32 data_ready2 = 0;
	unsigned long start;
	int ready = 0;

	writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion);
	writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl);
	start = jiffies;
	while (time_before(jiffies, start + 5*HZ)) {
		data_ready2 = readl(base + NvRegTransmitterControl);
		if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) {
			ready = 1;
			break;
		}
		schedule_timeout_uninterruptible(1);
	}

	if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR))
		return 0;

	np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION;

	return 1;
}
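/*
 * nv_open() below brings the nic up in a fixed order: power up the phy,
 * erase previous configuration, initialize the descriptor rings, program
 * link speed, watermarks and filters, power up the nic core, request and
 * enable interrupts, and finally force one manual link speed update
 * before starting the queues; later link changes arrive via nv_link_irq().
 */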
static int nv_open(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int oom, i;
	u32 low;

	dprintk(KERN_DEBUG "nv_open: begin\n");

	/* power up phy */
	mii_rw(dev, np->phyaddr, MII_BMCR,
	       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);

	/* erase previous misconfiguration */
	if (np->driver_data & DEV_HAS_POWER_CNTRL)
		nv_mac_reset(dev);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
	writel(0, base + NvRegPacketFilterFlags);

	writel(0, base + NvRegTransmitterControl);
	writel(0, base + NvRegReceiverControl);

	writel(0, base + NvRegAdapterControl);

	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
		writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);

	/* initialize descriptor rings */
	set_bufsize(dev);
	oom = nv_init_ring(dev);

	writel(0, base + NvRegLinkSpeed);
	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
	nv_txrx_reset(dev);
	writel(0, base + NvRegUnknownSetupReg6);

	np->in_shutdown = 0;

	/* give hw rings */
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		base + NvRegRingSizes);

	writel(np->linkspeed, base + NvRegLinkSpeed);
	if (np->desc_ver == DESC_VER_1)
		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
	else
		writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
	writel(np->txrxctl_bits, base + NvRegTxRxControl);
	writel(np->vlanctl_bits, base + NvRegVlanControl);
	pci_push(base);
	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
	reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
			NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
			KERN_INFO "open: SetupReg5, Bit 31 remained off\n");

	writel(0, base + NvRegMIIMask);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);

	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);

	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);

	get_random_bytes(&low, sizeof(low));
	low &= NVREG_SLOTTIME_MASK;
	if (np->desc_ver == DESC_VER_1) {
		writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
	} else {
		if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
			/* setup legacy backoff */
			writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
		} else {
			writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
			nv_gear_backoff_reseed(dev);
		}
	}
	writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
	writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
	if (poll_interval == -1) {
		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
		else
			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	}
	else
		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
			base + NvRegAdapterControl);
	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
	writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
	if (np->wolenabled)
		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);

	i = readl(base + NvRegPowerState);
	if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);

	pci_push(base);
	udelay(10);
	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);

	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	pci_push(base);

	if (nv_request_irq(dev, 0)) {
		goto out_drain;
	}

	/* ask for interrupts */
	nv_enable_hw_interrupts(dev, np->irqmask);

	spin_lock_irq(&np->lock);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
	/* One manual link speed update: Interrupts are enabled, future link
	 * speed changes cause interrupts and are handled by nv_link_irq().
	 */
	{
		u32 miistat;
		miistat = readl(base + NvRegMIIStatus);
		writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
		dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
	}
	/* set linkspeed to invalid value, thus force nv_update_linkspeed
	 * to init hw */
	np->linkspeed = 0;
	ret = nv_update_linkspeed(dev);
	nv_start_rxtx(dev);
	netif_start_queue(dev);
	nv_napi_enable(dev);

	if (ret) {
		netif_carrier_on(dev);
	} else {
		printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
		netif_carrier_off(dev);
	}
	if (oom)
		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);

	/* start statistics timer */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
		mod_timer(&np->stats_poll,
			round_jiffies(jiffies + STATS_INTERVAL));

	spin_unlock_irq(&np->lock);

	return 0;
out_drain:
	nv_drain_rxtx(dev);
	return ret;
}
static int nv_close(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base;

	spin_lock_irq(&np->lock);
	np->in_shutdown = 1;
	spin_unlock_irq(&np->lock);
	nv_napi_disable(dev);
	synchronize_irq(np->pci_dev->irq);

	del_timer_sync(&np->oom_kick);
	del_timer_sync(&np->nic_poll);
	del_timer_sync(&np->stats_poll);

	netif_stop_queue(dev);
	spin_lock_irq(&np->lock);
	nv_stop_rxtx(dev);
	nv_txrx_reset(dev);

	/* disable interrupts on the nic or we will lock up */
	base = get_hwbase(dev);
	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	nv_drain_rxtx(dev);

	if (np->wolenabled) {
		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
		nv_start_rx(dev);
	} else {
		/* power down phy */
		mii_rw(dev, np->phyaddr, MII_BMCR,
		       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
	}

	/* FIXME: power down nic */

	return 0;
}
static const struct net_device_ops nv_netdev_ops = {
	.ndo_open		= nv_open,
	.ndo_stop		= nv_close,
	.ndo_get_stats		= nv_get_stats,
	.ndo_start_xmit		= nv_start_xmit,
	.ndo_tx_timeout		= nv_tx_timeout,
	.ndo_change_mtu		= nv_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= nv_set_mac_address,
	.ndo_set_multicast_list	= nv_set_multicast,
	.ndo_vlan_rx_register	= nv_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= nv_poll_controller,
#endif
};

static const struct net_device_ops nv_netdev_ops_optimized = {
	.ndo_open		= nv_open,
	.ndo_stop		= nv_close,
	.ndo_get_stats		= nv_get_stats,
	.ndo_start_xmit		= nv_start_xmit_optimized,
	.ndo_tx_timeout		= nv_tx_timeout,
	.ndo_change_mtu		= nv_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= nv_set_mac_address,
	.ndo_set_multicast_list	= nv_set_multicast,
	.ndo_vlan_rx_register	= nv_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= nv_poll_controller,
#endif
};
static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct fe_priv *np;
	unsigned long addr;
	u8 __iomem *base;
	int err, i;
	u32 powerstate, txreg;
	u32 phystate_orig = 0, phystate;
	int phyinitialized = 0;
	static int printed_version;

	if (!printed_version++)
		printk(KERN_INFO "%s: Reverse Engineered nForce ethernet"
		       " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION);

	dev = alloc_etherdev(sizeof(struct fe_priv));
	err = -ENOMEM;
	if (!dev)
		goto out;

	np = netdev_priv(dev);
	np->dev = dev;
	np->pci_dev = pci_dev;
	spin_lock_init(&np->lock);
	SET_NETDEV_DEV(dev, &pci_dev->dev);

	init_timer(&np->oom_kick);
	np->oom_kick.data = (unsigned long) dev;
	np->oom_kick.function = &nv_do_rx_refill;	/* timer handler */
	init_timer(&np->nic_poll);
	np->nic_poll.data = (unsigned long) dev;
	np->nic_poll.function = &nv_do_nic_poll;	/* timer handler */
	init_timer(&np->stats_poll);
	np->stats_poll.data = (unsigned long) dev;
	np->stats_poll.function = &nv_do_stats_poll;	/* timer handler */

	err = pci_enable_device(pci_dev);
	if (err)
		goto out_free;

	pci_set_master(pci_dev);

	err = pci_request_regions(pci_dev, DRV_NAME);
	if (err < 0)
		goto out_disable;

	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
		np->register_size = NV_PCI_REGSZ_VER3;
	else if (id->driver_data & DEV_HAS_STATISTICS_V1)
		np->register_size = NV_PCI_REGSZ_VER2;
	else
		np->register_size = NV_PCI_REGSZ_VER1;

	err = -EINVAL;
	addr = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
				pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
				pci_resource_len(pci_dev, i),
				pci_resource_flags(pci_dev, i));
		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
				pci_resource_len(pci_dev, i) >= np->register_size) {
			addr = pci_resource_start(pci_dev, i);
			break;
		}
	}
	if (i == DEVICE_COUNT_RESOURCE) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "Couldn't find register window\n");
		goto out_relreg;
	}

	/* copy of driver data */
	np->driver_data = id->driver_data;
	/* copy of device id */
	np->device_id = id->device;

	/* handle different descriptor versions */
	if (id->driver_data & DEV_HAS_HIGH_DMA) {
		/* packet format 3: supports 40-bit addressing */
		np->desc_ver = DESC_VER_3;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
		if (dma_64bit) {
			if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39)))
				dev_printk(KERN_INFO, &pci_dev->dev,
					"64-bit DMA failed, using 32-bit addressing\n");
			else
				dev->features |= NETIF_F_HIGHDMA;
			if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
				dev_printk(KERN_INFO, &pci_dev->dev,
					"64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
			}
		}
	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
		/* packet format 2: supports jumbo frames */
		np->desc_ver = DESC_VER_2;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
	} else {
		/* original packet format */
		np->desc_ver = DESC_VER_1;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
	}

	np->pkt_limit = NV_PKTLIMIT_1;
	if (id->driver_data & DEV_HAS_LARGEDESC)
		np->pkt_limit = NV_PKTLIMIT_2;

	if (id->driver_data & DEV_HAS_CHECKSUM) {
		np->rx_csum = 1;
		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		dev->features |= NETIF_F_TSO;
	}

	np->vlanctl_bits = 0;
	if (id->driver_data & DEV_HAS_VLAN) {
		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
		dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
	}

	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
	if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
	}

	err = -ENOMEM;
	np->base = ioremap(addr, np->register_size);
	if (!np->base)
		goto out_relreg;
	dev->base_addr = (unsigned long)np->base;

	dev->irq = pci_dev->irq;

	np->rx_ring_size = RX_RING_DEFAULT;
	np->tx_ring_size = TX_RING_DEFAULT;

	if (!nv_optimized(np)) {
		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.orig)
			goto out_unmap;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.ex)
			goto out_unmap;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	if (!np->rx_skb || !np->tx_skb)
		goto out_freering;

	if (!nv_optimized(np))
		dev->netdev_ops = &nv_netdev_ops;
	else
		dev->netdev_ops = &nv_netdev_ops_optimized;

#ifdef CONFIG_FORCEDETH_NAPI
	netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
#endif
	SET_ETHTOOL_OPS(dev, &ops);
	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;

	pci_set_drvdata(pci_dev, dev);
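	/*
	 * Note on the block below: older boards store the MAC address in
	 * reversed byte order; NVREG_TRANSMITPOLL_MAC_ADDR_REV records
	 * whether it has already been corrected, and newer devices set
	 * DEV_HAS_CORRECT_MACADDR so no fixup is needed at all.
	 */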
	/* read the mac address */
	base = get_hwbase(dev);
	np->orig_mac[0] = readl(base + NvRegMacAddrA);
	np->orig_mac[1] = readl(base + NvRegMacAddrB);

	/* check the workaround bit for correct mac address order */
	txreg = readl(base + NvRegTransmitPoll);
	if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
		/* mac address is already in correct order */
		dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
	} else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
		/* mac address is already in correct order */
		dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
		/*
		 * Set orig mac address back to the reversed version.
		 * This flag will be cleared during low power transition.
		 * Therefore, we should always put back the reversed address.
		 */
		np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
			(dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
		np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
	} else {
		/* need to reverse mac address to correct order */
		dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
		printk(KERN_DEBUG "nv_probe: set workaround bit for reversed mac addr\n");
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		/*
		 * Bad mac address. At least one bios sets the mac address
		 * to 01:23:45:67:89:ab
		 */
		dev_printk(KERN_ERR, &pci_dev->dev,
			"Invalid Mac address detected: %pM\n",
			dev->dev_addr);
		dev_printk(KERN_ERR, &pci_dev->dev,
			"Please complain to your hardware vendor. Switching to a random MAC.\n");
		dev->dev_addr[0] = 0x00;
		dev->dev_addr[1] = 0x00;
		dev->dev_addr[2] = 0x6c;
		get_random_bytes(&dev->dev_addr[3], 3);
	}

	dprintk(KERN_DEBUG "%s: MAC Address %pM\n",
		pci_name(pci_dev), dev->dev_addr);

	/* set mac address */
	nv_copy_mac_to_hw(dev);

	/* Workaround current PCI init glitch: wakeup bits aren't
	 * being set from PCI PM capability.
	 */
	device_init_wakeup(&pci_dev->dev, 1);
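	/*
	 * Wake-on-LAN is disabled by default below; nv_set_wol() can
	 * re-enable it later by writing NVREG_WAKEUPFLAGS_ENABLE.
	 */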
	/* disable WOL */
	writel(0, base + NvRegWakeUpFlags);
	np->wolenabled = 0;

	if (id->driver_data & DEV_HAS_POWER_CNTRL) {

		/* take phy and nic out of low power mode */
		powerstate = readl(base + NvRegPowerState2);
		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
		if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
		    pci_dev->revision >= 0xA3)
			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
		writel(powerstate, base + NvRegPowerState2);
	}

	if (np->desc_ver == DESC_VER_1) {
		np->tx_flags = NV_TX_VALID;
	} else {
		np->tx_flags = NV_TX2_VALID;
	}

	np->msi_flags = 0;
	if ((id->driver_data & DEV_HAS_MSI) && msi) {
		np->msi_flags |= NV_MSI_CAPABLE;
	}
	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
		/* msix has had reported issues when modifying irqmask
		   as in the case of napi, therefore, disable for now
		*/
#ifndef CONFIG_FORCEDETH_NAPI
		np->msi_flags |= NV_MSI_X_CAPABLE;
#endif
	}

	if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) {
		np->irqmask = NVREG_IRQMASK_CPU;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0001;
	} else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC &&
		   !(id->driver_data & DEV_NEED_TIMERIRQ)) {
		/* start off in throughput mode */
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		/* remove support for msix mode */
		np->msi_flags &= ~NV_MSI_X_CAPABLE;
	} else {
		optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0003;
	}

	if (id->driver_data & DEV_NEED_TIMERIRQ)
		np->irqmask |= NVREG_IRQ_TIMER;
	if (id->driver_data & DEV_NEED_LINKTIMER) {
		dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
		np->need_linktimer = 1;
		np->link_timeout = jiffies + LINK_TIMEOUT;
	} else {
		dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
		np->need_linktimer = 0;
	}
	/* Limit the number of tx's outstanding for hw bug */
	if (id->driver_data & DEV_NEED_TX_LIMIT) {
		np->tx_limit = 1;
		if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_39) &&
		    pci_dev->revision >= 0xA2)
			np->tx_limit = 0;
	}
	/* clear phy state and temporarily halt phy interrupts */
	writel(0, base + NvRegMIIMask);
	phystate = readl(base + NvRegAdapterControl);
	if (phystate & NVREG_ADAPTCTL_RUNNING) {
		phystate_orig = 1;
		phystate &= ~NVREG_ADAPTCTL_RUNNING;
		writel(phystate, base + NvRegAdapterControl);
	}
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
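	/* Note: some boards run a firmware management unit that shares the
	 * MAC with the driver. If it is active and has completed PHY init,
	 * the driver must not reset the PHY underneath it; the block below
	 * detects that case. */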
	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
		/* management unit running on the mac? */
		if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
		    (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
		    nv_mgmt_acquire_sema(dev) &&
		    nv_mgmt_get_version(dev)) {
			np->mac_in_use = 1;
			if (np->mgmt_version > 0) {
				np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
			}
			dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n",
				pci_name(pci_dev), np->mac_in_use);
			/* management unit setup the phy already? */
			if (np->mac_in_use &&
			    ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
			     NVREG_XMITCTL_SYNC_PHY_INIT)) {
				/* phy is inited by mgmt unit */
				phyinitialized = 1;
				dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n",
					pci_name(pci_dev));
			} else {
				/* we need to init the phy */
			}
		}
	}
	/* find a suitable phy */
	for (i = 1; i <= 32; i++) {
		int id1, id2;
		int phyaddr = i & 0x1F;

		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		np->phy_model = id2 & PHYID2_MODEL_MASK;
		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
			pci_name(pci_dev), id1, id2, phyaddr);
		np->phyaddr = phyaddr;
		np->phy_oui = id1 | id2;

		/* Realtek hardcoded phy id1 to all zero's on certain phys */
		if (np->phy_oui == PHY_OUI_REALTEK2)
			np->phy_oui = PHY_OUI_REALTEK;
		/* Setup phy revision for Realtek */
		if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
			np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;

		break;
	}
	if (i == 33) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "open: Could not find a valid PHY.\n");
		goto out_error;
	}
	if (!phyinitialized) {
		/* reset it */
		phy_init(dev);
	} else {
		/* see if it is a gigabit phy */
		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
		if (mii_status & PHY_GIGABIT) {
			np->gigabit = PHY_GIGABIT;
		}
	}

	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;
	err = register_netdev(dev);
	if (err) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "unable to register netdev: %d\n", err);
		goto out_error;
	}
	dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, "
		   "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
		   dev->name, np->phy_oui, np->phyaddr,
		   dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
		   dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
	dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
		   dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
		   dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
			"csum " : "",
		   dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
			"vlan " : "",
		   id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
		   id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
		   id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
		   np->gigabit == PHY_GIGABIT ? "gbit " : "",
		   np->need_linktimer ? "lnktim " : "",
		   np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
		   np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
		   np->desc_ver);

	return 0;
out_error:
	if (phystate_orig)
		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
	pci_set_drvdata(pci_dev, NULL);
out_freering:
	free_rings(dev);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}
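/* Undo the Realtek 8201 crossover-detection change applied by phy_init()
 * when phy_cross is disabled, leaving the PHY in its power-up state for
 * whatever touches it next (firmware, a kexec'd kernel, etc.). */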
static void nv_restore_phy(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u16 phy_reserved, mii_control;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8201 &&
	    phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
		phy_reserved |= PHY_REALTEK_INIT8;
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);

		/* restart auto negotiation */
		mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
		mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
	}
}
static void nv_restore_mac_addr(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);
	writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
	       base + NvRegTransmitPoll);
}
static void __devexit nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);

	unregister_netdev(dev);

	nv_restore_mac_addr(pci_dev);

	/* restore any phy related changes */
	nv_restore_phy(dev);

	nv_mgmt_release_sema(dev);

	/* free all structures */
	free_rings(dev);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}
#ifdef CONFIG_PM
static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i;

	if (netif_running(dev)) {
		nv_close(dev);
	}
	netif_device_detach(dev);

	/* save non-pci configuration space */
	for (i = 0; i <= np->register_size/sizeof(u32); i++)
		np->saved_config_space[i] = readl(base + i*sizeof(u32));

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
static int nv_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i, rc = 0;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, PCI_D0, 0);

	/* restore non-pci configuration space */
	for (i = 0; i <= np->register_size/sizeof(u32); i++)
		writel(np->saved_config_space[i], base + i*sizeof(u32));

	pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);

	/* restore phy state, including autoneg */
	phy_init(dev);

	netif_device_attach(dev);
	if (netif_running(dev)) {
		rc = nv_open(dev);
		nv_set_multicast(dev);
	}
	return rc;
}
static void nv_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);

	if (netif_running(dev))
		nv_close(dev);

	/*
	 * Restore the MAC so a kernel started by kexec won't get confused.
	 * If we really go for poweroff, we must not restore the MAC,
	 * otherwise the MAC for WOL will be reversed at least on some boards.
	 */
	if (system_state != SYSTEM_POWER_OFF) {
		nv_restore_mac_addr(pdev);
	}

	pci_disable_device(pdev);
	/*
	 * Apparently it is not possible to reinitialise from D3 hot,
	 * only put the device into D3 if we really go for poweroff.
	 */
	if (system_state == SYSTEM_POWER_OFF) {
		if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled))
			pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
#else
#define nv_suspend NULL
#define nv_shutdown NULL
#define nv_resume NULL
#endif /* CONFIG_PM */
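/* One table entry per supported NVENET PCI device id. The flag bits in
 * .driver_data describe per-chipset features and workarounds; nv_probe()
 * reads them through id->driver_data. */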
static struct pci_device_id pci_tbl[] = {
	{	/* nForce Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce2 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{0,},
};
static struct pci_driver driver = {
	.name		= DRV_NAME,
	.id_table	= pci_tbl,
	.probe		= nv_probe,
	.remove		= __devexit_p(nv_remove),
	.suspend	= nv_suspend,
	.resume		= nv_resume,
	.shutdown	= nv_shutdown,
};
static int __init init_nic(void)
{
	return pci_register_driver(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how often the timer interrupt is generated, as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_cross, int, 0);
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);