/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *	IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define FORCEDETH_VERSION	"0.64"
#define DRV_NAME		"forcedeth"
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/prefetch.h>
#include <linux/u64_stats_sync.h>
#define TX_WORK_PER_LOOP	64
#define RX_WORK_PER_LOOP	64
#define DEV_NEED_TIMERIRQ          0x0000001  /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER         0x0000002  /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC          0x0000004  /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA           0x0000008  /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM           0x0000010  /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN               0x0000020  /* device supports vlan tagging and striping */
#define DEV_HAS_MSI                0x0000040  /* device supports MSI */
#define DEV_HAS_MSI_X              0x0000080  /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL        0x0000100  /* device supports power savings */
#define DEV_HAS_STATISTICS_V1      0x0000200  /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2      0x0000400  /* device supports hw statistics version 2 */
#define DEV_HAS_STATISTICS_V3      0x0000800  /* device supports hw statistics version 3 */
#define DEV_HAS_STATISTICS_V12     0x0000600  /* device supports hw statistics version 1 and 2 */
#define DEV_HAS_STATISTICS_V123    0x0000e00  /* device supports hw statistics version 1, 2, and 3 */
#define DEV_HAS_TEST_EXTENDED      0x0001000  /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT          0x0002000  /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR    0x0004000  /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX      0x0008000  /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1   0x0010000  /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2   0x0020000  /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3   0x0040000  /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT          0x0080000  /* device needs to limit tx */
#define DEV_NEED_TX_LIMIT2         0x0180000  /* device needs to limit tx, except for some revs */
#define DEV_HAS_GEAR_MODE          0x0200000  /* device supports gear mode */
#define DEV_NEED_PHY_INIT_FIX      0x0400000  /* device needs specific phy workaround */
#define DEV_NEED_LOW_POWER_FIX     0x0800000  /* device needs special power up workaround */
#define DEV_NEED_MSI_FIX           0x1000000  /* device needs msi workaround */
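
/*
 * These bits are OR-ed into the PCI device table's driver_data and tested
 * throughout the driver; the combined masks above are plain ORs of the
 * single-version bits, e.g. DEV_HAS_STATISTICS_V12 (0x600) ==
 * DEV_HAS_STATISTICS_V1 | DEV_HAS_STATISTICS_V2. A hypothetical table
 * entry (the device id here is an illustrative placeholder) would look like:
 *
 *	{	PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0123),
 *		.driver_data = DEV_NEED_TIMERIRQ | DEV_NEED_LINKTIMER |
 *			       DEV_HAS_LARGEDESC | DEV_HAS_CHECKSUM,
 *	},
 */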
enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK	0x83ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR		0x0001
#define NVREG_IRQ_RX			0x0002
#define NVREG_IRQ_RX_NOBUF		0x0004
#define NVREG_IRQ_TX_ERR		0x0008
#define NVREG_IRQ_TX_OK			0x0010
#define NVREG_IRQ_TIMER			0x0020
#define NVREG_IRQ_LINK			0x0040
#define NVREG_IRQ_RX_FORCED		0x0080
#define NVREG_IRQ_TX_FORCED		0x0100
#define NVREG_IRQ_RECOVER_ERROR		0x8200
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU		0x0060
#define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL	3
/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	65535 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU	13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01

	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START		0x01
#define NVREG_XMITCTL_MGMT_ST		0x40000000
#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
#define NVREG_XMITCTL_DATA_START	0x00100000
#define NVREG_XMITCTL_DATA_READY	0x00010000
#define NVREG_XMITCTL_DATA_ERROR	0x00020000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED	0x80000000
#define NVREG_SLOTTIME_10_100_FULL	0x00007f00
#define NVREG_SLOTTIME_1000_FULL	0x0003ff00
#define NVREG_SLOTTIME_HALF		0x0000ff00
#define NVREG_SLOTTIME_DEFAULT		0x00007f00
#define NVREG_SLOTTIME_MASK		0x000000ff

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT		0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100		0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000		0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10	0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100	0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH		0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE	0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE	0xffff

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000
	NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT		0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK	0x000003ff
#define NVREG_BKOFFCTRL_SELECT		24
#define NVREG_BKOFFCTRL_GEAR		12

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE	0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x002100
#define NVREG_TXRXCTL_DESC_3	0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1	0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2	0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3	0x09f00880
	NvRegTxPauseFrameLimit = 0x174,
#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE	0x00010000
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK_RW		0x0007
#define NVREG_MIISTAT_MASK_ALL		0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE		0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegTxUnicast = 0x1a0,
	NvRegTxMulticast = 0x1a4,
	NvRegTxBroadcast = 0x1a8,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegMgmtUnitGetVersion = 0x204,
#define NVREG_MGMTUNITGETVERSION	0x01
	NvRegMgmtUnitVersion = 0x208,
#define NVREG_MGMTUNITVERSION		0x08
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegMgmtUnitControl = 0x278,
#define NVREG_MGMTUNITCONTROL_INUSE	0x20000

	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxFrame = 0x2d8,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
#define NVREG_POWERSTATE2_PHY_RESET		0x0004
#define NVREG_POWERSTATE2_GATE_CLOCKS		0x0F00
};
/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc *orig;
	struct ring_desc_ex *ex;
};
#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_RETRYCOUNT_MASK	(0xF<<20)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_RETRYCOUNT_MASK	(0xF<<19)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)
#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)
#define NV_RX_ERROR_MASK	(NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUM_IP	(0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)
#define NV_RX2_ERROR_MASK	(NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x2d4
#define NV_PCI_REGSZ_VER3	0x604
#define NV_PCI_REGSZ_MAX	0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY		10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4
/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		512
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/* rx/tx mac addr + type + vlan + align + slack */
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)
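
/*
 * For reference, these are jiffies: with HZ ticks per second, OOM_REFILL
 * is 1 + HZ/20, i.e. roughly 50 ms (matching the "called after 50ms" note
 * at nv_do_rx_refill below), and POLL_WAIT is roughly 10 ms.
 */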
/*
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
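
/*
 * DESC_VER_1/DESC_VER_2 use struct ring_desc above (a single 32-bit buffer
 * address), while the 64-bit DESC_VER_3 format corresponds to struct
 * ring_desc_ex, whose bufhigh/buflow halves are filled via dma_high() and
 * dma_low() further down.
 */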
#define PHY_OUI_MARVELL		0x5043
#define PHY_OUI_CICADA		0x03f1
#define PHY_OUI_VITESSE		0x01c1
#define PHY_OUI_REALTEK		0x0732
#define PHY_OUI_REALTEK2	0x0020
#define PHYID1_OUI_MASK	0x03ff
#define PHYID1_OUI_SHFT	6
#define PHYID2_OUI_MASK	0xfc00
#define PHYID2_OUI_SHFT	10
#define PHYID2_MODEL_MASK		0x03f0
#define PHY_MODEL_REALTEK_8211		0x0110
#define PHY_REV_MASK			0x0001
#define PHY_REV_REALTEK_8211B		0x0000
#define PHY_REV_REALTEK_8211C		0x0001
#define PHY_MODEL_REALTEK_8201		0x0200
#define PHY_MODEL_MARVELL_E3016		0x0220
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_CICADA_INIT1	0x0f000
#define PHY_CICADA_INIT2	0x0e00
#define PHY_CICADA_INIT3	0x01000
#define PHY_CICADA_INIT4	0x0200
#define PHY_CICADA_INIT5	0x0004
#define PHY_CICADA_INIT6	0x02000
#define PHY_VITESSE_INIT_REG1	0x1f
#define PHY_VITESSE_INIT_REG2	0x10
#define PHY_VITESSE_INIT_REG3	0x11
#define PHY_VITESSE_INIT_REG4	0x12
#define PHY_VITESSE_INIT_MSK1	0xc
#define PHY_VITESSE_INIT_MSK2	0x0180
#define PHY_VITESSE_INIT1	0x52b5
#define PHY_VITESSE_INIT2	0xaf8a
#define PHY_VITESSE_INIT3	0x8
#define PHY_VITESSE_INIT4	0x8f8a
#define PHY_VITESSE_INIT5	0xaf86
#define PHY_VITESSE_INIT6	0x8f86
#define PHY_VITESSE_INIT7	0xaf82
#define PHY_VITESSE_INIT8	0x0100
#define PHY_VITESSE_INIT9	0x8f82
#define PHY_VITESSE_INIT10	0x0
#define PHY_REALTEK_INIT_REG1	0x1f
#define PHY_REALTEK_INIT_REG2	0x19
#define PHY_REALTEK_INIT_REG3	0x13
#define PHY_REALTEK_INIT_REG4	0x14
#define PHY_REALTEK_INIT_REG5	0x18
#define PHY_REALTEK_INIT_REG6	0x11
#define PHY_REALTEK_INIT_REG7	0x01
#define PHY_REALTEK_INIT1	0x0000
#define PHY_REALTEK_INIT2	0x8e00
#define PHY_REALTEK_INIT3	0x0001
#define PHY_REALTEK_INIT4	0xad17
#define PHY_REALTEK_INIT5	0xfb54
#define PHY_REALTEK_INIT6	0xf5c7
#define PHY_REALTEK_INIT7	0x1000
#define PHY_REALTEK_INIT8	0x0003
#define PHY_REALTEK_INIT9	0x0008
#define PHY_REALTEK_INIT10	0x0005
#define PHY_REALTEK_INIT11	0x0200
#define PHY_REALTEK_INIT_MSK1	0x0003

#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2
#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE  0x0004
#define NV_PAUSEFRAME_TX_ENABLE  0x0008
#define NV_PAUSEFRAME_RX_REQ     0x0010
#define NV_PAUSEFRAME_TX_REQ     0x0020
#define NV_PAUSEFRAME_AUTONEG    0x0040
/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS	8
#define NV_MSI_X_VECTORS_MASK	0x000f
#define NV_MSI_CAPABLE		0x0010
#define NV_MSI_X_CAPABLE	0x0020
#define NV_MSI_ENABLED		0x0040
#define NV_MSI_X_ENABLED	0x0080

#define NV_MSI_X_VECTOR_ALL	0x0
#define NV_MSI_X_VECTOR_RX	0x0
#define NV_MSI_X_VECTOR_TX	0x1
#define NV_MSI_X_VECTOR_OTHER	0x2

#define NV_MSI_PRIV_OFFSET	0x68
#define NV_MSI_PRIV_VALUE	0xffffffff

#define NV_RESTART_TX	0x1
#define NV_RESTART_RX	0x2

#define NV_TX_LIMIT_COUNT	16

#define NV_DYNAMIC_THRESHOLD		4
#define NV_DYNAMIC_MAX_QUIET_COUNT	2048
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};
static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" }, /* includes Ethernet FCS CRC */
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_late_collision" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "rx_bytes" }, /* includes Ethernet FCS CRC */

	/* version 3 stats */
struct nv_ethtool_stats {
	u64 tx_bytes; /* should be ifconfig->tx_bytes + 4*tx_packets */
	u64 tx_late_collision;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 rx_late_collision;
	u64 rx_frame_too_long;
	u64 rx_frame_align_error;
	u64 rx_packets; /* should be ifconfig->rx_packets */

	/* version 2 stats */
	u64 tx_packets; /* should be ifconfig->tx_packets */
	u64 rx_bytes;   /* should be ifconfig->rx_bytes + 4*rx_packets */

	/* version 3 stats */
#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
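
/*
 * The counts differ by the number of counters each hardware statistics
 * version adds: v3 adds 3 (tx_unicast, tx_multicast, tx_broadcast) and
 * v2 adds 6 (tx_deferral, tx_packets, rx_bytes, tx_pause, rx_pause,
 * rx_drop_frame); see the DEV_HAS_STATISTICS_V2/V3 blocks in
 * nv_update_stats() below.
 */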
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link      (online/offline)" },
	{ "register  (offline)       " },
	{ "interrupt (offline)       " },
	{ "loopback  (offline)       " }
};
struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0, 0 }
};

struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len:31;
	unsigned int dma_single:1;
	struct ring_desc_ex *first_tx_desc;
	struct nv_skb_map *next_tx_ctx;
};
/*
 * SMP locking:
 * All hardware access under netdev_priv(dev)->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs netdev_priv(dev)->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 *
 * Hardware stats updates are protected by hwstats_lock:
 * - updated by nv_do_stats_poll (timer). This is meant to avoid
 *	integer wraparound in the NIC stats registers, at low frequency
 * - updated by nv_get_ethtool_stats + nv_get_stats64
 *
 * Software stats are accessed only through 64b synchronization points
 * and are not subject to other synchronization techniques (single
 * update thread on the TX or RX paths).
 */
/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* hardware stats are updated in syscall and timer */
	spinlock_t hwstats_lock;
	struct nv_ethtool_stats estats;

	unsigned int phy_oui;
	unsigned int phy_model;
	unsigned int phy_rev;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;

	/* RX software stats */
	struct u64_stats_sync swstats_rx_syncp;
	u64 stat_rx_bytes; /* not always available in HW */
	u64 stat_rx_missed_errors;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_pkts_in_progress;
	struct nv_skb_map *tx_change_owner;
	struct nv_skb_map *tx_end_flip;

	/* TX software stats */
	struct u64_stats_sync swstats_tx_syncp;
	u64 stat_tx_packets; /* not always available in HW */

	/* msi/msi-x fields */
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* power saved state */
	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];

	/* for different msi-x irq type */
	char name_rx[IFNAMSIZ + 3];       /* -rx    */
	char name_tx[IFNAMSIZ + 3];       /* -tx    */
	char name_other[IFNAMSIZ + 6];    /* -other */
};
/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 4;
/*
 * Optimization can be throughput mode, cpu mode, or dynamic mode.
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 * Dynamic Mode: Switches between the two above based on observed load.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU,
	NV_OPTIMIZATION_MODE_DYNAMIC
};
static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;
/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * This value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
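
/*
 * Worked example of the formula: a 1 ms interval gives
 * (1000 us * 100) / (1 << 10) = 97, which matches the
 * "NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms"
 * note at NvRegPollingInterval above.
 */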
static int msi = NV_MSI_INT_ENABLED;
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;
/*
 * Debug output control for tx_timeout
 */
static bool debug_tx_timeout = false;
/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {
	NV_CROSSOVER_DETECTION_DISABLED,
	NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
/*
 * Power down phy when interface is down (persists through reboot;
 * older Linux and other OSes may not power it up again)
 */
static int phy_power_down;
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}
static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}
static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}
static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}
static bool nv_optimized(struct fe_priv *np)
{
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;
	return true;
}
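
/*
 * reg_delay: poll a register until (value & mask) == target, waiting
 * "delay" usec per iteration and at most "delaymax" usec in total;
 * returns nonzero on timeout, 0 on success.
 */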
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0)
			return 1;
	} while ((readl(base + offset) & mask) != target);
	return 0;
}
#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02
static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}
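
/*
 * The double shift in dma_high() avoids undefined behaviour: when
 * dma_addr_t is only 32 bits wide, a single ">> 32" would shift by the
 * full width of the type (undefined in C); ">> 31 >> 1" yields 0 there
 * and the expected upper 32 bits on a 64-bit dma_addr_t.
 */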
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING)
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		if (rxtx_flags & NV_SETUP_TX_RING)
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}
static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
}
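
/*
 * using_multi_irqs: nonzero when MSI-X is enabled with more than one
 * vector, i.e. rx, tx and "other" events each have their own interrupt;
 * with legacy INTx, MSI, or a single MSI-X vector, everything is
 * funneled through one handler.
 */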
static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}
static void nv_txrx_gate(struct net_device *dev, bool gate)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate;

	if (!np->mac_in_use &&
	    (np->driver_data & DEV_HAS_POWER_CNTRL)) {
		powerstate = readl(base + NvRegPowerState2);
		if (gate)
			powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
		else
			powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
		writel(powerstate, base + NvRegPowerState2);
	}
}
static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}
static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}
static void nv_napi_enable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_enable(&np->napi);
}

static void nv_napi_disable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_disable(&np->napi);
}
#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
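/*
 * Return convention relied on by the callers below: the value read for a
 * read, 0 for a write that completed, and -1 on timeout or error (write
 * failures are only partially detectable).
 */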
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
		      NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
	}

	return retval;
}
static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
		return -1;

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		usleep_range(10000, 20000);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}
static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
{
	static const struct {
		int reg;
		int init;
	} ri[] = {
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
		{ PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
		{ PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
		{ PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
		{ PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(ri); i++) {
		if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
			return PHY_ERROR;
	}

	return 0;
}
static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
{
	u32 reg;
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate = readl(base + NvRegPowerState2);

	/* need to perform hw phy reset */
	powerstate |= NVREG_POWERSTATE2_PHY_RESET;
	writel(powerstate, base + NvRegPowerState2);
	msleep(25);

	powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
	writel(powerstate, base + NvRegPowerState2);
	msleep(25);

	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
	reg |= PHY_REALTEK_INIT9;
	if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
		return PHY_ERROR;
	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
	if (!(reg & PHY_REALTEK_INIT11)) {
		reg |= PHY_REALTEK_INIT11;
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
			return PHY_ERROR;
	}
	if (mii_rw(dev, np->phyaddr,
		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
		return PHY_ERROR;

	return 0;
}
static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
		phy_reserved = mii_rw(dev, np->phyaddr,
				      PHY_REALTEK_INIT_REG6, MII_READ);
		phy_reserved |= PHY_REALTEK_INIT7;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG6, phy_reserved))
			return PHY_ERROR;
	}

	return 0;
}
static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
			return PHY_ERROR;
		phy_reserved = mii_rw(dev, np->phyaddr,
				      PHY_REALTEK_INIT_REG2, MII_READ);
		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
		phy_reserved |= PHY_REALTEK_INIT3;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG2, phy_reserved))
			return PHY_ERROR;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
			return PHY_ERROR;
	}

	return 0;
}
static int init_cicada(struct net_device *dev, struct fe_priv *np,
		       u32 phyinterface)
{
	u32 phy_reserved;

	if (phyinterface & PHY_RGMII) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
			return PHY_ERROR;
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
			return PHY_ERROR;
	}
	phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
	phy_reserved |= PHY_CICADA_INIT6;
	if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
		return PHY_ERROR;

	return 0;
}
static int init_vitesse(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
	phy_reserved |= PHY_VITESSE_INIT3;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
	phy_reserved |= PHY_VITESSE_INIT3;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
	phy_reserved |= PHY_VITESSE_INIT8;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
		return PHY_ERROR;

	return 0;
}
static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface;
	u32 mii_status, mii_control, mii_control_1000, reg;

	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			netdev_info(dev, "%s: phy write to errata reg failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			if (init_realtek_8211b(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
			   np->phy_rev == PHY_REV_REALTEK_8211C) {
			if (init_realtek_8211c(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (init_realtek_8201(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
	}

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
		ADVERTISE_100HALF | ADVERTISE_100FULL |
		ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		netdev_info(dev, "%s: phy write to advertise failed\n",
			    pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr,
					  MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8211 &&
	    np->phy_rev == PHY_REV_REALTEK_8211C) {
		/* start autoneg since we already performed hw reset above */
		mii_control |= BMCR_ANRESTART;
		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else {
		/* reset the phy
		 * (certain phys need bmcr to be setup with reset)
		 */
		if (phy_reset(dev, mii_control)) {
			netdev_info(dev, "%s: phy reset failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* phy vendor specific configuration */
	if (np->phy_oui == PHY_OUI_CICADA) {
		if (init_cicada(dev, np, phyinterface)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else if (np->phy_oui == PHY_OUI_VITESSE) {
		if (init_vitesse(dev, np)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			/* reset could have cleared these out, set them back */
			if (init_realtek_8211b(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (init_realtek_8201(dev, np) ||
			    init_realtek_8201_cross(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
	}

	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation, power down phy */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (phy_power_down)
		mii_control |= BMCR_PDOWN;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
		return PHY_ERROR;

	return 0;
}
static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	pci_push(base);
}
static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
		      NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
		netdev_info(dev, "%s: ReceiverStatus remained busy\n",
			    __func__);

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}
static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}
static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
		      NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
		netdev_info(dev, "%s: TransmitterStatus remained busy\n",
			    __func__);

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}
static void nv_start_rxtx(struct net_device *dev)
{
	nv_start_rx(dev);
	nv_start_tx(dev);
}

static void nv_stop_rxtx(struct net_device *dev)
{
	nv_stop_rx(dev);
	nv_stop_tx(dev);
}
static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 temp1, temp2, temp3;

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);

	/* save registers since they will be cleared on reset */
	temp1 = readl(base + NvRegMacAddrA);
	temp2 = readl(base + NvRegMacAddrB);
	temp3 = readl(base + NvRegTransmitPoll);

	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);

	/* restore saved registers */
	writel(temp1, base + NvRegMacAddrA);
	writel(temp2, base + NvRegMacAddrB);
	writel(temp3, base + NvRegTransmitPoll);

	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
/* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */
static void nv_update_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	/* If it happens that this is run in top-half context, then
	 * replace the spin_lock of hwstats_lock with
	 * spin_lock_irqsave() in calling functions. */
	WARN_ONCE(in_irq(), "forcedeth: estats spin_lock(_bh) from top-half");
	assert_spin_locked(&np->hwstats_lock);

	/* query hardware */
	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
		np->estats.rx_errors_total += np->estats.rx_drop_frame;
	}

	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
		np->estats.tx_unicast += readl(base + NvRegTxUnicast);
		np->estats.tx_multicast += readl(base + NvRegTxMulticast);
		np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
	}
}
/*
 * nv_get_stats64: dev->ndo_get_stats64 function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct rtnl_link_stats64*
nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
	__acquires(&netdev_priv(dev)->hwstats_lock)
	__releases(&netdev_priv(dev)->hwstats_lock)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int syncp_start;

	/*
	 * Note: because HW stats are not always available and for
	 * consistency reasons, the following ifconfig stats are
	 * managed by software: rx_bytes, tx_bytes, rx_packets and
	 * tx_packets. The related hardware stats reported by ethtool
	 * should be equivalent to these ifconfig stats, with 4
	 * additional bytes per packet (Ethernet FCS CRC), except for
	 * tx_packets when TSO kicks in.
	 */
	/* software stats */
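	/* u64_stats_sync retry loop: re-read the counters if a writer
	 * updated them while we were copying (seqcount-style snapshot). */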
	do {
		syncp_start = u64_stats_fetch_begin_bh(&np->swstats_rx_syncp);
		storage->rx_packets       = np->stat_rx_packets;
		storage->rx_bytes         = np->stat_rx_bytes;
		storage->rx_dropped       = np->stat_rx_dropped;
		storage->rx_missed_errors = np->stat_rx_missed_errors;
	} while (u64_stats_fetch_retry_bh(&np->swstats_rx_syncp, syncp_start));

	do {
		syncp_start = u64_stats_fetch_begin_bh(&np->swstats_tx_syncp);
		storage->tx_packets = np->stat_tx_packets;
		storage->tx_bytes   = np->stat_tx_bytes;
		storage->tx_dropped = np->stat_tx_dropped;
	} while (u64_stats_fetch_retry_bh(&np->swstats_tx_syncp, syncp_start));
	/* If the nic supports hw counters then retrieve latest values */
	if (np->driver_data & DEV_HAS_STATISTICS_V123) {
		spin_lock_bh(&np->hwstats_lock);

		nv_update_stats(dev);

		/* generic stats */
		storage->rx_errors = np->estats.rx_errors_total;
		storage->tx_errors = np->estats.tx_errors_total;

		/* meaningful only when NIC supports stats v3 */
		storage->multicast = np->estats.rx_multicast;

		/* detailed rx_errors */
		storage->rx_length_errors = np->estats.rx_length_error;
		storage->rx_over_errors   = np->estats.rx_over_errors;
		storage->rx_crc_errors    = np->estats.rx_crc_errors;
		storage->rx_frame_errors  = np->estats.rx_frame_align_error;
		storage->rx_fifo_errors   = np->estats.rx_drop_frame;

		/* detailed tx_errors */
		storage->tx_carrier_errors = np->estats.tx_carrier_errors;
		storage->tx_fifo_errors    = np->estats.tx_fifo_errors;

		spin_unlock_bh(&np->hwstats_lock);
	}

	return storage;
}
/*
 * nv_alloc_rx: fill rx ring entries.
 * Returns 1 if an skb allocation failed and the rx engine is left
 * without available descriptors.
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc *less_rx;

	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
		less_rx = np->last_rx.orig;

	while (np->put_rx.orig != less_rx) {
		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(np->pci_dev,
						  np->put_rx_ctx->dma)) {
				kfree_skb(skb);
				goto packet_dropped;
			}
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
			wmb();
			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
				np->put_rx.orig = np->first_rx.orig;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
packet_dropped:
			u64_stats_update_begin(&np->swstats_rx_syncp);
			np->stat_rx_dropped++;
			u64_stats_update_end(&np->swstats_rx_syncp);
			return 1;
		}
	}
	return 0;
}
static int nv_alloc_rx_optimized(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc_ex *less_rx;

	less_rx = np->get_rx.ex;
	if (less_rx-- == np->first_rx.ex)
		less_rx = np->last_rx.ex;

	while (np->put_rx.ex != less_rx) {
		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(np->pci_dev,
						  np->put_rx_ctx->dma)) {
				kfree_skb(skb);
				goto packet_dropped;
			}
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
			wmb();
			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
				np->put_rx.ex = np->first_rx.ex;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
packet_dropped:
			u64_stats_update_begin(&np->swstats_rx_syncp);
			np->stat_rx_dropped++;
			u64_stats_update_end(&np->swstats_rx_syncp);
			return 1;
		}
	}
	return 0;
}
/* If rx bufs are exhausted called after 50ms to attempt to refresh */
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	/* Just reschedule NAPI rx processing */
	napi_schedule(&np->napi);
}
static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;

	if (!nv_optimized(np))
		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
	else
		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];

	for (i = 0; i < np->rx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		np->rx_skb[i].skb = NULL;
		np->rx_skb[i].dma = 0;
	}
}
static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;

	if (!nv_optimized(np))
		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
	else
		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
	netdev_reset_queue(np->dev);
	np->tx_pkts_in_progress = 0;
	np->tx_change_owner = NULL;
	np->tx_end_flip = NULL;
	np->tx_stop = 0;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		np->tx_skb[i].skb = NULL;
		np->tx_skb[i].dma = 0;
		np->tx_skb[i].dma_len = 0;
		np->tx_skb[i].dma_single = 0;
		np->tx_skb[i].first_tx_desc = NULL;
		np->tx_skb[i].next_tx_ctx = NULL;
	}
}
static int nv_init_ring(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	nv_init_tx(dev);
	nv_init_rx(dev);

	if (!nv_optimized(np))
		return nv_alloc_rx(dev);
	else
		return nv_alloc_rx_optimized(dev);
}
static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
{
	if (tx_skb->dma) {
		if (tx_skb->dma_single)
			pci_unmap_single(np->pci_dev, tx_skb->dma,
					 tx_skb->dma_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(np->pci_dev, tx_skb->dma,
				       tx_skb->dma_len,
				       PCI_DMA_TODEVICE);
		tx_skb->dma = 0;
	}
}
static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
{
	nv_unmap_txskb(np, tx_skb);

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
		return 1;
	}
	return 0;
}
static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		if (nv_release_txskb(np, &np->tx_skb[i])) {
			u64_stats_update_begin(&np->swstats_tx_syncp);
			np->stat_tx_dropped++;
			u64_stats_update_end(&np->swstats_tx_syncp);
		}
		np->tx_skb[i].dma = 0;
		np->tx_skb[i].dma_len = 0;
		np->tx_skb[i].dma_single = 0;
		np->tx_skb[i].first_tx_desc = NULL;
		np->tx_skb[i].next_tx_ctx = NULL;
	}
	np->tx_pkts_in_progress = 0;
	np->tx_change_owner = NULL;
	np->tx_end_flip = NULL;
}
static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	for (i = 0; i < np->rx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		wmb();
		if (np->rx_skb[i].skb) {
			pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
					 (skb_end_pointer(np->rx_skb[i].skb) -
					  np->rx_skb[i].skb->data),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skb[i].skb);
			np->rx_skb[i].skb = NULL;
		}
	}
}
static void nv_drain_rxtx(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}
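/*
 * Worked example for nv_get_empty_tx_slots() below: with tx_ring_size =
 * 256 and put_tx_ctx 10 entries ahead of get_tx_ctx, (256 + 10) % 256 =
 * 10 slots are in use, leaving 246 empty. Adding tx_ring_size before the
 * modulo keeps the pointer difference positive after put_tx_ctx wraps
 * around and sits at a lower address than get_tx_ctx.
 */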
static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
{
	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
}
static void nv_legacybackoff_reseed(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	u32 low;
	int tx_status = 0;

	reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
	get_random_bytes(&low, sizeof(low));
	reg |= low & NVREG_SLOTTIME_MASK;

	/* Need to stop tx before change takes effect.
	 * Caller has already gained np->lock.
	 */
	tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
	if (tx_status)
		nv_stop_tx(dev);
	nv_stop_rx(dev);
	writel(reg, base + NvRegSlotTime);
	if (tx_status)
		nv_start_tx(dev);
	nv_start_rx(dev);
}
/* Gear Backoff Seeds */
#define BACKOFF_SEEDSET_ROWS	8
#define BACKOFF_SEEDSET_LFSRS	15

/* Known Good seed sets */
static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
	{266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
	{266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
	{366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800,  84},
	{466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };

static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };
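/*
 * The miniseed swizzling below swaps the top and bottom nibbles of a
 * 12-bit sample while keeping the middle nibble in place, e.g.
 * 0x123 -> 0x321; XORing each sample with a reversed neighbour
 * decorrelates the three random reads before they are folded into a
 * single combined seed.
 */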
static void nv_gear_backoff_reseed(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
	u32 temp, seedset, combinedSeed;
	int i;

	/* Setup seed for free running LFSR */
	/* We are going to read the time stamp counter 3 times
	   and swizzle bits around to increase randomness */
	get_random_bytes(&miniseed1, sizeof(miniseed1));
	miniseed1 &= 0x0fff;
	if (miniseed1 == 0)
		miniseed1 = 0xabc;

	get_random_bytes(&miniseed2, sizeof(miniseed2));
	miniseed2 &= 0x0fff;
	if (miniseed2 == 0)
		miniseed2 = 0xabc;
	miniseed2_reversed =
		((miniseed2 & 0xF00) >> 8) |
		 (miniseed2 & 0x0F0) |
		 ((miniseed2 & 0x00F) << 8);

	get_random_bytes(&miniseed3, sizeof(miniseed3));
	miniseed3 &= 0x0fff;
	if (miniseed3 == 0)
		miniseed3 = 0xabc;
	miniseed3_reversed =
		((miniseed3 & 0xF00) >> 8) |
		 (miniseed3 & 0x0F0) |
		 ((miniseed3 & 0x00F) << 8);

	combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
		       (miniseed2 ^ miniseed3_reversed);

	/* Seeds can not be zero */
	if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
		combinedSeed |= 0x08;
	if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
		combinedSeed |= 0x8000;

	/* No need to disable tx here */
	temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
	temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
	temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
	writel(temp, base + NvRegBackOffControl);

	/* Setup seeds for all gear LFSRs. */
	get_random_bytes(&seedset, sizeof(seedset));
	seedset = seedset % BACKOFF_SEEDSET_ROWS;
	for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
		temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
		temp |= main_seedset[seedset][i-1] & 0x3ff;
		temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
		writel(temp, base + NvRegBackOffControl);
	}
}
/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with netif_tx_lock held.
 */
static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb_headlen(skb);
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc *put_tx;
	struct ring_desc *start_tx;
	struct ring_desc *prev_tx;
	struct nv_skb_map *prev_tx_ctx;
	struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL;
	unsigned long flags;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	spin_lock_irqsave(&np->lock, flags);
	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irqrestore(&np->lock, flags);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	start_tx = put_tx = np->put_tx.orig;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(np->pci_dev,
					  np->put_tx_ctx->dma)) {
			/* on DMA mapping error - drop the packet */
			dev_kfree_skb_any(skb);
			u64_stats_update_begin(&np->swstats_tx_syncp);
			np->stat_tx_dropped++;
			u64_stats_update_end(&np->swstats_tx_syncp);
			return NETDEV_TX_OK;
		}
		np->put_tx_ctx->dma_len = bcnt;
		np->put_tx_ctx->dma_single = 1;
		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = np->tx_flags;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.orig))
			put_tx = np->first_tx.orig;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 frag_size = skb_frag_size(frag);
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			if (!start_tx_ctx)
				start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;

			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
			np->put_tx_ctx->dma = skb_frag_dma_map(
							&np->pci_dev->dev,
							frag, offset,
							bcnt,
							DMA_TO_DEVICE);
			if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {

				/* Unwind the mapped fragments */
				do {
					nv_unmap_txskb(np, start_tx_ctx);
					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
						tmp_tx_ctx = np->first_tx_ctx;
				} while (tmp_tx_ctx != np->put_tx_ctx);
				dev_kfree_skb_any(skb);
				np->put_tx_ctx = start_tx_ctx;
				u64_stats_update_begin(&np->swstats_tx_syncp);
				np->stat_tx_dropped++;
				u64_stats_update_end(&np->swstats_tx_syncp);
				return NETDEV_TX_OK;
			}

			np->put_tx_ctx->dma_len = bcnt;
			np->put_tx_ctx->dma_single = 0;
			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			frag_size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.orig))
				put_tx = np->first_tx.orig;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (frag_size);
	}

	/* set last fragment flag  */
	prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	spin_lock_irqsave(&np->lock, flags);

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);

	netdev_sent_queue(np->dev, skb->len);

	skb_tx_timestamp(skb);

	np->put_tx.orig = put_tx;

	spin_unlock_irqrestore(&np->lock, flags);

	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}
static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
					   struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra;
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb_headlen(skb);
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc_ex *put_tx;
	struct ring_desc_ex *start_tx;
	struct ring_desc_ex *prev_tx;
	struct nv_skb_map *prev_tx_ctx;
	struct nv_skb_map *start_tx_ctx = NULL;
	struct nv_skb_map *tmp_tx_ctx = NULL;
	unsigned long flags;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	spin_lock_irqsave(&np->lock, flags);
	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irqrestore(&np->lock, flags);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	start_tx = put_tx = np->put_tx.ex;
	start_tx_ctx = np->put_tx_ctx;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(np->pci_dev,
					  np->put_tx_ctx->dma)) {
			/* on DMA mapping error - drop the packet */
			dev_kfree_skb_any(skb);
			u64_stats_update_begin(&np->swstats_tx_syncp);
			np->stat_tx_dropped++;
			u64_stats_update_end(&np->swstats_tx_syncp);
			return NETDEV_TX_OK;
		}
		np->put_tx_ctx->dma_len = bcnt;
		np->put_tx_ctx->dma_single = 1;
		put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
		put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = NV_TX2_VALID;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.ex))
			put_tx = np->first_tx.ex;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 frag_size = skb_frag_size(frag);
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
			if (!start_tx_ctx)
				start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
			np->put_tx_ctx->dma = skb_frag_dma_map(
							&np->pci_dev->dev,
							frag, offset,
							bcnt,
							DMA_TO_DEVICE);

			if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {

				/* Unwind the mapped fragments */
				do {
					nv_unmap_txskb(np, start_tx_ctx);
					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
						tmp_tx_ctx = np->first_tx_ctx;
				} while (tmp_tx_ctx != np->put_tx_ctx);
				dev_kfree_skb_any(skb);
				np->put_tx_ctx = start_tx_ctx;
				u64_stats_update_begin(&np->swstats_tx_syncp);
				np->stat_tx_dropped++;
				u64_stats_update_end(&np->swstats_tx_syncp);
				return NETDEV_TX_OK;
			}

			np->put_tx_ctx->dma_len = bcnt;
			np->put_tx_ctx->dma_single = 0;
			put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
			put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			frag_size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.ex))
				put_tx = np->first_tx.ex;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (frag_size);
	}

	/* set last fragment flag  */
	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	/* vlan tag */
	if (vlan_tx_tag_present(skb))
		start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
					vlan_tx_tag_get(skb));
	else
		start_tx->txvlan = 0;

	spin_lock_irqsave(&np->lock, flags);

	if (np->tx_limit) {
		/* Limit the number of outstanding tx. Setup all fragments, but
		 * do not set the VALID bit on the first descriptor. Save a pointer
		 * to that descriptor and also for next skb_map element.
		 */

		if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
			if (!np->tx_change_owner)
				np->tx_change_owner = start_tx_ctx;

			/* remove VALID bit */
			tx_flags &= ~NV_TX2_VALID;
			start_tx_ctx->first_tx_desc = start_tx;
			start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
			np->tx_end_flip = np->put_tx_ctx;
		} else {
			np->tx_pkts_in_progress++;
		}
	}

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);

	netdev_sent_queue(np->dev, skb->len);

	skb_tx_timestamp(skb);

	np->put_tx.ex = put_tx;

	spin_unlock_irqrestore(&np->lock, flags);

	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}
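/*
 * nv_tx_flip_ownership() below is the consumer side of the tx_limit
 * scheme set up in nv_start_xmit_optimized(): once NV_TX_LIMIT_COUNT
 * packets are in flight, further packets are queued with the VALID bit
 * cleared on their first descriptor. Each completion then releases one
 * deferred packet by setting the saved descriptor's VALID bit and
 * kicking the transmitter.
 */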
static inline void nv_tx_flip_ownership(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	np->tx_pkts_in_progress--;
	if (np->tx_change_owner) {
		np->tx_change_owner->first_tx_desc->flaglen |=
			cpu_to_le32(NV_TX2_VALID);
		np->tx_pkts_in_progress++;

		np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
		if (np->tx_change_owner == np->tx_end_flip)
			np->tx_change_owner = NULL;

		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	}
}
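/*
 * In the completion handlers below, a retry error whose retry-count
 * field reads zero triggers a reseed of the backoff LFSRs (gear mode
 * where the hardware supports it, the legacy slot-time seed otherwise),
 * which appears intended to keep stations from replaying the same
 * collision backoff pattern indefinitely.
 */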
/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static int nv_tx_done(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int tx_work = 0;
	struct ring_desc *orig_get_tx = np->get_tx.orig;
	unsigned int bytes_compl = 0;

	while ((np->get_tx.orig != np->put_tx.orig) &&
	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
	       (tx_work < limit)) {

		nv_unmap_txskb(np, np->get_tx_ctx);

		if (np->desc_ver == DESC_VER_1) {
			if (flags & NV_TX_LASTPACKET) {
				if (flags & NV_TX_ERROR) {
					if ((flags & NV_TX_RETRYERROR)
					    && !(flags & NV_TX_RETRYCOUNT_MASK))
						nv_legacybackoff_reseed(dev);
				} else {
					u64_stats_update_begin(&np->swstats_tx_syncp);
					np->stat_tx_packets++;
					np->stat_tx_bytes += np->get_tx_ctx->skb->len;
					u64_stats_update_end(&np->swstats_tx_syncp);
				}
				bytes_compl += np->get_tx_ctx->skb->len;
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
				tx_work++;
			}
		} else {
			if (flags & NV_TX2_LASTPACKET) {
				if (flags & NV_TX2_ERROR) {
					if ((flags & NV_TX2_RETRYERROR)
					    && !(flags & NV_TX2_RETRYCOUNT_MASK))
						nv_legacybackoff_reseed(dev);
				} else {
					u64_stats_update_begin(&np->swstats_tx_syncp);
					np->stat_tx_packets++;
					np->stat_tx_bytes += np->get_tx_ctx->skb->len;
					u64_stats_update_end(&np->swstats_tx_syncp);
				}
				bytes_compl += np->get_tx_ctx->skb->len;
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
				tx_work++;
			}
		}
		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
			np->get_tx.orig = np->first_tx.orig;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}

	netdev_completed_queue(np->dev, tx_work, bytes_compl);

	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
	return tx_work;
}
static int nv_tx_done_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int tx_work = 0;
	struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
	unsigned long bytes_cleaned = 0;

	while ((np->get_tx.ex != np->put_tx.ex) &&
	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
	       (tx_work < limit)) {

		nv_unmap_txskb(np, np->get_tx_ctx);

		if (flags & NV_TX2_LASTPACKET) {
			if (flags & NV_TX2_ERROR) {
				if ((flags & NV_TX2_RETRYERROR)
				    && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
					if (np->driver_data & DEV_HAS_GEAR_MODE)
						nv_gear_backoff_reseed(dev);
					else
						nv_legacybackoff_reseed(dev);
				}
			} else {
				u64_stats_update_begin(&np->swstats_tx_syncp);
				np->stat_tx_packets++;
				np->stat_tx_bytes += np->get_tx_ctx->skb->len;
				u64_stats_update_end(&np->swstats_tx_syncp);
			}

			bytes_cleaned += np->get_tx_ctx->skb->len;
			dev_kfree_skb_any(np->get_tx_ctx->skb);
			np->get_tx_ctx->skb = NULL;
			tx_work++;

			if (np->tx_limit)
				nv_tx_flip_ownership(dev);
		}

		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
			np->get_tx.ex = np->first_tx.ex;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}

	netdev_completed_queue(np->dev, tx_work, bytes_cleaned);

	if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
	return tx_work;
}
/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with netif_tx_lock held.
 */
static void nv_tx_timeout(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 status;
	union ring_type put_tx;
	int saved_tx_limit;

	if (np->msi_flags & NV_MSI_X_ENABLED)
		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
	else
		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;

	netdev_warn(dev, "Got tx_timeout. irq status: %08x\n", status);

	if (unlikely(debug_tx_timeout)) {
		int i;

		netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
		netdev_info(dev, "Dumping tx registers\n");
		for (i = 0; i <= np->register_size; i += 32) {
			netdev_info(dev,
				    "%3x: %08x %08x %08x %08x "
				    "%08x %08x %08x %08x\n",
				    i,
				    readl(base + i + 0), readl(base + i + 4),
				    readl(base + i + 8), readl(base + i + 12),
				    readl(base + i + 16), readl(base + i + 20),
				    readl(base + i + 24), readl(base + i + 28));
		}
		netdev_info(dev, "Dumping tx ring\n");
		for (i = 0; i < np->tx_ring_size; i += 4) {
			if (!nv_optimized(np)) {
				netdev_info(dev,
					    "%03x: %08x %08x // %08x %08x "
					    "// %08x %08x // %08x %08x\n",
					    i,
					    le32_to_cpu(np->tx_ring.orig[i].buf),
					    le32_to_cpu(np->tx_ring.orig[i].flaglen),
					    le32_to_cpu(np->tx_ring.orig[i+1].buf),
					    le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
					    le32_to_cpu(np->tx_ring.orig[i+2].buf),
					    le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
					    le32_to_cpu(np->tx_ring.orig[i+3].buf),
					    le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
			} else {
				netdev_info(dev,
					    "%03x: %08x %08x %08x "
					    "// %08x %08x %08x "
					    "// %08x %08x %08x "
					    "// %08x %08x %08x\n",
					    i,
					    le32_to_cpu(np->tx_ring.ex[i].bufhigh),
					    le32_to_cpu(np->tx_ring.ex[i].buflow),
					    le32_to_cpu(np->tx_ring.ex[i].flaglen),
					    le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
					    le32_to_cpu(np->tx_ring.ex[i+1].buflow),
					    le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
					    le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
					    le32_to_cpu(np->tx_ring.ex[i+2].buflow),
					    le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
					    le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
					    le32_to_cpu(np->tx_ring.ex[i+3].buflow),
					    le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
			}
		}
	}

	spin_lock_irq(&np->lock);

	/* 1) stop tx engine */
	nv_stop_tx(dev);

	/* 2) complete any outstanding tx and do not give HW any limited tx pkts */
	saved_tx_limit = np->tx_limit;
	np->tx_limit = 0; /* prevent giving HW any limited pkts */
	np->tx_stop = 0;  /* prevent waking tx queue */
	if (!nv_optimized(np))
		nv_tx_done(dev, np->tx_ring_size);
	else
		nv_tx_done_optimized(dev, np->tx_ring_size);

	/* save current HW position */
	if (np->tx_change_owner)
		put_tx.ex = np->tx_change_owner->first_tx_desc;
	else
		put_tx = np->put_tx;

	/* 3) clear all tx state */
	nv_drain_tx(dev);
	nv_init_tx(dev);

	/* 4) restore state to current HW position */
	np->get_tx = np->put_tx = put_tx;
	np->tx_limit = saved_tx_limit;

	/* 5) restart tx engine */
	nv_start_tx(dev);
	netif_wake_queue(dev);
	spin_unlock_irq(&np->lock);
}
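/*
 * Example for nv_getlen() below: a 100-byte frame whose 802 length field
 * claims 60 bytes of payload is trimmed to 60 + ETH_HLEN = 74 bytes
 * (more data on the wire than the header claims), while a 50-byte frame
 * making the same claim is rejected with -1 and dropped by the caller.
 */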
/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
	int hdrlen;	/* length of the 802 header */
	int protolen;	/* length as stored in the proto field */

	/* 1) calculate len according to header */
	if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
		protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
		hdrlen = VLAN_HLEN;
	} else {
		protolen = ntohs(((struct ethhdr *)packet)->h_proto);
		hdrlen = ETH_HLEN;
	}
	if (protolen > ETH_DATA_LEN)
		return datalen; /* Value in proto field not a len, no checks possible */

	protolen += hdrlen;
	/* consistency checks: */
	if (datalen > ETH_ZLEN) {
		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim off
			 * additional data.
			 */
			return protolen;
		} else {
			/* less data on wire than mentioned in header.
			 * Discard the packet.
			 */
			return -1;
		}
	} else {
		/* short packet. Accept only if 802 values are also short */
		if (protolen > ETH_ZLEN) {
			return -1;
		}
		return datalen;
	}
}
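/*
 * In the rx handlers below, a length-mismatch error (NV_RX_ERROR4 /
 * NV_RX2_ERROR4) and framing errors are treated as soft: the packet is
 * salvaged with a corrected length where possible. Every other error
 * bit is hard and frees the buffer without it ever reaching the stack.
 */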
static int nv_rx_process(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while ((np->get_rx.orig != np->put_rx.orig) &&
	      !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
		(rx_work < limit)) {

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				 np->get_rx_ctx->dma_len,
				 PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		/* look at what we actually got: */
		if (np->desc_ver == DESC_VER_1) {
			if (likely(flags & NV_RX_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V1;
				if (unlikely(flags & NV_RX_ERROR)) {
					if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
						if (flags & NV_RX_SUBSTRACT1)
							len--;
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX_MISSEDFRAME) {
							u64_stats_update_begin(&np->swstats_rx_syncp);
							np->stat_rx_missed_errors++;
							u64_stats_update_end(&np->swstats_rx_syncp);
						}
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		} else {
			if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V2;
				if (unlikely(flags & NV_RX2_ERROR)) {
					if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
						if (flags & NV_RX2_SUBSTRACT1)
							len--;
					}
					/* the rest are hard errors */
					else {
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
				    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
					skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		}
		/* got a valid packet - forward it to the network core */
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		napi_gro_receive(&np->napi, skb);
		u64_stats_update_begin(&np->swstats_rx_syncp);
		np->stat_rx_packets++;
		np->stat_rx_bytes += len;
		u64_stats_update_end(&np->swstats_rx_syncp);
next_pkt:
		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
			np->get_rx.orig = np->first_rx.orig;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}

	return rx_work;
}
static int nv_rx_process_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	u32 vlanflags = 0;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while ((np->get_rx.ex != np->put_rx.ex) &&
	      !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
	      (rx_work < limit)) {

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				 np->get_rx_ctx->dma_len,
				 PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		/* look at what we actually got: */
		if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
			len = flags & LEN_MASK_V2;
			if (unlikely(flags & NV_RX2_ERROR)) {
				if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
					len = nv_getlen(dev, skb->data, len);
					if (len < 0) {
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				/* framing errors are soft errors */
				else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
					if (flags & NV_RX2_SUBSTRACT1)
						len--;
				}
				/* the rest are hard errors */
				else {
					dev_kfree_skb(skb);
					goto next_pkt;
				}
			}

			if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
			    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			/* got a valid packet - forward it to the network core */
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, dev);
			prefetch(skb->data);

			vlanflags = le32_to_cpu(np->get_rx.ex->buflow);

			/*
			 * There is a need to check for NETIF_F_HW_VLAN_CTAG_RX
			 * here. Even if vlan rx accel is disabled,
			 * NV_RX3_VLAN_TAG_PRESENT is pseudo randomly set.
			 */
			if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
			    vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
				u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;

				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
			}
			napi_gro_receive(&np->napi, skb);
			u64_stats_update_begin(&np->swstats_rx_syncp);
			np->stat_rx_packets++;
			np->stat_rx_bytes += len;
			u64_stats_update_end(&np->swstats_rx_syncp);
		} else {
			dev_kfree_skb(skb);
		}
next_pkt:
		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
			np->get_rx.ex = np->first_rx.ex;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}

	return rx_work;
}
static void set_bufsize(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
}
/*
 * nv_change_mtu: dev->change_mtu function
 * Called with dev_base_lock held for read.
 */
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
	struct fe_priv *np = netdev_priv(dev);
	int old_mtu;

	if (new_mtu < 64 || new_mtu > np->pkt_limit)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu == new_mtu)
		return 0;

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		u8 __iomem *base = get_hwbase(dev);
		/*
		 * It seems that the nic preloads valid ring entries into an
		 * internal buffer. The procedure for flushing everything is
		 * guessed, there is probably a simpler approach.
		 * Changing the MTU is a rare event, it shouldn't matter.
		 */
		nv_disable_irq(dev);
		nv_napi_disable(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		nv_txrx_reset(dev);
		/* drain rx queue */
		nv_drain_rxtx(dev);
		/* reinit driver view of the rx queue */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}
		/* reinit nic view of the rx queue */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart rx engine */
		nv_start_rxtx(dev);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
		nv_napi_enable(dev);
		nv_enable_irq(dev);
	}
	return 0;
}
static void nv_copy_mac_to_hw(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 mac[2];

	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);

	writel(mac[0], base + NvRegMacAddrA);
	writel(mac[1], base + NvRegMacAddrB);
}
/*
 * nv_set_mac_address: dev->set_mac_address function
 * Called with rtnl_lock() held.
 */
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
	struct fe_priv *np = netdev_priv(dev);
	struct sockaddr *macaddr = (struct sockaddr *)addr;

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	/* synchronized against open : rtnl_lock() held by caller */
	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);

	if (netif_running(dev)) {
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock_irq(&np->lock);

		/* stop rx engine */
		nv_stop_rx(dev);

		/* set mac address */
		nv_copy_mac_to_hw(dev);

		/* restart rx engine */
		nv_start_rx(dev);
		spin_unlock_irq(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	} else {
		nv_copy_mac_to_hw(dev);
	}
	return 0;
}
/*
 * nv_set_multicast: dev->set_multicast function
 * Called with netif_tx_lock held.
 */
static void nv_set_multicast(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 addr[2];
	u32 mask[2];
	u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;

	memset(addr, 0, sizeof(addr));
	memset(mask, 0, sizeof(mask));

	if (dev->flags & IFF_PROMISC) {
		pff |= NVREG_PFF_PROMISC;
	} else {
		pff |= NVREG_PFF_MYADDR;

		if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
			u32 alwaysOff[2];
			u32 alwaysOn[2];

			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
			if (dev->flags & IFF_ALLMULTI) {
				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
			} else {
				struct netdev_hw_addr *ha;

				netdev_for_each_mc_addr(ha, dev) {
					unsigned char *hw_addr = ha->addr;
					u32 a, b;

					a = le32_to_cpu(*(__le32 *) hw_addr);
					b = le16_to_cpu(*(__le16 *) (&hw_addr[4]));
					alwaysOn[0] &= a;
					alwaysOff[0] &= ~a;
					alwaysOn[1] &= b;
					alwaysOff[1] &= ~b;
				}
			}
			addr[0] = alwaysOn[0];
			addr[1] = alwaysOn[1];
			mask[0] = alwaysOn[0] | alwaysOff[0];
			mask[1] = alwaysOn[1] | alwaysOff[1];
		} else {
			mask[0] = NVREG_MCASTMASKA_NONE;
			mask[1] = NVREG_MCASTMASKB_NONE;
		}
	}
	addr[0] |= NVREG_MCASTADDRA_FORCE;
	pff |= NVREG_PFF_ALWAYS;
	spin_lock_irq(&np->lock);
	nv_stop_rx(dev);
	writel(addr[0], base + NvRegMulticastAddrA);
	writel(addr[1], base + NvRegMulticastAddrB);
	writel(mask[0], base + NvRegMulticastMaskA);
	writel(mask[1], base + NvRegMulticastMaskB);
	writel(pff, base + NvRegPacketFilterFlags);
	nv_start_rx(dev);
	spin_unlock_irq(&np->lock);
}
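/*
 * The multicast filter programmed above works on bits common to all
 * subscribed addresses: alwaysOn collects bits set in every address,
 * alwaysOff bits clear in every one. mask = alwaysOn | alwaysOff marks
 * the positions on which all addresses agree and addr supplies the
 * agreed values, so the filter is inexact (extra groups may match) but
 * never drops a subscribed group.
 */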
static void nv_update_pause(struct net_device *dev, u32 pause_flags)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);

	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
		if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		} else {
			writel(pff, base + NvRegPacketFilterFlags);
		}
	}
	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
		if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
			u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
				/* limit the number of tx pause frames to a default of 8 */
				writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
			}
			writel(pause_enable, base + NvRegTxPauseFrame);
			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		} else {
			writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
			writel(regmisc, base + NvRegMisc1);
		}
	}
}
static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyreg, txreg;
	int mii_status;

	np->linkspeed = NVREG_LINKSPEED_FORCE|speed;
	np->duplex = duplex;

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		phyreg = readl(base + NvRegSlotTime);
		phyreg &= ~(0x3FF00);
		if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
			phyreg |= NVREG_SLOTTIME_10_100_FULL;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
			phyreg |= NVREG_SLOTTIME_10_100_FULL;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
			phyreg |= NVREG_SLOTTIME_1000_FULL;
		writel(phyreg, base + NvRegSlotTime);
	}

	phyreg = readl(base + NvRegPhyInterface);
	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
	if (np->duplex == 0)
		phyreg |= PHY_HALF;
	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
		phyreg |= PHY_100;
	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
							NVREG_LINKSPEED_1000)
		phyreg |= PHY_1000;
	writel(phyreg, base + NvRegPhyInterface);

	if (phyreg & PHY_RGMII) {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
						NVREG_LINKSPEED_1000)
			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
		else
			txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
	} else {
		txreg = NVREG_TX_DEFERRAL_DEFAULT;
	}
	writel(txreg, base + NvRegTxDeferral);

	if (np->desc_ver == DESC_VER_1) {
		txreg = NVREG_TX_WM_DESC1_DEFAULT;
	} else {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
					 NVREG_LINKSPEED_1000)
			txreg = NVREG_TX_WM_DESC2_3_1000;
		else
			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
	}
	writel(txreg, base + NvRegTxWatermark);

	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
			base + NvRegMisc1);
	pci_push(base);
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
}
/**
 * nv_update_linkspeed - Setup the MAC according to the link partner
 * @dev: Network device to be configured
 *
 * The function queries the PHY and checks if there is a link partner.
 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
 * set to 10 MBit HD.
 *
 * The function returns 0 if there is no link partner and 1 if there is
 * a good link partner.
 */
static int nv_update_linkspeed(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int adv = 0;
	int lpa = 0;
	int adv_lpa, adv_pause, lpa_pause;
	int newls = np->linkspeed;
	int newdup = np->duplex;
	int mii_status;
	u32 bmcr;
	int retval = 0;
	u32 control_1000, status_1000, phyreg, pause_flags, txreg;
	u32 txrxFlags = 0;
	u32 phy_exp;

	/* If device loopback is enabled, set carrier on and enable max link
	 * speed.
	 */
	bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	if (bmcr & BMCR_LOOPBACK) {
		if (netif_running(dev)) {
			nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 1);
			if (!netif_carrier_ok(dev))
				netif_carrier_on(dev);
		}
		return 1;
	}

	/* BMSR_LSTATUS is latched, read it twice:
	 * we want the current value.
	 */
	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	if (!(mii_status & BMSR_LSTATUS)) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		goto set_speed;
	}

	if (np->autoneg == 0) {
		if (np->fixed_mode & LPA_100FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 1;
		} else if (np->fixed_mode & LPA_100HALF) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 0;
		} else if (np->fixed_mode & LPA_10FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 1;
		} else {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 0;
		}
		retval = 1;
		goto set_speed;
	}
	/* check auto negotiation is complete */
	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		goto set_speed;
	}

	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);

	retval = 1;
	if (np->gigabit == PHY_GIGABIT) {
		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);

		if ((control_1000 & ADVERTISE_1000FULL) &&
			(status_1000 & LPA_1000FULL)) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
			newdup = 1;
			goto set_speed;
		}
	}

	/* FIXME: handle parallel detection properly */
	adv_lpa = lpa & adv;
	if (adv_lpa & LPA_100FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 1;
	} else if (adv_lpa & LPA_100HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 0;
	} else if (adv_lpa & LPA_10FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 1;
	} else if (adv_lpa & LPA_10HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	} else {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	}

set_speed:
	if (np->duplex == newdup && np->linkspeed == newls)
		return retval;

	np->duplex = newdup;
	np->linkspeed = newls;

	/* The transmitter and receiver must be restarted for safe update */
	if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
		txrxFlags |= NV_RESTART_TX;
		nv_stop_tx(dev);
	}
	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
		txrxFlags |= NV_RESTART_RX;
		nv_stop_rx(dev);
	}

	if (np->gigabit == PHY_GIGABIT) {
		phyreg = readl(base + NvRegSlotTime);
		phyreg &= ~(0x3FF00);
		if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
		    ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
			phyreg |= NVREG_SLOTTIME_10_100_FULL;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
			phyreg |= NVREG_SLOTTIME_1000_FULL;
		writel(phyreg, base + NvRegSlotTime);
	}

	phyreg = readl(base + NvRegPhyInterface);
	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
	if (np->duplex == 0)
		phyreg |= PHY_HALF;
	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
		phyreg |= PHY_100;
	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
		phyreg |= PHY_1000;
	writel(phyreg, base + NvRegPhyInterface);

	phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
	if (phyreg & PHY_RGMII) {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
		} else {
			if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
				if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
				else
					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
			} else {
				txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
			}
		}
	} else {
		if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
			txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
		else
			txreg = NVREG_TX_DEFERRAL_DEFAULT;
	}
	writel(txreg, base + NvRegTxDeferral);

	if (np->desc_ver == DESC_VER_1) {
		txreg = NVREG_TX_WM_DESC1_DEFAULT;
	} else {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
			txreg = NVREG_TX_WM_DESC2_3_1000;
		else
			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
	}
	writel(txreg, base + NvRegTxWatermark);

	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
		base + NvRegMisc1);
	pci_push(base);
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);

	pause_flags = 0;
	/* setup pause frame */
	if (netif_running(dev) && (np->duplex != 0)) {
		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
			adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
			lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			switch (adv_pause) {
			case ADVERTISE_PAUSE_CAP:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				break;
			case ADVERTISE_PAUSE_ASYM:
				if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				break;
			case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				if (lpa_pause == LPA_PAUSE_ASYM)
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
				break;
			}
		} else {
			pause_flags = np->pause_flags;
		}
	}
	nv_update_pause(dev, pause_flags);

	if (txrxFlags & NV_RESTART_TX)
		nv_start_tx(dev);
	if (txrxFlags & NV_RESTART_RX)
		nv_start_rx(dev);

	return retval;
}
static void nv_linkchange(struct net_device *dev)
{
	if (nv_update_linkspeed(dev)) {
		if (!netif_carrier_ok(dev)) {
			netif_carrier_on(dev);
			netdev_info(dev, "link up\n");
			nv_txrx_gate(dev, false);
			nv_start_rx(dev);
		}
	} else {
		if (netif_carrier_ok(dev)) {
			netif_carrier_off(dev);
			netdev_info(dev, "link down\n");
			nv_txrx_gate(dev, true);
			nv_stop_rx(dev);
		}
	}
}
static void nv_link_irq(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miistat;

	miistat = readl(base + NvRegMIIStatus);
	writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);

	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
		nv_linkchange(dev);
}
static void nv_msi_workaround(struct fe_priv *np)
{

	/* Need to toggle the msi irq mask within the ethernet device,
	 * otherwise, future interrupts will not be detected.
	 */
	if (np->msi_flags & NV_MSI_ENABLED) {
		u8 __iomem *base = np->base;

		writel(0, base + NvRegMSIIrqMask);
		writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
	}
}
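/*
 * Dynamic interrupt moderation below: one busy pass (more than
 * NV_DYNAMIC_THRESHOLD units of work) switches to the timer-driven
 * NVREG_IRQMASK_CPU mask, while only NV_DYNAMIC_MAX_QUIET_COUNT
 * consecutive quiet passes switch back to per-packet interrupts. A
 * non-zero return tells the caller the irq mask needs rewriting.
 */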
static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work)
{
	struct fe_priv *np = netdev_priv(dev);

	if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) {
		if (total_work > NV_DYNAMIC_THRESHOLD) {
			/* transition to poll based interrupts */
			np->quiet_count = 0;
			if (np->irqmask != NVREG_IRQMASK_CPU) {
				np->irqmask = NVREG_IRQMASK_CPU;
				return 1;
			}
		} else {
			if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) {
				np->quiet_count++;
			} else {
				/* reached a period of low activity, switch
				   to per tx/rx packet interrupts */
				if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) {
					np->irqmask = NVREG_IRQMASK_THROUGHPUT;
					return 1;
				}
			}
		}
	}
	return 0;
}
static irqreturn_t nv_nic_irq(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		np->events = readl(base + NvRegIrqStatus);
		writel(np->events, base + NvRegIrqStatus);
	} else {
		np->events = readl(base + NvRegMSIXIrqStatus);
		writel(np->events, base + NvRegMSIXIrqStatus);
	}
	if (!(np->events & np->irqmask))
		return IRQ_NONE;

	nv_msi_workaround(np);

	if (napi_schedule_prep(&np->napi)) {
		/*
		 * Disable further irq's (msix not enabled with napi)
		 */
		writel(0, base + NvRegIrqMask);
		__napi_schedule(&np->napi);
	}

	return IRQ_HANDLED;
}
/* All _optimized functions are used to help increase performance
 * (reduce CPU and increase throughput). They use descriptor version 3,
 * compiler directives, and reduce memory accesses.
 */
static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		np->events = readl(base + NvRegIrqStatus);
		writel(np->events, base + NvRegIrqStatus);
	} else {
		np->events = readl(base + NvRegMSIXIrqStatus);
		writel(np->events, base + NvRegMSIXIrqStatus);
	}
	if (!(np->events & np->irqmask))
		return IRQ_NONE;

	nv_msi_workaround(np);

	if (napi_schedule_prep(&np->napi)) {
		/*
		 * Disable further irq's (msix not enabled with napi)
		 */
		writel(0, base + NvRegIrqMask);
		__napi_schedule(&np->napi);
	}

	return IRQ_HANDLED;
}
static irqreturn_t nv_nic_irq_tx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	for (i = 0;; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
		writel(events, base + NvRegMSIXIrqStatus);
		netdev_dbg(dev, "tx irq events: %08x\n", events);
		if (!(events & np->irqmask))
			break;

		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock_irqrestore(&np->lock, flags);

		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			netdev_dbg(dev, "%s: too many iterations (%d)\n",
				   __func__, i);
			break;
		}

	}

	return IRQ_RETVAL(i);
}
static int nv_napi_poll(struct napi_struct *napi, int budget)
{
	struct fe_priv *np = container_of(napi, struct fe_priv, napi);
	struct net_device *dev = np->dev;
	u8 __iomem *base = get_hwbase(dev);
	unsigned long flags;
	int retcode;
	int rx_count, tx_work = 0, rx_work = 0;

	do {
		if (!nv_optimized(np)) {
			spin_lock_irqsave(&np->lock, flags);
			tx_work += nv_tx_done(dev, np->tx_ring_size);
			spin_unlock_irqrestore(&np->lock, flags);

			rx_count = nv_rx_process(dev, budget - rx_work);
			retcode = nv_alloc_rx(dev);
		} else {
			spin_lock_irqsave(&np->lock, flags);
			tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
			spin_unlock_irqrestore(&np->lock, flags);

			rx_count = nv_rx_process_optimized(dev,
			    budget - rx_work);
			retcode = nv_alloc_rx_optimized(dev);
		}
	} while (retcode == 0 &&
		 rx_count > 0 && (rx_work += rx_count) < budget);

	if (retcode) {
		spin_lock_irqsave(&np->lock, flags);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irqrestore(&np->lock, flags);
	}

	nv_change_interrupt_mode(dev, tx_work + rx_work);

	if (unlikely(np->events & NVREG_IRQ_LINK)) {
		spin_lock_irqsave(&np->lock, flags);
		nv_link_irq(dev);
		spin_unlock_irqrestore(&np->lock, flags);
	}
	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
		spin_lock_irqsave(&np->lock, flags);
		nv_linkchange(dev);
		spin_unlock_irqrestore(&np->lock, flags);
		np->link_timeout = jiffies + LINK_TIMEOUT;
	}
	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
		spin_lock_irqsave(&np->lock, flags);
		if (!np->in_shutdown) {
			np->nic_poll_irq = np->irqmask;
			np->recover_error = 1;
			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
		}
		spin_unlock_irqrestore(&np->lock, flags);
		napi_complete(napi);
		return rx_work;
	}

	if (rx_work < budget) {
		/* re-enable interrupts
		   (msix not enabled in napi) */
		napi_complete(napi);

		writel(np->irqmask, base + NvRegIrqMask);
	}
	return rx_work;
}
static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	for (i = 0;; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
		writel(events, base + NvRegMSIXIrqStatus);
		netdev_dbg(dev, "rx irq events: %08x\n", events);
		if (!(events & np->irqmask))
			break;

		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
			if (unlikely(nv_alloc_rx_optimized(dev))) {
				spin_lock_irqsave(&np->lock, flags);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock_irqrestore(&np->lock, flags);
			}
		}

		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			netdev_dbg(dev, "%s: too many iterations (%d)\n",
				   __func__, i);
			break;
		}
	}

	return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_other(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	for (i = 0;; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
		writel(events, base + NvRegMSIXIrqStatus);
		netdev_dbg(dev, "irq events: %08x\n", events);
		if (!(events & np->irqmask))
			break;

		/* check tx in case we reached max loop limit in tx isr */
		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock_irqrestore(&np->lock, flags);

		if (events & NVREG_IRQ_LINK) {
			spin_lock_irqsave(&np->lock, flags);
			nv_link_irq(dev);
			spin_unlock_irqrestore(&np->lock, flags);
		}
		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
			spin_lock_irqsave(&np->lock, flags);
			nv_linkchange(dev);
			spin_unlock_irqrestore(&np->lock, flags);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (events & NVREG_IRQ_RECOVER_ERROR) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			break;
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			netdev_dbg(dev, "%s: too many iterations (%d)\n",
				   __func__, i);
			break;
		}

	}

	return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_test(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(events & NVREG_IRQ_TIMER, base + NvRegIrqStatus);
	} else {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(events & NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
	}
	pci_push(base);
	if (!(events & NVREG_IRQ_TIMER))
		return IRQ_RETVAL(0);

	nv_msi_workaround(np);

	spin_lock(&np->lock);
	np->intr_test = 1;
	spin_unlock(&np->lock);

	return IRQ_RETVAL(1);
}
static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 msixmap = 0;

	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
	 * the remaining 8 interrupts.
	 */
	for (i = 0; i < 8; i++) {
		if ((irqmask >> i) & 0x1)
			msixmap |= vector << (i << 2);
	}
	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);

	msixmap = 0;
	for (i = 0; i < 8; i++) {
		if ((irqmask >> (i + 8)) & 0x1)
			msixmap |= vector << (i << 2);
	}
	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
}
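/*
 * Example for set_msix_vector_map() above: mapping vector 2 onto
 * interrupt bits 0 and 3 of MSIXMap0 yields
 * msixmap = (2 << 0) | (2 << 12) = 0x2002 - one 4-bit nibble per
 * interrupt source, eight sources per map register.
 */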
static int nv_request_irq(struct net_device *dev, int intr_test)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int i;
	irqreturn_t (*handler)(int foo, void *data);

	if (intr_test) {
		handler = nv_nic_irq_test;
	} else {
		if (nv_optimized(np))
			handler = nv_nic_irq_optimized;
		else
			handler = nv_nic_irq;
	}

	if (np->msi_flags & NV_MSI_X_CAPABLE) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
			np->msi_x_entry[i].entry = i;
		ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK));
		if (ret == 0) {
			np->msi_flags |= NV_MSI_X_ENABLED;
			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
				/* Request irq for rx handling */
				sprintf(np->name_rx, "%s-rx", dev->name);
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
						nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
					netdev_info(dev,
						    "request_irq failed for rx %d\n",
						    ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}
				/* Request irq for tx handling */
				sprintf(np->name_tx, "%s-tx", dev->name);
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
						nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
					netdev_info(dev,
						    "request_irq failed for tx %d\n",
						    ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_rx;
				}
				/* Request irq for link and timer handling */
				sprintf(np->name_other, "%s-other", dev->name);
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
						nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
					netdev_info(dev,
						    "request_irq failed for link %d\n",
						    ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_tx;
				}
				/* map interrupts to their respective vector */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
			} else {
				/* Request irq for all interrupts */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
					netdev_info(dev,
						    "request_irq failed %d\n",
						    ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}

				/* map interrupts to vector 0 */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
			}
			netdev_info(dev, "MSI-X enabled\n");
			return 0;
		}
	}
	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
		ret = pci_enable_msi(np->pci_dev);
		if (ret == 0) {
			np->msi_flags |= NV_MSI_ENABLED;
			if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
				netdev_info(dev, "request_irq failed %d\n",
					    ret);
				pci_disable_msi(np->pci_dev);
				np->msi_flags &= ~NV_MSI_ENABLED;
				goto out_err;
			}

			/* map interrupts to vector 0 */
			writel(0, base + NvRegMSIMap0);
			writel(0, base + NvRegMSIMap1);
			/* enable msi vector 0 */
			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
			netdev_info(dev, "MSI enabled\n");
			return 0;
		}
	}

	if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
		goto out_err;

	return 0;

out_free_tx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
out_free_rx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
out_err:
	return 1;
}
static void nv_free_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int i;

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
			free_irq(np->msi_x_entry[i].vector, dev);
		pci_disable_msix(np->pci_dev);
		np->msi_flags &= ~NV_MSI_X_ENABLED;
	} else {
		free_irq(np->pci_dev->irq, dev);
		if (np->msi_flags & NV_MSI_ENABLED) {
			pci_disable_msi(np->pci_dev);
			np->msi_flags &= ~NV_MSI_ENABLED;
		}
	}
}
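
/* Timer-driven poll path: temporarily disables the relevant interrupt
 * line(s), runs the matching irq handler(s) by hand, and, if a recoverable
 * MAC error was flagged by the ISR, resets and reinitializes the rx/tx
 * engines under the tx/addr/priv locks before re-enabling interrupts.
 */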
static void nv_do_nic_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 mask = 0;

	/*
	 * First disable irq(s) and then
	 * reenable interrupts on the nic, we have to do this before calling
	 * nv_nic_irq because that may decide to do otherwise
	 */

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq_lockdep(np->pci_dev->irq);
		mask = np->irqmask;
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
			mask |= NVREG_IRQ_RX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
			mask |= NVREG_IRQ_TX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
			mask |= NVREG_IRQ_OTHER;
		}
	}
	/* disable_irq() contains synchronize_irq, thus no irq handler can run now */

	if (np->recover_error) {
		np->recover_error = 0;
		netdev_info(dev, "MAC in recoverable error state\n");
		if (netif_running(dev)) {
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rxtx(dev);
			if (np->driver_data & DEV_HAS_POWER_CNTRL)
				nv_mac_reset(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rxtx(dev);
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
				base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);
			/* clear interrupts */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
			else
				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);

			/* restart rx engine */
			nv_start_rxtx(dev);
			spin_unlock(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
		}
	}

	writel(mask, base + NvRegIrqMask);
	pci_push(base);

	if (!using_multi_irqs(dev)) {
		np->nic_poll_irq = 0;
		if (nv_optimized(np))
			nv_nic_irq_optimized(0, dev);
		else
			nv_nic_irq(0, dev);
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq_lockdep(np->pci_dev->irq);
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
			nv_nic_irq_rx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
			nv_nic_irq_tx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
			nv_nic_irq_other(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
		}
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{
	nv_do_nic_poll((unsigned long) dev);
}
#endif
static void nv_do_stats_poll(unsigned long data)
	__acquires(&netdev_priv(dev)->hwstats_lock)
	__releases(&netdev_priv(dev)->hwstats_lock)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	/* If lock is currently taken, the stats are being refreshed
	 * and hence fresh enough */
	if (spin_trylock(&np->hwstats_lock)) {
		nv_update_stats(dev);
		spin_unlock(&np->hwstats_lock);
	}

	if (!np->in_shutdown)
		mod_timer(&np->stats_poll,
			round_jiffies(jiffies + STATS_INTERVAL));
}
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct fe_priv *np = netdev_priv(dev);
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	wolinfo->supported = WAKE_MAGIC;

	spin_lock_irq(&np->lock);
	if (np->wolenabled)
		wolinfo->wolopts = WAKE_MAGIC;
	spin_unlock_irq(&np->lock);
}
static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 flags = 0;

	if (wolinfo->wolopts == 0) {
		np->wolenabled = 0;
	} else if (wolinfo->wolopts & WAKE_MAGIC) {
		np->wolenabled = 1;
		flags = NVREG_WAKEUPFLAGS_ENABLE;
	}
	if (netif_running(dev)) {
		spin_lock_irq(&np->lock);
		writel(flags, base + NvRegWakeUpFlags);
		spin_unlock_irq(&np->lock);
	}
	device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
	return 0;
}
static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 speed;
	int adv;

	spin_lock_irq(&np->lock);
	ecmd->port = PORT_MII;
	if (!netif_running(dev)) {
		/* We do not track link speed / duplex setting if the
		 * interface is disabled. Force a link check */
		if (nv_update_linkspeed(dev)) {
			if (!netif_carrier_ok(dev))
				netif_carrier_on(dev);
		} else {
			if (netif_carrier_ok(dev))
				netif_carrier_off(dev);
		}
	}

	if (netif_carrier_ok(dev)) {
		switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
		case NVREG_LINKSPEED_10:
			speed = SPEED_10;
			break;
		case NVREG_LINKSPEED_100:
			speed = SPEED_100;
			break;
		case NVREG_LINKSPEED_1000:
			speed = SPEED_1000;
			break;
		default:
			speed = -1;
			break;
		}
		ecmd->duplex = DUPLEX_HALF;
		if (np->duplex)
			ecmd->duplex = DUPLEX_FULL;
	} else {
		speed = SPEED_UNKNOWN;
		ecmd->duplex = DUPLEX_UNKNOWN;
	}
	ethtool_cmd_speed_set(ecmd, speed);
	ecmd->autoneg = np->autoneg;

	ecmd->advertising = ADVERTISED_MII;
	if (np->autoneg) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		if (adv & ADVERTISE_10HALF)
			ecmd->advertising |= ADVERTISED_10baseT_Half;
		if (adv & ADVERTISE_10FULL)
			ecmd->advertising |= ADVERTISED_10baseT_Full;
		if (adv & ADVERTISE_100HALF)
			ecmd->advertising |= ADVERTISED_100baseT_Half;
		if (adv & ADVERTISE_100FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			if (adv & ADVERTISE_1000FULL)
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		}
	}
	ecmd->supported = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_MII);
	if (np->gigabit == PHY_GIGABIT)
		ecmd->supported |= SUPPORTED_1000baseT_Full;

	ecmd->phy_address = np->phyaddr;
	ecmd->transceiver = XCVR_EXTERNAL;

	/* ignore maxtxpkt, maxrxpkt for now */
	spin_unlock_irq(&np->lock);
	return 0;
}
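
/* Changing link settings follows the usual sequence: validate the request,
 * stop the engines if the interface is up, reprogram the PHY advertisement
 * registers, then either restart autonegotiation or force speed/duplex via
 * BMCR, and finally restart the engines.
 */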
static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(ecmd);

	if (ecmd->port != PORT_MII)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_EXTERNAL)
		return -EINVAL;
	if (ecmd->phy_address != np->phyaddr) {
		/* TODO: support switching between multiple phys. Should be
		 * trivial, but not enabled due to lack of test hardware. */
		return -EINVAL;
	}
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		u32 mask;

		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT)
			mask |= ADVERTISED_1000baseT_Full;

		if ((ecmd->advertising & mask) == 0)
			return -EINVAL;

	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		/* Note: autonegotiation disable, speed 1000 intentionally
		 * forbidden - no one should need that. */

		if (speed != SPEED_10 && speed != SPEED_100)
			return -EINVAL;
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		unsigned long flags;

		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		/* with plain spinlock lockdep complains */
		spin_lock_irqsave(&np->lock, flags);
		/* stop engines */
		/* FIXME:
		 * this can take some time, and interrupts are disabled
		 * due to spin_lock_irqsave, but let's hope no daemon
		 * is going to change the settings very often...
		 * Worst case:
		 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
		 * + some minor delays, which is up to a second approximately
		 */
		nv_stop_rxtx(dev);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	}

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		int adv, bmcr;

		np->autoneg = 1;

		/* advertise only what has been requested */
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->advertising & ADVERTISED_10baseT_Half)
			adv |= ADVERTISE_10HALF;
		if (ecmd->advertising & ADVERTISED_10baseT_Full)
			adv |= ADVERTISE_10FULL;
		if (ecmd->advertising & ADVERTISED_100baseT_Half)
			adv |= ADVERTISE_100HALF;
		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			adv |= ADVERTISE_100FULL;
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)  /* for rx we set both advertisements but disable tx pause */
			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |=  ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
				adv |= ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		if (netif_running(dev))
			netdev_info(dev, "link down\n");
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick,
			 * and cause autoneg to start */
			if (phy_reset(dev, bmcr)) {
				netdev_info(dev, "phy reset failed\n");
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}
	} else {
		int adv, bmcr;

		np->autoneg = 0;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_10HALF;
		if (speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_10FULL;
		if (speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_100HALF;
		if (speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_100FULL;
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */
			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		}
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
			adv |=  ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		}
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
		np->fixed_mode = adv;

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
			bmcr |= BMCR_FULLDPLX;
		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
			bmcr |= BMCR_SPEED100;
		if (np->phy_oui == PHY_OUI_MARVELL) {
			/* reset the phy in order for forced mode settings to stick */
			if (phy_reset(dev, bmcr)) {
				netdev_info(dev, "phy reset failed\n");
				return -EINVAL;
			}
		} else {
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
			if (netif_running(dev)) {
				/* Wait a bit and then reconfigure the nic. */
				udelay(10);
				nv_linkchange(dev);
			}
		}
	}

	if (netif_running(dev)) {
		nv_start_rxtx(dev);
		nv_enable_irq(dev);
	}

	return 0;
}
#define FORCEDETH_REGS_VER	1

static int nv_get_regs_len(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return np->register_size;
}

static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 *rbuf = buf;
	int i;

	regs->version = FORCEDETH_REGS_VER;
	spin_lock_irq(&np->lock);
	for (i = 0; i < np->register_size/sizeof(u32); i++)
		rbuf[i] = readl(base + i*sizeof(u32));
	spin_unlock_irq(&np->lock);
}
static int nv_nway_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int ret;

	if (np->autoneg) {
		int bmcr;

		netif_carrier_off(dev);
		if (netif_running(dev)) {
			nv_disable_irq(dev);
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rxtx(dev);
			spin_unlock(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
			netdev_info(dev, "link down\n");
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick*/
			if (phy_reset(dev, bmcr)) {
				netdev_info(dev, "phy reset failed\n");
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}

		if (netif_running(dev)) {
			nv_start_rxtx(dev);
			nv_enable_irq(dev);
		}
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	return ret;
}
static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
	struct fe_priv *np = netdev_priv(dev);

	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;

	ring->rx_pending = np->rx_ring_size;
	ring->tx_pending = np->tx_ring_size;
}
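
/* Ring resizing allocates the new descriptor rings and skb maps first, so an
 * allocation failure leaves the currently active rings untouched; only after
 * every allocation has succeeded are the engines stopped and the rings
 * swapped in.
 */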
static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
	dma_addr_t ring_addr;

	if (ring->rx_pending < RX_RING_MIN ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_mini_pending != 0 ||
	    ring->rx_jumbo_pending != 0 ||
	    (np->desc_ver == DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
	    (np->desc_ver != DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
		return -EINVAL;
	}

	/* allocate new rings */
	if (!nv_optimized(np)) {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
					    sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
					    &ring_addr);
	} else {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
					    sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
					    &ring_addr);
	}
	rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
	tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
		/* fall back to old rings */
		if (!nv_optimized(np)) {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		} else {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		}

		kfree(rx_skbuff);
		kfree(tx_skbuff);
		goto exit;
	}

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		nv_napi_disable(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		nv_txrx_reset(dev);
		/* drain queues */
		nv_drain_rxtx(dev);
		/* delete queues */
		free_rings(dev);
	}

	/* set new values */
	np->rx_ring_size = ring->rx_pending;
	np->tx_ring_size = ring->tx_pending;

	if (!nv_optimized(np)) {
		np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = (struct nv_skb_map *)rx_skbuff;
	np->tx_skb = (struct nv_skb_map *)tx_skbuff;
	np->ring_addr = ring_addr;

	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
	memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);

	if (netif_running(dev)) {
		/* reinit driver view of the queues */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}

		/* reinit nic view of the queues */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart engines */
		nv_start_rxtx(dev);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
		nv_napi_enable(dev);
		nv_enable_irq(dev);
	}
	return 0;
exit:
	return -ENOMEM;
}
static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
{
	struct fe_priv *np = netdev_priv(dev);

	pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
	pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
	pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
}
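
/* Pause frame advertisement convention (802.3x): requesting rx pause sets
 * both ADVERTISE_PAUSE_CAP and ADVERTISE_PAUSE_ASYM, while requesting tx
 * pause alone sets only ADVERTISE_PAUSE_ASYM; the resolved state is applied
 * by nv_update_pause() once the link is (re)negotiated.
 */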
static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv, bmcr;

	if ((!np->autoneg && np->duplex == 0) ||
	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
		netdev_info(dev, "can not set pause settings when forced link is in half duplex\n");
		return -EINVAL;
	}
	if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
		netdev_info(dev, "hardware does not support tx pause frames\n");
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	}

	np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
	if (pause->rx_pause)
		np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
	if (pause->tx_pause)
		np->pause_flags |= NV_PAUSEFRAME_TX_REQ;

	if (np->autoneg && pause->autoneg) {
		np->pause_flags |= NV_PAUSEFRAME_AUTONEG;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |=  ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (netif_running(dev))
			netdev_info(dev, "link down\n");
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
	} else {
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (pause->rx_pause)
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		if (pause->tx_pause)
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;

		if (!netif_running(dev))
			nv_update_linkspeed(dev);
		else
			nv_update_pause(dev, np->pause_flags);
	}

	if (netif_running(dev)) {
		nv_start_rxtx(dev);
		nv_enable_irq(dev);
	}
	return 0;
}
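
/* Internal PHY loopback: BMCR_LOOPBACK wraps frames inside the PHY, so no
 * real link partner is involved; the link is therefore forced to 1000 Mbps
 * full duplex and carrier is asserted manually while loopback is active.
 */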
static int nv_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned long flags;
	u32 miicontrol;
	int err, retval = 0;

	spin_lock_irqsave(&np->lock, flags);
	miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	if (features & NETIF_F_LOOPBACK) {
		if (miicontrol & BMCR_LOOPBACK) {
			spin_unlock_irqrestore(&np->lock, flags);
			netdev_info(dev, "Loopback already enabled\n");
			return 0;
		}
		nv_disable_irq(dev);
		/* Turn on loopback mode */
		miicontrol |= BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
		err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol);
		if (err) {
			retval = PHY_ERROR;
			spin_unlock_irqrestore(&np->lock, flags);
			phy_init(dev);
		} else {
			if (netif_running(dev)) {
				/* Force 1000 Mbps full-duplex */
				nv_force_linkspeed(dev, NVREG_LINKSPEED_1000,
						   1);
				/* Force link up */
				netif_carrier_on(dev);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			netdev_info(dev,
				    "Internal PHY loopback mode enabled.\n");
		}
	} else {
		if (!(miicontrol & BMCR_LOOPBACK)) {
			spin_unlock_irqrestore(&np->lock, flags);
			netdev_info(dev, "Loopback already disabled\n");
			return 0;
		}
		nv_disable_irq(dev);
		/* Turn off loopback */
		spin_unlock_irqrestore(&np->lock, flags);
		netdev_info(dev, "Internal PHY loopback mode disabled.\n");
		phy_init(dev);
	}
	msleep(500);
	spin_lock_irqsave(&np->lock, flags);
	nv_enable_irq(dev);
	spin_unlock_irqrestore(&np->lock, flags);

	return retval;
}
static netdev_features_t nv_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/* vlan is dependent on rx checksum offload */
	if (features & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
		features |= NETIF_F_RXCSUM;

	return features;
}
static void nv_vlan_mode(struct net_device *dev, netdev_features_t features)
{
	struct fe_priv *np = get_nvpriv(dev);

	spin_lock_irq(&np->lock);

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP;
	else
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;

	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS;
	else
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;

	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);

	spin_unlock_irq(&np->lock);
}
static int nv_set_features(struct net_device *dev, netdev_features_t features)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	netdev_features_t changed = dev->features ^ features;
	int retval;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) {
		retval = nv_set_loopback(dev, features);
		if (retval != 0)
			return retval;
	}

	if (changed & NETIF_F_RXCSUM) {
		spin_lock_irq(&np->lock);

		if (features & NETIF_F_RXCSUM)
			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		else
			np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;

		if (netif_running(dev))
			writel(np->txrxctl_bits, base + NvRegTxRxControl);

		spin_unlock_irq(&np->lock);
	}

	if (changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX))
		nv_vlan_mode(dev, features);

	return 0;
}
static int nv_get_sset_count(struct net_device *dev, int sset)
{
	struct fe_priv *np = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_TEST:
		if (np->driver_data & DEV_HAS_TEST_EXTENDED)
			return NV_TEST_COUNT_EXTENDED;
		else
			return NV_TEST_COUNT_BASE;
	case ETH_SS_STATS:
		if (np->driver_data & DEV_HAS_STATISTICS_V3)
			return NV_DEV_STATISTICS_V3_COUNT;
		else if (np->driver_data & DEV_HAS_STATISTICS_V2)
			return NV_DEV_STATISTICS_V2_COUNT;
		else if (np->driver_data & DEV_HAS_STATISTICS_V1)
			return NV_DEV_STATISTICS_V1_COUNT;
		else
			return 0;
	default:
		return -EOPNOTSUPP;
	}
}
static void nv_get_ethtool_stats(struct net_device *dev,
				 struct ethtool_stats *estats, u64 *buffer)
	__acquires(&netdev_priv(dev)->hwstats_lock)
	__releases(&netdev_priv(dev)->hwstats_lock)
{
	struct fe_priv *np = netdev_priv(dev);

	spin_lock_bh(&np->hwstats_lock);
	nv_update_stats(dev);
	memcpy(buffer, &np->estats,
	       nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
	spin_unlock_bh(&np->hwstats_lock);
}
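
/* Per the MII spec, BMSR latches link-down until read: the first read below
 * clears a stale link-failure indication, the second returns the current
 * link state.
 */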
static int nv_link_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int mii_status;

	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	/* check phy link status */
	if (!(mii_status & BMSR_LSTATUS))
		return 0;
	else
		return 1;
}
static int nv_register_test(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	int i = 0;
	u32 orig_read, new_read;

	do {
		orig_read = readl(base + nv_registers_test[i].reg);

		/* xor with mask to toggle bits */
		orig_read ^= nv_registers_test[i].mask;

		writel(orig_read, base + nv_registers_test[i].reg);

		new_read = readl(base + nv_registers_test[i].reg);

		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
			return 0;

		/* restore original value */
		orig_read ^= nv_registers_test[i].mask;
		writel(orig_read, base + nv_registers_test[i].reg);

	} while (nv_registers_test[++i].reg != 0);

	return 1;
}
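
/* The interrupt self-test re-requests a single test vector wired to
 * nv_nic_irq_test(), arms only the timer interrupt, and then checks that
 * the handler set np->intr_test within the short sleep window below.
 */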
static int nv_interrupt_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int testcnt;
	u32 save_msi_flags, save_poll_interval = 0;

	if (netif_running(dev)) {
		/* free current irq */
		nv_free_irq(dev);
		save_poll_interval = readl(base+NvRegPollingInterval);
	}

	/* flag to test interrupt handler */
	np->intr_test = 0;

	/* setup test irq */
	save_msi_flags = np->msi_flags;
	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
	np->msi_flags |= 0x001; /* setup 1 vector */
	if (nv_request_irq(dev, 1))
		return 0;

	/* setup timer interrupt */
	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);

	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);

	/* wait for at least one interrupt */
	msleep(100);

	spin_lock_irq(&np->lock);

	/* flag should be set within ISR */
	testcnt = np->intr_test;
	if (!testcnt)
		ret = 2;

	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
	if (!(np->msi_flags & NV_MSI_X_ENABLED))
		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	else
		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	np->msi_flags = save_msi_flags;

	if (netif_running(dev)) {
		writel(save_poll_interval, base + NvRegPollingInterval);
		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
		/* restore original irq */
		if (nv_request_irq(dev, 0))
			return 0;
	}

	return ret;
}
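
/* Loopback self-test sketch: place the MAC into loopback via NvRegMisc1 and
 * the packet filter, transmit one ETH_DATA_LEN byte-pattern frame from tx
 * ring slot 0, then verify the frame arrives intact in rx ring slot 0.
 */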
static int nv_loopback_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	struct sk_buff *tx_skb, *rx_skb;
	dma_addr_t test_dma_addr;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	u32 flags;
	int len, i, pkt_len;
	u8 *pkt_data;
	u32 filter_flags = 0;
	u32 misc1_flags = 0;
	int ret = 1;

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		filter_flags = readl(base + NvRegPacketFilterFlags);
		misc1_flags = readl(base + NvRegMisc1);
	} else {
		nv_txrx_reset(dev);
	}

	/* reinit driver view of the rx queue */
	set_bufsize(dev);
	nv_init_ring(dev);

	/* setup hardware for loopback */
	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);

	/* reinit nic view of the rx queue */
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		base + NvRegRingSizes);
	pci_push(base);

	/* restart rx engine */
	nv_start_rxtx(dev);

	/* setup packet for tx */
	pkt_len = ETH_DATA_LEN;
	tx_skb = netdev_alloc_skb(dev, pkt_len);
	if (!tx_skb) {
		ret = 0;
		goto out;
	}
	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
				       skb_tailroom(tx_skb),
				       PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(np->pci_dev,
				  test_dma_addr)) {
		dev_kfree_skb_any(tx_skb);
		goto out;
	}
	pkt_data = skb_put(tx_skb, pkt_len);
	for (i = 0; i < pkt_len; i++)
		pkt_data[i] = (u8)(i & 0xff);

	if (!nv_optimized(np)) {
		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	} else {
		np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
		np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	}
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));

	msleep(500);

	/* check for rx of the packet */
	if (!nv_optimized(np)) {
		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);

	} else {
		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
	}

	if (flags & NV_RX_AVAIL) {
		ret = 0;
	} else if (np->desc_ver == DESC_VER_1) {
		if (flags & NV_RX_ERROR)
			ret = 0;
	} else {
		if (flags & NV_RX2_ERROR)
			ret = 0;
	}

	if (ret) {
		if (len != pkt_len) {
			ret = 0;
		} else {
			rx_skb = np->rx_skb[0].skb;
			for (i = 0; i < pkt_len; i++) {
				if (rx_skb->data[i] != (u8)(i & 0xff)) {
					ret = 0;
					break;
				}
			}
		}
	}

	pci_unmap_single(np->pci_dev, test_dma_addr,
			 (skb_end_pointer(tx_skb) - tx_skb->data),
			 PCI_DMA_TODEVICE);
	dev_kfree_skb_any(tx_skb);
 out:
	/* stop engines */
	nv_stop_rxtx(dev);
	nv_txrx_reset(dev);
	/* drain rx queue */
	nv_drain_rxtx(dev);

	if (netif_running(dev)) {
		writel(misc1_flags, base + NvRegMisc1);
		writel(filter_flags, base + NvRegPacketFilterFlags);
		nv_enable_irq(dev);
	}

	return ret;
}
static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int result, count;

	count = nv_get_sset_count(dev, ETH_SS_TEST);
	memset(buffer, 0, count * sizeof(u64));

	if (!nv_link_test(dev)) {
		test->flags |= ETH_TEST_FL_FAILED;
		buffer[0] = 1;
	}

	if (test->flags & ETH_TEST_FL_OFFLINE) {
		if (netif_running(dev)) {
			netif_stop_queue(dev);
			nv_napi_disable(dev);
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock_irq(&np->lock);
			nv_disable_hw_interrupts(dev, np->irqmask);
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
			else
				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
			/* stop engines */
			nv_stop_rxtx(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rxtx(dev);
			spin_unlock_irq(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
		}

		if (!nv_register_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[1] = 1;
		}

		result = nv_interrupt_test(dev);
		if (result != 1) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[2] = 1;
		}
		if (result == 0) {
			/* bail out */
			return;
		}

		if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[3] = 1;
		}

		if (netif_running(dev)) {
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
				base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);
			/* restart rx engine */
			nv_start_rxtx(dev);
			netif_start_queue(dev);
			nv_napi_enable(dev);
			nv_enable_hw_interrupts(dev, np->irqmask);
		}
	}
}
static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
		break;
	case ETH_SS_TEST:
		memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
		break;
	}
}
static const struct ethtool_ops ops = {
	.get_drvinfo = nv_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_wol = nv_get_wol,
	.set_wol = nv_set_wol,
	.get_settings = nv_get_settings,
	.set_settings = nv_set_settings,
	.get_regs_len = nv_get_regs_len,
	.get_regs = nv_get_regs,
	.nway_reset = nv_nway_reset,
	.get_ringparam = nv_get_ringparam,
	.set_ringparam = nv_set_ringparam,
	.get_pauseparam = nv_get_pauseparam,
	.set_pauseparam = nv_set_pauseparam,
	.get_strings = nv_get_strings,
	.get_ethtool_stats = nv_get_ethtool_stats,
	.get_sset_count = nv_get_sset_count,
	.self_test = nv_self_test,
	.get_ts_info = ethtool_op_get_ts_info,
};
/* The mgmt unit and driver use a semaphore to access the phy during init */
static int nv_mgmt_acquire_sema(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 tx_ctrl, mgmt_sema;

	for (i = 0; i < 10; i++) {
		mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
		if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
			break;
		msleep(500);
	}

	if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
		return 0;

	for (i = 0; i < 2; i++) {
		tx_ctrl = readl(base + NvRegTransmitterControl);
		tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
		writel(tx_ctrl, base + NvRegTransmitterControl);

		/* verify that semaphore was acquired */
		tx_ctrl = readl(base + NvRegTransmitterControl);
		if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
			np->mgmt_sema = 1;
			return 1;
		} else
			udelay(50);
	}

	return 0;
}
static void nv_mgmt_release_sema(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl;

	if (np->driver_data & DEV_HAS_MGMT_UNIT) {
		if (np->mgmt_sema) {
			tx_ctrl = readl(base + NvRegTransmitterControl);
			tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ;
			writel(tx_ctrl, base + NvRegTransmitterControl);
		}
	}
}
static int nv_mgmt_get_version(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 data_ready = readl(base + NvRegTransmitterControl);
	u32 data_ready2 = 0;
	unsigned long start;
	int ready = 0;

	writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion);
	writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl);
	start = jiffies;
	while (time_before(jiffies, start + 5*HZ)) {
		data_ready2 = readl(base + NvRegTransmitterControl);
		if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) {
			ready = 1;
			break;
		}
		schedule_timeout_uninterruptible(1);
	}

	if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR))
		return 0;

	np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION;

	return 1;
}
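
/* Bring-up order matters in nv_open(): the PHY is powered up and previous
 * configuration erased before the rings are handed to the hardware,
 * interrupts are enabled only after the irq handlers are requested, and one
 * manual nv_update_linkspeed() covers a link that came up before interrupts
 * were armed.
 */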
static int nv_open(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int oom, i;
	u32 low;

	/* power up phy */
	mii_rw(dev, np->phyaddr, MII_BMCR,
	       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);

	nv_txrx_gate(dev, false);
	/* erase previous misconfiguration */
	if (np->driver_data & DEV_HAS_POWER_CNTRL)
		nv_mac_reset(dev);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
	writel(0, base + NvRegPacketFilterFlags);

	writel(0, base + NvRegTransmitterControl);
	writel(0, base + NvRegReceiverControl);

	writel(0, base + NvRegAdapterControl);

	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
		writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);

	/* initialize descriptor rings */
	set_bufsize(dev);
	oom = nv_init_ring(dev);

	writel(0, base + NvRegLinkSpeed);
	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
	nv_txrx_reset(dev);
	writel(0, base + NvRegUnknownSetupReg6);

	np->in_shutdown = 0;

	/* give hw rings */
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		base + NvRegRingSizes);

	writel(np->linkspeed, base + NvRegLinkSpeed);
	if (np->desc_ver == DESC_VER_1)
		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
	else
		writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
	writel(np->txrxctl_bits, base + NvRegTxRxControl);
	writel(np->vlanctl_bits, base + NvRegVlanControl);
	pci_push(base);
	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
	if (reg_delay(dev, NvRegUnknownSetupReg5,
		      NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
		      NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX))
		netdev_info(dev,
			    "%s: SetupReg5, Bit 31 remained off\n", __func__);

	writel(0, base + NvRegMIIMask);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);

	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);

	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);

	get_random_bytes(&low, sizeof(low));
	low &= NVREG_SLOTTIME_MASK;
	if (np->desc_ver == DESC_VER_1) {
		writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
	} else {
		if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
			/* setup legacy backoff */
			writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
		} else {
			writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
			nv_gear_backoff_reseed(dev);
		}
	}
	writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
	writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
	if (poll_interval == -1) {
		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
		else
			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	} else
		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
		base + NvRegAdapterControl);
	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
	writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
	if (np->wolenabled)
		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);

	i = readl(base + NvRegPowerState);
	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);

	pci_push(base);
	udelay(10);
	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);

	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	pci_push(base);

	if (nv_request_irq(dev, 0))
		goto out_drain;

	/* ask for interrupts */
	nv_enable_hw_interrupts(dev, np->irqmask);

	spin_lock_irq(&np->lock);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
	/* One manual link speed update: Interrupts are enabled, future link
	 * speed changes cause interrupts and are handled by nv_link_irq().
	 */
	{
		u32 miistat;
		miistat = readl(base + NvRegMIIStatus);
		writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
	}
	/* set linkspeed to invalid value, thus force nv_update_linkspeed
	 * to init hw */
	np->linkspeed = 0;
	ret = nv_update_linkspeed(dev);
	nv_start_rxtx(dev);
	netif_start_queue(dev);
	nv_napi_enable(dev);

	if (ret) {
		netif_carrier_on(dev);
	} else {
		netdev_info(dev, "no link during initialization\n");
		netif_carrier_off(dev);
	}
	if (oom)
		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);

	/* start statistics timer */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
		mod_timer(&np->stats_poll,
			round_jiffies(jiffies + STATS_INTERVAL));

	spin_unlock_irq(&np->lock);

	/* If the loopback feature was set while the device was down, make sure
	 * that it's set correctly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		nv_set_loopback(dev, dev->features);

	return 0;
out_drain:
	nv_drain_rxtx(dev);
	return ret;
}
static int nv_close(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base;

	spin_lock_irq(&np->lock);
	np->in_shutdown = 1;
	spin_unlock_irq(&np->lock);
	nv_napi_disable(dev);
	synchronize_irq(np->pci_dev->irq);

	del_timer_sync(&np->oom_kick);
	del_timer_sync(&np->nic_poll);
	del_timer_sync(&np->stats_poll);

	netif_stop_queue(dev);
	spin_lock_irq(&np->lock);
	nv_update_pause(dev, 0); /* otherwise stop_tx bricks NIC */
	nv_stop_rxtx(dev);
	nv_txrx_reset(dev);

	/* disable interrupts on the nic or we will lock up */
	base = get_hwbase(dev);
	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	nv_drain_rxtx(dev);

	if (np->wolenabled || !phy_power_down) {
		nv_txrx_gate(dev, false);
		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
		nv_start_rx(dev);
	} else {
		/* power down phy */
		mii_rw(dev, np->phyaddr, MII_BMCR,
		       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
		nv_txrx_gate(dev, true);
	}

	/* FIXME: power down nic */

	return 0;
}
static const struct net_device_ops nv_netdev_ops = {
	.ndo_open		= nv_open,
	.ndo_stop		= nv_close,
	.ndo_get_stats64	= nv_get_stats64,
	.ndo_start_xmit		= nv_start_xmit,
	.ndo_tx_timeout		= nv_tx_timeout,
	.ndo_change_mtu		= nv_change_mtu,
	.ndo_fix_features	= nv_fix_features,
	.ndo_set_features	= nv_set_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= nv_set_mac_address,
	.ndo_set_rx_mode	= nv_set_multicast,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= nv_poll_controller,
#endif
};
static const struct net_device_ops nv_netdev_ops_optimized = {
	.ndo_open		= nv_open,
	.ndo_stop		= nv_close,
	.ndo_get_stats64	= nv_get_stats64,
	.ndo_start_xmit		= nv_start_xmit_optimized,
	.ndo_tx_timeout		= nv_tx_timeout,
	.ndo_change_mtu		= nv_change_mtu,
	.ndo_fix_features	= nv_fix_features,
	.ndo_set_features	= nv_set_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= nv_set_mac_address,
	.ndo_set_rx_mode	= nv_set_multicast,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= nv_poll_controller,
#endif
};
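
/* Probe flow: map the register window sized by the feature flags, pick the
 * descriptor format (and with it DMA addressing and jumbo support), recover
 * the MAC address (possibly byte-reversed by older BIOSes), take the NIC out
 * of low power, negotiate with the management unit for PHY access, scan the
 * MII bus for a PHY, and finally register the netdev.
 */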
5596 static int nv_probe(struct pci_dev
*pci_dev
, const struct pci_device_id
*id
)
5598 struct net_device
*dev
;
5603 u32 powerstate
, txreg
;
5604 u32 phystate_orig
= 0, phystate
;
5605 int phyinitialized
= 0;
5606 static int printed_version
;
5608 if (!printed_version
++)
5609 pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
5612 dev
= alloc_etherdev(sizeof(struct fe_priv
));
5617 np
= netdev_priv(dev
);
5619 np
->pci_dev
= pci_dev
;
5620 spin_lock_init(&np
->lock
);
5621 spin_lock_init(&np
->hwstats_lock
);
5622 SET_NETDEV_DEV(dev
, &pci_dev
->dev
);
5623 u64_stats_init(&np
->swstats_rx_syncp
);
5624 u64_stats_init(&np
->swstats_tx_syncp
);
5626 init_timer(&np
->oom_kick
);
5627 np
->oom_kick
.data
= (unsigned long) dev
;
5628 np
->oom_kick
.function
= nv_do_rx_refill
; /* timer handler */
5629 init_timer(&np
->nic_poll
);
5630 np
->nic_poll
.data
= (unsigned long) dev
;
5631 np
->nic_poll
.function
= nv_do_nic_poll
; /* timer handler */
5632 init_timer_deferrable(&np
->stats_poll
);
5633 np
->stats_poll
.data
= (unsigned long) dev
;
5634 np
->stats_poll
.function
= nv_do_stats_poll
; /* timer handler */
5636 err
= pci_enable_device(pci_dev
);
5640 pci_set_master(pci_dev
);
5642 err
= pci_request_regions(pci_dev
, DRV_NAME
);
5646 if (id
->driver_data
& (DEV_HAS_VLAN
|DEV_HAS_MSI_X
|DEV_HAS_POWER_CNTRL
|DEV_HAS_STATISTICS_V2
|DEV_HAS_STATISTICS_V3
))
5647 np
->register_size
= NV_PCI_REGSZ_VER3
;
5648 else if (id
->driver_data
& DEV_HAS_STATISTICS_V1
)
5649 np
->register_size
= NV_PCI_REGSZ_VER2
;
5651 np
->register_size
= NV_PCI_REGSZ_VER1
;
5655 for (i
= 0; i
< DEVICE_COUNT_RESOURCE
; i
++) {
5656 if (pci_resource_flags(pci_dev
, i
) & IORESOURCE_MEM
&&
5657 pci_resource_len(pci_dev
, i
) >= np
->register_size
) {
5658 addr
= pci_resource_start(pci_dev
, i
);
5662 if (i
== DEVICE_COUNT_RESOURCE
) {
5663 dev_info(&pci_dev
->dev
, "Couldn't find register window\n");
5667 /* copy of driver data */
5668 np
->driver_data
= id
->driver_data
;
5669 /* copy of device id */
5670 np
->device_id
= id
->device
;
5672 /* handle different descriptor versions */
5673 if (id
->driver_data
& DEV_HAS_HIGH_DMA
) {
5674 /* packet format 3: supports 40-bit addressing */
5675 np
->desc_ver
= DESC_VER_3
;
5676 np
->txrxctl_bits
= NVREG_TXRXCTL_DESC_3
;
5678 if (pci_set_dma_mask(pci_dev
, DMA_BIT_MASK(39)))
5679 dev_info(&pci_dev
->dev
,
5680 "64-bit DMA failed, using 32-bit addressing\n");
5682 dev
->features
|= NETIF_F_HIGHDMA
;
5683 if (pci_set_consistent_dma_mask(pci_dev
, DMA_BIT_MASK(39))) {
5684 dev_info(&pci_dev
->dev
,
5685 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
5688 } else if (id
->driver_data
& DEV_HAS_LARGEDESC
) {
5689 /* packet format 2: supports jumbo frames */
5690 np
->desc_ver
= DESC_VER_2
;
5691 np
->txrxctl_bits
= NVREG_TXRXCTL_DESC_2
;
5693 /* original packet format */
5694 np
->desc_ver
= DESC_VER_1
;
5695 np
->txrxctl_bits
= NVREG_TXRXCTL_DESC_1
;
5698 np
->pkt_limit
= NV_PKTLIMIT_1
;
5699 if (id
->driver_data
& DEV_HAS_LARGEDESC
)
5700 np
->pkt_limit
= NV_PKTLIMIT_2
;
5702 if (id
->driver_data
& DEV_HAS_CHECKSUM
) {
5703 np
->txrxctl_bits
|= NVREG_TXRXCTL_RXCHECK
;
5704 dev
->hw_features
|= NETIF_F_IP_CSUM
| NETIF_F_SG
|
5705 NETIF_F_TSO
| NETIF_F_RXCSUM
;
5708 np
->vlanctl_bits
= 0;
5709 if (id
->driver_data
& DEV_HAS_VLAN
) {
5710 np
->vlanctl_bits
= NVREG_VLANCONTROL_ENABLE
;
5711 dev
->hw_features
|= NETIF_F_HW_VLAN_CTAG_RX
|
5712 NETIF_F_HW_VLAN_CTAG_TX
;
5715 dev
->features
|= dev
->hw_features
;
5717 /* Add loopback capability to the device. */
5718 dev
->hw_features
|= NETIF_F_LOOPBACK
;
5720 np
->pause_flags
= NV_PAUSEFRAME_RX_CAPABLE
| NV_PAUSEFRAME_RX_REQ
| NV_PAUSEFRAME_AUTONEG
;
5721 if ((id
->driver_data
& DEV_HAS_PAUSEFRAME_TX_V1
) ||
5722 (id
->driver_data
& DEV_HAS_PAUSEFRAME_TX_V2
) ||
5723 (id
->driver_data
& DEV_HAS_PAUSEFRAME_TX_V3
)) {
5724 np
->pause_flags
|= NV_PAUSEFRAME_TX_CAPABLE
| NV_PAUSEFRAME_TX_REQ
;
5728 np
->base
= ioremap(addr
, np
->register_size
);
5732 np
->rx_ring_size
= RX_RING_DEFAULT
;
5733 np
->tx_ring_size
= TX_RING_DEFAULT
;
5735 if (!nv_optimized(np
)) {
5736 np
->rx_ring
.orig
= pci_alloc_consistent(pci_dev
,
5737 sizeof(struct ring_desc
) * (np
->rx_ring_size
+ np
->tx_ring_size
),
5739 if (!np
->rx_ring
.orig
)
5741 np
->tx_ring
.orig
= &np
->rx_ring
.orig
[np
->rx_ring_size
];
5743 np
->rx_ring
.ex
= pci_alloc_consistent(pci_dev
,
5744 sizeof(struct ring_desc_ex
) * (np
->rx_ring_size
+ np
->tx_ring_size
),
5746 if (!np
->rx_ring
.ex
)
5748 np
->tx_ring
.ex
= &np
->rx_ring
.ex
[np
->rx_ring_size
];
5750 np
->rx_skb
= kcalloc(np
->rx_ring_size
, sizeof(struct nv_skb_map
), GFP_KERNEL
);
5751 np
->tx_skb
= kcalloc(np
->tx_ring_size
, sizeof(struct nv_skb_map
), GFP_KERNEL
);
5752 if (!np
->rx_skb
|| !np
->tx_skb
)
5755 if (!nv_optimized(np
))
5756 dev
->netdev_ops
= &nv_netdev_ops
;
5758 dev
->netdev_ops
= &nv_netdev_ops_optimized
;
5760 netif_napi_add(dev
, &np
->napi
, nv_napi_poll
, RX_WORK_PER_LOOP
);
5761 SET_ETHTOOL_OPS(dev
, &ops
);
5762 dev
->watchdog_timeo
= NV_WATCHDOG_TIMEO
;
5764 pci_set_drvdata(pci_dev
, dev
);
5766 /* read the mac address */
5767 base
= get_hwbase(dev
);
5768 np
->orig_mac
[0] = readl(base
+ NvRegMacAddrA
);
5769 np
->orig_mac
[1] = readl(base
+ NvRegMacAddrB
);
5771 /* check the workaround bit for correct mac address order */
5772 txreg
= readl(base
+ NvRegTransmitPoll
);
5773 if (id
->driver_data
& DEV_HAS_CORRECT_MACADDR
) {
5774 /* mac address is already in correct order */
5775 dev
->dev_addr
[0] = (np
->orig_mac
[0] >> 0) & 0xff;
5776 dev
->dev_addr
[1] = (np
->orig_mac
[0] >> 8) & 0xff;
5777 dev
->dev_addr
[2] = (np
->orig_mac
[0] >> 16) & 0xff;
5778 dev
->dev_addr
[3] = (np
->orig_mac
[0] >> 24) & 0xff;
5779 dev
->dev_addr
[4] = (np
->orig_mac
[1] >> 0) & 0xff;
5780 dev
->dev_addr
[5] = (np
->orig_mac
[1] >> 8) & 0xff;
5781 } else if (txreg
& NVREG_TRANSMITPOLL_MAC_ADDR_REV
) {
5782 /* mac address is already in correct order */
5783 dev
->dev_addr
[0] = (np
->orig_mac
[0] >> 0) & 0xff;
5784 dev
->dev_addr
[1] = (np
->orig_mac
[0] >> 8) & 0xff;
5785 dev
->dev_addr
[2] = (np
->orig_mac
[0] >> 16) & 0xff;
5786 dev
->dev_addr
[3] = (np
->orig_mac
[0] >> 24) & 0xff;
5787 dev
->dev_addr
[4] = (np
->orig_mac
[1] >> 0) & 0xff;
5788 dev
->dev_addr
[5] = (np
->orig_mac
[1] >> 8) & 0xff;
5790 * Set orig mac address back to the reversed version.
5791 * This flag will be cleared during low power transition.
5792 * Therefore, we should always put back the reversed address.
5794 np
->orig_mac
[0] = (dev
->dev_addr
[5] << 0) + (dev
->dev_addr
[4] << 8) +
5795 (dev
->dev_addr
[3] << 16) + (dev
->dev_addr
[2] << 24);
5796 np
->orig_mac
[1] = (dev
->dev_addr
[1] << 0) + (dev
->dev_addr
[0] << 8);
5798 /* need to reverse mac address to correct order */
5799 dev
->dev_addr
[0] = (np
->orig_mac
[1] >> 8) & 0xff;
5800 dev
->dev_addr
[1] = (np
->orig_mac
[1] >> 0) & 0xff;
5801 dev
->dev_addr
[2] = (np
->orig_mac
[0] >> 24) & 0xff;
5802 dev
->dev_addr
[3] = (np
->orig_mac
[0] >> 16) & 0xff;
5803 dev
->dev_addr
[4] = (np
->orig_mac
[0] >> 8) & 0xff;
5804 dev
->dev_addr
[5] = (np
->orig_mac
[0] >> 0) & 0xff;
5805 writel(txreg
|NVREG_TRANSMITPOLL_MAC_ADDR_REV
, base
+ NvRegTransmitPoll
);
5806 dev_dbg(&pci_dev
->dev
,
5807 "%s: set workaround bit for reversed mac addr\n",
5811 if (!is_valid_ether_addr(dev
->dev_addr
)) {
5813 * Bad mac address. At least one bios sets the mac address
5814 * to 01:23:45:67:89:ab
5816 dev_err(&pci_dev
->dev
,
5817 "Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
5819 eth_hw_addr_random(dev
);
5820 dev_err(&pci_dev
->dev
,
5821 "Using random MAC address: %pM\n", dev
->dev_addr
);
5824 /* set mac address */
5825 nv_copy_mac_to_hw(dev
);
5828 writel(0, base
+ NvRegWakeUpFlags
);
5830 device_set_wakeup_enable(&pci_dev
->dev
, false);
5832 if (id
->driver_data
& DEV_HAS_POWER_CNTRL
) {
5834 /* take phy and nic out of low power mode */
5835 powerstate
= readl(base
+ NvRegPowerState2
);
5836 powerstate
&= ~NVREG_POWERSTATE2_POWERUP_MASK
;
5837 if ((id
->driver_data
& DEV_NEED_LOW_POWER_FIX
) &&
5838 pci_dev
->revision
>= 0xA3)
5839 powerstate
|= NVREG_POWERSTATE2_POWERUP_REV_A3
;
5840 writel(powerstate
, base
+ NvRegPowerState2
);
5843 if (np
->desc_ver
== DESC_VER_1
)
5844 np
->tx_flags
= NV_TX_VALID
;
5846 np
->tx_flags
= NV_TX2_VALID
;
5849 if ((id
->driver_data
& DEV_HAS_MSI
) && msi
)
5850 np
->msi_flags
|= NV_MSI_CAPABLE
;
5852 if ((id
->driver_data
& DEV_HAS_MSI_X
) && msix
) {
5853 /* msix has had reported issues when modifying irqmask
5854 as in the case of napi, therefore, disable for now
5857 np
->msi_flags
|= NV_MSI_X_CAPABLE
;
5861 if (optimization_mode
== NV_OPTIMIZATION_MODE_CPU
) {
5862 np
->irqmask
= NVREG_IRQMASK_CPU
;
5863 if (np
->msi_flags
& NV_MSI_X_CAPABLE
) /* set number of vectors */
5864 np
->msi_flags
|= 0x0001;
5865 } else if (optimization_mode
== NV_OPTIMIZATION_MODE_DYNAMIC
&&
5866 !(id
->driver_data
& DEV_NEED_TIMERIRQ
)) {
5867 /* start off in throughput mode */
5868 np
->irqmask
= NVREG_IRQMASK_THROUGHPUT
;
5869 /* remove support for msix mode */
5870 np
->msi_flags
&= ~NV_MSI_X_CAPABLE
;
5872 optimization_mode
= NV_OPTIMIZATION_MODE_THROUGHPUT
;
5873 np
->irqmask
= NVREG_IRQMASK_THROUGHPUT
;
5874 if (np
->msi_flags
& NV_MSI_X_CAPABLE
) /* set number of vectors */
5875 np
->msi_flags
|= 0x0003;
5878 if (id
->driver_data
& DEV_NEED_TIMERIRQ
)
5879 np
->irqmask
|= NVREG_IRQ_TIMER
;
5880 if (id
->driver_data
& DEV_NEED_LINKTIMER
) {
5881 np
->need_linktimer
= 1;
5882 np
->link_timeout
= jiffies
+ LINK_TIMEOUT
;
5884 np
->need_linktimer
= 0;
5887 /* Limit the number of tx's outstanding for hw bug */
5888 if (id
->driver_data
& DEV_NEED_TX_LIMIT
) {
5890 if (((id
->driver_data
& DEV_NEED_TX_LIMIT2
) == DEV_NEED_TX_LIMIT2
) &&
5891 pci_dev
->revision
>= 0xA2)
5895 /* clear phy state and temporarily halt phy interrupts */
5896 writel(0, base
+ NvRegMIIMask
);
5897 phystate
= readl(base
+ NvRegAdapterControl
);
5898 if (phystate
& NVREG_ADAPTCTL_RUNNING
) {
5900 phystate
&= ~NVREG_ADAPTCTL_RUNNING
;
5901 writel(phystate
, base
+ NvRegAdapterControl
);
5903 writel(NVREG_MIISTAT_MASK_ALL
, base
+ NvRegMIIStatus
);
5905 if (id
->driver_data
& DEV_HAS_MGMT_UNIT
) {
5906 /* management unit running on the mac? */
5907 if ((readl(base
+ NvRegTransmitterControl
) & NVREG_XMITCTL_MGMT_ST
) &&
5908 (readl(base
+ NvRegTransmitterControl
) & NVREG_XMITCTL_SYNC_PHY_INIT
) &&
5909 nv_mgmt_acquire_sema(dev
) &&
5910 nv_mgmt_get_version(dev
)) {
5912 if (np
->mgmt_version
> 0)
5913 np
->mac_in_use
= readl(base
+ NvRegMgmtUnitControl
) & NVREG_MGMTUNITCONTROL_INUSE
;
5914 /* management unit setup the phy already? */
5915 if (np
->mac_in_use
&&
5916 ((readl(base
+ NvRegTransmitterControl
) & NVREG_XMITCTL_SYNC_MASK
) ==
5917 NVREG_XMITCTL_SYNC_PHY_INIT
)) {
5918 /* phy is inited by mgmt unit */
5921 /* we need to init the phy */
	/* find a suitable phy */
	for (i = 1; i <= 32; i++) {
		int id1, id2;
		int phyaddr = i & 0x1F;

		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		np->phy_model = id2 & PHYID2_MODEL_MASK;
		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		np->phyaddr = phyaddr;
		np->phy_oui = id1 | id2;

		/* Realtek hardcoded phy id1 to all zero's on certain phys */
		if (np->phy_oui == PHY_OUI_REALTEK2)
			np->phy_oui = PHY_OUI_REALTEK;
		/* Setup phy revision for Realtek */
		if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
			np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;

		break;
	}
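	/*
	 * Note the scan order above: i runs from 1 to 32, so phy address 0
	 * is probed last (32 & 0x1F == 0), presumably because address 0 is
	 * ambiguous on some parts; a read of 0xffff (or a negative mii_rw()
	 * result) means nothing answered at that address.
	 */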
	if (i == 33) {
		dev_info(&pci_dev->dev, "open: Could not find a valid PHY\n");
		goto out_error;
	}
	if (!phyinitialized) {
		/* reset it */
		phy_init(dev);
	} else {
		/* see if it is a gigabit phy */
		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

		if (mii_status & PHY_GIGABIT)
			np->gigabit = PHY_GIGABIT;
	}
	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;
	err = register_netdev(dev);
	if (err) {
		dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err);
		goto out_error;
	}

	netif_carrier_off(dev);
	/* Some NICs freeze when TX pause is enabled while NIC is
	 * down, and this stays across warm reboots. The sequence
	 * below should be enough to recover from that state.
	 */
	nv_update_pause(dev, 0);
	nv_start_tx(dev);
	nv_stop_tx(dev);
	if (id->driver_data & DEV_HAS_VLAN)
		nv_vlan_mode(dev, dev->features);
	dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
		 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
	dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
		 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
		 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
			"csum " : "",
		 dev->features & (NETIF_F_HW_VLAN_CTAG_RX |
				  NETIF_F_HW_VLAN_CTAG_TX) ?
			"vlan " : "",
		 dev->features & (NETIF_F_LOOPBACK) ?
			"loopback " : "",
		 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
		 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
		 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
		 np->gigabit == PHY_GIGABIT ? "gbit " : "",
		 np->need_linktimer ? "lnktim " : "",
		 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
		 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
		 np->desc_ver);

	return 0;
out_error:
	if (phystate_orig)
		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
out_freering:
	free_rings(dev);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}
static void nv_restore_phy(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u16 phy_reserved, mii_control;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8201 &&
	    phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
		phy_reserved |= PHY_REALTEK_INIT8;
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);

		/* restart auto negotiation */
		mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
		mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
	}
}
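/*
 * nv_restore_phy() above undoes the crossover-detection override applied
 * during phy setup when phy_cross is disabled, presumably so that a later
 * driver load or a kexec'd kernel finds the Realtek 8201 in its default
 * state.
 */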
static void nv_restore_mac_addr(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);
	writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
	       base + NvRegTransmitPoll);
}
static void nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);

	unregister_netdev(dev);

	nv_restore_mac_addr(pci_dev);

	/* restore any phy related changes */
	nv_restore_phy(dev);

	nv_mgmt_release_sema(dev);

	/* free all structures */
	free_rings(dev);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
}
#ifdef CONFIG_PM_SLEEP
static int nv_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i;

	if (netif_running(dev))
		nv_close(dev);
	netif_device_detach(dev);

	/* save non-pci configuration space */
	for (i = 0; i <= np->register_size/sizeof(u32); i++)
		np->saved_config_space[i] = readl(base + i*sizeof(u32));

	return 0;
}
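/*
 * Rather than tracking individual registers, the save loop above snapshots
 * the device's whole MMIO register window (np->register_size bytes) into
 * np->saved_config_space, and nv_resume() below simply replays it word by
 * word before reinitialising the phy.
 */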
static int nv_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i, rc = 0;

	/* restore non-pci configuration space */
	for (i = 0; i <= np->register_size/sizeof(u32); i++)
		writel(np->saved_config_space[i], base + i*sizeof(u32));

	if (np->driver_data & DEV_NEED_MSI_FIX)
		pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);

	/* restore phy state, including autoneg */
	phy_init(dev);

	netif_device_attach(dev);
	if (netif_running(dev)) {
		rc = nv_open(dev);
		nv_set_multicast(dev);
	}
	return rc;
}
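/*
 * NV_MSI_PRIV_OFFSET appears to name a vendor-private PCI config register
 * that is not preserved across suspend; boards flagged DEV_NEED_MSI_FIX
 * get it rewritten above before the interface is reopened.
 */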
static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
#define NV_PM_OPS (&nv_pm_ops)

#else

#define NV_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static void nv_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);

	if (netif_running(dev))
		nv_close(dev);

	/*
	 * Restore the MAC so a kernel started by kexec won't get confused.
	 * If we really go for poweroff, we must not restore the MAC,
	 * otherwise the MAC for WOL will be reversed at least on some boards.
	 */
	if (system_state != SYSTEM_POWER_OFF)
		nv_restore_mac_addr(pdev);

	pci_disable_device(pdev);
	/*
	 * Apparently it is not possible to reinitialise from D3 hot,
	 * only put the device into D3 if we really go for poweroff.
	 */
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, np->wolenabled);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
#else
#define nv_shutdown NULL
#endif /* CONFIG_PM */
static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
	{	/* nForce Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x01C3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce2 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0066),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x00D6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0086),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x008C),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x00E6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x00DF),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0056),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0057),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0037),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0038),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0268),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0269),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0372),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0373),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03E5),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03E6),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03EE),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03EF),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0450),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0451),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0452),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0453),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054C),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054D),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054E),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054F),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DC),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DD),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DE),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DF),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0760),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0761),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0762),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0763),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB0),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB1),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB2),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB3),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP89 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0D7D),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
	},
	{0,},
};
static struct pci_driver forcedeth_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= pci_tbl,
	.probe		= nv_probe,
	.remove		= nv_remove,
	.shutdown	= nv_shutdown,
	.driver.pm	= NV_PM_OPS,
};
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_cross, int, 0);
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_power_down, int, 0);
MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
module_param(debug_tx_timeout, bool, 0);
MODULE_PARM_DESC(debug_tx_timeout,
		 "Dump tx related registers and ring when tx_timeout happens");
module_pci_driver(forcedeth_pci_driver);

MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pci_tbl);