tg3: Check all adv bits when checking config
[deliverable/linux.git] / drivers / net / ethernet / broadcom / tg3.c
CommitLineData
1da177e4
LT
1/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
b86fb2cf 7 * Copyright (C) 2005-2011 Broadcom Corporation.
1da177e4
LT
8 *
9 * Firmware is:
49cabf49
MC
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
1da177e4
LT
16 */
17
1da177e4
LT
18
19#include <linux/module.h>
20#include <linux/moduleparam.h>
6867c843 21#include <linux/stringify.h>
1da177e4
LT
22#include <linux/kernel.h>
23#include <linux/types.h>
24#include <linux/compiler.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
14c85021 27#include <linux/in.h>
1da177e4 28#include <linux/init.h>
a6b7a407 29#include <linux/interrupt.h>
1da177e4
LT
30#include <linux/ioport.h>
31#include <linux/pci.h>
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/skbuff.h>
35#include <linux/ethtool.h>
3110f5f5 36#include <linux/mdio.h>
1da177e4 37#include <linux/mii.h>
158d7abd 38#include <linux/phy.h>
a9daf367 39#include <linux/brcmphy.h>
1da177e4
LT
40#include <linux/if_vlan.h>
41#include <linux/ip.h>
42#include <linux/tcp.h>
43#include <linux/workqueue.h>
61487480 44#include <linux/prefetch.h>
f9a5f7d3 45#include <linux/dma-mapping.h>
077f849d 46#include <linux/firmware.h>
1da177e4
LT
47
48#include <net/checksum.h>
c9bdd4b5 49#include <net/ip.h>
1da177e4
LT
50
51#include <asm/system.h>
27fd9de8 52#include <linux/io.h>
1da177e4 53#include <asm/byteorder.h>
27fd9de8 54#include <linux/uaccess.h>
1da177e4 55
49b6e95f 56#ifdef CONFIG_SPARC
1da177e4 57#include <asm/idprom.h>
49b6e95f 58#include <asm/prom.h>
1da177e4
LT
59#endif
60
63532394
MC
61#define BAR_0 0
62#define BAR_2 2
63
1da177e4
LT
64#include "tg3.h"
65
63c3a66f
JP
66/* Functions & macros to verify TG3_FLAGS types */
67
/* Test whether driver flag @flag is set in the @bits bitmap.
 * Returns nonzero if set, 0 otherwise (test_bit semantics).
 */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}
72
/* Atomically set driver flag @flag in the @bits bitmap. */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}
77
/* Atomically clear driver flag @flag in the @bits bitmap. */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}
82
83#define tg3_flag(tp, flag) \
84 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85#define tg3_flag_set(tp, flag) \
86 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87#define tg3_flag_clear(tp, flag) \
88 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89
1da177e4 90#define DRV_MODULE_NAME "tg3"
6867c843 91#define TG3_MAJ_NUM 3
eaa36660 92#define TG3_MIN_NUM 120
6867c843
MC
93#define DRV_MODULE_VERSION \
94 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
eaa36660 95#define DRV_MODULE_RELDATE "August 18, 2011"
1da177e4 96
1da177e4
LT
97#define TG3_DEF_RX_MODE 0
98#define TG3_DEF_TX_MODE 0
99#define TG3_DEF_MSG_ENABLE \
100 (NETIF_MSG_DRV | \
101 NETIF_MSG_PROBE | \
102 NETIF_MSG_LINK | \
103 NETIF_MSG_TIMER | \
104 NETIF_MSG_IFDOWN | \
105 NETIF_MSG_IFUP | \
106 NETIF_MSG_RX_ERR | \
107 NETIF_MSG_TX_ERR)
108
520b2756
MC
109#define TG3_GRC_LCLCTL_PWRSW_DELAY 100
110
1da177e4
LT
111/* length of time before we decide the hardware is borked,
112 * and dev->tx_timeout() should be called to fix the problem
113 */
63c3a66f 114
1da177e4
LT
115#define TG3_TX_TIMEOUT (5 * HZ)
116
117/* hardware minimum and maximum for a single frame's data payload */
118#define TG3_MIN_MTU 60
119#define TG3_MAX_MTU(tp) \
63c3a66f 120 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
1da177e4
LT
121
122/* These numbers seem to be hard coded in the NIC firmware somehow.
123 * You can't change the ring sizes, but you can change where you place
124 * them in the NIC onboard memory.
125 */
7cb32cf2 126#define TG3_RX_STD_RING_SIZE(tp) \
63c3a66f 127 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
de9f5230 128 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
1da177e4 129#define TG3_DEF_RX_RING_PENDING 200
7cb32cf2 130#define TG3_RX_JMB_RING_SIZE(tp) \
63c3a66f 131 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
de9f5230 132 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
1da177e4 133#define TG3_DEF_RX_JUMBO_RING_PENDING 100
c6cdf436 134#define TG3_RSS_INDIR_TBL_SIZE 128
1da177e4
LT
135
136/* Do not place this n-ring entries value into the tp struct itself,
137 * we really want to expose these constants to GCC so that modulo et
138 * al. operations are done with shifts and masks instead of with
139 * hw multiply/modulo instructions. Another solution would be to
140 * replace things like '% foo' with '& (foo - 1)'.
141 */
1da177e4
LT
142
143#define TG3_TX_RING_SIZE 512
144#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
145
2c49a44d
MC
146#define TG3_RX_STD_RING_BYTES(tp) \
147 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
148#define TG3_RX_JMB_RING_BYTES(tp) \
149 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
150#define TG3_RX_RCB_RING_BYTES(tp) \
7cb32cf2 151 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
1da177e4
LT
152#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
153 TG3_TX_RING_SIZE)
1da177e4
LT
154#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
155
287be12e
MC
156#define TG3_DMA_BYTE_ENAB 64
157
158#define TG3_RX_STD_DMA_SZ 1536
159#define TG3_RX_JMB_DMA_SZ 9046
160
161#define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
162
163#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
164#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
1da177e4 165
2c49a44d
MC
166#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
167 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
2b2cdb65 168
2c49a44d
MC
169#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
170 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
2b2cdb65 171
d2757fc4
MC
172/* Due to a hardware bug, the 5701 can only DMA to memory addresses
173 * that are at least dword aligned when used in PCIX mode. The driver
174 * works around this bug by double copying the packet. This workaround
175 * is built into the normal double copy length check for efficiency.
176 *
177 * However, the double copy is only necessary on those architectures
178 * where unaligned memory accesses are inefficient. For those architectures
179 * where unaligned memory accesses incur little penalty, we can reintegrate
180 * the 5701 in the normal rx path. Doing so saves a device structure
181 * dereference by hardcoding the double copy threshold in place.
182 */
183#define TG3_RX_COPY_THRESHOLD 256
184#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
185 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
186#else
187 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
188#endif
189
1da177e4 190/* minimum number of free TX descriptors required to wake up TX process */
f3f3f27e 191#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
e31aa987 192#define TG3_TX_BD_DMA_MAX 4096
1da177e4 193
ad829268
MC
194#define TG3_RAW_IP_ALIGN 2
195
c6cdf436
MC
196#define TG3_FW_UPDATE_TIMEOUT_SEC 5
197
077f849d
JSR
198#define FIRMWARE_TG3 "tigon/tg3.bin"
199#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
200#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
201
1da177e4 202static char version[] __devinitdata =
05dbe005 203 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
1da177e4
LT
204
205MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
206MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
207MODULE_LICENSE("GPL");
208MODULE_VERSION(DRV_MODULE_VERSION);
077f849d
JSR
209MODULE_FIRMWARE(FIRMWARE_TG3);
210MODULE_FIRMWARE(FIRMWARE_TG3TSO);
211MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
212
1da177e4
LT
213static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
214module_param(tg3_debug, int, 0);
215MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
216
a3aa1884 217static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
13185217
HK
218 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
219 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
220 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
221 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
222 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
223 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
224 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
225 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
226 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
227 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
13185217 240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
126a3368 241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
13185217 242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
13185217
HK
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
126a3368 254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
13185217
HK
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
676917d4 258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
13185217
HK
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
b5d3772c
MC
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
d30cdd28
MC
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
6c7af27c 270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
9936bcf6
MC
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
c88e668b
MC
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
2befdcea
MC
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
321d32a0
MC
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
5e7ccf20 280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
5001e2f6
MC
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
b0f75221
MC
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
302b500b 289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
ba1f3c76 290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
13185217
HK
291 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
292 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
293 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
294 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
295 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
296 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
297 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
1dcb14d9 298 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
13185217 299 {}
1da177e4
LT
300};
301
302MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
303
50da859d 304static const struct {
1da177e4 305 const char string[ETH_GSTRING_LEN];
48fa55a0 306} ethtool_stats_keys[] = {
1da177e4
LT
307 { "rx_octets" },
308 { "rx_fragments" },
309 { "rx_ucast_packets" },
310 { "rx_mcast_packets" },
311 { "rx_bcast_packets" },
312 { "rx_fcs_errors" },
313 { "rx_align_errors" },
314 { "rx_xon_pause_rcvd" },
315 { "rx_xoff_pause_rcvd" },
316 { "rx_mac_ctrl_rcvd" },
317 { "rx_xoff_entered" },
318 { "rx_frame_too_long_errors" },
319 { "rx_jabbers" },
320 { "rx_undersize_packets" },
321 { "rx_in_length_errors" },
322 { "rx_out_length_errors" },
323 { "rx_64_or_less_octet_packets" },
324 { "rx_65_to_127_octet_packets" },
325 { "rx_128_to_255_octet_packets" },
326 { "rx_256_to_511_octet_packets" },
327 { "rx_512_to_1023_octet_packets" },
328 { "rx_1024_to_1522_octet_packets" },
329 { "rx_1523_to_2047_octet_packets" },
330 { "rx_2048_to_4095_octet_packets" },
331 { "rx_4096_to_8191_octet_packets" },
332 { "rx_8192_to_9022_octet_packets" },
333
334 { "tx_octets" },
335 { "tx_collisions" },
336
337 { "tx_xon_sent" },
338 { "tx_xoff_sent" },
339 { "tx_flow_control" },
340 { "tx_mac_errors" },
341 { "tx_single_collisions" },
342 { "tx_mult_collisions" },
343 { "tx_deferred" },
344 { "tx_excessive_collisions" },
345 { "tx_late_collisions" },
346 { "tx_collide_2times" },
347 { "tx_collide_3times" },
348 { "tx_collide_4times" },
349 { "tx_collide_5times" },
350 { "tx_collide_6times" },
351 { "tx_collide_7times" },
352 { "tx_collide_8times" },
353 { "tx_collide_9times" },
354 { "tx_collide_10times" },
355 { "tx_collide_11times" },
356 { "tx_collide_12times" },
357 { "tx_collide_13times" },
358 { "tx_collide_14times" },
359 { "tx_collide_15times" },
360 { "tx_ucast_packets" },
361 { "tx_mcast_packets" },
362 { "tx_bcast_packets" },
363 { "tx_carrier_sense_errors" },
364 { "tx_discards" },
365 { "tx_errors" },
366
367 { "dma_writeq_full" },
368 { "dma_write_prioq_full" },
369 { "rxbds_empty" },
370 { "rx_discards" },
371 { "rx_errors" },
372 { "rx_threshold_hit" },
373
374 { "dma_readq_full" },
375 { "dma_read_prioq_full" },
376 { "tx_comp_queue_full" },
377
378 { "ring_set_send_prod_index" },
379 { "ring_status_update" },
380 { "nic_irqs" },
381 { "nic_avoided_irqs" },
4452d099
MC
382 { "nic_tx_threshold_hit" },
383
384 { "mbuf_lwm_thresh_hit" },
1da177e4
LT
385};
386
48fa55a0
MC
387#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
388
389
50da859d 390static const struct {
4cafd3f5 391 const char string[ETH_GSTRING_LEN];
48fa55a0 392} ethtool_test_keys[] = {
28a45957
MC
393 { "nvram test (online) " },
394 { "link test (online) " },
395 { "register test (offline)" },
396 { "memory test (offline)" },
397 { "mac loopback test (offline)" },
398 { "phy loopback test (offline)" },
941ec90f 399 { "ext loopback test (offline)" },
28a45957 400 { "interrupt test (offline)" },
4cafd3f5
MC
401};
402
48fa55a0
MC
403#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
404
405
b401e9e2
MC
/* Posted MMIO write of @val to register offset @off; no readback flush. */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
410
411static u32 tg3_read32(struct tg3 *tp, u32 off)
412{
de6f31eb 413 return readl(tp->regs + off);
b401e9e2
MC
414}
415
0d3031d9
MC
/* Write @val to APE (Application Processing Engine) register @off. */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
420
421static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
422{
de6f31eb 423 return readl(tp->aperegs + off);
0d3031d9
MC
424}
425
1da177e4
LT
/* Write a device register via PCI config space (indirect mode), used when
 * direct MMIO is unsafe.  The indirect_lock serializes the two-step
 * address/data config-space sequence against other indirect accesses.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
435
/* Non-posted register write: the readback forces the preceding write to
 * reach the device before this function returns.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
441
6892914f 442static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
1da177e4 443{
6892914f
MC
444 unsigned long flags;
445 u32 val;
446
447 spin_lock_irqsave(&tp->indirect_lock, flags);
448 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
449 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
450 spin_unlock_irqrestore(&tp->indirect_lock, flags);
451 return val;
452}
453
/* Write a mailbox register in indirect mode.  Two mailboxes (the RX return
 * consumer index and the standard-ring producer index) have dedicated
 * config-space aliases and bypass the generic indirect window.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	/* Mailbox registers sit at offset 0x5600 in the register map. */
	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
483
/* Read a mailbox register via the indirect config-space window
 * (mailboxes live at offset 0x5600 in the register map).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
495
b401e9e2
MC
496/* usec_wait specifies the wait time in usec when writing to certain registers
497 * where it is unsafe to read back the register without some delay.
498 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
499 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
500 */
/* Write @val to @off and wait @usec_wait microseconds for it to settle.
 * Chips with the PCIX target or ICH bugs use the non-posted write path
 * (tp->write32); otherwise a posted write plus readback flush is used.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
519
09ee929c
MC
/* Mailbox write with a flushing readback, skipped on chips where the
 * readback is unnecessary (no write reordering) or harmful (ICH bug).
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
526
/* Write a TX mailbox.  The TXD_MBOX_HWBUG chips require the value to be
 * written twice; MBOX_WRITE_REORDER chips need a readback to flush.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}
536
b5d3772c
MC
537static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
538{
de6f31eb 539 return readl(tp->regs + off + GRCMBOX_BASE);
b5d3772c
MC
540}
541
/* 5906: mailbox writes go through the GRC mailbox window. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
546
c6cdf436 547#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
09ee929c 548#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
c6cdf436
MC
549#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
550#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
551#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
20094930 552
c6cdf436
MC
553#define tw32(reg, val) tp->write32(tp, reg, val)
554#define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
555#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
556#define tr32(reg) tp->read32(tp, reg)
1da177e4
LT
557
/* Write @val into NIC on-board SRAM at @off through the memory window.
 * On the 5906 the stats-block region is not writable and is skipped.
 * The window base is restored to zero afterwards in both paths.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		/* Window access via PCI config space. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Window access via direct (flushed) MMIO. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
582
1da177e4
LT
/* Read NIC on-board SRAM at @off into *@val through the memory window.
 * On the 5906 the stats-block region is unreadable and yields zero.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		/* Window access via PCI config space. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Window access via direct (flushed) MMIO. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
609
0d3031d9
MC
/* Release any APE locks this driver instance might have left behind.
 * The 5761 uses the legacy per-lock grant block; later chips use the
 * per-function grant block.  The GPIO lock is per-PCI-function, so only
 * this function's bit is cleared for it.
 */
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++) {
		if (i == TG3_APE_LOCK_GPIO)
			continue;
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
	}

	/* Clear the correct bit of the GPIO lock too. */
	if (!tp->pci_fn)
		bit = APE_LOCK_GRANT_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
}
635
/* Acquire the APE hardware lock @locknum.
 * Returns 0 on success (or when APE is absent / lock not applicable),
 * -EINVAL for an unknown lock, -EBUSY if the lock cannot be obtained
 * within ~1ms.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock; nothing to acquire. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	/* The GPIO lock is requested with a per-PCI-function bit. */
	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_REQ_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
689
/* Release the APE hardware lock @locknum, the counterpart of
 * tg3_ape_lock().  Writing the owning bit to the grant register frees it.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock; nothing to release. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	/* The GPIO lock is released with a per-PCI-function bit. */
	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_GRANT_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
720
1da177e4
LT
/* Mask PCI interrupts at the chip level and ACK/disable every vector's
 * interrupt mailbox.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
730
1da177e4
LT
/* Re-enable chip interrupts on all vectors and, if status is already
 * pending, force an initial interrupt so no event is lost.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	/* Publish irq_sync = 0 before unmasking at the chip. */
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* 1-shot MSI mode needs the mailbox written twice. */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
761
/* Return nonzero if this NAPI context has pending work: a link-change
 * event in the status block (unless link changes are polled another way)
 * or un-consumed TX completions / un-processed RX descriptors.
 */
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
780
/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Writing last_tag re-arms the interrupt for this vector. */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
801
1da177e4
LT
/* Switch the chip's core clock back to the base rate, preserving only the
 * CLKRUN control bits.  Not needed (and skipped) on CPMU-equipped and
 * 5780-class chips.  The 44MHz/ALTCLK path needs a two-step sequence.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
834
835#define PHY_BUSY_LOOPS 5000
836
/* Read PHY register @reg over the MII management interface into *@val.
 * Auto-polling is temporarily disabled around the transaction and
 * restored afterwards.  Returns 0 on success, -EBUSY on MI timeout.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI_COM frame: PHY address, register, read command. */
	frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for the MI interface to go idle. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
885
886static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
887{
888 u32 frame_val;
889 unsigned int loops;
890 int ret;
891
f07e9af3 892 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
221c5637 893 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
b5d3772c
MC
894 return 0;
895
1da177e4
LT
896 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
897 tw32_f(MAC_MI_MODE,
898 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
899 udelay(80);
900 }
901
882e9793 902 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1da177e4
LT
903 MI_COM_PHY_ADDR_MASK);
904 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
905 MI_COM_REG_ADDR_MASK);
906 frame_val |= (val & MI_COM_DATA_MASK);
907 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
6aa20a22 908
1da177e4
LT
909 tw32_f(MAC_MI_COM, frame_val);
910
911 loops = PHY_BUSY_LOOPS;
912 while (loops != 0) {
913 udelay(10);
914 frame_val = tr32(MAC_MI_COM);
915 if ((frame_val & MI_COM_BUSY) == 0) {
916 udelay(5);
917 frame_val = tr32(MAC_MI_COM);
918 break;
919 }
920 loops -= 1;
921 }
922
923 ret = -EBUSY;
924 if (loops != 0)
925 ret = 0;
926
927 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
928 tw32_f(MAC_MI_MODE, tp->mi_mode);
929 udelay(80);
930 }
931
932 return ret;
933}
934
b0988c15
MC
935static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
936{
937 int err;
938
939 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
940 if (err)
941 goto done;
942
943 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
944 if (err)
945 goto done;
946
947 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
948 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
949 if (err)
950 goto done;
951
952 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
953
954done:
955 return err;
956}
957
958static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
959{
960 int err;
961
962 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
963 if (err)
964 goto done;
965
966 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
967 if (err)
968 goto done;
969
970 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
971 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
972 if (err)
973 goto done;
974
975 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
976
977done:
978 return err;
979}
980
981static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
982{
983 int err;
984
985 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
986 if (!err)
987 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
988
989 return err;
990}
991
992static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
993{
994 int err;
995
996 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
997 if (!err)
998 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
999
1000 return err;
1001}
1002
15ee95c3
MC
1003static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1004{
1005 int err;
1006
1007 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1008 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1009 MII_TG3_AUXCTL_SHDWSEL_MISC);
1010 if (!err)
1011 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1012
1013 return err;
1014}
1015
b4bd2929
MC
1016static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1017{
1018 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1019 set |= MII_TG3_AUXCTL_MISC_WREN;
1020
1021 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1022}
1023
1d36ba45
MC
/* Enable/disable access to the PHY's shadow DSP registers via AUXCTL.
 * Both expand to a tg3_phy_auxctl_write() call and return its status.
 * Note: no trailing semicolon inside the macro bodies — call sites
 * supply their own, so the macros stay safe in unbraced if/else.
 */
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
1032
95e2869a
MC
1033static int tg3_bmcr_reset(struct tg3 *tp)
1034{
1035 u32 phy_control;
1036 int limit, err;
1037
1038 /* OK, reset it, and poll the BMCR_RESET bit until it
1039 * clears or we time out.
1040 */
1041 phy_control = BMCR_RESET;
1042 err = tg3_writephy(tp, MII_BMCR, phy_control);
1043 if (err != 0)
1044 return -EBUSY;
1045
1046 limit = 5000;
1047 while (limit--) {
1048 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1049 if (err != 0)
1050 return -EBUSY;
1051
1052 if ((phy_control & BMCR_RESET) == 0) {
1053 udelay(40);
1054 break;
1055 }
1056 udelay(10);
1057 }
d4675b52 1058 if (limit < 0)
95e2869a
MC
1059 return -EBUSY;
1060
1061 return 0;
1062}
1063
158d7abd
MC
1064static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1065{
3d16543d 1066 struct tg3 *tp = bp->priv;
158d7abd
MC
1067 u32 val;
1068
24bb4fb6 1069 spin_lock_bh(&tp->lock);
158d7abd
MC
1070
1071 if (tg3_readphy(tp, reg, &val))
24bb4fb6
MC
1072 val = -EIO;
1073
1074 spin_unlock_bh(&tp->lock);
158d7abd
MC
1075
1076 return val;
1077}
1078
1079static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1080{
3d16543d 1081 struct tg3 *tp = bp->priv;
24bb4fb6 1082 u32 ret = 0;
158d7abd 1083
24bb4fb6 1084 spin_lock_bh(&tp->lock);
158d7abd
MC
1085
1086 if (tg3_writephy(tp, reg, val))
24bb4fb6 1087 ret = -EIO;
158d7abd 1088
24bb4fb6
MC
1089 spin_unlock_bh(&tp->lock);
1090
1091 return ret;
158d7abd
MC
1092}
1093
/* phylib mii_bus ->reset hook: nothing to do for this hardware. */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1098
9c61d6bc 1099static void tg3_mdio_config_5785(struct tg3 *tp)
a9daf367
MC
1100{
1101 u32 val;
fcb389df 1102 struct phy_device *phydev;
a9daf367 1103
3f0e3ad7 1104 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
fcb389df 1105 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
6a443a0f
MC
1106 case PHY_ID_BCM50610:
1107 case PHY_ID_BCM50610M:
fcb389df
MC
1108 val = MAC_PHYCFG2_50610_LED_MODES;
1109 break;
6a443a0f 1110 case PHY_ID_BCMAC131:
fcb389df
MC
1111 val = MAC_PHYCFG2_AC131_LED_MODES;
1112 break;
6a443a0f 1113 case PHY_ID_RTL8211C:
fcb389df
MC
1114 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1115 break;
6a443a0f 1116 case PHY_ID_RTL8201E:
fcb389df
MC
1117 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1118 break;
1119 default:
a9daf367 1120 return;
fcb389df
MC
1121 }
1122
1123 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1124 tw32(MAC_PHYCFG2, val);
1125
1126 val = tr32(MAC_PHYCFG1);
bb85fbb6
MC
1127 val &= ~(MAC_PHYCFG1_RGMII_INT |
1128 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1129 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
fcb389df
MC
1130 tw32(MAC_PHYCFG1, val);
1131
1132 return;
1133 }
1134
63c3a66f 1135 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
fcb389df
MC
1136 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1137 MAC_PHYCFG2_FMODE_MASK_MASK |
1138 MAC_PHYCFG2_GMODE_MASK_MASK |
1139 MAC_PHYCFG2_ACT_MASK_MASK |
1140 MAC_PHYCFG2_QUAL_MASK_MASK |
1141 MAC_PHYCFG2_INBAND_ENABLE;
1142
1143 tw32(MAC_PHYCFG2, val);
a9daf367 1144
bb85fbb6
MC
1145 val = tr32(MAC_PHYCFG1);
1146 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1147 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
63c3a66f
JP
1148 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1149 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
a9daf367 1150 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
63c3a66f 1151 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
a9daf367
MC
1152 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1153 }
bb85fbb6
MC
1154 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1155 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1156 tw32(MAC_PHYCFG1, val);
a9daf367 1157
a9daf367
MC
1158 val = tr32(MAC_EXT_RGMII_MODE);
1159 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1160 MAC_RGMII_MODE_RX_QUALITY |
1161 MAC_RGMII_MODE_RX_ACTIVITY |
1162 MAC_RGMII_MODE_RX_ENG_DET |
1163 MAC_RGMII_MODE_TX_ENABLE |
1164 MAC_RGMII_MODE_TX_LOWPWR |
1165 MAC_RGMII_MODE_TX_RESET);
63c3a66f
JP
1166 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1167 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
a9daf367
MC
1168 val |= MAC_RGMII_MODE_RX_INT_B |
1169 MAC_RGMII_MODE_RX_QUALITY |
1170 MAC_RGMII_MODE_RX_ACTIVITY |
1171 MAC_RGMII_MODE_RX_ENG_DET;
63c3a66f 1172 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
a9daf367
MC
1173 val |= MAC_RGMII_MODE_TX_ENABLE |
1174 MAC_RGMII_MODE_TX_LOWPWR |
1175 MAC_RGMII_MODE_TX_RESET;
1176 }
1177 tw32(MAC_EXT_RGMII_MODE, val);
1178}
1179
158d7abd
MC
1180static void tg3_mdio_start(struct tg3 *tp)
1181{
158d7abd
MC
1182 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1183 tw32_f(MAC_MI_MODE, tp->mi_mode);
1184 udelay(80);
a9daf367 1185
63c3a66f 1186 if (tg3_flag(tp, MDIOBUS_INITED) &&
9ea4818d
MC
1187 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1188 tg3_mdio_config_5785(tp);
1189}
1190
1191static int tg3_mdio_init(struct tg3 *tp)
1192{
1193 int i;
1194 u32 reg;
1195 struct phy_device *phydev;
1196
63c3a66f 1197 if (tg3_flag(tp, 5717_PLUS)) {
9c7df915 1198 u32 is_serdes;
882e9793 1199
69f11c99 1200 tp->phy_addr = tp->pci_fn + 1;
882e9793 1201
d1ec96af
MC
1202 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1203 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1204 else
1205 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1206 TG3_CPMU_PHY_STRAP_IS_SERDES;
882e9793
MC
1207 if (is_serdes)
1208 tp->phy_addr += 7;
1209 } else
3f0e3ad7 1210 tp->phy_addr = TG3_PHY_MII_ADDR;
882e9793 1211
158d7abd
MC
1212 tg3_mdio_start(tp);
1213
63c3a66f 1214 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
158d7abd
MC
1215 return 0;
1216
298cf9be
LB
1217 tp->mdio_bus = mdiobus_alloc();
1218 if (tp->mdio_bus == NULL)
1219 return -ENOMEM;
158d7abd 1220
298cf9be
LB
1221 tp->mdio_bus->name = "tg3 mdio bus";
1222 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
158d7abd 1223 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
298cf9be
LB
1224 tp->mdio_bus->priv = tp;
1225 tp->mdio_bus->parent = &tp->pdev->dev;
1226 tp->mdio_bus->read = &tg3_mdio_read;
1227 tp->mdio_bus->write = &tg3_mdio_write;
1228 tp->mdio_bus->reset = &tg3_mdio_reset;
3f0e3ad7 1229 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
298cf9be 1230 tp->mdio_bus->irq = &tp->mdio_irq[0];
158d7abd
MC
1231
1232 for (i = 0; i < PHY_MAX_ADDR; i++)
298cf9be 1233 tp->mdio_bus->irq[i] = PHY_POLL;
158d7abd
MC
1234
1235 /* The bus registration will look for all the PHYs on the mdio bus.
1236 * Unfortunately, it does not ensure the PHY is powered up before
1237 * accessing the PHY ID registers. A chip reset is the
1238 * quickest way to bring the device back to an operational state..
1239 */
1240 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1241 tg3_bmcr_reset(tp);
1242
298cf9be 1243 i = mdiobus_register(tp->mdio_bus);
a9daf367 1244 if (i) {
ab96b241 1245 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
9c61d6bc 1246 mdiobus_free(tp->mdio_bus);
a9daf367
MC
1247 return i;
1248 }
158d7abd 1249
3f0e3ad7 1250 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
a9daf367 1251
9c61d6bc 1252 if (!phydev || !phydev->drv) {
ab96b241 1253 dev_warn(&tp->pdev->dev, "No PHY devices\n");
9c61d6bc
MC
1254 mdiobus_unregister(tp->mdio_bus);
1255 mdiobus_free(tp->mdio_bus);
1256 return -ENODEV;
1257 }
1258
1259 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
6a443a0f 1260 case PHY_ID_BCM57780:
321d32a0 1261 phydev->interface = PHY_INTERFACE_MODE_GMII;
c704dc23 1262 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
321d32a0 1263 break;
6a443a0f
MC
1264 case PHY_ID_BCM50610:
1265 case PHY_ID_BCM50610M:
32e5a8d6 1266 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
c704dc23 1267 PHY_BRCM_RX_REFCLK_UNUSED |
52fae083 1268 PHY_BRCM_DIS_TXCRXC_NOENRGY |
c704dc23 1269 PHY_BRCM_AUTO_PWRDWN_ENABLE;
63c3a66f 1270 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
a9daf367 1271 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
63c3a66f 1272 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
a9daf367 1273 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
63c3a66f 1274 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
a9daf367 1275 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
fcb389df 1276 /* fallthru */
6a443a0f 1277 case PHY_ID_RTL8211C:
fcb389df 1278 phydev->interface = PHY_INTERFACE_MODE_RGMII;
a9daf367 1279 break;
6a443a0f
MC
1280 case PHY_ID_RTL8201E:
1281 case PHY_ID_BCMAC131:
a9daf367 1282 phydev->interface = PHY_INTERFACE_MODE_MII;
cdd4e09d 1283 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
f07e9af3 1284 tp->phy_flags |= TG3_PHYFLG_IS_FET;
a9daf367
MC
1285 break;
1286 }
1287
63c3a66f 1288 tg3_flag_set(tp, MDIOBUS_INITED);
9c61d6bc
MC
1289
1290 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1291 tg3_mdio_config_5785(tp);
a9daf367
MC
1292
1293 return 0;
158d7abd
MC
1294}
1295
1296static void tg3_mdio_fini(struct tg3 *tp)
1297{
63c3a66f
JP
1298 if (tg3_flag(tp, MDIOBUS_INITED)) {
1299 tg3_flag_clear(tp, MDIOBUS_INITED);
298cf9be
LB
1300 mdiobus_unregister(tp->mdio_bus);
1301 mdiobus_free(tp->mdio_bus);
158d7abd
MC
1302 }
1303}
1304
4ba526ce
MC
1305/* tp->lock is held. */
1306static inline void tg3_generate_fw_event(struct tg3 *tp)
1307{
1308 u32 val;
1309
1310 val = tr32(GRC_RX_CPU_EVENT);
1311 val |= GRC_RX_CPU_DRIVER_EVENT;
1312 tw32_f(GRC_RX_CPU_EVENT, val);
1313
1314 tp->last_event_jiffies = jiffies;
1315}
1316
1317#define TG3_FW_EVENT_TIMEOUT_USEC 2500
1318
95e2869a
MC
1319/* tp->lock is held. */
1320static void tg3_wait_for_event_ack(struct tg3 *tp)
1321{
1322 int i;
4ba526ce
MC
1323 unsigned int delay_cnt;
1324 long time_remain;
1325
1326 /* If enough time has passed, no wait is necessary. */
1327 time_remain = (long)(tp->last_event_jiffies + 1 +
1328 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1329 (long)jiffies;
1330 if (time_remain < 0)
1331 return;
1332
1333 /* Check if we can shorten the wait time. */
1334 delay_cnt = jiffies_to_usecs(time_remain);
1335 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1336 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1337 delay_cnt = (delay_cnt >> 3) + 1;
95e2869a 1338
4ba526ce 1339 for (i = 0; i < delay_cnt; i++) {
95e2869a
MC
1340 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1341 break;
4ba526ce 1342 udelay(8);
95e2869a
MC
1343 }
1344}
1345
1346/* tp->lock is held. */
1347static void tg3_ump_link_report(struct tg3 *tp)
1348{
1349 u32 reg;
1350 u32 val;
1351
63c3a66f 1352 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
95e2869a
MC
1353 return;
1354
1355 tg3_wait_for_event_ack(tp);
1356
1357 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1358
1359 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1360
1361 val = 0;
1362 if (!tg3_readphy(tp, MII_BMCR, &reg))
1363 val = reg << 16;
1364 if (!tg3_readphy(tp, MII_BMSR, &reg))
1365 val |= (reg & 0xffff);
1366 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1367
1368 val = 0;
1369 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1370 val = reg << 16;
1371 if (!tg3_readphy(tp, MII_LPA, &reg))
1372 val |= (reg & 0xffff);
1373 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1374
1375 val = 0;
f07e9af3 1376 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
95e2869a
MC
1377 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1378 val = reg << 16;
1379 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1380 val |= (reg & 0xffff);
1381 }
1382 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1383
1384 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1385 val = reg << 16;
1386 else
1387 val = 0;
1388 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1389
4ba526ce 1390 tg3_generate_fw_event(tp);
95e2869a
MC
1391}
1392
1393static void tg3_link_report(struct tg3 *tp)
1394{
1395 if (!netif_carrier_ok(tp->dev)) {
05dbe005 1396 netif_info(tp, link, tp->dev, "Link is down\n");
95e2869a
MC
1397 tg3_ump_link_report(tp);
1398 } else if (netif_msg_link(tp)) {
05dbe005
JP
1399 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1400 (tp->link_config.active_speed == SPEED_1000 ?
1401 1000 :
1402 (tp->link_config.active_speed == SPEED_100 ?
1403 100 : 10)),
1404 (tp->link_config.active_duplex == DUPLEX_FULL ?
1405 "full" : "half"));
1406
1407 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1408 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1409 "on" : "off",
1410 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1411 "on" : "off");
47007831
MC
1412
1413 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1414 netdev_info(tp->dev, "EEE is %s\n",
1415 tp->setlpicnt ? "enabled" : "disabled");
1416
95e2869a
MC
1417 tg3_ump_link_report(tp);
1418 }
1419}
1420
1421static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1422{
1423 u16 miireg;
1424
e18ce346 1425 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
95e2869a 1426 miireg = ADVERTISE_PAUSE_CAP;
e18ce346 1427 else if (flow_ctrl & FLOW_CTRL_TX)
95e2869a 1428 miireg = ADVERTISE_PAUSE_ASYM;
e18ce346 1429 else if (flow_ctrl & FLOW_CTRL_RX)
95e2869a
MC
1430 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1431 else
1432 miireg = 0;
1433
1434 return miireg;
1435}
1436
1437static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1438{
1439 u16 miireg;
1440
e18ce346 1441 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
95e2869a 1442 miireg = ADVERTISE_1000XPAUSE;
e18ce346 1443 else if (flow_ctrl & FLOW_CTRL_TX)
95e2869a 1444 miireg = ADVERTISE_1000XPSE_ASYM;
e18ce346 1445 else if (flow_ctrl & FLOW_CTRL_RX)
95e2869a
MC
1446 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1447 else
1448 miireg = 0;
1449
1450 return miireg;
1451}
1452
95e2869a
MC
1453static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1454{
1455 u8 cap = 0;
1456
1457 if (lcladv & ADVERTISE_1000XPAUSE) {
1458 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1459 if (rmtadv & LPA_1000XPAUSE)
e18ce346 1460 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
95e2869a 1461 else if (rmtadv & LPA_1000XPAUSE_ASYM)
e18ce346 1462 cap = FLOW_CTRL_RX;
95e2869a
MC
1463 } else {
1464 if (rmtadv & LPA_1000XPAUSE)
e18ce346 1465 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
95e2869a
MC
1466 }
1467 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1468 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
e18ce346 1469 cap = FLOW_CTRL_TX;
95e2869a
MC
1470 }
1471
1472 return cap;
1473}
1474
f51f3562 1475static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
95e2869a 1476{
b02fd9e3 1477 u8 autoneg;
f51f3562 1478 u8 flowctrl = 0;
95e2869a
MC
1479 u32 old_rx_mode = tp->rx_mode;
1480 u32 old_tx_mode = tp->tx_mode;
1481
63c3a66f 1482 if (tg3_flag(tp, USE_PHYLIB))
3f0e3ad7 1483 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
b02fd9e3
MC
1484 else
1485 autoneg = tp->link_config.autoneg;
1486
63c3a66f 1487 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
f07e9af3 1488 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
f51f3562 1489 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
95e2869a 1490 else
bc02ff95 1491 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
f51f3562
MC
1492 } else
1493 flowctrl = tp->link_config.flowctrl;
95e2869a 1494
f51f3562 1495 tp->link_config.active_flowctrl = flowctrl;
95e2869a 1496
e18ce346 1497 if (flowctrl & FLOW_CTRL_RX)
95e2869a
MC
1498 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1499 else
1500 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1501
f51f3562 1502 if (old_rx_mode != tp->rx_mode)
95e2869a 1503 tw32_f(MAC_RX_MODE, tp->rx_mode);
95e2869a 1504
e18ce346 1505 if (flowctrl & FLOW_CTRL_TX)
95e2869a
MC
1506 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1507 else
1508 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1509
f51f3562 1510 if (old_tx_mode != tp->tx_mode)
95e2869a 1511 tw32_f(MAC_TX_MODE, tp->tx_mode);
95e2869a
MC
1512}
1513
b02fd9e3
MC
1514static void tg3_adjust_link(struct net_device *dev)
1515{
1516 u8 oldflowctrl, linkmesg = 0;
1517 u32 mac_mode, lcl_adv, rmt_adv;
1518 struct tg3 *tp = netdev_priv(dev);
3f0e3ad7 1519 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
b02fd9e3 1520
24bb4fb6 1521 spin_lock_bh(&tp->lock);
b02fd9e3
MC
1522
1523 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1524 MAC_MODE_HALF_DUPLEX);
1525
1526 oldflowctrl = tp->link_config.active_flowctrl;
1527
1528 if (phydev->link) {
1529 lcl_adv = 0;
1530 rmt_adv = 0;
1531
1532 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1533 mac_mode |= MAC_MODE_PORT_MODE_MII;
c3df0748
MC
1534 else if (phydev->speed == SPEED_1000 ||
1535 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
b02fd9e3 1536 mac_mode |= MAC_MODE_PORT_MODE_GMII;
c3df0748
MC
1537 else
1538 mac_mode |= MAC_MODE_PORT_MODE_MII;
b02fd9e3
MC
1539
1540 if (phydev->duplex == DUPLEX_HALF)
1541 mac_mode |= MAC_MODE_HALF_DUPLEX;
1542 else {
1543 lcl_adv = tg3_advert_flowctrl_1000T(
1544 tp->link_config.flowctrl);
1545
1546 if (phydev->pause)
1547 rmt_adv = LPA_PAUSE_CAP;
1548 if (phydev->asym_pause)
1549 rmt_adv |= LPA_PAUSE_ASYM;
1550 }
1551
1552 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1553 } else
1554 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1555
1556 if (mac_mode != tp->mac_mode) {
1557 tp->mac_mode = mac_mode;
1558 tw32_f(MAC_MODE, tp->mac_mode);
1559 udelay(40);
1560 }
1561
fcb389df
MC
1562 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1563 if (phydev->speed == SPEED_10)
1564 tw32(MAC_MI_STAT,
1565 MAC_MI_STAT_10MBPS_MODE |
1566 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1567 else
1568 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1569 }
1570
b02fd9e3
MC
1571 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1572 tw32(MAC_TX_LENGTHS,
1573 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1574 (6 << TX_LENGTHS_IPG_SHIFT) |
1575 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1576 else
1577 tw32(MAC_TX_LENGTHS,
1578 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1579 (6 << TX_LENGTHS_IPG_SHIFT) |
1580 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1581
1582 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1583 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1584 phydev->speed != tp->link_config.active_speed ||
1585 phydev->duplex != tp->link_config.active_duplex ||
1586 oldflowctrl != tp->link_config.active_flowctrl)
c6cdf436 1587 linkmesg = 1;
b02fd9e3
MC
1588
1589 tp->link_config.active_speed = phydev->speed;
1590 tp->link_config.active_duplex = phydev->duplex;
1591
24bb4fb6 1592 spin_unlock_bh(&tp->lock);
b02fd9e3
MC
1593
1594 if (linkmesg)
1595 tg3_link_report(tp);
1596}
1597
1598static int tg3_phy_init(struct tg3 *tp)
1599{
1600 struct phy_device *phydev;
1601
f07e9af3 1602 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
b02fd9e3
MC
1603 return 0;
1604
1605 /* Bring the PHY back to a known state. */
1606 tg3_bmcr_reset(tp);
1607
3f0e3ad7 1608 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
b02fd9e3
MC
1609
1610 /* Attach the MAC to the PHY. */
fb28ad35 1611 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
a9daf367 1612 phydev->dev_flags, phydev->interface);
b02fd9e3 1613 if (IS_ERR(phydev)) {
ab96b241 1614 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
b02fd9e3
MC
1615 return PTR_ERR(phydev);
1616 }
1617
b02fd9e3 1618 /* Mask with MAC supported features. */
9c61d6bc
MC
1619 switch (phydev->interface) {
1620 case PHY_INTERFACE_MODE_GMII:
1621 case PHY_INTERFACE_MODE_RGMII:
f07e9af3 1622 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
321d32a0
MC
1623 phydev->supported &= (PHY_GBIT_FEATURES |
1624 SUPPORTED_Pause |
1625 SUPPORTED_Asym_Pause);
1626 break;
1627 }
1628 /* fallthru */
9c61d6bc
MC
1629 case PHY_INTERFACE_MODE_MII:
1630 phydev->supported &= (PHY_BASIC_FEATURES |
1631 SUPPORTED_Pause |
1632 SUPPORTED_Asym_Pause);
1633 break;
1634 default:
3f0e3ad7 1635 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
9c61d6bc
MC
1636 return -EINVAL;
1637 }
1638
f07e9af3 1639 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
b02fd9e3
MC
1640
1641 phydev->advertising = phydev->supported;
1642
b02fd9e3
MC
1643 return 0;
1644}
1645
1646static void tg3_phy_start(struct tg3 *tp)
1647{
1648 struct phy_device *phydev;
1649
f07e9af3 1650 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
b02fd9e3
MC
1651 return;
1652
3f0e3ad7 1653 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
b02fd9e3 1654
80096068
MC
1655 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1656 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
b02fd9e3
MC
1657 phydev->speed = tp->link_config.orig_speed;
1658 phydev->duplex = tp->link_config.orig_duplex;
1659 phydev->autoneg = tp->link_config.orig_autoneg;
1660 phydev->advertising = tp->link_config.orig_advertising;
1661 }
1662
1663 phy_start(phydev);
1664
1665 phy_start_aneg(phydev);
1666}
1667
1668static void tg3_phy_stop(struct tg3 *tp)
1669{
f07e9af3 1670 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
b02fd9e3
MC
1671 return;
1672
3f0e3ad7 1673 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
b02fd9e3
MC
1674}
1675
1676static void tg3_phy_fini(struct tg3 *tp)
1677{
f07e9af3 1678 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
3f0e3ad7 1679 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
f07e9af3 1680 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
b02fd9e3
MC
1681 }
1682}
1683
941ec90f
MC
1684static int tg3_phy_set_extloopbk(struct tg3 *tp)
1685{
1686 int err;
1687 u32 val;
1688
1689 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1690 return 0;
1691
1692 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1693 /* Cannot do read-modify-write on 5401 */
1694 err = tg3_phy_auxctl_write(tp,
1695 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1696 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1697 0x4c20);
1698 goto done;
1699 }
1700
1701 err = tg3_phy_auxctl_read(tp,
1702 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1703 if (err)
1704 return err;
1705
1706 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1707 err = tg3_phy_auxctl_write(tp,
1708 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1709
1710done:
1711 return err;
1712}
1713
7f97a4bd
MC
1714static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1715{
1716 u32 phytest;
1717
1718 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1719 u32 phy;
1720
1721 tg3_writephy(tp, MII_TG3_FET_TEST,
1722 phytest | MII_TG3_FET_SHADOW_EN);
1723 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1724 if (enable)
1725 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1726 else
1727 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1728 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1729 }
1730 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1731 }
1732}
1733
6833c043
MC
1734static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1735{
1736 u32 reg;
1737
63c3a66f
JP
1738 if (!tg3_flag(tp, 5705_PLUS) ||
1739 (tg3_flag(tp, 5717_PLUS) &&
f07e9af3 1740 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
6833c043
MC
1741 return;
1742
f07e9af3 1743 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7f97a4bd
MC
1744 tg3_phy_fet_toggle_apd(tp, enable);
1745 return;
1746 }
1747
6833c043
MC
1748 reg = MII_TG3_MISC_SHDW_WREN |
1749 MII_TG3_MISC_SHDW_SCR5_SEL |
1750 MII_TG3_MISC_SHDW_SCR5_LPED |
1751 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1752 MII_TG3_MISC_SHDW_SCR5_SDTL |
1753 MII_TG3_MISC_SHDW_SCR5_C125OE;
1754 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1755 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1756
1757 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1758
1759
1760 reg = MII_TG3_MISC_SHDW_WREN |
1761 MII_TG3_MISC_SHDW_APD_SEL |
1762 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1763 if (enable)
1764 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1765
1766 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1767}
1768
9ef8ca99
MC
1769static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1770{
1771 u32 phy;
1772
63c3a66f 1773 if (!tg3_flag(tp, 5705_PLUS) ||
f07e9af3 1774 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9ef8ca99
MC
1775 return;
1776
f07e9af3 1777 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
9ef8ca99
MC
1778 u32 ephy;
1779
535ef6e1
MC
1780 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1781 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1782
1783 tg3_writephy(tp, MII_TG3_FET_TEST,
1784 ephy | MII_TG3_FET_SHADOW_EN);
1785 if (!tg3_readphy(tp, reg, &phy)) {
9ef8ca99 1786 if (enable)
535ef6e1 1787 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
9ef8ca99 1788 else
535ef6e1
MC
1789 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1790 tg3_writephy(tp, reg, phy);
9ef8ca99 1791 }
535ef6e1 1792 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
9ef8ca99
MC
1793 }
1794 } else {
15ee95c3
MC
1795 int ret;
1796
1797 ret = tg3_phy_auxctl_read(tp,
1798 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1799 if (!ret) {
9ef8ca99
MC
1800 if (enable)
1801 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1802 else
1803 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
b4bd2929
MC
1804 tg3_phy_auxctl_write(tp,
1805 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
9ef8ca99
MC
1806 }
1807 }
1808}
1809
1da177e4
LT
1810static void tg3_phy_set_wirespeed(struct tg3 *tp)
1811{
15ee95c3 1812 int ret;
1da177e4
LT
1813 u32 val;
1814
f07e9af3 1815 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1da177e4
LT
1816 return;
1817
15ee95c3
MC
1818 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1819 if (!ret)
b4bd2929
MC
1820 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1821 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1da177e4
LT
1822}
1823
b2a5c19c
MC
1824static void tg3_phy_apply_otp(struct tg3 *tp)
1825{
1826 u32 otp, phy;
1827
1828 if (!tp->phy_otp)
1829 return;
1830
1831 otp = tp->phy_otp;
1832
1d36ba45
MC
1833 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1834 return;
b2a5c19c
MC
1835
1836 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1837 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1838 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1839
1840 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1841 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1842 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1843
1844 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1845 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1846 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1847
1848 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1849 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1850
1851 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1852 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1853
1854 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1855 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1856 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1857
1d36ba45 1858 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
b2a5c19c
MC
1859}
1860
52b02d04
MC
1861static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1862{
1863 u32 val;
1864
1865 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1866 return;
1867
1868 tp->setlpicnt = 0;
1869
1870 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1871 current_link_up == 1 &&
a6b68dab
MC
1872 tp->link_config.active_duplex == DUPLEX_FULL &&
1873 (tp->link_config.active_speed == SPEED_100 ||
1874 tp->link_config.active_speed == SPEED_1000)) {
52b02d04
MC
1875 u32 eeectl;
1876
1877 if (tp->link_config.active_speed == SPEED_1000)
1878 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1879 else
1880 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1881
1882 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1883
3110f5f5
MC
1884 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1885 TG3_CL45_D7_EEERES_STAT, &val);
52b02d04 1886
b0c5943f
MC
1887 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1888 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
52b02d04
MC
1889 tp->setlpicnt = 2;
1890 }
1891
1892 if (!tp->setlpicnt) {
b715ce94
MC
1893 if (current_link_up == 1 &&
1894 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1895 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
1896 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1897 }
1898
52b02d04
MC
1899 val = tr32(TG3_CPMU_EEE_MODE);
1900 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1901 }
1902}
1903
b0c5943f
MC
1904static void tg3_phy_eee_enable(struct tg3 *tp)
1905{
1906 u32 val;
1907
1908 if (tp->link_config.active_speed == SPEED_1000 &&
1909 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1910 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1911 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1912 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
b715ce94
MC
1913 val = MII_TG3_DSP_TAP26_ALNOKO |
1914 MII_TG3_DSP_TAP26_RMRXSTO;
1915 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
b0c5943f
MC
1916 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1917 }
1918
1919 val = tr32(TG3_CPMU_EEE_MODE);
1920 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1921}
1922
1da177e4
LT
1923static int tg3_wait_macro_done(struct tg3 *tp)
1924{
1925 int limit = 100;
1926
1927 while (limit--) {
1928 u32 tmp32;
1929
f08aa1a8 1930 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1da177e4
LT
1931 if ((tmp32 & 0x1000) == 0)
1932 break;
1933 }
1934 }
d4675b52 1935 if (limit < 0)
1da177e4
LT
1936 return -EBUSY;
1937
1938 return 0;
1939}
1940
1941static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1942{
1943 static const u32 test_pat[4][6] = {
1944 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1945 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1946 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1947 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1948 };
1949 int chan;
1950
1951 for (chan = 0; chan < 4; chan++) {
1952 int i;
1953
1954 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1955 (chan * 0x2000) | 0x0200);
f08aa1a8 1956 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1da177e4
LT
1957
1958 for (i = 0; i < 6; i++)
1959 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1960 test_pat[chan][i]);
1961
f08aa1a8 1962 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1da177e4
LT
1963 if (tg3_wait_macro_done(tp)) {
1964 *resetp = 1;
1965 return -EBUSY;
1966 }
1967
1968 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1969 (chan * 0x2000) | 0x0200);
f08aa1a8 1970 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1da177e4
LT
1971 if (tg3_wait_macro_done(tp)) {
1972 *resetp = 1;
1973 return -EBUSY;
1974 }
1975
f08aa1a8 1976 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1da177e4
LT
1977 if (tg3_wait_macro_done(tp)) {
1978 *resetp = 1;
1979 return -EBUSY;
1980 }
1981
1982 for (i = 0; i < 6; i += 2) {
1983 u32 low, high;
1984
1985 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1986 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1987 tg3_wait_macro_done(tp)) {
1988 *resetp = 1;
1989 return -EBUSY;
1990 }
1991 low &= 0x7fff;
1992 high &= 0x000f;
1993 if (low != test_pat[chan][i] ||
1994 high != test_pat[chan][i+1]) {
1995 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1996 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1997 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1998
1999 return -EBUSY;
2000 }
2001 }
2002 }
2003
2004 return 0;
2005}
2006
2007static int tg3_phy_reset_chanpat(struct tg3 *tp)
2008{
2009 int chan;
2010
2011 for (chan = 0; chan < 4; chan++) {
2012 int i;
2013
2014 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2015 (chan * 0x2000) | 0x0200);
f08aa1a8 2016 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1da177e4
LT
2017 for (i = 0; i < 6; i++)
2018 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
f08aa1a8 2019 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1da177e4
LT
2020 if (tg3_wait_macro_done(tp))
2021 return -EBUSY;
2022 }
2023
2024 return 0;
2025}
2026
2027static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2028{
2029 u32 reg32, phy9_orig;
2030 int retries, do_phy_reset, err;
2031
2032 retries = 10;
2033 do_phy_reset = 1;
2034 do {
2035 if (do_phy_reset) {
2036 err = tg3_bmcr_reset(tp);
2037 if (err)
2038 return err;
2039 do_phy_reset = 0;
2040 }
2041
2042 /* Disable transmitter and interrupt. */
2043 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2044 continue;
2045
2046 reg32 |= 0x3000;
2047 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2048
2049 /* Set full-duplex, 1000 mbps. */
2050 tg3_writephy(tp, MII_BMCR,
221c5637 2051 BMCR_FULLDPLX | BMCR_SPEED1000);
1da177e4
LT
2052
2053 /* Set to master mode. */
221c5637 2054 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
1da177e4
LT
2055 continue;
2056
221c5637
MC
2057 tg3_writephy(tp, MII_CTRL1000,
2058 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
1da177e4 2059
1d36ba45
MC
2060 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2061 if (err)
2062 return err;
1da177e4
LT
2063
2064 /* Block the PHY control access. */
6ee7c0a0 2065 tg3_phydsp_write(tp, 0x8005, 0x0800);
1da177e4
LT
2066
2067 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2068 if (!err)
2069 break;
2070 } while (--retries);
2071
2072 err = tg3_phy_reset_chanpat(tp);
2073 if (err)
2074 return err;
2075
6ee7c0a0 2076 tg3_phydsp_write(tp, 0x8005, 0x0000);
1da177e4
LT
2077
2078 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
f08aa1a8 2079 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
1da177e4 2080
1d36ba45 2081 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1da177e4 2082
221c5637 2083 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
1da177e4
LT
2084
2085 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2086 reg32 &= ~0x3000;
2087 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2088 } else if (!err)
2089 err = -EBUSY;
2090
2091 return err;
2092}
2093
2094/* This will reset the tigon3 PHY if there is no valid
2095 * link unless the FORCE argument is non-zero.
2096 */
2097static int tg3_phy_reset(struct tg3 *tp)
2098{
f833c4c1 2099 u32 val, cpmuctrl;
1da177e4
LT
2100 int err;
2101
60189ddf 2102 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
60189ddf
MC
2103 val = tr32(GRC_MISC_CFG);
2104 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2105 udelay(40);
2106 }
f833c4c1
MC
2107 err = tg3_readphy(tp, MII_BMSR, &val);
2108 err |= tg3_readphy(tp, MII_BMSR, &val);
1da177e4
LT
2109 if (err != 0)
2110 return -EBUSY;
2111
c8e1e82b
MC
2112 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2113 netif_carrier_off(tp->dev);
2114 tg3_link_report(tp);
2115 }
2116
1da177e4
LT
2117 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2118 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2119 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2120 err = tg3_phy_reset_5703_4_5(tp);
2121 if (err)
2122 return err;
2123 goto out;
2124 }
2125
b2a5c19c
MC
2126 cpmuctrl = 0;
2127 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2128 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2129 cpmuctrl = tr32(TG3_CPMU_CTRL);
2130 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2131 tw32(TG3_CPMU_CTRL,
2132 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2133 }
2134
1da177e4
LT
2135 err = tg3_bmcr_reset(tp);
2136 if (err)
2137 return err;
2138
b2a5c19c 2139 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
f833c4c1
MC
2140 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2141 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
b2a5c19c
MC
2142
2143 tw32(TG3_CPMU_CTRL, cpmuctrl);
2144 }
2145
bcb37f6c
MC
2146 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2147 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
ce057f01
MC
2148 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2149 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2150 CPMU_LSPD_1000MB_MACCLK_12_5) {
2151 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2152 udelay(40);
2153 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2154 }
2155 }
2156
63c3a66f 2157 if (tg3_flag(tp, 5717_PLUS) &&
f07e9af3 2158 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
ecf1410b
MC
2159 return 0;
2160
b2a5c19c
MC
2161 tg3_phy_apply_otp(tp);
2162
f07e9af3 2163 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
6833c043
MC
2164 tg3_phy_toggle_apd(tp, true);
2165 else
2166 tg3_phy_toggle_apd(tp, false);
2167
1da177e4 2168out:
1d36ba45
MC
2169 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2170 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
6ee7c0a0
MC
2171 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2172 tg3_phydsp_write(tp, 0x000a, 0x0323);
1d36ba45 2173 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1da177e4 2174 }
1d36ba45 2175
f07e9af3 2176 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
f08aa1a8
MC
2177 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2178 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
1da177e4 2179 }
1d36ba45 2180
f07e9af3 2181 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
1d36ba45
MC
2182 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2183 tg3_phydsp_write(tp, 0x000a, 0x310b);
2184 tg3_phydsp_write(tp, 0x201f, 0x9506);
2185 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2186 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2187 }
f07e9af3 2188 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
1d36ba45
MC
2189 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2190 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2191 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2192 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2193 tg3_writephy(tp, MII_TG3_TEST1,
2194 MII_TG3_TEST1_TRIM_EN | 0x4);
2195 } else
2196 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2197
2198 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2199 }
c424cb24 2200 }
1d36ba45 2201
1da177e4
LT
2202 /* Set Extended packet length bit (bit 14) on all chips that */
2203 /* support jumbo frames */
79eb6904 2204 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1da177e4 2205 /* Cannot do read-modify-write on 5401 */
b4bd2929 2206 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
63c3a66f 2207 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
1da177e4 2208 /* Set bit 14 with read-modify-write to preserve other bits */
15ee95c3
MC
2209 err = tg3_phy_auxctl_read(tp,
2210 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2211 if (!err)
b4bd2929
MC
2212 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2213 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
1da177e4
LT
2214 }
2215
2216 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2217 * jumbo frames transmission.
2218 */
63c3a66f 2219 if (tg3_flag(tp, JUMBO_CAPABLE)) {
f833c4c1 2220 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
c6cdf436 2221 tg3_writephy(tp, MII_TG3_EXT_CTRL,
f833c4c1 2222 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1da177e4
LT
2223 }
2224
715116a1 2225 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
715116a1 2226 /* adjust output voltage */
535ef6e1 2227 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
715116a1
MC
2228 }
2229
9ef8ca99 2230 tg3_phy_toggle_automdix(tp, 1);
1da177e4
LT
2231 tg3_phy_set_wirespeed(tp);
2232 return 0;
2233}
2234
3a1e19d3
MC
/* GPIO power-source message bits.  Each of the (up to four) PCI functions
 * owns a 2-bit slot (4 bits of spacing) in a shared status word.
 */
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2250
2251static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2252{
2253 u32 status, shift;
2254
2255 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2256 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2257 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2258 else
2259 status = tr32(TG3_CPMU_DRV_STATUS);
2260
2261 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2262 status &= ~(TG3_GPIO_MSG_MASK << shift);
2263 status |= (newstat << shift);
2264
2265 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2266 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2267 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2268 else
2269 tw32(TG3_CPMU_DRV_STATUS, status);
2270
2271 return status >> TG3_APE_GPIO_MSG_SHIFT;
2272}
2273
520b2756
MC
2274static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2275{
2276 if (!tg3_flag(tp, IS_NIC))
2277 return 0;
2278
3a1e19d3
MC
2279 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2280 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2281 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2282 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2283 return -EIO;
520b2756 2284
3a1e19d3
MC
2285 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2286
2287 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2288 TG3_GRC_LCLCTL_PWRSW_DELAY);
2289
2290 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2291 } else {
2292 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2293 TG3_GRC_LCLCTL_PWRSW_DELAY);
2294 }
6f5c8f83 2295
520b2756
MC
2296 return 0;
2297}
2298
2299static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2300{
2301 u32 grc_local_ctrl;
2302
2303 if (!tg3_flag(tp, IS_NIC) ||
2304 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2305 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2306 return;
2307
2308 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2309
2310 tw32_wait_f(GRC_LOCAL_CTRL,
2311 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2312 TG3_GRC_LCLCTL_PWRSW_DELAY);
2313
2314 tw32_wait_f(GRC_LOCAL_CTRL,
2315 grc_local_ctrl,
2316 TG3_GRC_LCLCTL_PWRSW_DELAY);
2317
2318 tw32_wait_f(GRC_LOCAL_CTRL,
2319 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2320 TG3_GRC_LCLCTL_PWRSW_DELAY);
2321}
2322
2323static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2324{
2325 if (!tg3_flag(tp, IS_NIC))
2326 return;
2327
2328 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2329 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2330 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2331 (GRC_LCLCTRL_GPIO_OE0 |
2332 GRC_LCLCTRL_GPIO_OE1 |
2333 GRC_LCLCTRL_GPIO_OE2 |
2334 GRC_LCLCTRL_GPIO_OUTPUT0 |
2335 GRC_LCLCTRL_GPIO_OUTPUT1),
2336 TG3_GRC_LCLCTL_PWRSW_DELAY);
2337 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2338 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2339 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2340 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2341 GRC_LCLCTRL_GPIO_OE1 |
2342 GRC_LCLCTRL_GPIO_OE2 |
2343 GRC_LCLCTRL_GPIO_OUTPUT0 |
2344 GRC_LCLCTRL_GPIO_OUTPUT1 |
2345 tp->grc_local_ctrl;
2346 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2347 TG3_GRC_LCLCTL_PWRSW_DELAY);
2348
2349 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2350 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2351 TG3_GRC_LCLCTL_PWRSW_DELAY);
2352
2353 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2354 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2355 TG3_GRC_LCLCTL_PWRSW_DELAY);
2356 } else {
2357 u32 no_gpio2;
2358 u32 grc_local_ctrl = 0;
2359
2360 /* Workaround to prevent overdrawing Amps. */
2361 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2362 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2363 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2364 grc_local_ctrl,
2365 TG3_GRC_LCLCTL_PWRSW_DELAY);
2366 }
2367
2368 /* On 5753 and variants, GPIO2 cannot be used. */
2369 no_gpio2 = tp->nic_sram_data_cfg &
2370 NIC_SRAM_DATA_CFG_NO_GPIO2;
2371
2372 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2373 GRC_LCLCTRL_GPIO_OE1 |
2374 GRC_LCLCTRL_GPIO_OE2 |
2375 GRC_LCLCTRL_GPIO_OUTPUT1 |
2376 GRC_LCLCTRL_GPIO_OUTPUT2;
2377 if (no_gpio2) {
2378 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2379 GRC_LCLCTRL_GPIO_OUTPUT2);
2380 }
2381 tw32_wait_f(GRC_LOCAL_CTRL,
2382 tp->grc_local_ctrl | grc_local_ctrl,
2383 TG3_GRC_LCLCTL_PWRSW_DELAY);
2384
2385 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2386
2387 tw32_wait_f(GRC_LOCAL_CTRL,
2388 tp->grc_local_ctrl | grc_local_ctrl,
2389 TG3_GRC_LCLCTL_PWRSW_DELAY);
2390
2391 if (!no_gpio2) {
2392 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2393 tw32_wait_f(GRC_LOCAL_CTRL,
2394 tp->grc_local_ctrl | grc_local_ctrl,
2395 TG3_GRC_LCLCTL_PWRSW_DELAY);
2396 }
2397 }
3a1e19d3
MC
2398}
2399
cd0d7228 2400static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
3a1e19d3
MC
2401{
2402 u32 msg = 0;
2403
2404 /* Serialize power state transitions */
2405 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2406 return;
2407
cd0d7228 2408 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
3a1e19d3
MC
2409 msg = TG3_GPIO_MSG_NEED_VAUX;
2410
2411 msg = tg3_set_function_status(tp, msg);
2412
2413 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2414 goto done;
6f5c8f83 2415
3a1e19d3
MC
2416 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2417 tg3_pwrsrc_switch_to_vaux(tp);
2418 else
2419 tg3_pwrsrc_die_with_vmain(tp);
2420
2421done:
6f5c8f83 2422 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
520b2756
MC
2423}
2424
cd0d7228 2425static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
1da177e4 2426{
683644b7 2427 bool need_vaux = false;
1da177e4 2428
334355aa 2429 /* The GPIOs do something completely different on 57765. */
63c3a66f 2430 if (!tg3_flag(tp, IS_NIC) ||
334355aa 2431 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
1da177e4
LT
2432 return;
2433
3a1e19d3
MC
2434 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2435 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2436 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
cd0d7228
MC
2437 tg3_frob_aux_power_5717(tp, include_wol ?
2438 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
3a1e19d3
MC
2439 return;
2440 }
2441
2442 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
8c2dc7e1
MC
2443 struct net_device *dev_peer;
2444
2445 dev_peer = pci_get_drvdata(tp->pdev_peer);
683644b7 2446
bc1c7567 2447 /* remove_one() may have been run on the peer. */
683644b7
MC
2448 if (dev_peer) {
2449 struct tg3 *tp_peer = netdev_priv(dev_peer);
2450
63c3a66f 2451 if (tg3_flag(tp_peer, INIT_COMPLETE))
683644b7
MC
2452 return;
2453
cd0d7228 2454 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
63c3a66f 2455 tg3_flag(tp_peer, ENABLE_ASF))
683644b7
MC
2456 need_vaux = true;
2457 }
1da177e4
LT
2458 }
2459
cd0d7228
MC
2460 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2461 tg3_flag(tp, ENABLE_ASF))
683644b7
MC
2462 need_vaux = true;
2463
520b2756
MC
2464 if (need_vaux)
2465 tg3_pwrsrc_switch_to_vaux(tp);
2466 else
2467 tg3_pwrsrc_die_with_vmain(tp);
1da177e4
LT
2468}
2469
e8f3f6ca
MC
2470static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2471{
2472 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2473 return 1;
79eb6904 2474 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
e8f3f6ca
MC
2475 if (speed != SPEED_10)
2476 return 1;
2477 } else if (speed == SPEED_10)
2478 return 1;
2479
2480 return 0;
2481}
2482
1da177e4
LT
2483static int tg3_setup_phy(struct tg3 *, int);
2484
2485#define RESET_KIND_SHUTDOWN 0
2486#define RESET_KIND_INIT 1
2487#define RESET_KIND_SUSPEND 2
2488
2489static void tg3_write_sig_post_reset(struct tg3 *, int);
2490static int tg3_halt_cpu(struct tg3 *, u32);
2491
0a459aac 2492static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
15c3b696 2493{
ce057f01
MC
2494 u32 val;
2495
f07e9af3 2496 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
5129724a
MC
2497 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2498 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2499 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2500
2501 sg_dig_ctrl |=
2502 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2503 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2504 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2505 }
3f7045c1 2506 return;
5129724a 2507 }
3f7045c1 2508
60189ddf 2509 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
60189ddf
MC
2510 tg3_bmcr_reset(tp);
2511 val = tr32(GRC_MISC_CFG);
2512 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2513 udelay(40);
2514 return;
f07e9af3 2515 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
0e5f784c
MC
2516 u32 phytest;
2517 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2518 u32 phy;
2519
2520 tg3_writephy(tp, MII_ADVERTISE, 0);
2521 tg3_writephy(tp, MII_BMCR,
2522 BMCR_ANENABLE | BMCR_ANRESTART);
2523
2524 tg3_writephy(tp, MII_TG3_FET_TEST,
2525 phytest | MII_TG3_FET_SHADOW_EN);
2526 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2527 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2528 tg3_writephy(tp,
2529 MII_TG3_FET_SHDW_AUXMODE4,
2530 phy);
2531 }
2532 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2533 }
2534 return;
0a459aac 2535 } else if (do_low_power) {
715116a1
MC
2536 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2537 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
0a459aac 2538
b4bd2929
MC
2539 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2540 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2541 MII_TG3_AUXCTL_PCTL_VREG_11V;
2542 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
715116a1 2543 }
3f7045c1 2544
15c3b696
MC
2545 /* The PHY should not be powered down on some chips because
2546 * of bugs.
2547 */
2548 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2549 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2550 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
f07e9af3 2551 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
15c3b696 2552 return;
ce057f01 2553
bcb37f6c
MC
2554 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2555 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
ce057f01
MC
2556 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2557 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2558 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2559 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2560 }
2561
15c3b696
MC
2562 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2563}
2564
ffbcfed4
MC
2565/* tp->lock is held. */
2566static int tg3_nvram_lock(struct tg3 *tp)
2567{
63c3a66f 2568 if (tg3_flag(tp, NVRAM)) {
ffbcfed4
MC
2569 int i;
2570
2571 if (tp->nvram_lock_cnt == 0) {
2572 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2573 for (i = 0; i < 8000; i++) {
2574 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2575 break;
2576 udelay(20);
2577 }
2578 if (i == 8000) {
2579 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2580 return -ENODEV;
2581 }
2582 }
2583 tp->nvram_lock_cnt++;
2584 }
2585 return 0;
2586}
2587
2588/* tp->lock is held. */
2589static void tg3_nvram_unlock(struct tg3 *tp)
2590{
63c3a66f 2591 if (tg3_flag(tp, NVRAM)) {
ffbcfed4
MC
2592 if (tp->nvram_lock_cnt > 0)
2593 tp->nvram_lock_cnt--;
2594 if (tp->nvram_lock_cnt == 0)
2595 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2596 }
2597}
2598
2599/* tp->lock is held. */
2600static void tg3_enable_nvram_access(struct tg3 *tp)
2601{
63c3a66f 2602 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
ffbcfed4
MC
2603 u32 nvaccess = tr32(NVRAM_ACCESS);
2604
2605 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2606 }
2607}
2608
2609/* tp->lock is held. */
2610static void tg3_disable_nvram_access(struct tg3 *tp)
2611{
63c3a66f 2612 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
ffbcfed4
MC
2613 u32 nvaccess = tr32(NVRAM_ACCESS);
2614
2615 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2616 }
2617}
2618
2619static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2620 u32 offset, u32 *val)
2621{
2622 u32 tmp;
2623 int i;
2624
2625 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2626 return -EINVAL;
2627
2628 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2629 EEPROM_ADDR_DEVID_MASK |
2630 EEPROM_ADDR_READ);
2631 tw32(GRC_EEPROM_ADDR,
2632 tmp |
2633 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2634 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2635 EEPROM_ADDR_ADDR_MASK) |
2636 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2637
2638 for (i = 0; i < 1000; i++) {
2639 tmp = tr32(GRC_EEPROM_ADDR);
2640
2641 if (tmp & EEPROM_ADDR_COMPLETE)
2642 break;
2643 msleep(1);
2644 }
2645 if (!(tmp & EEPROM_ADDR_COMPLETE))
2646 return -EBUSY;
2647
62cedd11
MC
2648 tmp = tr32(GRC_EEPROM_DATA);
2649
2650 /*
2651 * The data will always be opposite the native endian
2652 * format. Perform a blind byteswap to compensate.
2653 */
2654 *val = swab32(tmp);
2655
ffbcfed4
MC
2656 return 0;
2657}
2658
2659#define NVRAM_CMD_TIMEOUT 10000
2660
2661static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2662{
2663 int i;
2664
2665 tw32(NVRAM_CMD, nvram_cmd);
2666 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2667 udelay(10);
2668 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2669 udelay(10);
2670 break;
2671 }
2672 }
2673
2674 if (i == NVRAM_CMD_TIMEOUT)
2675 return -EBUSY;
2676
2677 return 0;
2678}
2679
2680static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2681{
63c3a66f
JP
2682 if (tg3_flag(tp, NVRAM) &&
2683 tg3_flag(tp, NVRAM_BUFFERED) &&
2684 tg3_flag(tp, FLASH) &&
2685 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
ffbcfed4
MC
2686 (tp->nvram_jedecnum == JEDEC_ATMEL))
2687
2688 addr = ((addr / tp->nvram_pagesize) <<
2689 ATMEL_AT45DB0X1B_PAGE_POS) +
2690 (addr % tp->nvram_pagesize);
2691
2692 return addr;
2693}
2694
2695static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2696{
63c3a66f
JP
2697 if (tg3_flag(tp, NVRAM) &&
2698 tg3_flag(tp, NVRAM_BUFFERED) &&
2699 tg3_flag(tp, FLASH) &&
2700 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
ffbcfed4
MC
2701 (tp->nvram_jedecnum == JEDEC_ATMEL))
2702
2703 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2704 tp->nvram_pagesize) +
2705 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2706
2707 return addr;
2708}
2709
e4f34110
MC
2710/* NOTE: Data read in from NVRAM is byteswapped according to
2711 * the byteswapping settings for all other register accesses.
2712 * tg3 devices are BE devices, so on a BE machine, the data
2713 * returned will be exactly as it is seen in NVRAM. On a LE
2714 * machine, the 32-bit value will be byteswapped.
2715 */
ffbcfed4
MC
2716static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2717{
2718 int ret;
2719
63c3a66f 2720 if (!tg3_flag(tp, NVRAM))
ffbcfed4
MC
2721 return tg3_nvram_read_using_eeprom(tp, offset, val);
2722
2723 offset = tg3_nvram_phys_addr(tp, offset);
2724
2725 if (offset > NVRAM_ADDR_MSK)
2726 return -EINVAL;
2727
2728 ret = tg3_nvram_lock(tp);
2729 if (ret)
2730 return ret;
2731
2732 tg3_enable_nvram_access(tp);
2733
2734 tw32(NVRAM_ADDR, offset);
2735 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2736 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2737
2738 if (ret == 0)
e4f34110 2739 *val = tr32(NVRAM_RDDATA);
ffbcfed4
MC
2740
2741 tg3_disable_nvram_access(tp);
2742
2743 tg3_nvram_unlock(tp);
2744
2745 return ret;
2746}
2747
a9dc529d
MC
2748/* Ensures NVRAM data is in bytestream format. */
2749static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
ffbcfed4
MC
2750{
2751 u32 v;
a9dc529d 2752 int res = tg3_nvram_read(tp, offset, &v);
ffbcfed4 2753 if (!res)
a9dc529d 2754 *val = cpu_to_be32(v);
ffbcfed4
MC
2755 return res;
2756}
2757
3f007891
MC
2758/* tp->lock is held. */
2759static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2760{
2761 u32 addr_high, addr_low;
2762 int i;
2763
2764 addr_high = ((tp->dev->dev_addr[0] << 8) |
2765 tp->dev->dev_addr[1]);
2766 addr_low = ((tp->dev->dev_addr[2] << 24) |
2767 (tp->dev->dev_addr[3] << 16) |
2768 (tp->dev->dev_addr[4] << 8) |
2769 (tp->dev->dev_addr[5] << 0));
2770 for (i = 0; i < 4; i++) {
2771 if (i == 1 && skip_mac_1)
2772 continue;
2773 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2774 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2775 }
2776
2777 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2778 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2779 for (i = 0; i < 12; i++) {
2780 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2781 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2782 }
2783 }
2784
2785 addr_high = (tp->dev->dev_addr[0] +
2786 tp->dev->dev_addr[1] +
2787 tp->dev->dev_addr[2] +
2788 tp->dev->dev_addr[3] +
2789 tp->dev->dev_addr[4] +
2790 tp->dev->dev_addr[5]) &
2791 TX_BACKOFF_SEED_MASK;
2792 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2793}
2794
c866b7ea 2795static void tg3_enable_register_access(struct tg3 *tp)
1da177e4 2796{
c866b7ea
RW
2797 /*
2798 * Make sure register accesses (indirect or otherwise) will function
2799 * correctly.
1da177e4
LT
2800 */
2801 pci_write_config_dword(tp->pdev,
c866b7ea
RW
2802 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2803}
1da177e4 2804
c866b7ea
RW
2805static int tg3_power_up(struct tg3 *tp)
2806{
bed9829f 2807 int err;
8c6bda1a 2808
bed9829f 2809 tg3_enable_register_access(tp);
1da177e4 2810
bed9829f
MC
2811 err = pci_set_power_state(tp->pdev, PCI_D0);
2812 if (!err) {
2813 /* Switch out of Vaux if it is a NIC */
2814 tg3_pwrsrc_switch_to_vmain(tp);
2815 } else {
2816 netdev_err(tp->dev, "Transition to D0 failed\n");
2817 }
1da177e4 2818
bed9829f 2819 return err;
c866b7ea 2820}
1da177e4 2821
c866b7ea
RW
2822static int tg3_power_down_prepare(struct tg3 *tp)
2823{
2824 u32 misc_host_ctrl;
2825 bool device_should_wake, do_low_power;
2826
2827 tg3_enable_register_access(tp);
5e7dfd0f
MC
2828
2829 /* Restore the CLKREQ setting. */
63c3a66f 2830 if (tg3_flag(tp, CLKREQ_BUG)) {
5e7dfd0f
MC
2831 u16 lnkctl;
2832
2833 pci_read_config_word(tp->pdev,
708ebb3a 2834 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
5e7dfd0f
MC
2835 &lnkctl);
2836 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2837 pci_write_config_word(tp->pdev,
708ebb3a 2838 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
5e7dfd0f
MC
2839 lnkctl);
2840 }
2841
1da177e4
LT
2842 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2843 tw32(TG3PCI_MISC_HOST_CTRL,
2844 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2845
c866b7ea 2846 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
63c3a66f 2847 tg3_flag(tp, WOL_ENABLE);
05ac4cb7 2848
63c3a66f 2849 if (tg3_flag(tp, USE_PHYLIB)) {
0a459aac 2850 do_low_power = false;
f07e9af3 2851 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
80096068 2852 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
b02fd9e3 2853 struct phy_device *phydev;
0a459aac 2854 u32 phyid, advertising;
b02fd9e3 2855
3f0e3ad7 2856 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
b02fd9e3 2857
80096068 2858 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
b02fd9e3
MC
2859
2860 tp->link_config.orig_speed = phydev->speed;
2861 tp->link_config.orig_duplex = phydev->duplex;
2862 tp->link_config.orig_autoneg = phydev->autoneg;
2863 tp->link_config.orig_advertising = phydev->advertising;
2864
2865 advertising = ADVERTISED_TP |
2866 ADVERTISED_Pause |
2867 ADVERTISED_Autoneg |
2868 ADVERTISED_10baseT_Half;
2869
63c3a66f
JP
2870 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2871 if (tg3_flag(tp, WOL_SPEED_100MB))
b02fd9e3
MC
2872 advertising |=
2873 ADVERTISED_100baseT_Half |
2874 ADVERTISED_100baseT_Full |
2875 ADVERTISED_10baseT_Full;
2876 else
2877 advertising |= ADVERTISED_10baseT_Full;
2878 }
2879
2880 phydev->advertising = advertising;
2881
2882 phy_start_aneg(phydev);
0a459aac
MC
2883
2884 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
6a443a0f
MC
2885 if (phyid != PHY_ID_BCMAC131) {
2886 phyid &= PHY_BCM_OUI_MASK;
2887 if (phyid == PHY_BCM_OUI_1 ||
2888 phyid == PHY_BCM_OUI_2 ||
2889 phyid == PHY_BCM_OUI_3)
0a459aac
MC
2890 do_low_power = true;
2891 }
b02fd9e3 2892 }
dd477003 2893 } else {
2023276e 2894 do_low_power = true;
0a459aac 2895
80096068
MC
2896 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2897 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
dd477003
MC
2898 tp->link_config.orig_speed = tp->link_config.speed;
2899 tp->link_config.orig_duplex = tp->link_config.duplex;
2900 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2901 }
1da177e4 2902
f07e9af3 2903 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
dd477003
MC
2904 tp->link_config.speed = SPEED_10;
2905 tp->link_config.duplex = DUPLEX_HALF;
2906 tp->link_config.autoneg = AUTONEG_ENABLE;
2907 tg3_setup_phy(tp, 0);
2908 }
1da177e4
LT
2909 }
2910
b5d3772c
MC
2911 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2912 u32 val;
2913
2914 val = tr32(GRC_VCPU_EXT_CTRL);
2915 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
63c3a66f 2916 } else if (!tg3_flag(tp, ENABLE_ASF)) {
6921d201
MC
2917 int i;
2918 u32 val;
2919
2920 for (i = 0; i < 200; i++) {
2921 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2922 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2923 break;
2924 msleep(1);
2925 }
2926 }
63c3a66f 2927 if (tg3_flag(tp, WOL_CAP))
a85feb8c
GZ
2928 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2929 WOL_DRV_STATE_SHUTDOWN |
2930 WOL_DRV_WOL |
2931 WOL_SET_MAGIC_PKT);
6921d201 2932
05ac4cb7 2933 if (device_should_wake) {
1da177e4
LT
2934 u32 mac_mode;
2935
f07e9af3 2936 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
b4bd2929
MC
2937 if (do_low_power &&
2938 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2939 tg3_phy_auxctl_write(tp,
2940 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2941 MII_TG3_AUXCTL_PCTL_WOL_EN |
2942 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2943 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
dd477003
MC
2944 udelay(40);
2945 }
1da177e4 2946
f07e9af3 2947 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3f7045c1
MC
2948 mac_mode = MAC_MODE_PORT_MODE_GMII;
2949 else
2950 mac_mode = MAC_MODE_PORT_MODE_MII;
1da177e4 2951
e8f3f6ca
MC
2952 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2953 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2954 ASIC_REV_5700) {
63c3a66f 2955 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
e8f3f6ca
MC
2956 SPEED_100 : SPEED_10;
2957 if (tg3_5700_link_polarity(tp, speed))
2958 mac_mode |= MAC_MODE_LINK_POLARITY;
2959 else
2960 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2961 }
1da177e4
LT
2962 } else {
2963 mac_mode = MAC_MODE_PORT_MODE_TBI;
2964 }
2965
63c3a66f 2966 if (!tg3_flag(tp, 5750_PLUS))
1da177e4
LT
2967 tw32(MAC_LED_CTRL, tp->led_ctrl);
2968
05ac4cb7 2969 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
63c3a66f
JP
2970 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2971 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
05ac4cb7 2972 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
1da177e4 2973
63c3a66f 2974 if (tg3_flag(tp, ENABLE_APE))
d2394e6b
MC
2975 mac_mode |= MAC_MODE_APE_TX_EN |
2976 MAC_MODE_APE_RX_EN |
2977 MAC_MODE_TDE_ENABLE;
3bda1258 2978
1da177e4
LT
2979 tw32_f(MAC_MODE, mac_mode);
2980 udelay(100);
2981
2982 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2983 udelay(10);
2984 }
2985
63c3a66f 2986 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
1da177e4
LT
2987 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2988 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2989 u32 base_val;
2990
2991 base_val = tp->pci_clock_ctrl;
2992 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2993 CLOCK_CTRL_TXCLK_DISABLE);
2994
b401e9e2
MC
2995 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2996 CLOCK_CTRL_PWRDOWN_PLL133, 40);
63c3a66f
JP
2997 } else if (tg3_flag(tp, 5780_CLASS) ||
2998 tg3_flag(tp, CPMU_PRESENT) ||
6ff6f81d 2999 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
4cf78e4f 3000 /* do nothing */
63c3a66f 3001 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
1da177e4
LT
3002 u32 newbits1, newbits2;
3003
3004 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3005 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3006 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3007 CLOCK_CTRL_TXCLK_DISABLE |
3008 CLOCK_CTRL_ALTCLK);
3009 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
63c3a66f 3010 } else if (tg3_flag(tp, 5705_PLUS)) {
1da177e4
LT
3011 newbits1 = CLOCK_CTRL_625_CORE;
3012 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3013 } else {
3014 newbits1 = CLOCK_CTRL_ALTCLK;
3015 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3016 }
3017
b401e9e2
MC
3018 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3019 40);
1da177e4 3020
b401e9e2
MC
3021 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3022 40);
1da177e4 3023
63c3a66f 3024 if (!tg3_flag(tp, 5705_PLUS)) {
1da177e4
LT
3025 u32 newbits3;
3026
3027 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3028 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3029 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3030 CLOCK_CTRL_TXCLK_DISABLE |
3031 CLOCK_CTRL_44MHZ_CORE);
3032 } else {
3033 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3034 }
3035
b401e9e2
MC
3036 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3037 tp->pci_clock_ctrl | newbits3, 40);
1da177e4
LT
3038 }
3039 }
3040
63c3a66f 3041 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
0a459aac 3042 tg3_power_down_phy(tp, do_low_power);
6921d201 3043
cd0d7228 3044 tg3_frob_aux_power(tp, true);
1da177e4
LT
3045
3046 /* Workaround for unstable PLL clock */
3047 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3048 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3049 u32 val = tr32(0x7d00);
3050
3051 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3052 tw32(0x7d00, val);
63c3a66f 3053 if (!tg3_flag(tp, ENABLE_ASF)) {
ec41c7df
MC
3054 int err;
3055
3056 err = tg3_nvram_lock(tp);
1da177e4 3057 tg3_halt_cpu(tp, RX_CPU_BASE);
ec41c7df
MC
3058 if (!err)
3059 tg3_nvram_unlock(tp);
6921d201 3060 }
1da177e4
LT
3061 }
3062
bbadf503
MC
3063 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3064
c866b7ea
RW
3065 return 0;
3066}
12dac075 3067
c866b7ea
RW
/* Place the device in its low-power state: run the full shutdown
 * preparation sequence (WoL setup, PHY/clock power-down), then arm
 * PCI PME and move the device to D3hot.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	/* Only arm wake-from-D3 when the user enabled Wake-on-LAN. */
	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
3075
1da177e4
LT
3076static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3077{
3078 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3079 case MII_TG3_AUX_STAT_10HALF:
3080 *speed = SPEED_10;
3081 *duplex = DUPLEX_HALF;
3082 break;
3083
3084 case MII_TG3_AUX_STAT_10FULL:
3085 *speed = SPEED_10;
3086 *duplex = DUPLEX_FULL;
3087 break;
3088
3089 case MII_TG3_AUX_STAT_100HALF:
3090 *speed = SPEED_100;
3091 *duplex = DUPLEX_HALF;
3092 break;
3093
3094 case MII_TG3_AUX_STAT_100FULL:
3095 *speed = SPEED_100;
3096 *duplex = DUPLEX_FULL;
3097 break;
3098
3099 case MII_TG3_AUX_STAT_1000HALF:
3100 *speed = SPEED_1000;
3101 *duplex = DUPLEX_HALF;
3102 break;
3103
3104 case MII_TG3_AUX_STAT_1000FULL:
3105 *speed = SPEED_1000;
3106 *duplex = DUPLEX_FULL;
3107 break;
3108
3109 default:
f07e9af3 3110 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
715116a1
MC
3111 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3112 SPEED_10;
3113 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3114 DUPLEX_HALF;
3115 break;
3116 }
1da177e4
LT
3117 *speed = SPEED_INVALID;
3118 *duplex = DUPLEX_INVALID;
3119 break;
855e1111 3120 }
1da177e4
LT
3121}
3122
42b64a45 3123static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
1da177e4 3124{
42b64a45
MC
3125 int err = 0;
3126 u32 val, new_adv;
1da177e4 3127
42b64a45
MC
3128 new_adv = ADVERTISE_CSMA;
3129 if (advertise & ADVERTISED_10baseT_Half)
3130 new_adv |= ADVERTISE_10HALF;
3131 if (advertise & ADVERTISED_10baseT_Full)
3132 new_adv |= ADVERTISE_10FULL;
3133 if (advertise & ADVERTISED_100baseT_Half)
3134 new_adv |= ADVERTISE_100HALF;
3135 if (advertise & ADVERTISED_100baseT_Full)
3136 new_adv |= ADVERTISE_100FULL;
1da177e4 3137
42b64a45 3138 new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
1da177e4 3139
42b64a45
MC
3140 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3141 if (err)
3142 goto done;
ba4d07a8 3143
42b64a45
MC
3144 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3145 goto done;
1da177e4 3146
42b64a45
MC
3147 new_adv = 0;
3148 if (advertise & ADVERTISED_1000baseT_Half)
221c5637 3149 new_adv |= ADVERTISE_1000HALF;
42b64a45 3150 if (advertise & ADVERTISED_1000baseT_Full)
221c5637 3151 new_adv |= ADVERTISE_1000FULL;
ba4d07a8 3152
42b64a45
MC
3153 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3154 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
221c5637 3155 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
ba4d07a8 3156
221c5637 3157 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
42b64a45
MC
3158 if (err)
3159 goto done;
1da177e4 3160
42b64a45
MC
3161 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3162 goto done;
52b02d04 3163
42b64a45
MC
3164 tw32(TG3_CPMU_EEE_MODE,
3165 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
52b02d04 3166
42b64a45
MC
3167 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3168 if (!err) {
3169 u32 err2;
52b02d04 3170
b715ce94
MC
3171 val = 0;
3172 /* Advertise 100-BaseTX EEE ability */
3173 if (advertise & ADVERTISED_100baseT_Full)
3174 val |= MDIO_AN_EEE_ADV_100TX;
3175 /* Advertise 1000-BaseT EEE ability */
3176 if (advertise & ADVERTISED_1000baseT_Full)
3177 val |= MDIO_AN_EEE_ADV_1000T;
3178 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3179 if (err)
3180 val = 0;
3181
21a00ab2
MC
3182 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3183 case ASIC_REV_5717:
3184 case ASIC_REV_57765:
21a00ab2 3185 case ASIC_REV_5719:
b715ce94
MC
3186 /* If we advertised any eee advertisements above... */
3187 if (val)
3188 val = MII_TG3_DSP_TAP26_ALNOKO |
3189 MII_TG3_DSP_TAP26_RMRXSTO |
3190 MII_TG3_DSP_TAP26_OPCSINPT;
21a00ab2 3191 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
be671947
MC
3192 /* Fall through */
3193 case ASIC_REV_5720:
3194 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3195 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3196 MII_TG3_DSP_CH34TP2_HIBW01);
21a00ab2 3197 }
52b02d04 3198
42b64a45
MC
3199 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3200 if (!err)
3201 err = err2;
3202 }
3203
3204done:
3205 return err;
3206}
3207
3208static void tg3_phy_copper_begin(struct tg3 *tp)
3209{
3210 u32 new_adv;
3211 int i;
3212
3213 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3214 new_adv = ADVERTISED_10baseT_Half |
3215 ADVERTISED_10baseT_Full;
3216 if (tg3_flag(tp, WOL_SPEED_100MB))
3217 new_adv |= ADVERTISED_100baseT_Half |
3218 ADVERTISED_100baseT_Full;
3219
3220 tg3_phy_autoneg_cfg(tp, new_adv,
3221 FLOW_CTRL_TX | FLOW_CTRL_RX);
3222 } else if (tp->link_config.speed == SPEED_INVALID) {
3223 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3224 tp->link_config.advertising &=
3225 ~(ADVERTISED_1000baseT_Half |
3226 ADVERTISED_1000baseT_Full);
3227
3228 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3229 tp->link_config.flowctrl);
3230 } else {
3231 /* Asking for a specific link mode. */
3232 if (tp->link_config.speed == SPEED_1000) {
3233 if (tp->link_config.duplex == DUPLEX_FULL)
3234 new_adv = ADVERTISED_1000baseT_Full;
3235 else
3236 new_adv = ADVERTISED_1000baseT_Half;
3237 } else if (tp->link_config.speed == SPEED_100) {
3238 if (tp->link_config.duplex == DUPLEX_FULL)
3239 new_adv = ADVERTISED_100baseT_Full;
3240 else
3241 new_adv = ADVERTISED_100baseT_Half;
3242 } else {
3243 if (tp->link_config.duplex == DUPLEX_FULL)
3244 new_adv = ADVERTISED_10baseT_Full;
3245 else
3246 new_adv = ADVERTISED_10baseT_Half;
52b02d04 3247 }
52b02d04 3248
42b64a45
MC
3249 tg3_phy_autoneg_cfg(tp, new_adv,
3250 tp->link_config.flowctrl);
52b02d04
MC
3251 }
3252
1da177e4
LT
3253 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3254 tp->link_config.speed != SPEED_INVALID) {
3255 u32 bmcr, orig_bmcr;
3256
3257 tp->link_config.active_speed = tp->link_config.speed;
3258 tp->link_config.active_duplex = tp->link_config.duplex;
3259
3260 bmcr = 0;
3261 switch (tp->link_config.speed) {
3262 default:
3263 case SPEED_10:
3264 break;
3265
3266 case SPEED_100:
3267 bmcr |= BMCR_SPEED100;
3268 break;
3269
3270 case SPEED_1000:
221c5637 3271 bmcr |= BMCR_SPEED1000;
1da177e4 3272 break;
855e1111 3273 }
1da177e4
LT
3274
3275 if (tp->link_config.duplex == DUPLEX_FULL)
3276 bmcr |= BMCR_FULLDPLX;
3277
3278 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3279 (bmcr != orig_bmcr)) {
3280 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3281 for (i = 0; i < 1500; i++) {
3282 u32 tmp;
3283
3284 udelay(10);
3285 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3286 tg3_readphy(tp, MII_BMSR, &tmp))
3287 continue;
3288 if (!(tmp & BMSR_LSTATUS)) {
3289 udelay(40);
3290 break;
3291 }
3292 }
3293 tg3_writephy(tp, MII_BMCR, bmcr);
3294 udelay(40);
3295 }
3296 } else {
3297 tg3_writephy(tp, MII_BMCR,
3298 BMCR_ANENABLE | BMCR_ANRESTART);
3299 }
3300}
3301
3302static int tg3_init_5401phy_dsp(struct tg3 *tp)
3303{
3304 int err;
3305
3306 /* Turn off tap power management. */
3307 /* Set Extended packet length bit */
b4bd2929 3308 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
1da177e4 3309
6ee7c0a0
MC
3310 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3311 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3312 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3313 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3314 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
1da177e4
LT
3315
3316 udelay(40);
3317
3318 return err;
3319}
3320
3600d918 3321static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1da177e4 3322{
3600d918
MC
3323 u32 adv_reg, all_mask = 0;
3324
3325 if (mask & ADVERTISED_10baseT_Half)
3326 all_mask |= ADVERTISE_10HALF;
3327 if (mask & ADVERTISED_10baseT_Full)
3328 all_mask |= ADVERTISE_10FULL;
3329 if (mask & ADVERTISED_100baseT_Half)
3330 all_mask |= ADVERTISE_100HALF;
3331 if (mask & ADVERTISED_100baseT_Full)
3332 all_mask |= ADVERTISE_100FULL;
1da177e4
LT
3333
3334 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3335 return 0;
3336
b99d2a57 3337 if ((adv_reg & ADVERTISE_ALL) != all_mask)
1da177e4 3338 return 0;
b99d2a57 3339
f07e9af3 3340 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1da177e4
LT
3341 u32 tg3_ctrl;
3342
3600d918
MC
3343 all_mask = 0;
3344 if (mask & ADVERTISED_1000baseT_Half)
3345 all_mask |= ADVERTISE_1000HALF;
3346 if (mask & ADVERTISED_1000baseT_Full)
3347 all_mask |= ADVERTISE_1000FULL;
3348
221c5637 3349 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
1da177e4
LT
3350 return 0;
3351
b99d2a57
MC
3352 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3353 if (tg3_ctrl != all_mask)
1da177e4
LT
3354 return 0;
3355 }
3356 return 1;
3357}
3358
ef167e27
MC
3359static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3360{
3361 u32 curadv, reqadv;
3362
3363 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3364 return 1;
3365
3366 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3367 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3368
3369 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3370 if (curadv != reqadv)
3371 return 0;
3372
63c3a66f 3373 if (tg3_flag(tp, PAUSE_AUTONEG))
ef167e27
MC
3374 tg3_readphy(tp, MII_LPA, rmtadv);
3375 } else {
3376 /* Reprogram the advertisement register, even if it
3377 * does not affect the current link. If the link
3378 * gets renegotiated in the future, we can save an
3379 * additional renegotiation cycle by advertising
3380 * it correctly in the first place.
3381 */
3382 if (curadv != reqadv) {
3383 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3384 ADVERTISE_PAUSE_ASYM);
3385 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3386 }
3387 }
3388
3389 return 1;
3390}
3391
1da177e4
LT
/* Bring up (or re-verify) the link on a copper PHY.
 *
 * Clears stale MAC status, applies per-chip PHY workarounds, polls
 * the PHY for link/speed/duplex, validates that the resolved mode
 * matches the requested configuration, and programs the MAC port
 * mode, duplex, polarity, and flow control to match.  If no valid
 * link was found (or the PHY is in low-power mode), restarts link
 * negotiation via tg3_phy_copper_begin().
 *
 * @force_reset: nonzero to unconditionally reset the PHY first.
 * Always returns 0 except on PHY DSP init/reset failure.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Clear any latched link-state change indications. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Suspend MI auto-polling while we talk to the PHY directly. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR latches link-down; read twice for current state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		/* No link: (re)run the 5401 DSP init and wait for link. */
		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit may need a reset + re-init. */
			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts (ISTAT is clear-on-read). */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	/* LED mode setup for 5700/5701. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		/* Ensure bit 10 of the misc-test shadow register is
		 * set; if we had to set it, re-run link setup.
		 */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	/* Poll for link-up (double BMSR read clears the latch). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a nonzero aux status, then decode it. */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a stable, sane BMCR value. */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Link is only "good" if autoneg is on and the
			 * advertisement matches what was requested.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
								  &rmt_adv))
					current_link_up = 1;
			}
		} else {
			/* Forced mode: require an exact match. */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	/* Program the MAC port mode to match the resolved link. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	/* 5700 needs link polarity adjusted per speed. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* Notify firmware of a gigabit link on 5700 PCI-X/high-speed. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption: CLKREQ must be off at 10/100. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 oldlnkctl, newlnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &oldlnkctl);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
		else
			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
		if (newlnkctl != oldlnkctl)
			pci_write_config_word(tp->pdev,
					      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
					      newlnkctl);
	}

	/* Propagate any carrier change to the net stack and log it. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
3670
/* Software state for the 1000BASE-X fiber autonegotiation state
 * machine (tg3_fiber_aneg_smachine).  An instance lives on the stack
 * for the duration of fiber_autoneg().
 */
struct tg3_fiber_aneginfo {
	int state;			/* current ANEG_STATE_* */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;			/* MR_* status/control bits below */
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040	/* link partner ability bits */
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Tick counters driving the settle-time checks (in state-machine
	 * iterations, not wall time).
	 */
	unsigned long link_time, cur_time;

	/* Last config word seen and how many consecutive times; used to
	 * debounce the partner's ability advertisement.
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;	/* boolean flags */

	u32 txconfig, rxconfig;		/* tx/rx autoneg config words */
#define ANEG_CFG_NP		0x00000080	/* next page */
#define ANEG_CFG_ACK		0x00000040	/* acknowledge */
#define ANEG_CFG_RF2		0x00000020	/* remote fault bits */
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001	/* pause bits */
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000	/* half duplex */
#define ANEG_CFG_FD		0x00002000	/* full duplex */
#define ANEG_CFG_INVAL		0x00001f06	/* reserved: must be zero */

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
3734
/* Advance the software 1000BASE-X autonegotiation state machine by
 * one tick.  Samples the received config word from the MAC, debounces
 * the partner's ability advertisement, then dispatches on ap->state.
 *
 * Returns ANEG_OK to keep stepping, ANEG_TIMER_ENAB when the caller
 * should keep ticking on a timer, ANEG_DONE on completion, or
 * ANEG_FAILED on protocol error.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First tick: reset all tracking state. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		/* Debounce: ability_match only after the same config
		 * word is seen more than once in a row.
		 */
		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word received: partner is idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Start sending a zero config word. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Wait out the settle time before advertising. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex plus configured pause bits. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Acknowledge the partner's advertisement. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Partner's config (sans ACK) must be unchanged. */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		/* Reserved bits set in the received word are fatal. */
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Latch the partner's advertised abilities into flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is unsupported. */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
3986
5be73b47 3987static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
1da177e4
LT
3988{
3989 int res = 0;
3990 struct tg3_fiber_aneginfo aninfo;
3991 int status = ANEG_FAILED;
3992 unsigned int tick;
3993 u32 tmp;
3994
3995 tw32_f(MAC_TX_AUTO_NEG, 0);
3996
3997 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3998 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3999 udelay(40);
4000
4001 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4002 udelay(40);
4003
4004 memset(&aninfo, 0, sizeof(aninfo));
4005 aninfo.flags |= MR_AN_ENABLE;
4006 aninfo.state = ANEG_STATE_UNKNOWN;
4007 aninfo.cur_time = 0;
4008 tick = 0;
4009 while (++tick < 195000) {
4010 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4011 if (status == ANEG_DONE || status == ANEG_FAILED)
4012 break;
4013
4014 udelay(1);
4015 }
4016
4017 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4018 tw32_f(MAC_MODE, tp->mac_mode);
4019 udelay(40);
4020
5be73b47
MC
4021 *txflags = aninfo.txconfig;
4022 *rxflags = aninfo.flags;
1da177e4
LT
4023
4024 if (status == ANEG_DONE &&
4025 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4026 MR_LP_ADV_FULL_DUPLEX)))
4027 res = 1;
4028
4029 return res;
4030}
4031
4032static void tg3_init_bcm8002(struct tg3 *tp)
4033{
4034 u32 mac_status = tr32(MAC_STATUS);
4035 int i;
4036
4037 /* Reset when initting first time or we have a link. */
63c3a66f 4038 if (tg3_flag(tp, INIT_COMPLETE) &&
1da177e4
LT
4039 !(mac_status & MAC_STATUS_PCS_SYNCED))
4040 return;
4041
4042 /* Set PLL lock range. */
4043 tg3_writephy(tp, 0x16, 0x8007);
4044
4045 /* SW reset */
4046 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4047
4048 /* Wait for reset to complete. */
4049 /* XXX schedule_timeout() ... */
4050 for (i = 0; i < 500; i++)
4051 udelay(10);
4052
4053 /* Config mode; select PMA/Ch 1 regs. */
4054 tg3_writephy(tp, 0x10, 0x8411);
4055
4056 /* Enable auto-lock and comdet, select txclk for tx. */
4057 tg3_writephy(tp, 0x11, 0x0a10);
4058
4059 tg3_writephy(tp, 0x18, 0x00a0);
4060 tg3_writephy(tp, 0x16, 0x41ff);
4061
4062 /* Assert and deassert POR. */
4063 tg3_writephy(tp, 0x13, 0x0400);
4064 udelay(40);
4065 tg3_writephy(tp, 0x13, 0x0000);
4066
4067 tg3_writephy(tp, 0x11, 0x0a50);
4068 udelay(40);
4069 tg3_writephy(tp, 0x11, 0x0a10);
4070
4071 /* Wait for signal to stabilize */
4072 /* XXX schedule_timeout() ... */
4073 for (i = 0; i < 15000; i++)
4074 udelay(10);
4075
4076 /* Deselect the channel register so we can read the PHYID
4077 * later.
4078 */
4079 tg3_writephy(tp, 0x10, 0x8011);
4080}
4081
4082static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4083{
82cd3d11 4084 u16 flowctrl;
1da177e4
LT
4085 u32 sg_dig_ctrl, sg_dig_status;
4086 u32 serdes_cfg, expected_sg_dig_ctrl;
4087 int workaround, port_a;
4088 int current_link_up;
4089
4090 serdes_cfg = 0;
4091 expected_sg_dig_ctrl = 0;
4092 workaround = 0;
4093 port_a = 1;
4094 current_link_up = 0;
4095
4096 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4097 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4098 workaround = 1;
4099 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4100 port_a = 0;
4101
4102 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4103 /* preserve bits 20-23 for voltage regulator */
4104 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4105 }
4106
4107 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4108
4109 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
c98f6e3b 4110 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
1da177e4
LT
4111 if (workaround) {
4112 u32 val = serdes_cfg;
4113
4114 if (port_a)
4115 val |= 0xc010000;
4116 else
4117 val |= 0x4010000;
4118 tw32_f(MAC_SERDES_CFG, val);
4119 }
c98f6e3b
MC
4120
4121 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
1da177e4
LT
4122 }
4123 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4124 tg3_setup_flow_control(tp, 0, 0);
4125 current_link_up = 1;
4126 }
4127 goto out;
4128 }
4129
4130 /* Want auto-negotiation. */
c98f6e3b 4131 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
1da177e4 4132
82cd3d11
MC
4133 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4134 if (flowctrl & ADVERTISE_1000XPAUSE)
4135 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4136 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4137 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
1da177e4
LT
4138
4139 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
f07e9af3 4140 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3d3ebe74
MC
4141 tp->serdes_counter &&
4142 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4143 MAC_STATUS_RCVD_CFG)) ==
4144 MAC_STATUS_PCS_SYNCED)) {
4145 tp->serdes_counter--;
4146 current_link_up = 1;
4147 goto out;
4148 }
4149restart_autoneg:
1da177e4
LT
4150 if (workaround)
4151 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
c98f6e3b 4152 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
1da177e4
LT
4153 udelay(5);
4154 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4155
3d3ebe74 4156 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
f07e9af3 4157 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
1da177e4
LT
4158 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4159 MAC_STATUS_SIGNAL_DET)) {
3d3ebe74 4160 sg_dig_status = tr32(SG_DIG_STATUS);
1da177e4
LT
4161 mac_status = tr32(MAC_STATUS);
4162
c98f6e3b 4163 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
1da177e4 4164 (mac_status & MAC_STATUS_PCS_SYNCED)) {
82cd3d11
MC
4165 u32 local_adv = 0, remote_adv = 0;
4166
4167 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4168 local_adv |= ADVERTISE_1000XPAUSE;
4169 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4170 local_adv |= ADVERTISE_1000XPSE_ASYM;
1da177e4 4171
c98f6e3b 4172 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
82cd3d11 4173 remote_adv |= LPA_1000XPAUSE;
c98f6e3b 4174 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
82cd3d11 4175 remote_adv |= LPA_1000XPAUSE_ASYM;
1da177e4
LT
4176
4177 tg3_setup_flow_control(tp, local_adv, remote_adv);
4178 current_link_up = 1;
3d3ebe74 4179 tp->serdes_counter = 0;
f07e9af3 4180 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
c98f6e3b 4181 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3d3ebe74
MC
4182 if (tp->serdes_counter)
4183 tp->serdes_counter--;
1da177e4
LT
4184 else {
4185 if (workaround) {
4186 u32 val = serdes_cfg;
4187
4188 if (port_a)
4189 val |= 0xc010000;
4190 else
4191 val |= 0x4010000;
4192
4193 tw32_f(MAC_SERDES_CFG, val);
4194 }
4195
c98f6e3b 4196 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
1da177e4
LT
4197 udelay(40);
4198
4199 /* Link parallel detection - link is up */
4200 /* only if we have PCS_SYNC and not */
4201 /* receiving config code words */
4202 mac_status = tr32(MAC_STATUS);
4203 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4204 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4205 tg3_setup_flow_control(tp, 0, 0);
4206 current_link_up = 1;
f07e9af3
MC
4207 tp->phy_flags |=
4208 TG3_PHYFLG_PARALLEL_DETECT;
3d3ebe74
MC
4209 tp->serdes_counter =
4210 SERDES_PARALLEL_DET_TIMEOUT;
4211 } else
4212 goto restart_autoneg;
1da177e4
LT
4213 }
4214 }
3d3ebe74
MC
4215 } else {
4216 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
f07e9af3 4217 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
1da177e4
LT
4218 }
4219
4220out:
4221 return current_link_up;
4222}
4223
4224static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4225{
4226 int current_link_up = 0;
4227
5cf64b8a 4228 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
1da177e4 4229 goto out;
1da177e4
LT
4230
4231 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5be73b47 4232 u32 txflags, rxflags;
1da177e4 4233 int i;
6aa20a22 4234
5be73b47
MC
4235 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4236 u32 local_adv = 0, remote_adv = 0;
1da177e4 4237
5be73b47
MC
4238 if (txflags & ANEG_CFG_PS1)
4239 local_adv |= ADVERTISE_1000XPAUSE;
4240 if (txflags & ANEG_CFG_PS2)
4241 local_adv |= ADVERTISE_1000XPSE_ASYM;
4242
4243 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4244 remote_adv |= LPA_1000XPAUSE;
4245 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4246 remote_adv |= LPA_1000XPAUSE_ASYM;
1da177e4
LT
4247
4248 tg3_setup_flow_control(tp, local_adv, remote_adv);
4249
1da177e4
LT
4250 current_link_up = 1;
4251 }
4252 for (i = 0; i < 30; i++) {
4253 udelay(20);
4254 tw32_f(MAC_STATUS,
4255 (MAC_STATUS_SYNC_CHANGED |
4256 MAC_STATUS_CFG_CHANGED));
4257 udelay(40);
4258 if ((tr32(MAC_STATUS) &
4259 (MAC_STATUS_SYNC_CHANGED |
4260 MAC_STATUS_CFG_CHANGED)) == 0)
4261 break;
4262 }
4263
4264 mac_status = tr32(MAC_STATUS);
4265 if (current_link_up == 0 &&
4266 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4267 !(mac_status & MAC_STATUS_RCVD_CFG))
4268 current_link_up = 1;
4269 } else {
5be73b47
MC
4270 tg3_setup_flow_control(tp, 0, 0);
4271
1da177e4
LT
4272 /* Forcing 1000FD link up. */
4273 current_link_up = 1;
1da177e4
LT
4274
4275 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4276 udelay(40);
e8f3f6ca
MC
4277
4278 tw32_f(MAC_MODE, tp->mac_mode);
4279 udelay(40);
1da177e4
LT
4280 }
4281
4282out:
4283 return current_link_up;
4284}
4285
4286static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4287{
4288 u32 orig_pause_cfg;
4289 u16 orig_active_speed;
4290 u8 orig_active_duplex;
4291 u32 mac_status;
4292 int current_link_up;
4293 int i;
4294
8d018621 4295 orig_pause_cfg = tp->link_config.active_flowctrl;
1da177e4
LT
4296 orig_active_speed = tp->link_config.active_speed;
4297 orig_active_duplex = tp->link_config.active_duplex;
4298
63c3a66f 4299 if (!tg3_flag(tp, HW_AUTONEG) &&
1da177e4 4300 netif_carrier_ok(tp->dev) &&
63c3a66f 4301 tg3_flag(tp, INIT_COMPLETE)) {
1da177e4
LT
4302 mac_status = tr32(MAC_STATUS);
4303 mac_status &= (MAC_STATUS_PCS_SYNCED |
4304 MAC_STATUS_SIGNAL_DET |
4305 MAC_STATUS_CFG_CHANGED |
4306 MAC_STATUS_RCVD_CFG);
4307 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4308 MAC_STATUS_SIGNAL_DET)) {
4309 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4310 MAC_STATUS_CFG_CHANGED));
4311 return 0;
4312 }
4313 }
4314
4315 tw32_f(MAC_TX_AUTO_NEG, 0);
4316
4317 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4318 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4319 tw32_f(MAC_MODE, tp->mac_mode);
4320 udelay(40);
4321
79eb6904 4322 if (tp->phy_id == TG3_PHY_ID_BCM8002)
1da177e4
LT
4323 tg3_init_bcm8002(tp);
4324
4325 /* Enable link change event even when serdes polling. */
4326 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4327 udelay(40);
4328
4329 current_link_up = 0;
4330 mac_status = tr32(MAC_STATUS);
4331
63c3a66f 4332 if (tg3_flag(tp, HW_AUTONEG))
1da177e4
LT
4333 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4334 else
4335 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4336
898a56f8 4337 tp->napi[0].hw_status->status =
1da177e4 4338 (SD_STATUS_UPDATED |
898a56f8 4339 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
1da177e4
LT
4340
4341 for (i = 0; i < 100; i++) {
4342 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4343 MAC_STATUS_CFG_CHANGED));
4344 udelay(5);
4345 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3d3ebe74
MC
4346 MAC_STATUS_CFG_CHANGED |
4347 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
1da177e4
LT
4348 break;
4349 }
4350
4351 mac_status = tr32(MAC_STATUS);
4352 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4353 current_link_up = 0;
3d3ebe74
MC
4354 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4355 tp->serdes_counter == 0) {
1da177e4
LT
4356 tw32_f(MAC_MODE, (tp->mac_mode |
4357 MAC_MODE_SEND_CONFIGS));
4358 udelay(1);
4359 tw32_f(MAC_MODE, tp->mac_mode);
4360 }
4361 }
4362
4363 if (current_link_up == 1) {
4364 tp->link_config.active_speed = SPEED_1000;
4365 tp->link_config.active_duplex = DUPLEX_FULL;
4366 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4367 LED_CTRL_LNKLED_OVERRIDE |
4368 LED_CTRL_1000MBPS_ON));
4369 } else {
4370 tp->link_config.active_speed = SPEED_INVALID;
4371 tp->link_config.active_duplex = DUPLEX_INVALID;
4372 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4373 LED_CTRL_LNKLED_OVERRIDE |
4374 LED_CTRL_TRAFFIC_OVERRIDE));
4375 }
4376
4377 if (current_link_up != netif_carrier_ok(tp->dev)) {
4378 if (current_link_up)
4379 netif_carrier_on(tp->dev);
4380 else
4381 netif_carrier_off(tp->dev);
4382 tg3_link_report(tp);
4383 } else {
8d018621 4384 u32 now_pause_cfg = tp->link_config.active_flowctrl;
1da177e4
LT
4385 if (orig_pause_cfg != now_pause_cfg ||
4386 orig_active_speed != tp->link_config.active_speed ||
4387 orig_active_duplex != tp->link_config.active_duplex)
4388 tg3_link_report(tp);
4389 }
4390
4391 return 0;
4392}
4393
747e8f8b
MC
4394static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4395{
4396 int current_link_up, err = 0;
4397 u32 bmsr, bmcr;
4398 u16 current_speed;
4399 u8 current_duplex;
ef167e27 4400 u32 local_adv, remote_adv;
747e8f8b
MC
4401
4402 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4403 tw32_f(MAC_MODE, tp->mac_mode);
4404 udelay(40);
4405
4406 tw32(MAC_EVENT, 0);
4407
4408 tw32_f(MAC_STATUS,
4409 (MAC_STATUS_SYNC_CHANGED |
4410 MAC_STATUS_CFG_CHANGED |
4411 MAC_STATUS_MI_COMPLETION |
4412 MAC_STATUS_LNKSTATE_CHANGED));
4413 udelay(40);
4414
4415 if (force_reset)
4416 tg3_phy_reset(tp);
4417
4418 current_link_up = 0;
4419 current_speed = SPEED_INVALID;
4420 current_duplex = DUPLEX_INVALID;
4421
4422 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4423 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
4424 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4425 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4426 bmsr |= BMSR_LSTATUS;
4427 else
4428 bmsr &= ~BMSR_LSTATUS;
4429 }
747e8f8b
MC
4430
4431 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4432
4433 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
f07e9af3 4434 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
747e8f8b
MC
4435 /* do nothing, just check for link up at the end */
4436 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4437 u32 adv, new_adv;
4438
4439 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4440 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4441 ADVERTISE_1000XPAUSE |
4442 ADVERTISE_1000XPSE_ASYM |
4443 ADVERTISE_SLCT);
4444
ba4d07a8 4445 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
747e8f8b
MC
4446
4447 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4448 new_adv |= ADVERTISE_1000XHALF;
4449 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4450 new_adv |= ADVERTISE_1000XFULL;
4451
4452 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4453 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4454 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4455 tg3_writephy(tp, MII_BMCR, bmcr);
4456
4457 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3d3ebe74 4458 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
f07e9af3 4459 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
747e8f8b
MC
4460
4461 return err;
4462 }
4463 } else {
4464 u32 new_bmcr;
4465
4466 bmcr &= ~BMCR_SPEED1000;
4467 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4468
4469 if (tp->link_config.duplex == DUPLEX_FULL)
4470 new_bmcr |= BMCR_FULLDPLX;
4471
4472 if (new_bmcr != bmcr) {
4473 /* BMCR_SPEED1000 is a reserved bit that needs
4474 * to be set on write.
4475 */
4476 new_bmcr |= BMCR_SPEED1000;
4477
4478 /* Force a linkdown */
4479 if (netif_carrier_ok(tp->dev)) {
4480 u32 adv;
4481
4482 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4483 adv &= ~(ADVERTISE_1000XFULL |
4484 ADVERTISE_1000XHALF |
4485 ADVERTISE_SLCT);
4486 tg3_writephy(tp, MII_ADVERTISE, adv);
4487 tg3_writephy(tp, MII_BMCR, bmcr |
4488 BMCR_ANRESTART |
4489 BMCR_ANENABLE);
4490 udelay(10);
4491 netif_carrier_off(tp->dev);
4492 }
4493 tg3_writephy(tp, MII_BMCR, new_bmcr);
4494 bmcr = new_bmcr;
4495 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4496 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
4497 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4498 ASIC_REV_5714) {
4499 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4500 bmsr |= BMSR_LSTATUS;
4501 else
4502 bmsr &= ~BMSR_LSTATUS;
4503 }
f07e9af3 4504 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
747e8f8b
MC
4505 }
4506 }
4507
4508 if (bmsr & BMSR_LSTATUS) {
4509 current_speed = SPEED_1000;
4510 current_link_up = 1;
4511 if (bmcr & BMCR_FULLDPLX)
4512 current_duplex = DUPLEX_FULL;
4513 else
4514 current_duplex = DUPLEX_HALF;
4515
ef167e27
MC
4516 local_adv = 0;
4517 remote_adv = 0;
4518
747e8f8b 4519 if (bmcr & BMCR_ANENABLE) {
ef167e27 4520 u32 common;
747e8f8b
MC
4521
4522 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4523 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4524 common = local_adv & remote_adv;
4525 if (common & (ADVERTISE_1000XHALF |
4526 ADVERTISE_1000XFULL)) {
4527 if (common & ADVERTISE_1000XFULL)
4528 current_duplex = DUPLEX_FULL;
4529 else
4530 current_duplex = DUPLEX_HALF;
63c3a66f 4531 } else if (!tg3_flag(tp, 5780_CLASS)) {
57d8b880 4532 /* Link is up via parallel detect */
859a5887 4533 } else {
747e8f8b 4534 current_link_up = 0;
859a5887 4535 }
747e8f8b
MC
4536 }
4537 }
4538
ef167e27
MC
4539 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4540 tg3_setup_flow_control(tp, local_adv, remote_adv);
4541
747e8f8b
MC
4542 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4543 if (tp->link_config.active_duplex == DUPLEX_HALF)
4544 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4545
4546 tw32_f(MAC_MODE, tp->mac_mode);
4547 udelay(40);
4548
4549 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4550
4551 tp->link_config.active_speed = current_speed;
4552 tp->link_config.active_duplex = current_duplex;
4553
4554 if (current_link_up != netif_carrier_ok(tp->dev)) {
4555 if (current_link_up)
4556 netif_carrier_on(tp->dev);
4557 else {
4558 netif_carrier_off(tp->dev);
f07e9af3 4559 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
747e8f8b
MC
4560 }
4561 tg3_link_report(tp);
4562 }
4563 return err;
4564}
4565
4566static void tg3_serdes_parallel_detect(struct tg3 *tp)
4567{
3d3ebe74 4568 if (tp->serdes_counter) {
747e8f8b 4569 /* Give autoneg time to complete. */
3d3ebe74 4570 tp->serdes_counter--;
747e8f8b
MC
4571 return;
4572 }
c6cdf436 4573
747e8f8b
MC
4574 if (!netif_carrier_ok(tp->dev) &&
4575 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4576 u32 bmcr;
4577
4578 tg3_readphy(tp, MII_BMCR, &bmcr);
4579 if (bmcr & BMCR_ANENABLE) {
4580 u32 phy1, phy2;
4581
4582 /* Select shadow register 0x1f */
f08aa1a8
MC
4583 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4584 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
747e8f8b
MC
4585
4586 /* Select expansion interrupt status register */
f08aa1a8
MC
4587 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4588 MII_TG3_DSP_EXP1_INT_STAT);
4589 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4590 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
747e8f8b
MC
4591
4592 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4593 /* We have signal detect and not receiving
4594 * config code words, link is up by parallel
4595 * detection.
4596 */
4597
4598 bmcr &= ~BMCR_ANENABLE;
4599 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4600 tg3_writephy(tp, MII_BMCR, bmcr);
f07e9af3 4601 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
747e8f8b
MC
4602 }
4603 }
859a5887
MC
4604 } else if (netif_carrier_ok(tp->dev) &&
4605 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
f07e9af3 4606 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
747e8f8b
MC
4607 u32 phy2;
4608
4609 /* Select expansion interrupt status register */
f08aa1a8
MC
4610 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4611 MII_TG3_DSP_EXP1_INT_STAT);
4612 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
747e8f8b
MC
4613 if (phy2 & 0x20) {
4614 u32 bmcr;
4615
4616 /* Config code words received, turn on autoneg. */
4617 tg3_readphy(tp, MII_BMCR, &bmcr);
4618 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4619
f07e9af3 4620 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
747e8f8b
MC
4621
4622 }
4623 }
4624}
4625
1da177e4
LT
4626static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4627{
f2096f94 4628 u32 val;
1da177e4
LT
4629 int err;
4630
f07e9af3 4631 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
1da177e4 4632 err = tg3_setup_fiber_phy(tp, force_reset);
f07e9af3 4633 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
747e8f8b 4634 err = tg3_setup_fiber_mii_phy(tp, force_reset);
859a5887 4635 else
1da177e4 4636 err = tg3_setup_copper_phy(tp, force_reset);
1da177e4 4637
bcb37f6c 4638 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
f2096f94 4639 u32 scale;
aa6c91fe
MC
4640
4641 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4642 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4643 scale = 65;
4644 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4645 scale = 6;
4646 else
4647 scale = 12;
4648
4649 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4650 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4651 tw32(GRC_MISC_CFG, val);
4652 }
4653
f2096f94
MC
4654 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4655 (6 << TX_LENGTHS_IPG_SHIFT);
4656 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4657 val |= tr32(MAC_TX_LENGTHS) &
4658 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4659 TX_LENGTHS_CNT_DWN_VAL_MSK);
4660
1da177e4
LT
4661 if (tp->link_config.active_speed == SPEED_1000 &&
4662 tp->link_config.active_duplex == DUPLEX_HALF)
f2096f94
MC
4663 tw32(MAC_TX_LENGTHS, val |
4664 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
1da177e4 4665 else
f2096f94
MC
4666 tw32(MAC_TX_LENGTHS, val |
4667 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
1da177e4 4668
63c3a66f 4669 if (!tg3_flag(tp, 5705_PLUS)) {
1da177e4
LT
4670 if (netif_carrier_ok(tp->dev)) {
4671 tw32(HOSTCC_STAT_COAL_TICKS,
15f9850d 4672 tp->coal.stats_block_coalesce_usecs);
1da177e4
LT
4673 } else {
4674 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4675 }
4676 }
4677
63c3a66f 4678 if (tg3_flag(tp, ASPM_WORKAROUND)) {
f2096f94 4679 val = tr32(PCIE_PWR_MGMT_THRESH);
8ed5d97e
MC
4680 if (!netif_carrier_ok(tp->dev))
4681 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4682 tp->pwrmgmt_thresh;
4683 else
4684 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4685 tw32(PCIE_PWR_MGMT_THRESH, val);
4686 }
4687
1da177e4
LT
4688 return err;
4689}
4690
66cfd1bd
MC
4691static inline int tg3_irq_sync(struct tg3 *tp)
4692{
4693 return tp->irq_sync;
4694}
4695
97bd8e49
MC
4696static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4697{
4698 int i;
4699
4700 dst = (u32 *)((u8 *)dst + off);
4701 for (i = 0; i < len; i += sizeof(u32))
4702 *dst++ = tr32(off + i);
4703}
4704
4705static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4706{
4707 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4708 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4709 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4710 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4711 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4712 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4713 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4714 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4715 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4716 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4717 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4718 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4719 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4720 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4721 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4722 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4723 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4724 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4725 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4726
63c3a66f 4727 if (tg3_flag(tp, SUPPORT_MSIX))
97bd8e49
MC
4728 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4729
4730 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4731 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4732 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4733 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4734 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4735 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4736 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4737 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4738
63c3a66f 4739 if (!tg3_flag(tp, 5705_PLUS)) {
97bd8e49
MC
4740 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4741 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4742 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4743 }
4744
4745 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4746 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4747 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4748 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4749 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4750
63c3a66f 4751 if (tg3_flag(tp, NVRAM))
97bd8e49
MC
4752 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4753}
4754
4755static void tg3_dump_state(struct tg3 *tp)
4756{
4757 int i;
4758 u32 *regs;
4759
4760 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4761 if (!regs) {
4762 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4763 return;
4764 }
4765
63c3a66f 4766 if (tg3_flag(tp, PCI_EXPRESS)) {
97bd8e49
MC
4767 /* Read up to but not including private PCI registers */
4768 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4769 regs[i / sizeof(u32)] = tr32(i);
4770 } else
4771 tg3_dump_legacy_regs(tp, regs);
4772
4773 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4774 if (!regs[i + 0] && !regs[i + 1] &&
4775 !regs[i + 2] && !regs[i + 3])
4776 continue;
4777
4778 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4779 i * 4,
4780 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4781 }
4782
4783 kfree(regs);
4784
4785 for (i = 0; i < tp->irq_cnt; i++) {
4786 struct tg3_napi *tnapi = &tp->napi[i];
4787
4788 /* SW status block */
4789 netdev_err(tp->dev,
4790 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4791 i,
4792 tnapi->hw_status->status,
4793 tnapi->hw_status->status_tag,
4794 tnapi->hw_status->rx_jumbo_consumer,
4795 tnapi->hw_status->rx_consumer,
4796 tnapi->hw_status->rx_mini_consumer,
4797 tnapi->hw_status->idx[0].rx_producer,
4798 tnapi->hw_status->idx[0].tx_consumer);
4799
4800 netdev_err(tp->dev,
4801 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4802 i,
4803 tnapi->last_tag, tnapi->last_irq_tag,
4804 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4805 tnapi->rx_rcb_ptr,
4806 tnapi->prodring.rx_std_prod_idx,
4807 tnapi->prodring.rx_std_cons_idx,
4808 tnapi->prodring.rx_jmb_prod_idx,
4809 tnapi->prodring.rx_jmb_cons_idx);
4810 }
4811}
4812
df3e6548
MC
4813/* This is called whenever we suspect that the system chipset is re-
4814 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4815 * is bogus tx completions. We try to recover by setting the
4816 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4817 * in the workqueue.
4818 */
4819static void tg3_tx_recover(struct tg3 *tp)
4820{
63c3a66f 4821 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
df3e6548
MC
4822 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4823
5129c3a3
MC
4824 netdev_warn(tp->dev,
4825 "The system may be re-ordering memory-mapped I/O "
4826 "cycles to the network device, attempting to recover. "
4827 "Please report the problem to the driver maintainer "
4828 "and include system chipset information.\n");
df3e6548
MC
4829
4830 spin_lock(&tp->lock);
63c3a66f 4831 tg3_flag_set(tp, TX_RECOVERY_PENDING);
df3e6548
MC
4832 spin_unlock(&tp->lock);
4833}
4834
f3f3f27e 4835static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
1b2a7205 4836{
f65aac16
MC
4837 /* Tell compiler to fetch tx indices from memory. */
4838 barrier();
f3f3f27e
MC
4839 return tnapi->tx_pending -
4840 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
1b2a7205
MC
4841}
4842
1da177e4
LT
4843/* Tigon3 never reports partial packet sends. So we do not
4844 * need special logic to handle SKBs that have not had all
4845 * of their frags sent yet, like SunGEM does.
4846 */
17375d25 4847static void tg3_tx(struct tg3_napi *tnapi)
1da177e4 4848{
17375d25 4849 struct tg3 *tp = tnapi->tp;
898a56f8 4850 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
f3f3f27e 4851 u32 sw_idx = tnapi->tx_cons;
fe5f5787
MC
4852 struct netdev_queue *txq;
4853 int index = tnapi - tp->napi;
4854
63c3a66f 4855 if (tg3_flag(tp, ENABLE_TSS))
fe5f5787
MC
4856 index--;
4857
4858 txq = netdev_get_tx_queue(tp->dev, index);
1da177e4
LT
4859
4860 while (sw_idx != hw_idx) {
df8944cf 4861 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
1da177e4 4862 struct sk_buff *skb = ri->skb;
df3e6548
MC
4863 int i, tx_bug = 0;
4864
4865 if (unlikely(skb == NULL)) {
4866 tg3_tx_recover(tp);
4867 return;
4868 }
1da177e4 4869
f4188d8a 4870 pci_unmap_single(tp->pdev,
4e5e4f0d 4871 dma_unmap_addr(ri, mapping),
f4188d8a
AD
4872 skb_headlen(skb),
4873 PCI_DMA_TODEVICE);
1da177e4
LT
4874
4875 ri->skb = NULL;
4876
e01ee14d
MC
4877 while (ri->fragmented) {
4878 ri->fragmented = false;
4879 sw_idx = NEXT_TX(sw_idx);
4880 ri = &tnapi->tx_buffers[sw_idx];
4881 }
4882
1da177e4
LT
4883 sw_idx = NEXT_TX(sw_idx);
4884
4885 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
f3f3f27e 4886 ri = &tnapi->tx_buffers[sw_idx];
df3e6548
MC
4887 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4888 tx_bug = 1;
f4188d8a
AD
4889
4890 pci_unmap_page(tp->pdev,
4e5e4f0d 4891 dma_unmap_addr(ri, mapping),
f4188d8a
AD
4892 skb_shinfo(skb)->frags[i].size,
4893 PCI_DMA_TODEVICE);
e01ee14d
MC
4894
4895 while (ri->fragmented) {
4896 ri->fragmented = false;
4897 sw_idx = NEXT_TX(sw_idx);
4898 ri = &tnapi->tx_buffers[sw_idx];
4899 }
4900
1da177e4
LT
4901 sw_idx = NEXT_TX(sw_idx);
4902 }
4903
f47c11ee 4904 dev_kfree_skb(skb);
df3e6548
MC
4905
4906 if (unlikely(tx_bug)) {
4907 tg3_tx_recover(tp);
4908 return;
4909 }
1da177e4
LT
4910 }
4911
f3f3f27e 4912 tnapi->tx_cons = sw_idx;
1da177e4 4913
1b2a7205
MC
4914 /* Need to make the tx_cons update visible to tg3_start_xmit()
4915 * before checking for netif_queue_stopped(). Without the
4916 * memory barrier, there is a small possibility that tg3_start_xmit()
4917 * will miss it and cause the queue to be stopped forever.
4918 */
4919 smp_mb();
4920
fe5f5787 4921 if (unlikely(netif_tx_queue_stopped(txq) &&
f3f3f27e 4922 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
fe5f5787
MC
4923 __netif_tx_lock(txq, smp_processor_id());
4924 if (netif_tx_queue_stopped(txq) &&
f3f3f27e 4925 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
fe5f5787
MC
4926 netif_tx_wake_queue(txq);
4927 __netif_tx_unlock(txq);
51b91468 4928 }
1da177e4
LT
4929}
4930
2b2cdb65
MC
4931static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4932{
4933 if (!ri->skb)
4934 return;
4935
4e5e4f0d 4936 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
2b2cdb65
MC
4937 map_sz, PCI_DMA_FROMDEVICE);
4938 dev_kfree_skb_any(ri->skb);
4939 ri->skb = NULL;
4940}
4941
1da177e4
LT
4942/* Returns size of skb allocated or < 0 on error.
4943 *
4944 * We only need to fill in the address because the other members
4945 * of the RX descriptor are invariant, see tg3_init_rings.
4946 *
4947 * Note the purposeful assymetry of cpu vs. chip accesses. For
4948 * posting buffers we only dirty the first cache line of the RX
4949 * descriptor (containing the address). Whereas for the RX status
4950 * buffers the cpu only reads the last cacheline of the RX descriptor
4951 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4952 */
86b21e59 4953static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
a3896167 4954 u32 opaque_key, u32 dest_idx_unmasked)
1da177e4
LT
4955{
4956 struct tg3_rx_buffer_desc *desc;
f94e290e 4957 struct ring_info *map;
1da177e4
LT
4958 struct sk_buff *skb;
4959 dma_addr_t mapping;
4960 int skb_size, dest_idx;
4961
1da177e4
LT
4962 switch (opaque_key) {
4963 case RXD_OPAQUE_RING_STD:
2c49a44d 4964 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
21f581a5
MC
4965 desc = &tpr->rx_std[dest_idx];
4966 map = &tpr->rx_std_buffers[dest_idx];
287be12e 4967 skb_size = tp->rx_pkt_map_sz;
1da177e4
LT
4968 break;
4969
4970 case RXD_OPAQUE_RING_JUMBO:
2c49a44d 4971 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
79ed5ac7 4972 desc = &tpr->rx_jmb[dest_idx].std;
21f581a5 4973 map = &tpr->rx_jmb_buffers[dest_idx];
287be12e 4974 skb_size = TG3_RX_JMB_MAP_SZ;
1da177e4
LT
4975 break;
4976
4977 default:
4978 return -EINVAL;
855e1111 4979 }
1da177e4
LT
4980
4981 /* Do not overwrite any of the map or rp information
4982 * until we are sure we can commit to a new buffer.
4983 *
4984 * Callers depend upon this behavior and assume that
4985 * we leave everything unchanged if we fail.
4986 */
287be12e 4987 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
1da177e4
LT
4988 if (skb == NULL)
4989 return -ENOMEM;
4990
1da177e4
LT
4991 skb_reserve(skb, tp->rx_offset);
4992
287be12e 4993 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
1da177e4 4994 PCI_DMA_FROMDEVICE);
a21771dd
MC
4995 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4996 dev_kfree_skb(skb);
4997 return -EIO;
4998 }
1da177e4
LT
4999
5000 map->skb = skb;
4e5e4f0d 5001 dma_unmap_addr_set(map, mapping, mapping);
1da177e4 5002
1da177e4
LT
5003 desc->addr_hi = ((u64)mapping >> 32);
5004 desc->addr_lo = ((u64)mapping & 0xffffffff);
5005
5006 return skb_size;
5007}
5008
5009/* We only need to move over in the address because the other
5010 * members of the RX descriptor are invariant. See notes above
5011 * tg3_alloc_rx_skb for full details.
5012 */
a3896167
MC
5013static void tg3_recycle_rx(struct tg3_napi *tnapi,
5014 struct tg3_rx_prodring_set *dpr,
5015 u32 opaque_key, int src_idx,
5016 u32 dest_idx_unmasked)
1da177e4 5017{
17375d25 5018 struct tg3 *tp = tnapi->tp;
1da177e4
LT
5019 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5020 struct ring_info *src_map, *dest_map;
8fea32b9 5021 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
c6cdf436 5022 int dest_idx;
1da177e4
LT
5023
5024 switch (opaque_key) {
5025 case RXD_OPAQUE_RING_STD:
2c49a44d 5026 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
a3896167
MC
5027 dest_desc = &dpr->rx_std[dest_idx];
5028 dest_map = &dpr->rx_std_buffers[dest_idx];
5029 src_desc = &spr->rx_std[src_idx];
5030 src_map = &spr->rx_std_buffers[src_idx];
1da177e4
LT
5031 break;
5032
5033 case RXD_OPAQUE_RING_JUMBO:
2c49a44d 5034 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
a3896167
MC
5035 dest_desc = &dpr->rx_jmb[dest_idx].std;
5036 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5037 src_desc = &spr->rx_jmb[src_idx].std;
5038 src_map = &spr->rx_jmb_buffers[src_idx];
1da177e4
LT
5039 break;
5040
5041 default:
5042 return;
855e1111 5043 }
1da177e4
LT
5044
5045 dest_map->skb = src_map->skb;
4e5e4f0d
FT
5046 dma_unmap_addr_set(dest_map, mapping,
5047 dma_unmap_addr(src_map, mapping));
1da177e4
LT
5048 dest_desc->addr_hi = src_desc->addr_hi;
5049 dest_desc->addr_lo = src_desc->addr_lo;
e92967bf
MC
5050
5051 /* Ensure that the update to the skb happens after the physical
5052 * addresses have been transferred to the new BD location.
5053 */
5054 smp_wmb();
5055
1da177e4
LT
5056 src_map->skb = NULL;
5057}
5058
1da177e4
LT
5059/* The RX ring scheme is composed of multiple rings which post fresh
5060 * buffers to the chip, and one special ring the chip uses to report
5061 * status back to the host.
5062 *
5063 * The special ring reports the status of received packets to the
5064 * host. The chip does not write into the original descriptor the
5065 * RX buffer was obtained from. The chip simply takes the original
5066 * descriptor as provided by the host, updates the status and length
5067 * field, then writes this into the next status ring entry.
5068 *
5069 * Each ring the host uses to post buffers to the chip is described
5070 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
5071 * it is first placed into the on-chip ram. When the packet's length
5072 * is known, it walks down the TG3_BDINFO entries to select the ring.
5073 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5074 * which is within the range of the new packet's length is chosen.
5075 *
5076 * The "separate ring for rx status" scheme may sound queer, but it makes
5077 * sense from a cache coherency perspective. If only the host writes
5078 * to the buffer post rings, and only the chip writes to the rx status
5079 * rings, then cache lines never move beyond shared-modified state.
5080 * If both the host and chip were to write into the same ring, cache line
5081 * eviction could occur since both entities want it in an exclusive state.
5082 */
17375d25 5083static int tg3_rx(struct tg3_napi *tnapi, int budget)
1da177e4 5084{
17375d25 5085 struct tg3 *tp = tnapi->tp;
f92905de 5086 u32 work_mask, rx_std_posted = 0;
4361935a 5087 u32 std_prod_idx, jmb_prod_idx;
72334482 5088 u32 sw_idx = tnapi->rx_rcb_ptr;
483ba50b 5089 u16 hw_idx;
1da177e4 5090 int received;
8fea32b9 5091 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
1da177e4 5092
8d9d7cfc 5093 hw_idx = *(tnapi->rx_rcb_prod_idx);
1da177e4
LT
5094 /*
5095 * We need to order the read of hw_idx and the read of
5096 * the opaque cookie.
5097 */
5098 rmb();
1da177e4
LT
5099 work_mask = 0;
5100 received = 0;
4361935a
MC
5101 std_prod_idx = tpr->rx_std_prod_idx;
5102 jmb_prod_idx = tpr->rx_jmb_prod_idx;
1da177e4 5103 while (sw_idx != hw_idx && budget > 0) {
afc081f8 5104 struct ring_info *ri;
72334482 5105 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
1da177e4
LT
5106 unsigned int len;
5107 struct sk_buff *skb;
5108 dma_addr_t dma_addr;
5109 u32 opaque_key, desc_idx, *post_ptr;
5110
5111 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5112 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5113 if (opaque_key == RXD_OPAQUE_RING_STD) {
8fea32b9 5114 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4e5e4f0d 5115 dma_addr = dma_unmap_addr(ri, mapping);
21f581a5 5116 skb = ri->skb;
4361935a 5117 post_ptr = &std_prod_idx;
f92905de 5118 rx_std_posted++;
1da177e4 5119 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
8fea32b9 5120 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4e5e4f0d 5121 dma_addr = dma_unmap_addr(ri, mapping);
21f581a5 5122 skb = ri->skb;
4361935a 5123 post_ptr = &jmb_prod_idx;
21f581a5 5124 } else
1da177e4 5125 goto next_pkt_nopost;
1da177e4
LT
5126
5127 work_mask |= opaque_key;
5128
5129 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5130 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5131 drop_it:
a3896167 5132 tg3_recycle_rx(tnapi, tpr, opaque_key,
1da177e4
LT
5133 desc_idx, *post_ptr);
5134 drop_it_no_recycle:
5135 /* Other statistics kept track of by card. */
b0057c51 5136 tp->rx_dropped++;
1da177e4
LT
5137 goto next_pkt;
5138 }
5139
ad829268
MC
5140 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5141 ETH_FCS_LEN;
1da177e4 5142
d2757fc4 5143 if (len > TG3_RX_COPY_THRESH(tp)) {
1da177e4
LT
5144 int skb_size;
5145
86b21e59 5146 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
afc081f8 5147 *post_ptr);
1da177e4
LT
5148 if (skb_size < 0)
5149 goto drop_it;
5150
287be12e 5151 pci_unmap_single(tp->pdev, dma_addr, skb_size,
1da177e4
LT
5152 PCI_DMA_FROMDEVICE);
5153
61e800cf
MC
5154 /* Ensure that the update to the skb happens
5155 * after the usage of the old DMA mapping.
5156 */
5157 smp_wmb();
5158
5159 ri->skb = NULL;
5160
1da177e4
LT
5161 skb_put(skb, len);
5162 } else {
5163 struct sk_buff *copy_skb;
5164
a3896167 5165 tg3_recycle_rx(tnapi, tpr, opaque_key,
1da177e4
LT
5166 desc_idx, *post_ptr);
5167
bf933c80 5168 copy_skb = netdev_alloc_skb(tp->dev, len +
9dc7a113 5169 TG3_RAW_IP_ALIGN);
1da177e4
LT
5170 if (copy_skb == NULL)
5171 goto drop_it_no_recycle;
5172
bf933c80 5173 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
1da177e4
LT
5174 skb_put(copy_skb, len);
5175 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
d626f62b 5176 skb_copy_from_linear_data(skb, copy_skb->data, len);
1da177e4
LT
5177 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5178
5179 /* We'll reuse the original ring buffer. */
5180 skb = copy_skb;
5181 }
5182
dc668910 5183 if ((tp->dev->features & NETIF_F_RXCSUM) &&
1da177e4
LT
5184 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5185 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5186 >> RXD_TCPCSUM_SHIFT) == 0xffff))
5187 skb->ip_summed = CHECKSUM_UNNECESSARY;
5188 else
bc8acf2c 5189 skb_checksum_none_assert(skb);
1da177e4
LT
5190
5191 skb->protocol = eth_type_trans(skb, tp->dev);
f7b493e0
MC
5192
5193 if (len > (tp->dev->mtu + ETH_HLEN) &&
5194 skb->protocol != htons(ETH_P_8021Q)) {
5195 dev_kfree_skb(skb);
b0057c51 5196 goto drop_it_no_recycle;
f7b493e0
MC
5197 }
5198
9dc7a113 5199 if (desc->type_flags & RXD_FLAG_VLAN &&
bf933c80
MC
5200 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5201 __vlan_hwaccel_put_tag(skb,
5202 desc->err_vlan & RXD_VLAN_MASK);
9dc7a113 5203
bf933c80 5204 napi_gro_receive(&tnapi->napi, skb);
1da177e4 5205
1da177e4
LT
5206 received++;
5207 budget--;
5208
5209next_pkt:
5210 (*post_ptr)++;
f92905de
MC
5211
5212 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
2c49a44d
MC
5213 tpr->rx_std_prod_idx = std_prod_idx &
5214 tp->rx_std_ring_mask;
86cfe4ff
MC
5215 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5216 tpr->rx_std_prod_idx);
f92905de
MC
5217 work_mask &= ~RXD_OPAQUE_RING_STD;
5218 rx_std_posted = 0;
5219 }
1da177e4 5220next_pkt_nopost:
483ba50b 5221 sw_idx++;
7cb32cf2 5222 sw_idx &= tp->rx_ret_ring_mask;
52f6d697
MC
5223
5224 /* Refresh hw_idx to see if there is new work */
5225 if (sw_idx == hw_idx) {
8d9d7cfc 5226 hw_idx = *(tnapi->rx_rcb_prod_idx);
52f6d697
MC
5227 rmb();
5228 }
1da177e4
LT
5229 }
5230
5231 /* ACK the status ring. */
72334482
MC
5232 tnapi->rx_rcb_ptr = sw_idx;
5233 tw32_rx_mbox(tnapi->consmbox, sw_idx);
1da177e4
LT
5234
5235 /* Refill RX ring(s). */
63c3a66f 5236 if (!tg3_flag(tp, ENABLE_RSS)) {
b196c7e4 5237 if (work_mask & RXD_OPAQUE_RING_STD) {
2c49a44d
MC
5238 tpr->rx_std_prod_idx = std_prod_idx &
5239 tp->rx_std_ring_mask;
b196c7e4
MC
5240 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5241 tpr->rx_std_prod_idx);
5242 }
5243 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2c49a44d
MC
5244 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5245 tp->rx_jmb_ring_mask;
b196c7e4
MC
5246 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5247 tpr->rx_jmb_prod_idx);
5248 }
5249 mmiowb();
5250 } else if (work_mask) {
5251 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5252 * updated before the producer indices can be updated.
5253 */
5254 smp_wmb();
5255
2c49a44d
MC
5256 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5257 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
b196c7e4 5258
e4af1af9
MC
5259 if (tnapi != &tp->napi[1])
5260 napi_schedule(&tp->napi[1].napi);
1da177e4 5261 }
1da177e4
LT
5262
5263 return received;
5264}
5265
35f2d7d0 5266static void tg3_poll_link(struct tg3 *tp)
1da177e4 5267{
1da177e4 5268 /* handle link change and other phy events */
63c3a66f 5269 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
35f2d7d0
MC
5270 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5271
1da177e4
LT
5272 if (sblk->status & SD_STATUS_LINK_CHG) {
5273 sblk->status = SD_STATUS_UPDATED |
35f2d7d0 5274 (sblk->status & ~SD_STATUS_LINK_CHG);
f47c11ee 5275 spin_lock(&tp->lock);
63c3a66f 5276 if (tg3_flag(tp, USE_PHYLIB)) {
dd477003
MC
5277 tw32_f(MAC_STATUS,
5278 (MAC_STATUS_SYNC_CHANGED |
5279 MAC_STATUS_CFG_CHANGED |
5280 MAC_STATUS_MI_COMPLETION |
5281 MAC_STATUS_LNKSTATE_CHANGED));
5282 udelay(40);
5283 } else
5284 tg3_setup_phy(tp, 0);
f47c11ee 5285 spin_unlock(&tp->lock);
1da177e4
LT
5286 }
5287 }
35f2d7d0
MC
5288}
5289
f89f38b8
MC
5290static int tg3_rx_prodring_xfer(struct tg3 *tp,
5291 struct tg3_rx_prodring_set *dpr,
5292 struct tg3_rx_prodring_set *spr)
b196c7e4
MC
5293{
5294 u32 si, di, cpycnt, src_prod_idx;
f89f38b8 5295 int i, err = 0;
b196c7e4
MC
5296
5297 while (1) {
5298 src_prod_idx = spr->rx_std_prod_idx;
5299
5300 /* Make sure updates to the rx_std_buffers[] entries and the
5301 * standard producer index are seen in the correct order.
5302 */
5303 smp_rmb();
5304
5305 if (spr->rx_std_cons_idx == src_prod_idx)
5306 break;
5307
5308 if (spr->rx_std_cons_idx < src_prod_idx)
5309 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5310 else
2c49a44d
MC
5311 cpycnt = tp->rx_std_ring_mask + 1 -
5312 spr->rx_std_cons_idx;
b196c7e4 5313
2c49a44d
MC
5314 cpycnt = min(cpycnt,
5315 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
b196c7e4
MC
5316
5317 si = spr->rx_std_cons_idx;
5318 di = dpr->rx_std_prod_idx;
5319
e92967bf
MC
5320 for (i = di; i < di + cpycnt; i++) {
5321 if (dpr->rx_std_buffers[i].skb) {
5322 cpycnt = i - di;
f89f38b8 5323 err = -ENOSPC;
e92967bf
MC
5324 break;
5325 }
5326 }
5327
5328 if (!cpycnt)
5329 break;
5330
5331 /* Ensure that updates to the rx_std_buffers ring and the
5332 * shadowed hardware producer ring from tg3_recycle_skb() are
5333 * ordered correctly WRT the skb check above.
5334 */
5335 smp_rmb();
5336
b196c7e4
MC
5337 memcpy(&dpr->rx_std_buffers[di],
5338 &spr->rx_std_buffers[si],
5339 cpycnt * sizeof(struct ring_info));
5340
5341 for (i = 0; i < cpycnt; i++, di++, si++) {
5342 struct tg3_rx_buffer_desc *sbd, *dbd;
5343 sbd = &spr->rx_std[si];
5344 dbd = &dpr->rx_std[di];
5345 dbd->addr_hi = sbd->addr_hi;
5346 dbd->addr_lo = sbd->addr_lo;
5347 }
5348
2c49a44d
MC
5349 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5350 tp->rx_std_ring_mask;
5351 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5352 tp->rx_std_ring_mask;
b196c7e4
MC
5353 }
5354
5355 while (1) {
5356 src_prod_idx = spr->rx_jmb_prod_idx;
5357
5358 /* Make sure updates to the rx_jmb_buffers[] entries and
5359 * the jumbo producer index are seen in the correct order.
5360 */
5361 smp_rmb();
5362
5363 if (spr->rx_jmb_cons_idx == src_prod_idx)
5364 break;
5365
5366 if (spr->rx_jmb_cons_idx < src_prod_idx)
5367 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5368 else
2c49a44d
MC
5369 cpycnt = tp->rx_jmb_ring_mask + 1 -
5370 spr->rx_jmb_cons_idx;
b196c7e4
MC
5371
5372 cpycnt = min(cpycnt,
2c49a44d 5373 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
b196c7e4
MC
5374
5375 si = spr->rx_jmb_cons_idx;
5376 di = dpr->rx_jmb_prod_idx;
5377
e92967bf
MC
5378 for (i = di; i < di + cpycnt; i++) {
5379 if (dpr->rx_jmb_buffers[i].skb) {
5380 cpycnt = i - di;
f89f38b8 5381 err = -ENOSPC;
e92967bf
MC
5382 break;
5383 }
5384 }
5385
5386 if (!cpycnt)
5387 break;
5388
5389 /* Ensure that updates to the rx_jmb_buffers ring and the
5390 * shadowed hardware producer ring from tg3_recycle_skb() are
5391 * ordered correctly WRT the skb check above.
5392 */
5393 smp_rmb();
5394
b196c7e4
MC
5395 memcpy(&dpr->rx_jmb_buffers[di],
5396 &spr->rx_jmb_buffers[si],
5397 cpycnt * sizeof(struct ring_info));
5398
5399 for (i = 0; i < cpycnt; i++, di++, si++) {
5400 struct tg3_rx_buffer_desc *sbd, *dbd;
5401 sbd = &spr->rx_jmb[si].std;
5402 dbd = &dpr->rx_jmb[di].std;
5403 dbd->addr_hi = sbd->addr_hi;
5404 dbd->addr_lo = sbd->addr_lo;
5405 }
5406
2c49a44d
MC
5407 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5408 tp->rx_jmb_ring_mask;
5409 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5410 tp->rx_jmb_ring_mask;
b196c7e4 5411 }
f89f38b8
MC
5412
5413 return err;
b196c7e4
MC
5414}
5415
35f2d7d0
MC
5416static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5417{
5418 struct tg3 *tp = tnapi->tp;
1da177e4
LT
5419
5420 /* run TX completion thread */
f3f3f27e 5421 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
17375d25 5422 tg3_tx(tnapi);
63c3a66f 5423 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
4fd7ab59 5424 return work_done;
1da177e4
LT
5425 }
5426
1da177e4
LT
5427 /* run RX thread, within the bounds set by NAPI.
5428 * All RX "locking" is done by ensuring outside
bea3348e 5429 * code synchronizes with tg3->napi.poll()
1da177e4 5430 */
8d9d7cfc 5431 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
17375d25 5432 work_done += tg3_rx(tnapi, budget - work_done);
1da177e4 5433
63c3a66f 5434 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
8fea32b9 5435 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
f89f38b8 5436 int i, err = 0;
e4af1af9
MC
5437 u32 std_prod_idx = dpr->rx_std_prod_idx;
5438 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
b196c7e4 5439
e4af1af9 5440 for (i = 1; i < tp->irq_cnt; i++)
f89f38b8 5441 err |= tg3_rx_prodring_xfer(tp, dpr,
8fea32b9 5442 &tp->napi[i].prodring);
b196c7e4
MC
5443
5444 wmb();
5445
e4af1af9
MC
5446 if (std_prod_idx != dpr->rx_std_prod_idx)
5447 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5448 dpr->rx_std_prod_idx);
b196c7e4 5449
e4af1af9
MC
5450 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5451 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5452 dpr->rx_jmb_prod_idx);
b196c7e4
MC
5453
5454 mmiowb();
f89f38b8
MC
5455
5456 if (err)
5457 tw32_f(HOSTCC_MODE, tp->coal_now);
b196c7e4
MC
5458 }
5459
6f535763
DM
5460 return work_done;
5461}
5462
35f2d7d0
MC
5463static int tg3_poll_msix(struct napi_struct *napi, int budget)
5464{
5465 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5466 struct tg3 *tp = tnapi->tp;
5467 int work_done = 0;
5468 struct tg3_hw_status *sblk = tnapi->hw_status;
5469
5470 while (1) {
5471 work_done = tg3_poll_work(tnapi, work_done, budget);
5472
63c3a66f 5473 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
35f2d7d0
MC
5474 goto tx_recovery;
5475
5476 if (unlikely(work_done >= budget))
5477 break;
5478
c6cdf436 5479 /* tp->last_tag is used in tg3_int_reenable() below
35f2d7d0
MC
5480 * to tell the hw how much work has been processed,
5481 * so we must read it before checking for more work.
5482 */
5483 tnapi->last_tag = sblk->status_tag;
5484 tnapi->last_irq_tag = tnapi->last_tag;
5485 rmb();
5486
5487 /* check for RX/TX work to do */
6d40db7b
MC
5488 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5489 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
35f2d7d0
MC
5490 napi_complete(napi);
5491 /* Reenable interrupts. */
5492 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5493 mmiowb();
5494 break;
5495 }
5496 }
5497
5498 return work_done;
5499
5500tx_recovery:
5501 /* work_done is guaranteed to be less than budget. */
5502 napi_complete(napi);
5503 schedule_work(&tp->reset_task);
5504 return work_done;
5505}
5506
e64de4e6
MC
5507static void tg3_process_error(struct tg3 *tp)
5508{
5509 u32 val;
5510 bool real_error = false;
5511
63c3a66f 5512 if (tg3_flag(tp, ERROR_PROCESSED))
e64de4e6
MC
5513 return;
5514
5515 /* Check Flow Attention register */
5516 val = tr32(HOSTCC_FLOW_ATTN);
5517 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5518 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5519 real_error = true;
5520 }
5521
5522 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5523 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5524 real_error = true;
5525 }
5526
5527 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5528 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5529 real_error = true;
5530 }
5531
5532 if (!real_error)
5533 return;
5534
5535 tg3_dump_state(tp);
5536
63c3a66f 5537 tg3_flag_set(tp, ERROR_PROCESSED);
e64de4e6
MC
5538 schedule_work(&tp->reset_task);
5539}
5540
6f535763
DM
5541static int tg3_poll(struct napi_struct *napi, int budget)
5542{
8ef0442f
MC
5543 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5544 struct tg3 *tp = tnapi->tp;
6f535763 5545 int work_done = 0;
898a56f8 5546 struct tg3_hw_status *sblk = tnapi->hw_status;
6f535763
DM
5547
5548 while (1) {
e64de4e6
MC
5549 if (sblk->status & SD_STATUS_ERROR)
5550 tg3_process_error(tp);
5551
35f2d7d0
MC
5552 tg3_poll_link(tp);
5553
17375d25 5554 work_done = tg3_poll_work(tnapi, work_done, budget);
6f535763 5555
63c3a66f 5556 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6f535763
DM
5557 goto tx_recovery;
5558
5559 if (unlikely(work_done >= budget))
5560 break;
5561
63c3a66f 5562 if (tg3_flag(tp, TAGGED_STATUS)) {
17375d25 5563 /* tp->last_tag is used in tg3_int_reenable() below
4fd7ab59
MC
5564 * to tell the hw how much work has been processed,
5565 * so we must read it before checking for more work.
5566 */
898a56f8
MC
5567 tnapi->last_tag = sblk->status_tag;
5568 tnapi->last_irq_tag = tnapi->last_tag;
4fd7ab59
MC
5569 rmb();
5570 } else
5571 sblk->status &= ~SD_STATUS_UPDATED;
6f535763 5572
17375d25 5573 if (likely(!tg3_has_work(tnapi))) {
288379f0 5574 napi_complete(napi);
17375d25 5575 tg3_int_reenable(tnapi);
6f535763
DM
5576 break;
5577 }
1da177e4
LT
5578 }
5579
bea3348e 5580 return work_done;
6f535763
DM
5581
5582tx_recovery:
4fd7ab59 5583 /* work_done is guaranteed to be less than budget. */
288379f0 5584 napi_complete(napi);
6f535763 5585 schedule_work(&tp->reset_task);
4fd7ab59 5586 return work_done;
1da177e4
LT
5587}
5588
66cfd1bd
MC
5589static void tg3_napi_disable(struct tg3 *tp)
5590{
5591 int i;
5592
5593 for (i = tp->irq_cnt - 1; i >= 0; i--)
5594 napi_disable(&tp->napi[i].napi);
5595}
5596
5597static void tg3_napi_enable(struct tg3 *tp)
5598{
5599 int i;
5600
5601 for (i = 0; i < tp->irq_cnt; i++)
5602 napi_enable(&tp->napi[i].napi);
5603}
5604
5605static void tg3_napi_init(struct tg3 *tp)
5606{
5607 int i;
5608
5609 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5610 for (i = 1; i < tp->irq_cnt; i++)
5611 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5612}
5613
5614static void tg3_napi_fini(struct tg3 *tp)
5615{
5616 int i;
5617
5618 for (i = 0; i < tp->irq_cnt; i++)
5619 netif_napi_del(&tp->napi[i].napi);
5620}
5621
5622static inline void tg3_netif_stop(struct tg3 *tp)
5623{
5624 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5625 tg3_napi_disable(tp);
5626 netif_tx_disable(tp->dev);
5627}
5628
5629static inline void tg3_netif_start(struct tg3 *tp)
5630{
5631 /* NOTE: unconditional netif_tx_wake_all_queues is only
5632 * appropriate so long as all callers are assured to
5633 * have free tx slots (such as after tg3_init_hw)
5634 */
5635 netif_tx_wake_all_queues(tp->dev);
5636
5637 tg3_napi_enable(tp);
5638 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5639 tg3_enable_ints(tp);
5640}
5641
f47c11ee
DM
5642static void tg3_irq_quiesce(struct tg3 *tp)
5643{
4f125f42
MC
5644 int i;
5645
f47c11ee
DM
5646 BUG_ON(tp->irq_sync);
5647
5648 tp->irq_sync = 1;
5649 smp_mb();
5650
4f125f42
MC
5651 for (i = 0; i < tp->irq_cnt; i++)
5652 synchronize_irq(tp->napi[i].irq_vec);
f47c11ee
DM
5653}
5654
f47c11ee
DM
5655/* Fully shutdown all tg3 driver activity elsewhere in the system.
5656 * If irq_sync is non-zero, then the IRQ handler must be synchronized
5657 * with as well. Most of the time, this is not necessary except when
5658 * shutting down the device.
5659 */
5660static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5661{
46966545 5662 spin_lock_bh(&tp->lock);
f47c11ee
DM
5663 if (irq_sync)
5664 tg3_irq_quiesce(tp);
f47c11ee
DM
5665}
5666
5667static inline void tg3_full_unlock(struct tg3 *tp)
5668{
f47c11ee
DM
5669 spin_unlock_bh(&tp->lock);
5670}
5671
fcfa0a32
MC
5672/* One-shot MSI handler - Chip automatically disables interrupt
5673 * after sending MSI so driver doesn't have to do it.
5674 */
7d12e780 5675static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
fcfa0a32 5676{
09943a18
MC
5677 struct tg3_napi *tnapi = dev_id;
5678 struct tg3 *tp = tnapi->tp;
fcfa0a32 5679
898a56f8 5680 prefetch(tnapi->hw_status);
0c1d0e2b
MC
5681 if (tnapi->rx_rcb)
5682 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
fcfa0a32
MC
5683
5684 if (likely(!tg3_irq_sync(tp)))
09943a18 5685 napi_schedule(&tnapi->napi);
fcfa0a32
MC
5686
5687 return IRQ_HANDLED;
5688}
5689
88b06bc2
MC
5690/* MSI ISR - No need to check for interrupt sharing and no need to
5691 * flush status block and interrupt mailbox. PCI ordering rules
5692 * guarantee that MSI will arrive after the status block.
5693 */
7d12e780 5694static irqreturn_t tg3_msi(int irq, void *dev_id)
88b06bc2 5695{
09943a18
MC
5696 struct tg3_napi *tnapi = dev_id;
5697 struct tg3 *tp = tnapi->tp;
88b06bc2 5698
898a56f8 5699 prefetch(tnapi->hw_status);
0c1d0e2b
MC
5700 if (tnapi->rx_rcb)
5701 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
88b06bc2 5702 /*
fac9b83e 5703 * Writing any value to intr-mbox-0 clears PCI INTA# and
88b06bc2 5704 * chip-internal interrupt pending events.
fac9b83e 5705 * Writing non-zero to intr-mbox-0 additional tells the
88b06bc2
MC
5706 * NIC to stop sending us irqs, engaging "in-intr-handler"
5707 * event coalescing.
5708 */
5709 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
61487480 5710 if (likely(!tg3_irq_sync(tp)))
09943a18 5711 napi_schedule(&tnapi->napi);
61487480 5712
88b06bc2
MC
5713 return IRQ_RETVAL(1);
5714}
5715
7d12e780 5716static irqreturn_t tg3_interrupt(int irq, void *dev_id)
1da177e4 5717{
09943a18
MC
5718 struct tg3_napi *tnapi = dev_id;
5719 struct tg3 *tp = tnapi->tp;
898a56f8 5720 struct tg3_hw_status *sblk = tnapi->hw_status;
1da177e4
LT
5721 unsigned int handled = 1;
5722
1da177e4
LT
5723 /* In INTx mode, it is possible for the interrupt to arrive at
5724 * the CPU before the status block posted prior to the interrupt.
5725 * Reading the PCI State register will confirm whether the
5726 * interrupt is ours and will flush the status block.
5727 */
d18edcb2 5728 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
63c3a66f 5729 if (tg3_flag(tp, CHIP_RESETTING) ||
d18edcb2
MC
5730 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5731 handled = 0;
f47c11ee 5732 goto out;
fac9b83e 5733 }
d18edcb2
MC
5734 }
5735
5736 /*
5737 * Writing any value to intr-mbox-0 clears PCI INTA# and
5738 * chip-internal interrupt pending events.
5739 * Writing non-zero to intr-mbox-0 additional tells the
5740 * NIC to stop sending us irqs, engaging "in-intr-handler"
5741 * event coalescing.
c04cb347
MC
5742 *
5743 * Flush the mailbox to de-assert the IRQ immediately to prevent
5744 * spurious interrupts. The flush impacts performance but
5745 * excessive spurious interrupts can be worse in some cases.
d18edcb2 5746 */
c04cb347 5747 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
d18edcb2
MC
5748 if (tg3_irq_sync(tp))
5749 goto out;
5750 sblk->status &= ~SD_STATUS_UPDATED;
17375d25 5751 if (likely(tg3_has_work(tnapi))) {
72334482 5752 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
09943a18 5753 napi_schedule(&tnapi->napi);
d18edcb2
MC
5754 } else {
5755 /* No work, shared interrupt perhaps? re-enable
5756 * interrupts, and flush that PCI write
5757 */
5758 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5759 0x00000000);
fac9b83e 5760 }
f47c11ee 5761out:
fac9b83e
DM
5762 return IRQ_RETVAL(handled);
5763}
5764
7d12e780 5765static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
fac9b83e 5766{
09943a18
MC
5767 struct tg3_napi *tnapi = dev_id;
5768 struct tg3 *tp = tnapi->tp;
898a56f8 5769 struct tg3_hw_status *sblk = tnapi->hw_status;
fac9b83e
DM
5770 unsigned int handled = 1;
5771
fac9b83e
DM
5772 /* In INTx mode, it is possible for the interrupt to arrive at
5773 * the CPU before the status block posted prior to the interrupt.
5774 * Reading the PCI State register will confirm whether the
5775 * interrupt is ours and will flush the status block.
5776 */
898a56f8 5777 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
63c3a66f 5778 if (tg3_flag(tp, CHIP_RESETTING) ||
d18edcb2
MC
5779 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5780 handled = 0;
f47c11ee 5781 goto out;
1da177e4 5782 }
d18edcb2
MC
5783 }
5784
5785 /*
5786 * writing any value to intr-mbox-0 clears PCI INTA# and
5787 * chip-internal interrupt pending events.
5788 * writing non-zero to intr-mbox-0 additional tells the
5789 * NIC to stop sending us irqs, engaging "in-intr-handler"
5790 * event coalescing.
c04cb347
MC
5791 *
5792 * Flush the mailbox to de-assert the IRQ immediately to prevent
5793 * spurious interrupts. The flush impacts performance but
5794 * excessive spurious interrupts can be worse in some cases.
d18edcb2 5795 */
c04cb347 5796 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
624f8e50
MC
5797
5798 /*
5799 * In a shared interrupt configuration, sometimes other devices'
5800 * interrupts will scream. We record the current status tag here
5801 * so that the above check can report that the screaming interrupts
5802 * are unhandled. Eventually they will be silenced.
5803 */
898a56f8 5804 tnapi->last_irq_tag = sblk->status_tag;
624f8e50 5805
d18edcb2
MC
5806 if (tg3_irq_sync(tp))
5807 goto out;
624f8e50 5808
72334482 5809 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
624f8e50 5810
09943a18 5811 napi_schedule(&tnapi->napi);
624f8e50 5812
f47c11ee 5813out:
1da177e4
LT
5814 return IRQ_RETVAL(handled);
5815}
5816
7938109f 5817/* ISR for interrupt test */
7d12e780 5818static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7938109f 5819{
09943a18
MC
5820 struct tg3_napi *tnapi = dev_id;
5821 struct tg3 *tp = tnapi->tp;
898a56f8 5822 struct tg3_hw_status *sblk = tnapi->hw_status;
7938109f 5823
f9804ddb
MC
5824 if ((sblk->status & SD_STATUS_UPDATED) ||
5825 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
b16250e3 5826 tg3_disable_ints(tp);
7938109f
MC
5827 return IRQ_RETVAL(1);
5828 }
5829 return IRQ_RETVAL(0);
5830}
5831
8e7a22e3 5832static int tg3_init_hw(struct tg3 *, int);
944d980e 5833static int tg3_halt(struct tg3 *, int, int);
1da177e4 5834
b9ec6c1b
MC
5835/* Restart hardware after configuration changes, self-test, etc.
5836 * Invoked with tp->lock held.
5837 */
5838static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
78c6146f
ED
5839 __releases(tp->lock)
5840 __acquires(tp->lock)
b9ec6c1b
MC
5841{
5842 int err;
5843
5844 err = tg3_init_hw(tp, reset_phy);
5845 if (err) {
5129c3a3
MC
5846 netdev_err(tp->dev,
5847 "Failed to re-initialize device, aborting\n");
b9ec6c1b
MC
5848 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5849 tg3_full_unlock(tp);
5850 del_timer_sync(&tp->timer);
5851 tp->irq_sync = 0;
fed97810 5852 tg3_napi_enable(tp);
b9ec6c1b
MC
5853 dev_close(tp->dev);
5854 tg3_full_lock(tp, 0);
5855 }
5856 return err;
5857}
5858
1da177e4
LT
5859#ifdef CONFIG_NET_POLL_CONTROLLER
5860static void tg3_poll_controller(struct net_device *dev)
5861{
4f125f42 5862 int i;
88b06bc2
MC
5863 struct tg3 *tp = netdev_priv(dev);
5864
4f125f42 5865 for (i = 0; i < tp->irq_cnt; i++)
fe234f0e 5866 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
1da177e4
LT
5867}
5868#endif
5869
c4028958 5870static void tg3_reset_task(struct work_struct *work)
1da177e4 5871{
c4028958 5872 struct tg3 *tp = container_of(work, struct tg3, reset_task);
b02fd9e3 5873 int err;
1da177e4
LT
5874 unsigned int restart_timer;
5875
7faa006f 5876 tg3_full_lock(tp, 0);
7faa006f
MC
5877
5878 if (!netif_running(tp->dev)) {
7faa006f
MC
5879 tg3_full_unlock(tp);
5880 return;
5881 }
5882
5883 tg3_full_unlock(tp);
5884
b02fd9e3
MC
5885 tg3_phy_stop(tp);
5886
1da177e4
LT
5887 tg3_netif_stop(tp);
5888
f47c11ee 5889 tg3_full_lock(tp, 1);
1da177e4 5890
63c3a66f
JP
5891 restart_timer = tg3_flag(tp, RESTART_TIMER);
5892 tg3_flag_clear(tp, RESTART_TIMER);
1da177e4 5893
63c3a66f 5894 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
df3e6548
MC
5895 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5896 tp->write32_rx_mbox = tg3_write_flush_reg32;
63c3a66f
JP
5897 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5898 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
df3e6548
MC
5899 }
5900
944d980e 5901 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
b02fd9e3
MC
5902 err = tg3_init_hw(tp, 1);
5903 if (err)
b9ec6c1b 5904 goto out;
1da177e4
LT
5905
5906 tg3_netif_start(tp);
5907
1da177e4
LT
5908 if (restart_timer)
5909 mod_timer(&tp->timer, jiffies + 1);
7faa006f 5910
b9ec6c1b 5911out:
7faa006f 5912 tg3_full_unlock(tp);
b02fd9e3
MC
5913
5914 if (!err)
5915 tg3_phy_start(tp);
1da177e4
LT
5916}
5917
5918static void tg3_tx_timeout(struct net_device *dev)
5919{
5920 struct tg3 *tp = netdev_priv(dev);
5921
b0408751 5922 if (netif_msg_tx_err(tp)) {
05dbe005 5923 netdev_err(dev, "transmit timed out, resetting\n");
97bd8e49 5924 tg3_dump_state(tp);
b0408751 5925 }
1da177e4
LT
5926
5927 schedule_work(&tp->reset_task);
5928}
5929
c58ec932
MC
5930/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5931static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5932{
5933 u32 base = (u32) mapping & 0xffffffff;
5934
807540ba 5935 return (base > 0xffffdcc0) && (base + len + 8 < base);
c58ec932
MC
5936}
5937
72f2afb8
MC
5938/* Test for DMA addresses > 40-bit */
5939static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5940 int len)
5941{
5942#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
63c3a66f 5943 if (tg3_flag(tp, 40BIT_DMA_BUG))
807540ba 5944 return ((u64) mapping + len) > DMA_BIT_MASK(40);
72f2afb8
MC
5945 return 0;
5946#else
5947 return 0;
5948#endif
5949}
5950
d1a3b737 5951static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
92cd3a17
MC
5952 dma_addr_t mapping, u32 len, u32 flags,
5953 u32 mss, u32 vlan)
2ffcc981 5954{
92cd3a17
MC
5955 txbd->addr_hi = ((u64) mapping >> 32);
5956 txbd->addr_lo = ((u64) mapping & 0xffffffff);
5957 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
5958 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
2ffcc981 5959}
1da177e4 5960
84b67b27 5961static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
d1a3b737
MC
5962 dma_addr_t map, u32 len, u32 flags,
5963 u32 mss, u32 vlan)
5964{
5965 struct tg3 *tp = tnapi->tp;
5966 bool hwbug = false;
5967
5968 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
5969 hwbug = 1;
5970
5971 if (tg3_4g_overflow_test(map, len))
5972 hwbug = 1;
5973
5974 if (tg3_40bit_overflow_test(tp, map, len))
5975 hwbug = 1;
5976
e31aa987
MC
5977 if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
5978 u32 tmp_flag = flags & ~TXD_FLAG_END;
5979 while (len > TG3_TX_BD_DMA_MAX) {
5980 u32 frag_len = TG3_TX_BD_DMA_MAX;
5981 len -= TG3_TX_BD_DMA_MAX;
5982
5983 if (len) {
5984 tnapi->tx_buffers[*entry].fragmented = true;
5985 /* Avoid the 8byte DMA problem */
5986 if (len <= 8) {
5987 len += TG3_TX_BD_DMA_MAX / 2;
5988 frag_len = TG3_TX_BD_DMA_MAX / 2;
5989 }
5990 } else
5991 tmp_flag = flags;
5992
5993 if (*budget) {
5994 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
5995 frag_len, tmp_flag, mss, vlan);
5996 (*budget)--;
5997 *entry = NEXT_TX(*entry);
5998 } else {
5999 hwbug = 1;
6000 break;
6001 }
6002
6003 map += frag_len;
6004 }
6005
6006 if (len) {
6007 if (*budget) {
6008 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6009 len, flags, mss, vlan);
6010 (*budget)--;
6011 *entry = NEXT_TX(*entry);
6012 } else {
6013 hwbug = 1;
6014 }
6015 }
6016 } else {
84b67b27
MC
6017 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6018 len, flags, mss, vlan);
e31aa987
MC
6019 *entry = NEXT_TX(*entry);
6020 }
d1a3b737
MC
6021
6022 return hwbug;
6023}
6024
0d681b27 6025static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
432aa7ed
MC
6026{
6027 int i;
0d681b27 6028 struct sk_buff *skb;
df8944cf 6029 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
432aa7ed 6030
0d681b27
MC
6031 skb = txb->skb;
6032 txb->skb = NULL;
6033
432aa7ed
MC
6034 pci_unmap_single(tnapi->tp->pdev,
6035 dma_unmap_addr(txb, mapping),
6036 skb_headlen(skb),
6037 PCI_DMA_TODEVICE);
e01ee14d
MC
6038
6039 while (txb->fragmented) {
6040 txb->fragmented = false;
6041 entry = NEXT_TX(entry);
6042 txb = &tnapi->tx_buffers[entry];
6043 }
6044
9a2e0fb0 6045 for (i = 0; i < last; i++) {
432aa7ed
MC
6046 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6047
6048 entry = NEXT_TX(entry);
6049 txb = &tnapi->tx_buffers[entry];
6050
6051 pci_unmap_page(tnapi->tp->pdev,
6052 dma_unmap_addr(txb, mapping),
6053 frag->size, PCI_DMA_TODEVICE);
e01ee14d
MC
6054
6055 while (txb->fragmented) {
6056 txb->fragmented = false;
6057 entry = NEXT_TX(entry);
6058 txb = &tnapi->tx_buffers[entry];
6059 }
432aa7ed
MC
6060 }
6061}
6062
72f2afb8 6063/* Workaround 4GB and 40-bit hardware DMA bugs. */
24f4efd4 6064static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
432aa7ed 6065 struct sk_buff *skb,
84b67b27 6066 u32 *entry, u32 *budget,
92cd3a17 6067 u32 base_flags, u32 mss, u32 vlan)
1da177e4 6068{
24f4efd4 6069 struct tg3 *tp = tnapi->tp;
41588ba1 6070 struct sk_buff *new_skb;
c58ec932 6071 dma_addr_t new_addr = 0;
432aa7ed 6072 int ret = 0;
1da177e4 6073
41588ba1
MC
6074 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6075 new_skb = skb_copy(skb, GFP_ATOMIC);
6076 else {
6077 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6078
6079 new_skb = skb_copy_expand(skb,
6080 skb_headroom(skb) + more_headroom,
6081 skb_tailroom(skb), GFP_ATOMIC);
6082 }
6083
1da177e4 6084 if (!new_skb) {
c58ec932
MC
6085 ret = -1;
6086 } else {
6087 /* New SKB is guaranteed to be linear. */
f4188d8a
AD
6088 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6089 PCI_DMA_TODEVICE);
6090 /* Make sure the mapping succeeded */
6091 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
f4188d8a 6092 dev_kfree_skb(new_skb);
c58ec932 6093 ret = -1;
c58ec932 6094 } else {
92cd3a17
MC
6095 base_flags |= TXD_FLAG_END;
6096
84b67b27
MC
6097 tnapi->tx_buffers[*entry].skb = new_skb;
6098 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
432aa7ed
MC
6099 mapping, new_addr);
6100
84b67b27 6101 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
d1a3b737
MC
6102 new_skb->len, base_flags,
6103 mss, vlan)) {
84b67b27 6104 tg3_tx_skb_unmap(tnapi, *entry, 0);
d1a3b737
MC
6105 dev_kfree_skb(new_skb);
6106 ret = -1;
6107 }
f4188d8a 6108 }
1da177e4
LT
6109 }
6110
6111 dev_kfree_skb(skb);
6112
c58ec932 6113 return ret;
1da177e4
LT
6114}
6115
2ffcc981 6116static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
52c0fd83
MC
6117
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Software-segments the oversized-header TSO skb and transmits each
 * resulting MSS-sized segment through tg3_start_xmit() individually.
 * Always returns NETDEV_TX_OK (consuming the skb) except when the ring
 * lacks room for the estimated worst-case fragment count, in which case
 * the queue is stopped and NETDEV_TX_BUSY is returned.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Segment in software with TSO masked off. */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
52c0fd83 6158
5a6f3074 6159/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
63c3a66f 6160 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5a6f3074 6161 */
2ffcc981 6162static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
1da177e4
LT
6163{
6164 struct tg3 *tp = netdev_priv(dev);
92cd3a17 6165 u32 len, entry, base_flags, mss, vlan = 0;
84b67b27 6166 u32 budget;
432aa7ed 6167 int i = -1, would_hit_hwbug;
90079ce8 6168 dma_addr_t mapping;
24f4efd4
MC
6169 struct tg3_napi *tnapi;
6170 struct netdev_queue *txq;
432aa7ed 6171 unsigned int last;
f4188d8a 6172
24f4efd4
MC
6173 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6174 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
63c3a66f 6175 if (tg3_flag(tp, ENABLE_TSS))
24f4efd4 6176 tnapi++;
1da177e4 6177
84b67b27
MC
6178 budget = tg3_tx_avail(tnapi);
6179
00b70504 6180 /* We are running in BH disabled context with netif_tx_lock
bea3348e 6181 * and TX reclaim runs via tp->napi.poll inside of a software
f47c11ee
DM
6182 * interrupt. Furthermore, IRQ processing runs lockless so we have
6183 * no IRQ context deadlocks to worry about either. Rejoice!
1da177e4 6184 */
84b67b27 6185 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
24f4efd4
MC
6186 if (!netif_tx_queue_stopped(txq)) {
6187 netif_tx_stop_queue(txq);
1f064a87
SH
6188
6189 /* This is a hard error, log it. */
5129c3a3
MC
6190 netdev_err(dev,
6191 "BUG! Tx Ring full when queue awake!\n");
1f064a87 6192 }
1da177e4
LT
6193 return NETDEV_TX_BUSY;
6194 }
6195
f3f3f27e 6196 entry = tnapi->tx_prod;
1da177e4 6197 base_flags = 0;
84fa7933 6198 if (skb->ip_summed == CHECKSUM_PARTIAL)
1da177e4 6199 base_flags |= TXD_FLAG_TCPUDP_CSUM;
24f4efd4 6200
be98da6a
MC
6201 mss = skb_shinfo(skb)->gso_size;
6202 if (mss) {
eddc9ec5 6203 struct iphdr *iph;
34195c3d 6204 u32 tcp_opt_len, hdr_len;
1da177e4
LT
6205
6206 if (skb_header_cloned(skb) &&
6207 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
6208 dev_kfree_skb(skb);
6209 goto out_unlock;
6210 }
6211
34195c3d 6212 iph = ip_hdr(skb);
ab6a5bb6 6213 tcp_opt_len = tcp_optlen(skb);
1da177e4 6214
02e96080 6215 if (skb_is_gso_v6(skb)) {
34195c3d
MC
6216 hdr_len = skb_headlen(skb) - ETH_HLEN;
6217 } else {
6218 u32 ip_tcp_len;
6219
6220 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6221 hdr_len = ip_tcp_len + tcp_opt_len;
6222
6223 iph->check = 0;
6224 iph->tot_len = htons(mss + hdr_len);
6225 }
6226
52c0fd83 6227 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
63c3a66f 6228 tg3_flag(tp, TSO_BUG))
de6f31eb 6229 return tg3_tso_bug(tp, skb);
52c0fd83 6230
1da177e4
LT
6231 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6232 TXD_FLAG_CPU_POST_DMA);
6233
63c3a66f
JP
6234 if (tg3_flag(tp, HW_TSO_1) ||
6235 tg3_flag(tp, HW_TSO_2) ||
6236 tg3_flag(tp, HW_TSO_3)) {
aa8223c7 6237 tcp_hdr(skb)->check = 0;
1da177e4 6238 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
aa8223c7
ACM
6239 } else
6240 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6241 iph->daddr, 0,
6242 IPPROTO_TCP,
6243 0);
1da177e4 6244
63c3a66f 6245 if (tg3_flag(tp, HW_TSO_3)) {
615774fe
MC
6246 mss |= (hdr_len & 0xc) << 12;
6247 if (hdr_len & 0x10)
6248 base_flags |= 0x00000010;
6249 base_flags |= (hdr_len & 0x3e0) << 5;
63c3a66f 6250 } else if (tg3_flag(tp, HW_TSO_2))
92c6b8d1 6251 mss |= hdr_len << 9;
63c3a66f 6252 else if (tg3_flag(tp, HW_TSO_1) ||
92c6b8d1 6253 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
eddc9ec5 6254 if (tcp_opt_len || iph->ihl > 5) {
1da177e4
LT
6255 int tsflags;
6256
eddc9ec5 6257 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
1da177e4
LT
6258 mss |= (tsflags << 11);
6259 }
6260 } else {
eddc9ec5 6261 if (tcp_opt_len || iph->ihl > 5) {
1da177e4
LT
6262 int tsflags;
6263
eddc9ec5 6264 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
1da177e4
LT
6265 base_flags |= tsflags << 12;
6266 }
6267 }
6268 }
bf933c80 6269
92cd3a17
MC
6270#ifdef BCM_KERNEL_SUPPORTS_8021Q
6271 if (vlan_tx_tag_present(skb)) {
6272 base_flags |= TXD_FLAG_VLAN;
6273 vlan = vlan_tx_tag_get(skb);
6274 }
6275#endif
1da177e4 6276
63c3a66f 6277 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8fc2f995 6278 !mss && skb->len > VLAN_ETH_FRAME_LEN)
615774fe
MC
6279 base_flags |= TXD_FLAG_JMB_PKT;
6280
f4188d8a
AD
6281 len = skb_headlen(skb);
6282
6283 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6284 if (pci_dma_mapping_error(tp->pdev, mapping)) {
90079ce8
DM
6285 dev_kfree_skb(skb);
6286 goto out_unlock;
6287 }
6288
f3f3f27e 6289 tnapi->tx_buffers[entry].skb = skb;
4e5e4f0d 6290 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
1da177e4
LT
6291
6292 would_hit_hwbug = 0;
6293
63c3a66f 6294 if (tg3_flag(tp, 5701_DMA_BUG))
c58ec932 6295 would_hit_hwbug = 1;
1da177e4 6296
84b67b27 6297 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
d1a3b737
MC
6298 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6299 mss, vlan))
6300 would_hit_hwbug = 1;
1da177e4 6301
1da177e4
LT
6302 /* Now loop through additional data fragments, and queue them. */
6303 if (skb_shinfo(skb)->nr_frags > 0) {
92cd3a17
MC
6304 u32 tmp_mss = mss;
6305
6306 if (!tg3_flag(tp, HW_TSO_1) &&
6307 !tg3_flag(tp, HW_TSO_2) &&
6308 !tg3_flag(tp, HW_TSO_3))
6309 tmp_mss = 0;
6310
1da177e4
LT
6311 last = skb_shinfo(skb)->nr_frags - 1;
6312 for (i = 0; i <= last; i++) {
6313 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6314
6315 len = frag->size;
dc234d0b
IC
6316 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6317 len, PCI_DMA_TODEVICE);
1da177e4 6318
f3f3f27e 6319 tnapi->tx_buffers[entry].skb = NULL;
4e5e4f0d 6320 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
f4188d8a
AD
6321 mapping);
6322 if (pci_dma_mapping_error(tp->pdev, mapping))
6323 goto dma_error;
1da177e4 6324
84b67b27
MC
6325 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6326 len, base_flags |
6327 ((i == last) ? TXD_FLAG_END : 0),
d1a3b737 6328 tmp_mss, vlan))
72f2afb8 6329 would_hit_hwbug = 1;
1da177e4
LT
6330 }
6331 }
6332
6333 if (would_hit_hwbug) {
0d681b27 6334 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
1da177e4
LT
6335
6336 /* If the workaround fails due to memory/mapping
6337 * failure, silently drop this packet.
6338 */
84b67b27
MC
6339 entry = tnapi->tx_prod;
6340 budget = tg3_tx_avail(tnapi);
6341 if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget,
6342 base_flags, mss, vlan))
1da177e4 6343 goto out_unlock;
1da177e4
LT
6344 }
6345
d515b450
RC
6346 skb_tx_timestamp(skb);
6347
1da177e4 6348 /* Packets are ready, update Tx producer idx local and on card. */
24f4efd4 6349 tw32_tx_mbox(tnapi->prodmbox, entry);
1da177e4 6350
f3f3f27e
MC
6351 tnapi->tx_prod = entry;
6352 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
24f4efd4 6353 netif_tx_stop_queue(txq);
f65aac16
MC
6354
6355 /* netif_tx_stop_queue() must be done before checking
6356 * checking tx index in tg3_tx_avail() below, because in
6357 * tg3_tx(), we update tx index before checking for
6358 * netif_tx_queue_stopped().
6359 */
6360 smp_mb();
f3f3f27e 6361 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
24f4efd4 6362 netif_tx_wake_queue(txq);
51b91468 6363 }
1da177e4
LT
6364
6365out_unlock:
cdd0db05 6366 mmiowb();
1da177e4
LT
6367
6368 return NETDEV_TX_OK;
f4188d8a
AD
6369
6370dma_error:
0d681b27 6371 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
f4188d8a 6372 dev_kfree_skb(skb);
432aa7ed 6373 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
f4188d8a 6374 return NETDEV_TX_OK;
1da177e4
LT
6375}
6376
6e01b20b
MC
/* Enable or disable internal MAC loopback.
 *
 * Adjusts the cached tp->mac_mode (port mode, half-duplex, polarity and
 * INT_LPBACK bits) and writes it to the MAC_MODE register, waiting 40us
 * for the write to settle.  NOTE(review): callers appear to serialize
 * tp->mac_mode updates (e.g. under tp->lock) — confirm at call sites.
 */
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		/* 10/100-only PHYs loop back through the MII port. */
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}
6404
941ec90f 6405static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
5e5a7f37 6406{
941ec90f 6407 u32 val, bmcr, mac_mode, ptest = 0;
5e5a7f37
MC
6408
6409 tg3_phy_toggle_apd(tp, false);
6410 tg3_phy_toggle_automdix(tp, 0);
6411
941ec90f
MC
6412 if (extlpbk && tg3_phy_set_extloopbk(tp))
6413 return -EIO;
6414
6415 bmcr = BMCR_FULLDPLX;
5e5a7f37
MC
6416 switch (speed) {
6417 case SPEED_10:
6418 break;
6419 case SPEED_100:
6420 bmcr |= BMCR_SPEED100;
6421 break;
6422 case SPEED_1000:
6423 default:
6424 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
6425 speed = SPEED_100;
6426 bmcr |= BMCR_SPEED100;
6427 } else {
6428 speed = SPEED_1000;
6429 bmcr |= BMCR_SPEED1000;
6430 }
6431 }
6432
941ec90f
MC
6433 if (extlpbk) {
6434 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
6435 tg3_readphy(tp, MII_CTRL1000, &val);
6436 val |= CTL1000_AS_MASTER |
6437 CTL1000_ENABLE_MASTER;
6438 tg3_writephy(tp, MII_CTRL1000, val);
6439 } else {
6440 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
6441 MII_TG3_FET_PTEST_TRIM_2;
6442 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
6443 }
6444 } else
6445 bmcr |= BMCR_LOOPBACK;
6446
5e5a7f37
MC
6447 tg3_writephy(tp, MII_BMCR, bmcr);
6448
6449 /* The write needs to be flushed for the FETs */
6450 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
6451 tg3_readphy(tp, MII_BMCR, &bmcr);
6452
6453 udelay(40);
6454
6455 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
6456 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
941ec90f 6457 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
5e5a7f37
MC
6458 MII_TG3_FET_PTEST_FRC_TX_LINK |
6459 MII_TG3_FET_PTEST_FRC_TX_LOCK);
6460
6461 /* The write needs to be flushed for the AC131 */
6462 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
6463 }
6464
6465 /* Reset to prevent losing 1st rx packet intermittently */
6466 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6467 tg3_flag(tp, 5780_CLASS)) {
6468 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6469 udelay(10);
6470 tw32_f(MAC_RX_MODE, tp->rx_mode);
6471 }
6472
6473 mac_mode = tp->mac_mode &
6474 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
6475 if (speed == SPEED_1000)
6476 mac_mode |= MAC_MODE_PORT_MODE_GMII;
6477 else
6478 mac_mode |= MAC_MODE_PORT_MODE_MII;
6479
6480 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
6481 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
6482
6483 if (masked_phy_id == TG3_PHY_ID_BCM5401)
6484 mac_mode &= ~MAC_MODE_LINK_POLARITY;
6485 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
6486 mac_mode |= MAC_MODE_LINK_POLARITY;
6487
6488 tg3_writephy(tp, MII_TG3_EXT_CTRL,
6489 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
6490 }
6491
6492 tw32(MAC_MODE, mac_mode);
6493 udelay(40);
941ec90f
MC
6494
6495 return 0;
5e5a7f37
MC
6496}
6497
06c03c02
MB
/* Toggle internal MAC loopback per the NETIF_F_LOOPBACK feature bit.
 *
 * No-op if the MAC is already in the requested state.  The MAC mode
 * change and (on disable) the forced link re-check both run under
 * tp->lock with BHs disabled.
 */
static void tg3_set_loopback(struct net_device *dev, u32 features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		/* Already enabled — nothing to do. */
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		/* Already disabled — nothing to do. */
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
6523
dc668910
MM
6524static u32 tg3_fix_features(struct net_device *dev, u32 features)
6525{
6526 struct tg3 *tp = netdev_priv(dev);
6527
63c3a66f 6528 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
dc668910
MM
6529 features &= ~NETIF_F_ALL_TSO;
6530
6531 return features;
6532}
6533
06c03c02
MB
6534static int tg3_set_features(struct net_device *dev, u32 features)
6535{
6536 u32 changed = dev->features ^ features;
6537
6538 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6539 tg3_set_loopback(dev, features);
6540
6541 return 0;
6542}
6543
1da177e4
LT
6544static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6545 int new_mtu)
6546{
6547 dev->mtu = new_mtu;
6548
ef7f5ec0 6549 if (new_mtu > ETH_DATA_LEN) {
63c3a66f 6550 if (tg3_flag(tp, 5780_CLASS)) {
dc668910 6551 netdev_update_features(dev);
63c3a66f 6552 tg3_flag_clear(tp, TSO_CAPABLE);
859a5887 6553 } else {
63c3a66f 6554 tg3_flag_set(tp, JUMBO_RING_ENABLE);
859a5887 6555 }
ef7f5ec0 6556 } else {
63c3a66f
JP
6557 if (tg3_flag(tp, 5780_CLASS)) {
6558 tg3_flag_set(tp, TSO_CAPABLE);
dc668910
MM
6559 netdev_update_features(dev);
6560 }
63c3a66f 6561 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
ef7f5ec0 6562 }
1da177e4
LT
6563}
6564
/* ndo_change_mtu: validate and apply a new MTU.
 *
 * If the interface is down, only the MTU and dependent flags are
 * recorded.  If it is up, the chip is halted, reconfigured, and
 * restarted with the new MTU; returns the tg3_restart_hw() error code.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	/* Only resume the datapath if the restart succeeded. */
	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
6603
21f581a5
MC
/* Release all rx skbs held by one producer ring set.
 *
 * For per-vector rings (not napi[0]'s), only the occupied span between
 * consumer and producer index is walked; napi[0]'s set owns the whole
 * ring and is swept end to end.  Jumbo buffers are freed only where the
 * chip configuration allows a jumbo ring.
 */
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		/* Per-vector set: free only cons..prod (mod ring size). */
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
					tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
						TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	/* Primary set: sweep the entire standard ring. */
	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
				tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
					TG3_RX_JMB_MAP_SZ);
	}
}
6637
c6cdf436 6638/* Initialize rx rings for packet processing.
1da177e4
LT
6639 *
6640 * The chip has been shut down and the driver detached from
6641 * the networking, so no interrupts or new tx packets will
6642 * end up in the driver. tp->{tx,}lock are held and thus
6643 * we may not sleep.
6644 */
21f581a5
MC
6645static int tg3_rx_prodring_alloc(struct tg3 *tp,
6646 struct tg3_rx_prodring_set *tpr)
1da177e4 6647{
287be12e 6648 u32 i, rx_pkt_dma_sz;
1da177e4 6649
b196c7e4
MC
6650 tpr->rx_std_cons_idx = 0;
6651 tpr->rx_std_prod_idx = 0;
6652 tpr->rx_jmb_cons_idx = 0;
6653 tpr->rx_jmb_prod_idx = 0;
6654
8fea32b9 6655 if (tpr != &tp->napi[0].prodring) {
2c49a44d
MC
6656 memset(&tpr->rx_std_buffers[0], 0,
6657 TG3_RX_STD_BUFF_RING_SIZE(tp));
48035728 6658 if (tpr->rx_jmb_buffers)
2b2cdb65 6659 memset(&tpr->rx_jmb_buffers[0], 0,
2c49a44d 6660 TG3_RX_JMB_BUFF_RING_SIZE(tp));
2b2cdb65
MC
6661 goto done;
6662 }
6663
1da177e4 6664 /* Zero out all descriptors. */
2c49a44d 6665 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
1da177e4 6666
287be12e 6667 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
63c3a66f 6668 if (tg3_flag(tp, 5780_CLASS) &&
287be12e
MC
6669 tp->dev->mtu > ETH_DATA_LEN)
6670 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6671 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7e72aad4 6672
1da177e4
LT
6673 /* Initialize invariants of the rings, we only set this
6674 * stuff once. This works because the card does not
6675 * write into the rx buffer posting rings.
6676 */
2c49a44d 6677 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
1da177e4
LT
6678 struct tg3_rx_buffer_desc *rxd;
6679
21f581a5 6680 rxd = &tpr->rx_std[i];
287be12e 6681 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
1da177e4
LT
6682 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6683 rxd->opaque = (RXD_OPAQUE_RING_STD |
6684 (i << RXD_OPAQUE_INDEX_SHIFT));
6685 }
6686
1da177e4
LT
6687 /* Now allocate fresh SKBs for each rx ring. */
6688 for (i = 0; i < tp->rx_pending; i++) {
86b21e59 6689 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
5129c3a3
MC
6690 netdev_warn(tp->dev,
6691 "Using a smaller RX standard ring. Only "
6692 "%d out of %d buffers were allocated "
6693 "successfully\n", i, tp->rx_pending);
32d8c572 6694 if (i == 0)
cf7a7298 6695 goto initfail;
32d8c572 6696 tp->rx_pending = i;
1da177e4 6697 break;
32d8c572 6698 }
1da177e4
LT
6699 }
6700
63c3a66f 6701 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
cf7a7298
MC
6702 goto done;
6703
2c49a44d 6704 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
cf7a7298 6705
63c3a66f 6706 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
0d86df80 6707 goto done;
cf7a7298 6708
2c49a44d 6709 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
0d86df80
MC
6710 struct tg3_rx_buffer_desc *rxd;
6711
6712 rxd = &tpr->rx_jmb[i].std;
6713 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6714 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6715 RXD_FLAG_JUMBO;
6716 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6717 (i << RXD_OPAQUE_INDEX_SHIFT));
6718 }
6719
6720 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6721 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
5129c3a3
MC
6722 netdev_warn(tp->dev,
6723 "Using a smaller RX jumbo ring. Only %d "
6724 "out of %d buffers were allocated "
6725 "successfully\n", i, tp->rx_jumbo_pending);
0d86df80
MC
6726 if (i == 0)
6727 goto initfail;
6728 tp->rx_jumbo_pending = i;
6729 break;
1da177e4
LT
6730 }
6731 }
cf7a7298
MC
6732
6733done:
32d8c572 6734 return 0;
cf7a7298
MC
6735
6736initfail:
21f581a5 6737 tg3_rx_prodring_free(tp, tpr);
cf7a7298 6738 return -ENOMEM;
1da177e4
LT
6739}
6740
21f581a5
MC
/* Tear down one producer ring set: free the buffer bookkeeping arrays
 * and the DMA-coherent descriptor rings.  Safe to call on a partially
 * initialized set (every pointer is checked or NULL-tolerant) — used
 * as the error-unwind path of tg3_rx_prodring_init().
 */
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
6759
21f581a5
MC
/* Allocate one producer ring set: buffer bookkeeping arrays plus the
 * DMA-coherent standard (and, when the chip supports a separate jumbo
 * ring, jumbo) descriptor rings.  Returns 0 or -ENOMEM; on failure all
 * partial allocations are released via tg3_rx_prodring_fini().
 */
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	/* 5780-class parts have no separate jumbo ring. */
	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
6795
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		/* Vectors without a tx ring have no tx buffers to free. */
		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			/* Unmap head + all fragments, then drop the skb. */
			tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);

			dev_kfree_skb_any(skb);
		}
	}
}
6827
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0, or -ENOMEM if repopulating any rx producer ring fails
 * (in which case everything allocated so far is freed again).
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* Reset per-vector status block and ring indices. */
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
6868
6869/*
6870 * Must not be invoked with interrupt sources disabled and
6871 * the hardware shutdown down.
6872 */
6873static void tg3_free_consistent(struct tg3 *tp)
6874{
f77a6a8e 6875 int i;
898a56f8 6876
f77a6a8e
MC
6877 for (i = 0; i < tp->irq_cnt; i++) {
6878 struct tg3_napi *tnapi = &tp->napi[i];
6879
6880 if (tnapi->tx_ring) {
4bae65c8 6881 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
f77a6a8e
MC
6882 tnapi->tx_ring, tnapi->tx_desc_mapping);
6883 tnapi->tx_ring = NULL;
6884 }
6885
6886 kfree(tnapi->tx_buffers);
6887 tnapi->tx_buffers = NULL;
6888
6889 if (tnapi->rx_rcb) {
4bae65c8
MC
6890 dma_free_coherent(&tp->pdev->dev,
6891 TG3_RX_RCB_RING_BYTES(tp),
6892 tnapi->rx_rcb,
6893 tnapi->rx_rcb_mapping);
f77a6a8e
MC
6894 tnapi->rx_rcb = NULL;
6895 }
6896
8fea32b9
MC
6897 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6898
f77a6a8e 6899 if (tnapi->hw_status) {
4bae65c8
MC
6900 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6901 tnapi->hw_status,
6902 tnapi->status_mapping);
f77a6a8e
MC
6903 tnapi->hw_status = NULL;
6904 }
1da177e4 6905 }
f77a6a8e 6906
1da177e4 6907 if (tp->hw_stats) {
4bae65c8
MC
6908 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6909 tp->hw_stats, tp->stats_mapping);
1da177e4
LT
6910 tp->hw_stats = NULL;
6911 }
6912}
6913
6914/*
6915 * Must not be invoked with interrupt sources disabled and
6916 * the hardware shutdown down. Can sleep.
6917 */
6918static int tg3_alloc_consistent(struct tg3 *tp)
6919{
f77a6a8e 6920 int i;
898a56f8 6921
4bae65c8
MC
6922 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6923 sizeof(struct tg3_hw_stats),
6924 &tp->stats_mapping,
6925 GFP_KERNEL);
f77a6a8e 6926 if (!tp->hw_stats)
1da177e4
LT
6927 goto err_out;
6928
f77a6a8e 6929 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
1da177e4 6930
f77a6a8e
MC
6931 for (i = 0; i < tp->irq_cnt; i++) {
6932 struct tg3_napi *tnapi = &tp->napi[i];
8d9d7cfc 6933 struct tg3_hw_status *sblk;
1da177e4 6934
4bae65c8
MC
6935 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6936 TG3_HW_STATUS_SIZE,
6937 &tnapi->status_mapping,
6938 GFP_KERNEL);
f77a6a8e
MC
6939 if (!tnapi->hw_status)
6940 goto err_out;
898a56f8 6941
f77a6a8e 6942 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8d9d7cfc
MC
6943 sblk = tnapi->hw_status;
6944
8fea32b9
MC
6945 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6946 goto err_out;
6947
19cfaecc
MC
6948 /* If multivector TSS is enabled, vector 0 does not handle
6949 * tx interrupts. Don't allocate any resources for it.
6950 */
63c3a66f
JP
6951 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6952 (i && tg3_flag(tp, ENABLE_TSS))) {
df8944cf
MC
6953 tnapi->tx_buffers = kzalloc(
6954 sizeof(struct tg3_tx_ring_info) *
6955 TG3_TX_RING_SIZE, GFP_KERNEL);
19cfaecc
MC
6956 if (!tnapi->tx_buffers)
6957 goto err_out;
6958
4bae65c8
MC
6959 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6960 TG3_TX_RING_BYTES,
6961 &tnapi->tx_desc_mapping,
6962 GFP_KERNEL);
19cfaecc
MC
6963 if (!tnapi->tx_ring)
6964 goto err_out;
6965 }
6966
8d9d7cfc
MC
6967 /*
6968 * When RSS is enabled, the status block format changes
6969 * slightly. The "rx_jumbo_consumer", "reserved",
6970 * and "rx_mini_consumer" members get mapped to the
6971 * other three rx return ring producer indexes.
6972 */
6973 switch (i) {
6974 default:
6975 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6976 break;
6977 case 2:
6978 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6979 break;
6980 case 3:
6981 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6982 break;
6983 case 4:
6984 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6985 break;
6986 }
72334482 6987
0c1d0e2b
MC
6988 /*
6989 * If multivector RSS is enabled, vector 0 does not handle
6990 * rx or tx interrupts. Don't allocate any resources for it.
6991 */
63c3a66f 6992 if (!i && tg3_flag(tp, ENABLE_RSS))
0c1d0e2b
MC
6993 continue;
6994
4bae65c8
MC
6995 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6996 TG3_RX_RCB_RING_BYTES(tp),
6997 &tnapi->rx_rcb_mapping,
6998 GFP_KERNEL);
f77a6a8e
MC
6999 if (!tnapi->rx_rcb)
7000 goto err_out;
72334482 7001
f77a6a8e 7002 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
f77a6a8e 7003 }
1da177e4
LT
7004
7005 return 0;
7006
7007err_out:
7008 tg3_free_consistent(tp);
7009 return -ENOMEM;
7010}
7011
7012#define MAX_WAIT_CNT 1000
7013
7014/* To stop a block, clear the enable bit and poll till it
7015 * clears. tp->lock is held.
7016 */
b3b7d6be 7017static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
1da177e4
LT
7018{
7019 unsigned int i;
7020 u32 val;
7021
63c3a66f 7022 if (tg3_flag(tp, 5705_PLUS)) {
1da177e4
LT
7023 switch (ofs) {
7024 case RCVLSC_MODE:
7025 case DMAC_MODE:
7026 case MBFREE_MODE:
7027 case BUFMGR_MODE:
7028 case MEMARB_MODE:
7029 /* We can't enable/disable these bits of the
7030 * 5705/5750, just say success.
7031 */
7032 return 0;
7033
7034 default:
7035 break;
855e1111 7036 }
1da177e4
LT
7037 }
7038
7039 val = tr32(ofs);
7040 val &= ~enable_bit;
7041 tw32_f(ofs, val);
7042
7043 for (i = 0; i < MAX_WAIT_CNT; i++) {
7044 udelay(100);
7045 val = tr32(ofs);
7046 if ((val & enable_bit) == 0)
7047 break;
7048 }
7049
b3b7d6be 7050 if (i == MAX_WAIT_CNT && !silent) {
2445e461
MC
7051 dev_err(&tp->pdev->dev,
7052 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7053 ofs, enable_bit);
1da177e4
LT
7054 return -ENODEV;
7055 }
7056
7057 return 0;
7058}
7059
/* tp->lock is held.
 *
 * Quiesce the entire chip: disable interrupts, stop the RX and TX
 * engines block by block (in dependency order), then clear the status
 * blocks and statistics.  OR-accumulates errors from the individual
 * block stops; with @silent set the stop failures are not logged.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting new RX traffic before tearing down the
	 * receive blocks.
	 */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Shut down the receive-side blocks. */
	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Shut down the send-side blocks and DMA engines. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the MAC transmitter and wait for it to drain. */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	/* Host coalescing and memory-management blocks go last. */
	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset to flush the on-chip queues. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Wipe the per-vector status blocks and the statistics block so
	 * stale state is not seen when the chip is brought back up.
	 */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
7125
0d3031d9
MC
/* Post @event to the APE (Application Processing Engine) firmware.
 *
 * Silently returns when the APE is absent, not ready, or running NCSI
 * firmware.  The event word is written under TG3_APE_LOCK_MEM; the
 * write is skipped while a previous event is still pending, retried
 * for up to ~1ms, then the APE is kicked via TG3_APE_EVENT.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	/* Bail out unless the APE shared-memory segment looks valid. */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Only overwrite the event status word once the APE has
		 * consumed the previous event.
		 */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the APE doorbell only if our event was actually queued. */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
7165
/* Inform the APE firmware of a driver state transition (@kind is one
 * of the RESET_KIND_* values).  No-op when the APE is not enabled.
 * Updates the APE host segment registers for the new state and then
 * posts a matching state-change event via tg3_ape_send_event().
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host segment and identify this driver to
		 * the APE.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		/* Advertise WoL state to the APE when wakeup is armed. */
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
7222
1da177e4
LT
7223/* tp->lock is held. */
7224static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
7225{
f49639e6
DM
7226 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
7227 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1da177e4 7228
63c3a66f 7229 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1da177e4
LT
7230 switch (kind) {
7231 case RESET_KIND_INIT:
7232 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7233 DRV_STATE_START);
7234 break;
7235
7236 case RESET_KIND_SHUTDOWN:
7237 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7238 DRV_STATE_UNLOAD);
7239 break;
7240
7241 case RESET_KIND_SUSPEND:
7242 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7243 DRV_STATE_SUSPEND);
7244 break;
7245
7246 default:
7247 break;
855e1111 7248 }
1da177e4 7249 }
0d3031d9
MC
7250
7251 if (kind == RESET_KIND_INIT ||
7252 kind == RESET_KIND_SUSPEND)
7253 tg3_ape_driver_state_change(tp, kind);
1da177e4
LT
7254}
7255
7256/* tp->lock is held. */
7257static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
7258{
63c3a66f 7259 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1da177e4
LT
7260 switch (kind) {
7261 case RESET_KIND_INIT:
7262 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7263 DRV_STATE_START_DONE);
7264 break;
7265
7266 case RESET_KIND_SHUTDOWN:
7267 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7268 DRV_STATE_UNLOAD_DONE);
7269 break;
7270
7271 default:
7272 break;
855e1111 7273 }
1da177e4 7274 }
0d3031d9
MC
7275
7276 if (kind == RESET_KIND_SHUTDOWN)
7277 tg3_ape_driver_state_change(tp, kind);
1da177e4
LT
7278}
7279
7280/* tp->lock is held. */
7281static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
7282{
63c3a66f 7283 if (tg3_flag(tp, ENABLE_ASF)) {
1da177e4
LT
7284 switch (kind) {
7285 case RESET_KIND_INIT:
7286 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7287 DRV_STATE_START);
7288 break;
7289
7290 case RESET_KIND_SHUTDOWN:
7291 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7292 DRV_STATE_UNLOAD);
7293 break;
7294
7295 case RESET_KIND_SUSPEND:
7296 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7297 DRV_STATE_SUSPEND);
7298 break;
7299
7300 default:
7301 break;
855e1111 7302 }
1da177e4
LT
7303 }
7304}
7305
7a6f4369
MC
/* Wait for the on-chip firmware to finish initializing after a reset.
 *
 * Returns 0 on success and -ENODEV only for the 5906 VCPU timeout; for
 * other chips a missing firmware is reported once but treated as
 * success (some Sun onboard parts ship without firmware).
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		/* Bootcode acknowledges by writing the complemented magic. */
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
7349
ee6a99b5
MC
/* Save PCI command register before chip reset */
/* The core-clock reset clears the memory-enable bit in PCI_COMMAND on
 * some chips; stash the register so tg3_restore_pci_state() can put it
 * back afterwards.
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
7355
/* Restore PCI state after chip reset */
/* Re-establishes everything the GRC core-clock reset clobbers:
 * indirect access mode, PCI state/retry bits, the saved PCI_COMMAND
 * word, read-request size or cacheline/latency values, PCI-X relaxed
 * ordering, and (on 5780-class chips) the MSI enable bit.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Put back the PCI command word saved in tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
		if (tg3_flag(tp, PCI_EXPRESS))
			pcie_set_readrq(tp->pdev, tp->pcie_readrq);
		else {
			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
					      tp->pci_cacheline_sz);
			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
					      tp->pci_lat_timer);
		}
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
7420
1da177e4
LT
static void tg3_stop_fw(struct tg3 *);

/* tp->lock is held.
 *
 * Perform a full GRC core-clock chip reset and bring the device back
 * to a usable baseline: quiesce IRQ handlers, issue the reset, restore
 * saved PCI state, wait for firmware, and re-probe the ASF state.
 * Returns 0 on success or the error from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	/* Make sure no in-flight handler still touches the device. */
	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Flag a driver reset to the 5906 VCPU and un-halt it. */
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
				     &val16);
		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
			   PCI_EXP_DEVCTL_NOSNOOP_EN);
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
				      val16);

		pcie_set_readrq(tp->pdev, tp->pcie_readrq);

		/* Clear error status */
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Reprogram the MAC port mode to match the PHY type. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state. */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
7668
/* tp->lock is held. */
/* Pause the ASF firmware via the driver/firmware event mailbox.  Only
 * applies when ASF is enabled and the APE is not managing the device;
 * the two ACK waits keep the mailbox handshake in lockstep.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
7684
/* tp->lock is held.
 *
 * Full orderly shutdown: pause firmware, announce the reset of type
 * @kind, quiesce the hardware (@silent suppresses stop-block errors),
 * reset the chip, restore the MAC address and publish the post-reset
 * signatures.  Returns the tg3_chip_reset() result.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	/* The reset wiped the MAC address registers; reprogram them. */
	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
7707
1da177e4
LT
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000

/* tp->lock is held. */
/* Halt the embedded RX or TX CPU selected by @offset (RX_CPU_BASE or
 * TX_CPU_BASE).  Returns 0 on success, -ENODEV if the CPU never
 * reports the halted state.  5705+ chips have no separate TX CPU,
 * hence the BUG_ON; the 5906 uses its VCPU halt bit instead.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		/* Repeatedly request a halt until the CPU reports it. */
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* RX CPU needs one final flushed halt plus a settle delay. */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
7757
/* Parsed view of a firmware blob for the embedded CPUs. */
struct fw_info {
	unsigned int fw_base;		/* load/start address within CPU scratch space */
	unsigned int fw_len;		/* length in bytes of the loadable image */
	const __be32 *fw_data;		/* big-endian firmware words to load */
};
7763
/* tp->lock is held. */
/* Halt the CPU at @cpu_base, zero its scratch memory and copy the
 * firmware image described by @info into it.  Returns 0 on success or
 * a negative errno (-EINVAL for an impossible TX-CPU load on 5705+,
 * or the tg3_halt_cpu() error).  The caller starts the CPU afterwards.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
				 int cpu_scratch_size, struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* 5705+ can reach scratch memory through direct memory writes;
	 * older chips must go through indirect register accesses.
	 */
	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	/* Clear the scratch area, keep the CPU halted, then copy the
	 * firmware words in one at a time.
	 */
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			      be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
7808
/* tp->lock is held. */
/* Load the 5701 A0 workaround firmware (tp->fw, already requested by
 * the caller) into both the RX and TX CPUs, then start only the RX
 * CPU and verify its program counter sticks at the firmware base.
 * Returns 0 on success or a negative errno.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length.  We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	/* Retry setting the PC up to 5 times; success is the PC reading
	 * back as the firmware base address.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Release the RX CPU from halt to run the loaded firmware. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);

	return 0;
}
7863
1da177e4
LT
/* tp->lock is held. */
/* Load the software TSO firmware (tp->fw) into the appropriate
 * embedded CPU and start it.  Chips with hardware TSO (HW_TSO_1/2/3)
 * need no firmware, so this returns 0 immediately for them.  On 5705
 * the firmware runs on the RX CPU out of the mbuf pool area; otherwise
 * it runs on the TX CPU's scratch memory.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length.  We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	/* Retry setting the PC up to 5 times; success is the PC reading
	 * back as the firmware base address.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Release the CPU from halt to run the loaded firmware. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE, 0x00000000);
	return 0;
}
7927
1da177e4 7928
1da177e4
LT
7929static int tg3_set_mac_addr(struct net_device *dev, void *p)
7930{
7931 struct tg3 *tp = netdev_priv(dev);
7932 struct sockaddr *addr = p;
986e0aeb 7933 int err = 0, skip_mac_1 = 0;
1da177e4 7934
f9804ddb
MC
7935 if (!is_valid_ether_addr(addr->sa_data))
7936 return -EINVAL;
7937
1da177e4
LT
7938 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7939
e75f7c90
MC
7940 if (!netif_running(dev))
7941 return 0;
7942
63c3a66f 7943 if (tg3_flag(tp, ENABLE_ASF)) {
986e0aeb 7944 u32 addr0_high, addr0_low, addr1_high, addr1_low;
58712ef9 7945
986e0aeb
MC
7946 addr0_high = tr32(MAC_ADDR_0_HIGH);
7947 addr0_low = tr32(MAC_ADDR_0_LOW);
7948 addr1_high = tr32(MAC_ADDR_1_HIGH);
7949 addr1_low = tr32(MAC_ADDR_1_LOW);
7950
7951 /* Skip MAC addr 1 if ASF is using it. */
7952 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7953 !(addr1_high == 0 && addr1_low == 0))
7954 skip_mac_1 = 1;
58712ef9 7955 }
986e0aeb
MC
7956 spin_lock_bh(&tp->lock);
7957 __tg3_set_mac_addr(tp, skip_mac_1);
7958 spin_unlock_bh(&tp->lock);
1da177e4 7959
b9ec6c1b 7960 return err;
1da177e4
LT
7961}
7962
/* tp->lock is held. */
/* Fill one on-chip buffer-descriptor ring control block at
 * @bdinfo_addr: the 64-bit host DMA address of the ring, its
 * length/flags word, and — on pre-5705 chips only — the NIC-local
 * address of the ring.
 */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	/* 5705+ chips have no NIC-address field in the control block. */
	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
7983
7984static void __tg3_set_rx_mode(struct net_device *);
/* Program the host-coalescing engine from the ethtool coalesce
 * parameters in @ec.  Vector 0's TX (when TSS) or RX (when RSS)
 * parameters are zeroed because that vector does not service the
 * corresponding traffic in multivector mode; the extra MSI-X vectors
 * get their own per-vector register banks (0x18 bytes apart).
 */
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		/* With TSS, vector 0 handles no tx interrupts. */
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
	}

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
	} else {
		/* With RSS, vector 0 handles no rx interrupts. */
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		/* No point in collecting stats DMA while link is down. */
		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}

	/* Program the per-vector register banks for the active extra
	 * vectors...
	 */
	for (i = 0; i < tp->irq_cnt - 1; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);

		if (tg3_flag(tp, ENABLE_TSS)) {
			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	/* ...and zero the banks of the remaining, unused vectors. */
	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);

		if (tg3_flag(tp, ENABLE_TSS)) {
			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
		}
	}
}
1da177e4 8053
2d31ecaf
MC
8054/* tp->lock is held. */
8055static void tg3_rings_reset(struct tg3 *tp)
8056{
8057 int i;
f77a6a8e 8058 u32 stblk, txrcb, rxrcb, limit;
2d31ecaf
MC
8059 struct tg3_napi *tnapi = &tp->napi[0];
8060
8061 /* Disable all transmit rings but the first. */
63c3a66f 8062 if (!tg3_flag(tp, 5705_PLUS))
2d31ecaf 8063 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
63c3a66f 8064 else if (tg3_flag(tp, 5717_PLUS))
3d37728b 8065 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
b703df6f
MC
8066 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8067 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
2d31ecaf
MC
8068 else
8069 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8070
8071 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8072 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8073 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8074 BDINFO_FLAGS_DISABLED);
8075
8076
8077 /* Disable all receive return rings but the first. */
63c3a66f 8078 if (tg3_flag(tp, 5717_PLUS))
f6eb9b1f 8079 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
63c3a66f 8080 else if (!tg3_flag(tp, 5705_PLUS))
2d31ecaf 8081 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
b703df6f
MC
8082 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8083 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2d31ecaf
MC
8084 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8085 else
8086 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8087
8088 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8089 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8090 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8091 BDINFO_FLAGS_DISABLED);
8092
8093 /* Disable interrupts */
8094 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
0e6cf6a9
MC
8095 tp->napi[0].chk_msi_cnt = 0;
8096 tp->napi[0].last_rx_cons = 0;
8097 tp->napi[0].last_tx_cons = 0;
2d31ecaf
MC
8098
8099 /* Zero mailbox registers. */
63c3a66f 8100 if (tg3_flag(tp, SUPPORT_MSIX)) {
6fd45cb8 8101 for (i = 1; i < tp->irq_max; i++) {
f77a6a8e
MC
8102 tp->napi[i].tx_prod = 0;
8103 tp->napi[i].tx_cons = 0;
63c3a66f 8104 if (tg3_flag(tp, ENABLE_TSS))
c2353a32 8105 tw32_mailbox(tp->napi[i].prodmbox, 0);
f77a6a8e
MC
8106 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8107 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
0e6cf6a9
MC
8108 tp->napi[0].chk_msi_cnt = 0;
8109 tp->napi[i].last_rx_cons = 0;
8110 tp->napi[i].last_tx_cons = 0;
f77a6a8e 8111 }
63c3a66f 8112 if (!tg3_flag(tp, ENABLE_TSS))
c2353a32 8113 tw32_mailbox(tp->napi[0].prodmbox, 0);
f77a6a8e
MC
8114 } else {
8115 tp->napi[0].tx_prod = 0;
8116 tp->napi[0].tx_cons = 0;
8117 tw32_mailbox(tp->napi[0].prodmbox, 0);
8118 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8119 }
2d31ecaf
MC
8120
8121 /* Make sure the NIC-based send BD rings are disabled. */
63c3a66f 8122 if (!tg3_flag(tp, 5705_PLUS)) {
2d31ecaf
MC
8123 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8124 for (i = 0; i < 16; i++)
8125 tw32_tx_mbox(mbox + i * 8, 0);
8126 }
8127
8128 txrcb = NIC_SRAM_SEND_RCB;
8129 rxrcb = NIC_SRAM_RCV_RET_RCB;
8130
8131 /* Clear status block in ram. */
8132 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8133
8134 /* Set status block DMA address */
8135 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8136 ((u64) tnapi->status_mapping >> 32));
8137 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8138 ((u64) tnapi->status_mapping & 0xffffffff));
8139
f77a6a8e
MC
8140 if (tnapi->tx_ring) {
8141 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8142 (TG3_TX_RING_SIZE <<
8143 BDINFO_FLAGS_MAXLEN_SHIFT),
8144 NIC_SRAM_TX_BUFFER_DESC);
8145 txrcb += TG3_BDINFO_SIZE;
8146 }
8147
8148 if (tnapi->rx_rcb) {
8149 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7cb32cf2
MC
8150 (tp->rx_ret_ring_mask + 1) <<
8151 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
f77a6a8e
MC
8152 rxrcb += TG3_BDINFO_SIZE;
8153 }
8154
8155 stblk = HOSTCC_STATBLCK_RING1;
2d31ecaf 8156
f77a6a8e
MC
8157 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8158 u64 mapping = (u64)tnapi->status_mapping;
8159 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8160 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8161
8162 /* Clear status block in ram. */
8163 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8164
19cfaecc
MC
8165 if (tnapi->tx_ring) {
8166 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8167 (TG3_TX_RING_SIZE <<
8168 BDINFO_FLAGS_MAXLEN_SHIFT),
8169 NIC_SRAM_TX_BUFFER_DESC);
8170 txrcb += TG3_BDINFO_SIZE;
8171 }
f77a6a8e
MC
8172
8173 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7cb32cf2 8174 ((tp->rx_ret_ring_mask + 1) <<
f77a6a8e
MC
8175 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8176
8177 stblk += 8;
f77a6a8e
MC
8178 rxrcb += TG3_BDINFO_SIZE;
8179 }
2d31ecaf
MC
8180}
8181
/* Program the RX buffer-descriptor replenish thresholds for the standard
 * and (if present) jumbo producer rings.  The NIC refills its on-chip BD
 * cache when it drops below the threshold; the threshold is capped both by
 * the chip's BD cache size and by a fraction of the host ring size.
 */
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	/* Standard ring BD cache size varies by chip generation. */
	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	/* NIC-side limit: half the cache, but never more than the chip's
	 * max post count.  Host-side limit: 1/8 of the configured ring,
	 * at least 1.  Use the smaller of the two.
	 */
	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	/* 57765+ parts also take a low-water mark for the replenish FIFO. */
	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	/* No jumbo ring on non-jumbo-capable or 5780-class devices. */
	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
	else
		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
8222
1da177e4 8223/* tp->lock is held. */
8e7a22e3 8224static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
8225{
8226 u32 val, rdmac_mode;
8227 int i, err, limit;
8fea32b9 8228 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
1da177e4
LT
8229
8230 tg3_disable_ints(tp);
8231
8232 tg3_stop_fw(tp);
8233
8234 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8235
63c3a66f 8236 if (tg3_flag(tp, INIT_COMPLETE))
e6de8ad1 8237 tg3_abort_hw(tp, 1);
1da177e4 8238
699c0193
MC
8239 /* Enable MAC control of LPI */
8240 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8241 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8242 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8243 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8244
8245 tw32_f(TG3_CPMU_EEE_CTRL,
8246 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8247
a386b901
MC
8248 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8249 TG3_CPMU_EEEMD_LPI_IN_TX |
8250 TG3_CPMU_EEEMD_LPI_IN_RX |
8251 TG3_CPMU_EEEMD_EEE_ENABLE;
8252
8253 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8254 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8255
63c3a66f 8256 if (tg3_flag(tp, ENABLE_APE))
a386b901
MC
8257 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8258
8259 tw32_f(TG3_CPMU_EEE_MODE, val);
8260
8261 tw32_f(TG3_CPMU_EEE_DBTMR1,
8262 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8263 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8264
8265 tw32_f(TG3_CPMU_EEE_DBTMR2,
d7f2ab20 8266 TG3_CPMU_DBTMR2_APE_TX_2047US |
a386b901 8267 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
699c0193
MC
8268 }
8269
603f1173 8270 if (reset_phy)
d4d2c558
MC
8271 tg3_phy_reset(tp);
8272
1da177e4
LT
8273 err = tg3_chip_reset(tp);
8274 if (err)
8275 return err;
8276
8277 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8278
bcb37f6c 8279 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
d30cdd28
MC
8280 val = tr32(TG3_CPMU_CTRL);
8281 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8282 tw32(TG3_CPMU_CTRL, val);
9acb961e
MC
8283
8284 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8285 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8286 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8287 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8288
8289 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8290 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8291 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8292 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8293
8294 val = tr32(TG3_CPMU_HST_ACC);
8295 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8296 val |= CPMU_HST_ACC_MACCLK_6_25;
8297 tw32(TG3_CPMU_HST_ACC, val);
d30cdd28
MC
8298 }
8299
33466d93
MC
8300 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8301 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8302 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8303 PCIE_PWR_MGMT_L1_THRESH_4MS;
8304 tw32(PCIE_PWR_MGMT_THRESH, val);
521e6b90
MC
8305
8306 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8307 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8308
8309 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
33466d93 8310
f40386c8
MC
8311 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8312 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
255ca311
MC
8313 }
8314
63c3a66f 8315 if (tg3_flag(tp, L1PLLPD_EN)) {
614b0590
MC
8316 u32 grc_mode = tr32(GRC_MODE);
8317
8318 /* Access the lower 1K of PL PCIE block registers. */
8319 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8320 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8321
8322 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8323 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8324 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8325
8326 tw32(GRC_MODE, grc_mode);
8327 }
8328
5093eedc
MC
8329 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8330 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8331 u32 grc_mode = tr32(GRC_MODE);
cea46462 8332
5093eedc
MC
8333 /* Access the lower 1K of PL PCIE block registers. */
8334 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8335 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
cea46462 8336
5093eedc
MC
8337 val = tr32(TG3_PCIE_TLDLPL_PORT +
8338 TG3_PCIE_PL_LO_PHYCTL5);
8339 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8340 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
cea46462 8341
5093eedc
MC
8342 tw32(GRC_MODE, grc_mode);
8343 }
a977dbe8 8344
1ff30a59
MC
8345 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8346 u32 grc_mode = tr32(GRC_MODE);
8347
8348 /* Access the lower 1K of DL PCIE block registers. */
8349 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8350 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8351
8352 val = tr32(TG3_PCIE_TLDLPL_PORT +
8353 TG3_PCIE_DL_LO_FTSMAX);
8354 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8355 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8356 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8357
8358 tw32(GRC_MODE, grc_mode);
8359 }
8360
a977dbe8
MC
8361 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8362 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8363 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8364 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
cea46462
MC
8365 }
8366
1da177e4
LT
8367 /* This works around an issue with Athlon chipsets on
8368 * B3 tigon3 silicon. This bit has no effect on any
8369 * other revision. But do not set this on PCI Express
795d01c5 8370 * chips and don't even touch the clocks if the CPMU is present.
1da177e4 8371 */
63c3a66f
JP
8372 if (!tg3_flag(tp, CPMU_PRESENT)) {
8373 if (!tg3_flag(tp, PCI_EXPRESS))
795d01c5
MC
8374 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8375 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8376 }
1da177e4
LT
8377
8378 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
63c3a66f 8379 tg3_flag(tp, PCIX_MODE)) {
1da177e4
LT
8380 val = tr32(TG3PCI_PCISTATE);
8381 val |= PCISTATE_RETRY_SAME_DMA;
8382 tw32(TG3PCI_PCISTATE, val);
8383 }
8384
63c3a66f 8385 if (tg3_flag(tp, ENABLE_APE)) {
0d3031d9
MC
8386 /* Allow reads and writes to the
8387 * APE register and memory space.
8388 */
8389 val = tr32(TG3PCI_PCISTATE);
8390 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
f92d9dc1
MC
8391 PCISTATE_ALLOW_APE_SHMEM_WR |
8392 PCISTATE_ALLOW_APE_PSPACE_WR;
0d3031d9
MC
8393 tw32(TG3PCI_PCISTATE, val);
8394 }
8395
1da177e4
LT
8396 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8397 /* Enable some hw fixes. */
8398 val = tr32(TG3PCI_MSI_DATA);
8399 val |= (1 << 26) | (1 << 28) | (1 << 29);
8400 tw32(TG3PCI_MSI_DATA, val);
8401 }
8402
8403 /* Descriptor ring init may make accesses to the
8404 * NIC SRAM area to setup the TX descriptors, so we
8405 * can only do this after the hardware has been
8406 * successfully reset.
8407 */
32d8c572
MC
8408 err = tg3_init_rings(tp);
8409 if (err)
8410 return err;
1da177e4 8411
63c3a66f 8412 if (tg3_flag(tp, 57765_PLUS)) {
cbf9ca6c
MC
8413 val = tr32(TG3PCI_DMA_RW_CTRL) &
8414 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
1a319025
MC
8415 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8416 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
0aebff48
MC
8417 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8418 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8419 val |= DMA_RWCTRL_TAGGED_STAT_WA;
cbf9ca6c
MC
8420 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8421 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8422 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
d30cdd28
MC
8423 /* This value is determined during the probe time DMA
8424 * engine test, tg3_test_dma.
8425 */
8426 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8427 }
1da177e4
LT
8428
8429 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8430 GRC_MODE_4X_NIC_SEND_RINGS |
8431 GRC_MODE_NO_TX_PHDR_CSUM |
8432 GRC_MODE_NO_RX_PHDR_CSUM);
8433 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
d2d746f8
MC
8434
8435 /* Pseudo-header checksum is done by hardware logic and not
8436 * the offload processers, so make the chip do the pseudo-
8437 * header checksums on receive. For transmit it is more
8438 * convenient to do the pseudo-header checksum in software
8439 * as Linux does that on transmit for us in all cases.
8440 */
8441 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
1da177e4
LT
8442
8443 tw32(GRC_MODE,
8444 tp->grc_mode |
8445 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8446
8447 /* Setup the timer prescalar register. Clock is always 66Mhz. */
8448 val = tr32(GRC_MISC_CFG);
8449 val &= ~0xff;
8450 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8451 tw32(GRC_MISC_CFG, val);
8452
8453 /* Initialize MBUF/DESC pool. */
63c3a66f 8454 if (tg3_flag(tp, 5750_PLUS)) {
1da177e4
LT
8455 /* Do nothing. */
8456 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8457 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8458 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8459 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8460 else
8461 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8462 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8463 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
63c3a66f 8464 } else if (tg3_flag(tp, TSO_CAPABLE)) {
1da177e4
LT
8465 int fw_len;
8466
077f849d 8467 fw_len = tp->fw_len;
1da177e4
LT
8468 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8469 tw32(BUFMGR_MB_POOL_ADDR,
8470 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8471 tw32(BUFMGR_MB_POOL_SIZE,
8472 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8473 }
1da177e4 8474
0f893dc6 8475 if (tp->dev->mtu <= ETH_DATA_LEN) {
1da177e4
LT
8476 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8477 tp->bufmgr_config.mbuf_read_dma_low_water);
8478 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8479 tp->bufmgr_config.mbuf_mac_rx_low_water);
8480 tw32(BUFMGR_MB_HIGH_WATER,
8481 tp->bufmgr_config.mbuf_high_water);
8482 } else {
8483 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8484 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8485 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8486 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8487 tw32(BUFMGR_MB_HIGH_WATER,
8488 tp->bufmgr_config.mbuf_high_water_jumbo);
8489 }
8490 tw32(BUFMGR_DMA_LOW_WATER,
8491 tp->bufmgr_config.dma_low_water);
8492 tw32(BUFMGR_DMA_HIGH_WATER,
8493 tp->bufmgr_config.dma_high_water);
8494
d309a46e
MC
8495 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8496 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8497 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
4d958473
MC
8498 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8499 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8500 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8501 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
d309a46e 8502 tw32(BUFMGR_MODE, val);
1da177e4
LT
8503 for (i = 0; i < 2000; i++) {
8504 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8505 break;
8506 udelay(10);
8507 }
8508 if (i >= 2000) {
05dbe005 8509 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
1da177e4
LT
8510 return -ENODEV;
8511 }
8512
eb07a940
MC
8513 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8514 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
b5d3772c 8515
eb07a940 8516 tg3_setup_rxbd_thresholds(tp);
1da177e4
LT
8517
8518 /* Initialize TG3_BDINFO's at:
8519 * RCVDBDI_STD_BD: standard eth size rx ring
8520 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8521 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8522 *
8523 * like so:
8524 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8525 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8526 * ring attribute flags
8527 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8528 *
8529 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8530 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8531 *
8532 * The size of each ring is fixed in the firmware, but the location is
8533 * configurable.
8534 */
8535 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
21f581a5 8536 ((u64) tpr->rx_std_mapping >> 32));
1da177e4 8537 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
21f581a5 8538 ((u64) tpr->rx_std_mapping & 0xffffffff));
63c3a66f 8539 if (!tg3_flag(tp, 5717_PLUS))
87668d35
MC
8540 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8541 NIC_SRAM_RX_BUFFER_DESC);
1da177e4 8542
fdb72b38 8543 /* Disable the mini ring */
63c3a66f 8544 if (!tg3_flag(tp, 5705_PLUS))
1da177e4
LT
8545 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8546 BDINFO_FLAGS_DISABLED);
8547
fdb72b38
MC
8548 /* Program the jumbo buffer descriptor ring control
8549 * blocks on those devices that have them.
8550 */
a0512944 8551 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
63c3a66f 8552 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
1da177e4 8553
63c3a66f 8554 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
1da177e4 8555 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
21f581a5 8556 ((u64) tpr->rx_jmb_mapping >> 32));
1da177e4 8557 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
21f581a5 8558 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
de9f5230
MC
8559 val = TG3_RX_JMB_RING_SIZE(tp) <<
8560 BDINFO_FLAGS_MAXLEN_SHIFT;
1da177e4 8561 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
de9f5230 8562 val | BDINFO_FLAGS_USE_EXT_RECV);
63c3a66f 8563 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
a50d0796 8564 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
87668d35
MC
8565 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8566 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
1da177e4
LT
8567 } else {
8568 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8569 BDINFO_FLAGS_DISABLED);
8570 }
8571
63c3a66f 8572 if (tg3_flag(tp, 57765_PLUS)) {
7cb32cf2 8573 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
de9f5230 8574 val = TG3_RX_STD_MAX_SIZE_5700;
7cb32cf2 8575 else
de9f5230 8576 val = TG3_RX_STD_MAX_SIZE_5717;
7cb32cf2
MC
8577 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8578 val |= (TG3_RX_STD_DMA_SZ << 2);
8579 } else
04380d40 8580 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
fdb72b38 8581 } else
de9f5230 8582 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
fdb72b38
MC
8583
8584 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
1da177e4 8585
411da640 8586 tpr->rx_std_prod_idx = tp->rx_pending;
66711e66 8587 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
1da177e4 8588
63c3a66f
JP
8589 tpr->rx_jmb_prod_idx =
8590 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
66711e66 8591 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
1da177e4 8592
2d31ecaf
MC
8593 tg3_rings_reset(tp);
8594
1da177e4 8595 /* Initialize MAC address and backoff seed. */
986e0aeb 8596 __tg3_set_mac_addr(tp, 0);
1da177e4
LT
8597
8598 /* MTU + ethernet header + FCS + optional VLAN tag */
f7b493e0
MC
8599 tw32(MAC_RX_MTU_SIZE,
8600 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
1da177e4
LT
8601
8602 /* The slot time is changed by tg3_setup_phy if we
8603 * run at gigabit with half duplex.
8604 */
f2096f94
MC
8605 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8606 (6 << TX_LENGTHS_IPG_SHIFT) |
8607 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8608
8609 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8610 val |= tr32(MAC_TX_LENGTHS) &
8611 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8612 TX_LENGTHS_CNT_DWN_VAL_MSK);
8613
8614 tw32(MAC_TX_LENGTHS, val);
1da177e4
LT
8615
8616 /* Receive rules. */
8617 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8618 tw32(RCVLPC_CONFIG, 0x0181);
8619
8620 /* Calculate RDMAC_MODE setting early, we need it to determine
8621 * the RCVLPC_STATE_ENABLE mask.
8622 */
8623 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8624 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8625 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8626 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8627 RDMAC_MODE_LNGREAD_ENAB);
85e94ced 8628
deabaac8 8629 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
0339e4e3
MC
8630 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8631
57e6983c 8632 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
321d32a0
MC
8633 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8634 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
d30cdd28
MC
8635 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8636 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8637 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8638
c5908939
MC
8639 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8640 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
63c3a66f 8641 if (tg3_flag(tp, TSO_CAPABLE) &&
c13e3713 8642 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1da177e4
LT
8643 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8644 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
63c3a66f 8645 !tg3_flag(tp, IS_5788)) {
1da177e4
LT
8646 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8647 }
8648 }
8649
63c3a66f 8650 if (tg3_flag(tp, PCI_EXPRESS))
85e94ced
MC
8651 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8652
63c3a66f
JP
8653 if (tg3_flag(tp, HW_TSO_1) ||
8654 tg3_flag(tp, HW_TSO_2) ||
8655 tg3_flag(tp, HW_TSO_3))
027455ad
MC
8656 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8657
108a6c16 8658 if (tg3_flag(tp, 57765_PLUS) ||
e849cdc3 8659 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
027455ad
MC
8660 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8661 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
1da177e4 8662
f2096f94
MC
8663 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8664 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8665
41a8a7ee
MC
8666 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8667 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8668 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8669 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
63c3a66f 8670 tg3_flag(tp, 57765_PLUS)) {
41a8a7ee 8671 val = tr32(TG3_RDMA_RSRVCTRL_REG);
d78b59f5
MC
8672 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8673 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
b4495ed8
MC
8674 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8675 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8676 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8677 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8678 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8679 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
b75cc0e4 8680 }
41a8a7ee
MC
8681 tw32(TG3_RDMA_RSRVCTRL_REG,
8682 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8683 }
8684
d78b59f5
MC
8685 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8686 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
d309a46e
MC
8687 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8688 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8689 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8690 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8691 }
8692
1da177e4 8693 /* Receive/send statistics. */
63c3a66f 8694 if (tg3_flag(tp, 5750_PLUS)) {
1661394e
MC
8695 val = tr32(RCVLPC_STATS_ENABLE);
8696 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8697 tw32(RCVLPC_STATS_ENABLE, val);
8698 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
63c3a66f 8699 tg3_flag(tp, TSO_CAPABLE)) {
1da177e4
LT
8700 val = tr32(RCVLPC_STATS_ENABLE);
8701 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8702 tw32(RCVLPC_STATS_ENABLE, val);
8703 } else {
8704 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8705 }
8706 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8707 tw32(SNDDATAI_STATSENAB, 0xffffff);
8708 tw32(SNDDATAI_STATSCTRL,
8709 (SNDDATAI_SCTRL_ENABLE |
8710 SNDDATAI_SCTRL_FASTUPD));
8711
8712 /* Setup host coalescing engine. */
8713 tw32(HOSTCC_MODE, 0);
8714 for (i = 0; i < 2000; i++) {
8715 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8716 break;
8717 udelay(10);
8718 }
8719
d244c892 8720 __tg3_set_coalesce(tp, &tp->coal);
1da177e4 8721
63c3a66f 8722 if (!tg3_flag(tp, 5705_PLUS)) {
1da177e4
LT
8723 /* Status/statistics block address. See tg3_timer,
8724 * the tg3_periodic_fetch_stats call there, and
8725 * tg3_get_stats to see how this works for 5705/5750 chips.
8726 */
1da177e4
LT
8727 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8728 ((u64) tp->stats_mapping >> 32));
8729 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8730 ((u64) tp->stats_mapping & 0xffffffff));
8731 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
2d31ecaf 8732
1da177e4 8733 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
2d31ecaf
MC
8734
8735 /* Clear statistics and status block memory areas */
8736 for (i = NIC_SRAM_STATS_BLK;
8737 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8738 i += sizeof(u32)) {
8739 tg3_write_mem(tp, i, 0);
8740 udelay(40);
8741 }
1da177e4
LT
8742 }
8743
8744 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8745
8746 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8747 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
63c3a66f 8748 if (!tg3_flag(tp, 5705_PLUS))
1da177e4
LT
8749 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8750
f07e9af3
MC
8751 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8752 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
c94e3941
MC
8753 /* reset to prevent losing 1st rx packet intermittently */
8754 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8755 udelay(10);
8756 }
8757
3bda1258 8758 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9e975cc2
MC
8759 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8760 MAC_MODE_FHDE_ENABLE;
8761 if (tg3_flag(tp, ENABLE_APE))
8762 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
63c3a66f 8763 if (!tg3_flag(tp, 5705_PLUS) &&
f07e9af3 8764 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
e8f3f6ca
MC
8765 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8766 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1da177e4
LT
8767 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8768 udelay(40);
8769
314fba34 8770 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
63c3a66f 8771 * If TG3_FLAG_IS_NIC is zero, we should read the
314fba34
MC
8772 * register to preserve the GPIO settings for LOMs. The GPIOs,
8773 * whether used as inputs or outputs, are set by boot code after
8774 * reset.
8775 */
63c3a66f 8776 if (!tg3_flag(tp, IS_NIC)) {
314fba34
MC
8777 u32 gpio_mask;
8778
9d26e213
MC
8779 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8780 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8781 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
3e7d83bc
MC
8782
8783 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8784 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8785 GRC_LCLCTRL_GPIO_OUTPUT3;
8786
af36e6b6
MC
8787 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8788 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8789
aaf84465 8790 tp->grc_local_ctrl &= ~gpio_mask;
314fba34
MC
8791 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8792
8793 /* GPIO1 must be driven high for eeprom write protect */
63c3a66f 8794 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9d26e213
MC
8795 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8796 GRC_LCLCTRL_GPIO_OUTPUT1);
314fba34 8797 }
1da177e4
LT
8798 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8799 udelay(100);
8800
63c3a66f 8801 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
baf8a94a
MC
8802 val = tr32(MSGINT_MODE);
8803 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8804 tw32(MSGINT_MODE, val);
8805 }
8806
63c3a66f 8807 if (!tg3_flag(tp, 5705_PLUS)) {
1da177e4
LT
8808 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8809 udelay(40);
8810 }
8811
8812 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8813 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8814 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8815 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8816 WDMAC_MODE_LNGREAD_ENAB);
8817
c5908939
MC
8818 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8819 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
63c3a66f 8820 if (tg3_flag(tp, TSO_CAPABLE) &&
1da177e4
LT
8821 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8822 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8823 /* nothing */
8824 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
63c3a66f 8825 !tg3_flag(tp, IS_5788)) {
1da177e4
LT
8826 val |= WDMAC_MODE_RX_ACCEL;
8827 }
8828 }
8829
d9ab5ad1 8830 /* Enable host coalescing bug fix */
63c3a66f 8831 if (tg3_flag(tp, 5755_PLUS))
f51f3562 8832 val |= WDMAC_MODE_STATUS_TAG_FIX;
d9ab5ad1 8833
788a035e
MC
8834 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8835 val |= WDMAC_MODE_BURST_ALL_DATA;
8836
1da177e4
LT
8837 tw32_f(WDMAC_MODE, val);
8838 udelay(40);
8839
63c3a66f 8840 if (tg3_flag(tp, PCIX_MODE)) {
9974a356
MC
8841 u16 pcix_cmd;
8842
8843 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8844 &pcix_cmd);
1da177e4 8845 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9974a356
MC
8846 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8847 pcix_cmd |= PCI_X_CMD_READ_2K;
1da177e4 8848 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9974a356
MC
8849 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8850 pcix_cmd |= PCI_X_CMD_READ_2K;
1da177e4 8851 }
9974a356
MC
8852 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8853 pcix_cmd);
1da177e4
LT
8854 }
8855
8856 tw32_f(RDMAC_MODE, rdmac_mode);
8857 udelay(40);
8858
8859 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
63c3a66f 8860 if (!tg3_flag(tp, 5705_PLUS))
1da177e4 8861 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9936bcf6
MC
8862
8863 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8864 tw32(SNDDATAC_MODE,
8865 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8866 else
8867 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8868
1da177e4
LT
8869 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8870 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7cb32cf2 8871 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
63c3a66f 8872 if (tg3_flag(tp, LRG_PROD_RING_CAP))
7cb32cf2
MC
8873 val |= RCVDBDI_MODE_LRG_RING_SZ;
8874 tw32(RCVDBDI_MODE, val);
1da177e4 8875 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
63c3a66f
JP
8876 if (tg3_flag(tp, HW_TSO_1) ||
8877 tg3_flag(tp, HW_TSO_2) ||
8878 tg3_flag(tp, HW_TSO_3))
1da177e4 8879 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
baf8a94a 8880 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
63c3a66f 8881 if (tg3_flag(tp, ENABLE_TSS))
baf8a94a
MC
8882 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8883 tw32(SNDBDI_MODE, val);
1da177e4
LT
8884 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8885
8886 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8887 err = tg3_load_5701_a0_firmware_fix(tp);
8888 if (err)
8889 return err;
8890 }
8891
63c3a66f 8892 if (tg3_flag(tp, TSO_CAPABLE)) {
1da177e4
LT
8893 err = tg3_load_tso_firmware(tp);
8894 if (err)
8895 return err;
8896 }
1da177e4
LT
8897
8898 tp->tx_mode = TX_MODE_ENABLE;
f2096f94 8899
63c3a66f 8900 if (tg3_flag(tp, 5755_PLUS) ||
b1d05210
MC
8901 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8902 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
f2096f94
MC
8903
8904 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8905 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8906 tp->tx_mode &= ~val;
8907 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8908 }
8909
1da177e4
LT
8910 tw32_f(MAC_TX_MODE, tp->tx_mode);
8911 udelay(100);
8912
63c3a66f 8913 if (tg3_flag(tp, ENABLE_RSS)) {
9d53fa12 8914 int i = 0;
baf8a94a 8915 u32 reg = MAC_RSS_INDIR_TBL_0;
baf8a94a 8916
9d53fa12
MC
8917 if (tp->irq_cnt == 2) {
8918 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8919 tw32(reg, 0x0);
8920 reg += 4;
8921 }
8922 } else {
8923 u32 val;
baf8a94a 8924
9d53fa12
MC
8925 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8926 val = i % (tp->irq_cnt - 1);
8927 i++;
8928 for (; i % 8; i++) {
8929 val <<= 4;
8930 val |= (i % (tp->irq_cnt - 1));
8931 }
baf8a94a
MC
8932 tw32(reg, val);
8933 reg += 4;
8934 }
8935 }
8936
8937 /* Setup the "secret" hash key. */
8938 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8939 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8940 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8941 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8942 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8943 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8944 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8945 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8946 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8947 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8948 }
8949
1da177e4 8950 tp->rx_mode = RX_MODE_ENABLE;
63c3a66f 8951 if (tg3_flag(tp, 5755_PLUS))
af36e6b6
MC
8952 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8953
63c3a66f 8954 if (tg3_flag(tp, ENABLE_RSS))
baf8a94a
MC
8955 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8956 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8957 RX_MODE_RSS_IPV6_HASH_EN |
8958 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8959 RX_MODE_RSS_IPV4_HASH_EN |
8960 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8961
1da177e4
LT
8962 tw32_f(MAC_RX_MODE, tp->rx_mode);
8963 udelay(10);
8964
1da177e4
LT
8965 tw32(MAC_LED_CTRL, tp->led_ctrl);
8966
8967 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
f07e9af3 8968 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
1da177e4
LT
8969 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8970 udelay(10);
8971 }
8972 tw32_f(MAC_RX_MODE, tp->rx_mode);
8973 udelay(10);
8974
f07e9af3 8975 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
1da177e4 8976 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
f07e9af3 8977 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
1da177e4
LT
8978 /* Set drive transmission level to 1.2V */
8979 /* only if the signal pre-emphasis bit is not set */
8980 val = tr32(MAC_SERDES_CFG);
8981 val &= 0xfffff000;
8982 val |= 0x880;
8983 tw32(MAC_SERDES_CFG, val);
8984 }
8985 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8986 tw32(MAC_SERDES_CFG, 0x616000);
8987 }
8988
8989 /* Prevent chip from dropping frames when flow control
8990 * is enabled.
8991 */
666bc831
MC
8992 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8993 val = 1;
8994 else
8995 val = 2;
8996 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
1da177e4
LT
8997
8998 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
f07e9af3 8999 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
1da177e4 9000 /* Use hardware link auto-negotiation */
63c3a66f 9001 tg3_flag_set(tp, HW_AUTONEG);
1da177e4
LT
9002 }
9003
f07e9af3 9004 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6ff6f81d 9005 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
d4d2c558
MC
9006 u32 tmp;
9007
9008 tmp = tr32(SERDES_RX_CTRL);
9009 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9010 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9011 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9012 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9013 }
9014
63c3a66f 9015 if (!tg3_flag(tp, USE_PHYLIB)) {
80096068
MC
9016 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9017 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
dd477003
MC
9018 tp->link_config.speed = tp->link_config.orig_speed;
9019 tp->link_config.duplex = tp->link_config.orig_duplex;
9020 tp->link_config.autoneg = tp->link_config.orig_autoneg;
9021 }
1da177e4 9022
dd477003
MC
9023 err = tg3_setup_phy(tp, 0);
9024 if (err)
9025 return err;
1da177e4 9026
f07e9af3
MC
9027 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9028 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
dd477003
MC
9029 u32 tmp;
9030
9031 /* Clear CRC stats. */
9032 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9033 tg3_writephy(tp, MII_TG3_TEST1,
9034 tmp | MII_TG3_TEST1_CRC_EN);
f08aa1a8 9035 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
dd477003 9036 }
1da177e4
LT
9037 }
9038 }
9039
9040 __tg3_set_rx_mode(tp->dev);
9041
9042 /* Initialize receive rules. */
9043 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9044 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9045 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9046 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9047
63c3a66f 9048 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
1da177e4
LT
9049 limit = 8;
9050 else
9051 limit = 16;
63c3a66f 9052 if (tg3_flag(tp, ENABLE_ASF))
1da177e4
LT
9053 limit -= 4;
9054 switch (limit) {
9055 case 16:
9056 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9057 case 15:
9058 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9059 case 14:
9060 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9061 case 13:
9062 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9063 case 12:
9064 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9065 case 11:
9066 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9067 case 10:
9068 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9069 case 9:
9070 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9071 case 8:
9072 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9073 case 7:
9074 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9075 case 6:
9076 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9077 case 5:
9078 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9079 case 4:
9080 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9081 case 3:
9082 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9083 case 2:
9084 case 1:
9085
9086 default:
9087 break;
855e1111 9088 }
1da177e4 9089
63c3a66f 9090 if (tg3_flag(tp, ENABLE_APE))
9ce768ea
MC
9091 /* Write our heartbeat update interval to APE. */
9092 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9093 APE_HOST_HEARTBEAT_INT_DISABLE);
0d3031d9 9094
1da177e4
LT
9095 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9096
1da177e4
LT
9097 return 0;
9098}
9099
/* Called at device open time to get the chip ready for
 * packet processing. Invoked with tp->lock held.
 *
 * Switches the chip clocks, clears the memory window base so all
 * subsequent indirect accesses start from offset zero, then performs
 * the full hardware reset/bring-up.  Returns 0 or a negative errno
 * propagated from tg3_reset_hw().
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	/* Reset the memory window pointer before touching NIC memory. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
9111
/* Accumulate a 32-bit clear-on-read hardware counter REG into the
 * 64-bit software counter PSTAT.  The unsigned-wraparound test on
 * ->low detects carry into ->high.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
9118
/* Periodically (from tg3_timer) fold the chip's 32-bit MAC statistics
 * registers into the 64-bit counters in tp->hw_stats.  Skipped while
 * the link is down since the counters cannot advance.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	/* On 5717 and early 5719/5720 revs the discard counter register is
	 * unreliable; approximate it by counting mbuf low-watermark
	 * threshold events instead (one event per poll at most).
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			/* Ack the attention bit (write-1-to-clear). */
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
9173
0e6cf6a9
MC
/* Work around occasionally lost MSI interrupts: if a NAPI vector has
 * pending work but its rx/tx consumer indices have not moved since the
 * last timer tick (i.e. nobody serviced it), re-kick the interrupt by
 * writing the last status tag to the mailbox.  One grace tick is
 * allowed via chk_msi_cnt before forcing the kick.
 */
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					/* First stalled tick: wait one more
					 * before assuming the MSI was lost.
					 */
					tnapi->chk_msi_cnt++;
					return;
				}
				tw32_mailbox(tnapi->int_mbox,
					     tnapi->last_tag << 24);
			}
		}
		/* Progress was made (or we kicked); rearm the detector. */
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
9197
1da177e4
LT
/* Driver heartbeat timer.  Runs every tp->timer_offset jiffies and:
 *  - kicks possibly-lost MSIs on 5717/57765,
 *  - handles the racy non-tagged IRQ status protocol,
 *  - once per second: fetches stats, polls link state (MI interrupt,
 *    serdes polling, or parallel detect),
 *  - every two seconds: sends the ASF firmware heartbeat.
 * Always re-arms itself at the end, even when irq_sync forces an
 * immediate bail-out.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Interrupt processing is being synchronized; don't touch the
	 * hardware now, just reschedule ourselves.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		tg3_chk_missed_msi(tp);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		/* If the write DMA engine died, schedule a full reset;
		 * must drop the lock before scheduling the work.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tg3_flag_set(tp, RESTART_TIMER);
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the port mode to force the serdes
				 * link state machine to restart.
				 */
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
9325
4f125f42 9326static int tg3_request_irq(struct tg3 *tp, int irq_num)
fcfa0a32 9327{
7d12e780 9328 irq_handler_t fn;
fcfa0a32 9329 unsigned long flags;
4f125f42
MC
9330 char *name;
9331 struct tg3_napi *tnapi = &tp->napi[irq_num];
9332
9333 if (tp->irq_cnt == 1)
9334 name = tp->dev->name;
9335 else {
9336 name = &tnapi->irq_lbl[0];
9337 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9338 name[IFNAMSIZ-1] = 0;
9339 }
fcfa0a32 9340
63c3a66f 9341 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
fcfa0a32 9342 fn = tg3_msi;
63c3a66f 9343 if (tg3_flag(tp, 1SHOT_MSI))
fcfa0a32 9344 fn = tg3_msi_1shot;
ab392d2d 9345 flags = 0;
fcfa0a32
MC
9346 } else {
9347 fn = tg3_interrupt;
63c3a66f 9348 if (tg3_flag(tp, TAGGED_STATUS))
fcfa0a32 9349 fn = tg3_interrupt_tagged;
ab392d2d 9350 flags = IRQF_SHARED;
fcfa0a32 9351 }
4f125f42
MC
9352
9353 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
fcfa0a32
MC
9354}
9355
7938109f
MC
/* Probe whether interrupt delivery actually works: install a test ISR,
 * force an interrupt via HOSTCC_MODE_NOW, and poll up to ~50ms for
 * evidence it fired (non-zero mailbox or masked PCI INT).  Restores the
 * normal handler before returning.  Returns 0 on success, -EIO if no
 * interrupt was seen, or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force the coalescing engine to raise an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* Either the ISR updated the mailbox or it masked the
		 * PCI interrupt — both prove delivery worked.
		 */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/* Reinstall the real handler regardless of test outcome. */
	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
9429
9430/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9431 * successfully restored
9432 */
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.
 *
 * Runs tg3_test_interrupt() with SERR reporting suppressed (an MSI
 * ending in Master Abort would otherwise flag a system error).  On an
 * -EIO test failure, falls back to legacy INTx and resets the chip so
 * any aborted MSI cycle cannot leave it wedged.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
9490
9e9fd12d
MC
9491static int tg3_request_firmware(struct tg3 *tp)
9492{
9493 const __be32 *fw_data;
9494
9495 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
05dbe005
JP
9496 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9497 tp->fw_needed);
9e9fd12d
MC
9498 return -ENOENT;
9499 }
9500
9501 fw_data = (void *)tp->fw->data;
9502
9503 /* Firmware blob starts with version numbers, followed by
9504 * start address and _full_ length including BSS sections
9505 * (which must be longer than the actual data, of course
9506 */
9507
9508 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9509 if (tp->fw_len < (tp->fw->size - 12)) {
05dbe005
JP
9510 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9511 tp->fw_len, tp->fw_needed);
9e9fd12d
MC
9512 release_firmware(tp->fw);
9513 tp->fw = NULL;
9514 return -EINVAL;
9515 }
9516
9517 /* We no longer need firmware; we have it. */
9518 tp->fw_needed = NULL;
9519 return 0;
9520}
9521
679563f4
MC
/* Try to enable MSI-X with one vector per online CPU plus one for
 * link/misc events.  Falls back (returns false) on single-CPU systems
 * or when the PCI core cannot grant the vectors; accepts a reduced
 * vector count if offered.  On success records the vectors in the napi
 * array, sizes the rx (and possibly tx) queues, and sets the RSS/TSS
 * flags.  Returns true iff MSI-X is in use.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc, cpus = num_online_cpus();
	struct msix_entry msix_ent[tp->irq_max];

	if (cpus == 1)
		/* Just fallback to the simpler MSI mode. */
		return false;

	/*
	 * We want as many rx rings enabled as there are cpus.
	 * The first MSIX vector only deals with link interrupts, etc,
	 * so we add one to the number of vectors we are requesting.
	 */
	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* Fewer vectors available than requested; retry with the
		 * count the PCI core said it could grant.
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	netif_set_real_num_tx_queues(tp->dev, 1);
	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt > 1) {
		tg3_flag_set(tp, ENABLE_RSS);

		/* Only 5719/5720 support multiple tx queues (TSS). */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			tg3_flag_set(tp, ENABLE_TSS);
			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
		}
	}

	return true;
}
9576
07b0173c
MC
/* Select and configure the interrupt delivery mode: MSI-X if supported
 * and successfully enabled, else MSI, else legacy INTx.  Also programs
 * the MSGINT_MODE register for MSI modes and collapses to a single
 * queue/vector configuration when MSI-X is not in use.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		/* Single vector: one rx and one tx queue, legacy irq. */
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
9608
9609static void tg3_ints_fini(struct tg3 *tp)
9610{
63c3a66f 9611 if (tg3_flag(tp, USING_MSIX))
679563f4 9612 pci_disable_msix(tp->pdev);
63c3a66f 9613 else if (tg3_flag(tp, USING_MSI))
679563f4 9614 pci_disable_msi(tp->pdev);
63c3a66f
JP
9615 tg3_flag_clear(tp, USING_MSI);
9616 tg3_flag_clear(tp, USING_MSIX);
9617 tg3_flag_clear(tp, ENABLE_RSS);
9618 tg3_flag_clear(tp, ENABLE_TSS);
07b0173c
MC
9619}
9620
1da177e4
LT
9621static int tg3_open(struct net_device *dev)
9622{
9623 struct tg3 *tp = netdev_priv(dev);
4f125f42 9624 int i, err;
1da177e4 9625
9e9fd12d
MC
9626 if (tp->fw_needed) {
9627 err = tg3_request_firmware(tp);
9628 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9629 if (err)
9630 return err;
9631 } else if (err) {
05dbe005 9632 netdev_warn(tp->dev, "TSO capability disabled\n");
63c3a66f
JP
9633 tg3_flag_clear(tp, TSO_CAPABLE);
9634 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
05dbe005 9635 netdev_notice(tp->dev, "TSO capability restored\n");
63c3a66f 9636 tg3_flag_set(tp, TSO_CAPABLE);
9e9fd12d
MC
9637 }
9638 }
9639
c49a1561
MC
9640 netif_carrier_off(tp->dev);
9641
c866b7ea 9642 err = tg3_power_up(tp);
2f751b67 9643 if (err)
bc1c7567 9644 return err;
2f751b67
MC
9645
9646 tg3_full_lock(tp, 0);
bc1c7567 9647
1da177e4 9648 tg3_disable_ints(tp);
63c3a66f 9649 tg3_flag_clear(tp, INIT_COMPLETE);
1da177e4 9650
f47c11ee 9651 tg3_full_unlock(tp);
1da177e4 9652
679563f4
MC
9653 /*
9654 * Setup interrupts first so we know how
9655 * many NAPI resources to allocate
9656 */
9657 tg3_ints_init(tp);
9658
1da177e4
LT
9659 /* The placement of this call is tied
9660 * to the setup and use of Host TX descriptors.
9661 */
9662 err = tg3_alloc_consistent(tp);
9663 if (err)
679563f4 9664 goto err_out1;
88b06bc2 9665
66cfd1bd
MC
9666 tg3_napi_init(tp);
9667
fed97810 9668 tg3_napi_enable(tp);
1da177e4 9669
4f125f42
MC
9670 for (i = 0; i < tp->irq_cnt; i++) {
9671 struct tg3_napi *tnapi = &tp->napi[i];
9672 err = tg3_request_irq(tp, i);
9673 if (err) {
9674 for (i--; i >= 0; i--)
9675 free_irq(tnapi->irq_vec, tnapi);
9676 break;
9677 }
9678 }
1da177e4 9679
07b0173c 9680 if (err)
679563f4 9681 goto err_out2;
bea3348e 9682
f47c11ee 9683 tg3_full_lock(tp, 0);
1da177e4 9684
8e7a22e3 9685 err = tg3_init_hw(tp, 1);
1da177e4 9686 if (err) {
944d980e 9687 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
9688 tg3_free_rings(tp);
9689 } else {
0e6cf6a9
MC
9690 if (tg3_flag(tp, TAGGED_STATUS) &&
9691 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9692 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
fac9b83e
DM
9693 tp->timer_offset = HZ;
9694 else
9695 tp->timer_offset = HZ / 10;
9696
9697 BUG_ON(tp->timer_offset > HZ);
9698 tp->timer_counter = tp->timer_multiplier =
9699 (HZ / tp->timer_offset);
9700 tp->asf_counter = tp->asf_multiplier =
28fbef78 9701 ((HZ / tp->timer_offset) * 2);
1da177e4
LT
9702
9703 init_timer(&tp->timer);
9704 tp->timer.expires = jiffies + tp->timer_offset;
9705 tp->timer.data = (unsigned long) tp;
9706 tp->timer.function = tg3_timer;
1da177e4
LT
9707 }
9708
f47c11ee 9709 tg3_full_unlock(tp);
1da177e4 9710
07b0173c 9711 if (err)
679563f4 9712 goto err_out3;
1da177e4 9713
63c3a66f 9714 if (tg3_flag(tp, USING_MSI)) {
7938109f 9715 err = tg3_test_msi(tp);
fac9b83e 9716
7938109f 9717 if (err) {
f47c11ee 9718 tg3_full_lock(tp, 0);
944d980e 9719 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7938109f 9720 tg3_free_rings(tp);
f47c11ee 9721 tg3_full_unlock(tp);
7938109f 9722
679563f4 9723 goto err_out2;
7938109f 9724 }
fcfa0a32 9725
63c3a66f 9726 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
f6eb9b1f 9727 u32 val = tr32(PCIE_TRANSACTION_CFG);
fcfa0a32 9728
f6eb9b1f
MC
9729 tw32(PCIE_TRANSACTION_CFG,
9730 val | PCIE_TRANS_CFG_1SHOT_MSI);
fcfa0a32 9731 }
7938109f
MC
9732 }
9733
b02fd9e3
MC
9734 tg3_phy_start(tp);
9735
f47c11ee 9736 tg3_full_lock(tp, 0);
1da177e4 9737
7938109f 9738 add_timer(&tp->timer);
63c3a66f 9739 tg3_flag_set(tp, INIT_COMPLETE);
1da177e4
LT
9740 tg3_enable_ints(tp);
9741
f47c11ee 9742 tg3_full_unlock(tp);
1da177e4 9743
fe5f5787 9744 netif_tx_start_all_queues(dev);
1da177e4 9745
06c03c02
MB
9746 /*
9747 * Reset loopback feature if it was turned on while the device was down
9748 * make sure that it's installed properly now.
9749 */
9750 if (dev->features & NETIF_F_LOOPBACK)
9751 tg3_set_loopback(dev, dev->features);
9752
1da177e4 9753 return 0;
07b0173c 9754
679563f4 9755err_out3:
4f125f42
MC
9756 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9757 struct tg3_napi *tnapi = &tp->napi[i];
9758 free_irq(tnapi->irq_vec, tnapi);
9759 }
07b0173c 9760
679563f4 9761err_out2:
fed97810 9762 tg3_napi_disable(tp);
66cfd1bd 9763 tg3_napi_fini(tp);
07b0173c 9764 tg3_free_consistent(tp);
679563f4
MC
9765
9766err_out1:
9767 tg3_ints_fini(tp);
cd0d7228
MC
9768 tg3_frob_aux_power(tp, false);
9769 pci_set_power_state(tp->pdev, PCI_D3hot);
07b0173c 9770 return err;
1da177e4
LT
9771}
9772
511d2224
ED
9773static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9774 struct rtnl_link_stats64 *);
1da177e4
LT
9775static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9776
/* net_device_ops.ndo_stop handler: quiesce NAPI and any pending reset
 * work, stop the timer and PHY, halt the chip, free irqs and DMA
 * rings, snapshot the final statistics into *_prev (so they survive
 * the teardown), then power the device down.  Teardown order mirrors
 * tg3_open() in reverse.
 */
static int tg3_close(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	tg3_napi_disable(tp);
	/* Make sure no reset task runs concurrently with teardown. */
	cancel_work_sync(&tp->reset_task);

	netif_tx_stop_all_queues(dev);

	del_timer_sync(&tp->timer);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	/* Preserve cumulative stats across the down period; hw_stats is
	 * about to be freed with the consistent memory.
	 */
	tg3_get_stats64(tp->dev, &tp->net_stats_prev);

	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);

	tg3_power_down(tp);

	netif_carrier_off(tp->dev);

	return 0;
}
9823
511d2224 9824static inline u64 get_stat64(tg3_stat64_t *val)
816f8b86
SB
9825{
9826 return ((u64)val->high << 32) | ((u64)val->low);
9827}
9828
/* Return the cumulative receive CRC error count.
 *
 * On 5700/5701 copper devices the count is taken from the PHY's test
 * register and accumulated in tp->phy_crc_errors; all other devices
 * report the rx_fcs_errors counter from the hardware statistics block.
 */
static u64 calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			/* Enable the PHY CRC counter, then read it.
			 * NOTE(review): presumably reading the counter
			 * register also clears it — confirm against the
			 * PHY datasheet before relying on that.
			 */
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
9854
/* Add the live hardware counter for @member to the saved total from
 * before the last close/reset, producing the all-time value.
 */
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
			 get_stat64(&hw_stats->member)

/* Refresh tp->estats with the all-time extended (ethtool) statistics:
 * each field is the pre-reset total plus the current hardware counter.
 * Falls back to the saved totals when the statistics block is gone
 * (device closed).
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);

	return estats;
}
9948
/* net_device stats64 callback: build the standard interface statistics
 * by mapping hardware MAC counters onto rtnl_link_stats64 fields and
 * adding the totals saved across the last close/reset.  Falls back to
 * the saved totals when the statistics block is gone (device closed).
 */
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY rather than the MAC on some
	 * chips; calc_crc_errors() hides that distinction.
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	/* Software-counted drops (not part of the hardware block). */
	stats->rx_dropped = tp->rx_dropped;

	return stats;
}
10010
10011static inline u32 calc_crc(unsigned char *buf, int len)
10012{
10013 u32 reg;
10014 u32 tmp;
10015 int j, k;
10016
10017 reg = 0xffffffff;
10018
10019 for (j = 0; j < len; j++) {
10020 reg ^= buf[j];
10021
10022 for (k = 0; k < 8; k++) {
10023 tmp = reg & 0x01;
10024
10025 reg >>= 1;
10026
859a5887 10027 if (tmp)
1da177e4 10028 reg ^= 0xedb88320;
1da177e4
LT
10029 }
10030 }
10031
10032 return ~reg;
10033}
10034
10035static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
10036{
10037 /* accept or reject all multicast frames */
10038 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
10039 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
10040 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
10041 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
10042}
10043
/* Program the MAC receive filters (promiscuous, VLAN tag stripping and
 * the multicast hash) from dev->flags and the device's multicast list.
 * Caller must hold the full device lock.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s): hash each address
		 * into one of 128 filter bits spread over four 32-bit
		 * registers, using the top bits of the inverted CRC.
		 */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the RX mode register if something changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
10097
/* net_device rx-mode callback: apply the new filter settings under the
 * full device lock.  Nothing to do while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
10109
/* ethtool: size in bytes of the buffer tg3_get_regs() fills. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
10114
10115static void tg3_get_regs(struct net_device *dev,
10116 struct ethtool_regs *regs, void *_p)
10117{
1da177e4 10118 struct tg3 *tp = netdev_priv(dev);
1da177e4
LT
10119
10120 regs->version = 0;
10121
97bd8e49 10122 memset(_p, 0, TG3_REG_BLK_SIZE);
1da177e4 10123
80096068 10124 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
bc1c7567
MC
10125 return;
10126
f47c11ee 10127 tg3_full_lock(tp, 0);
1da177e4 10128
97bd8e49 10129 tg3_dump_legacy_regs(tp, (u32 *)_p);
1da177e4 10130
f47c11ee 10131 tg3_full_unlock(tp);
1da177e4
LT
10132}
10133
/* ethtool: size in bytes of the device NVRAM exposed via get_eeprom. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
10140
/* ethtool GEEPROM: copy @eeprom->len bytes of NVRAM starting at
 * @eeprom->offset into @data.  NVRAM is read in aligned 32-bit words,
 * so unaligned head and tail bytes are handled by reading the full
 * word and copying out only the requested part.  eeprom->len is
 * updated to the number of bytes actually copied; on error the partial
 * count is left in place and the error is returned.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	/* NVRAM is not accessible while the chip is in low power. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* Report how much was copied before the failure. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
10203
6aa20a22 10204static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
1da177e4
LT
10205
/* ethtool SEEPROM: write @eeprom->len bytes of @data into NVRAM at
 * @eeprom->offset.  NVRAM is written in aligned 32-bit words, so an
 * unaligned start or end is handled by reading the bordering words,
 * splicing the user data into a temporary buffer, and writing the
 * widened, aligned range back.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	/* NVRAM is not accessible while the chip is in low power. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary:
		 * keep the existing leading bytes of the first word.
		 */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary:
		 * keep the existing trailing bytes of the last word.
		 */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Splice user data between the preserved border words. */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
10264
/* ethtool GSET: report supported modes, current advertisement, link
 * speed/duplex and autoneg state.  Delegates entirely to phylib when
 * the PHY is driven through it; otherwise the answer is built from
 * tp->phy_flags and tp->link_config.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper devices support the 10/100 modes and TP port;
	 * serdes devices report fibre instead.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	/* Map the configured flow-control mode onto the standard
	 * Pause/Asym_Pause advertisement bit combinations.
	 */
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	/* Speed/duplex are only meaningful while the device is up. */
	if (netif_running(dev)) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_INVALID);
		cmd->duplex = DUPLEX_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
6aa20a22 10322
1da177e4
LT
/* ethtool SSET: validate and apply new link settings.  Delegates to
 * phylib when the PHY is driven through it.  For autoneg, every bit in
 * cmd->advertising is checked against the modes this device can
 * actually advertise before the request is accepted; for forced mode,
 * only speeds/duplexes the hardware supports are allowed.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Build the full set of bits this device may advertise... */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		/* ...reject any request containing bits outside it... */
		if (cmd->advertising & ~mask)
			return -EINVAL;

		/* ...then keep only the speed/duplex bits for storage. */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		/* Forced mode: serdes links only run 1000/full; copper
		 * can be forced to 10 or 100 only.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	/* Remember the requested configuration so it can be restored
	 * after power transitions.
	 */
	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
6aa20a22 10413
1da177e4
LT
10414static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10415{
10416 struct tg3 *tp = netdev_priv(dev);
6aa20a22 10417
1da177e4
LT
10418 strcpy(info->driver, DRV_MODULE_NAME);
10419 strcpy(info->version, DRV_MODULE_VERSION);
c4e6575c 10420 strcpy(info->fw_version, tp->fw_ver);
1da177e4
LT
10421 strcpy(info->bus_info, pci_name(tp->pdev));
10422}
6aa20a22 10423
1da177e4
LT
10424static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10425{
10426 struct tg3 *tp = netdev_priv(dev);
6aa20a22 10427
63c3a66f 10428 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
a85feb8c
GZ
10429 wol->supported = WAKE_MAGIC;
10430 else
10431 wol->supported = 0;
1da177e4 10432 wol->wolopts = 0;
63c3a66f 10433 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
1da177e4
LT
10434 wol->wolopts = WAKE_MAGIC;
10435 memset(&wol->sopass, 0, sizeof(wol->sopass));
10436}
6aa20a22 10437
1da177e4
LT
/* ethtool SWOL: enable or disable magic-packet Wake-on-LAN.  Rejects
 * any wake option other than WAKE_MAGIC, and rejects WAKE_MAGIC when
 * the NIC or the platform cannot wake the system.  The driver flag is
 * kept in sync with the device wakeup state under the lock.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);
	spin_unlock_bh(&tp->lock);

	return 0;
}
6aa20a22 10460
1da177e4
LT
/* ethtool: return the current netif message-level bitmask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
6aa20a22 10466
1da177e4
LT
/* ethtool: set the netif message-level bitmask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
6aa20a22 10472
1da177e4
LT
/* ethtool nway_reset: restart link autonegotiation.  Only valid on a
 * running, non-serdes interface.  With phylib the request is handed to
 * phy_start_aneg(); otherwise autoneg is restarted directly via BMCR,
 * but only if autoneg is currently enabled (or parallel detect is in
 * use) — forcing a restart on a forced-speed link is rejected.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is read twice and the first result
		 * discarded — presumably a dummy read so the second one
		 * returns fresh PHY state; confirm before simplifying.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
6aa20a22 10506
1da177e4
LT
10507static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10508{
10509 struct tg3 *tp = netdev_priv(dev);
6aa20a22 10510
2c49a44d 10511 ering->rx_max_pending = tp->rx_std_ring_mask;
1da177e4 10512 ering->rx_mini_max_pending = 0;
63c3a66f 10513 if (tg3_flag(tp, JUMBO_RING_ENABLE))
2c49a44d 10514 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
4f81c32b
MC
10515 else
10516 ering->rx_jumbo_max_pending = 0;
10517
10518 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
1da177e4
LT
10519
10520 ering->rx_pending = tp->rx_pending;
10521 ering->rx_mini_pending = 0;
63c3a66f 10522 if (tg3_flag(tp, JUMBO_RING_ENABLE))
4f81c32b
MC
10523 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10524 else
10525 ering->rx_jumbo_pending = 0;
10526
f3f3f27e 10527 ering->tx_pending = tp->napi[0].tx_pending;
1da177e4 10528}
6aa20a22 10529
1da177e4
LT
/* ethtool SRINGPARAM: validate and apply new ring sizes.  The TX ring
 * must leave headroom for a maximally-fragmented skb (three times that
 * on chips with the TSO bug).  If the device is running it is stopped,
 * reconfigured and restarted with the new sizes.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* The same TX ring size applies to every TX queue. */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
6aa20a22 10575
1da177e4
LT
10576static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10577{
10578 struct tg3 *tp = netdev_priv(dev);
6aa20a22 10579
63c3a66f 10580 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
8d018621 10581
e18ce346 10582 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
8d018621
MC
10583 epause->rx_pause = 1;
10584 else
10585 epause->rx_pause = 0;
10586
e18ce346 10587 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
8d018621
MC
10588 epause->tx_pause = 1;
10589 else
10590 epause->tx_pause = 0;
1da177e4 10591}
6aa20a22 10592
1da177e4
LT
/* ethtool SPAUSEPARAM: configure RX/TX flow control and pause
 * autonegotiation.
 *
 * phylib path: translate the rx/tx request into Pause/Asym_Pause
 * advertisement bits, update the PHY advertisement and, when autoneg
 * is active, renegotiate so the link partner learns the new settings.
 *
 * Legacy path: update the driver's flow control state and, if the
 * device is running, halt and restart the hardware to apply it.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* Asymmetric rx/tx settings need Asym_Pause support. */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Map rx/tx enables onto IEEE pause advertisement bits. */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not attached yet: stash the advertisement
			 * for when the link is brought up.
			 */
			tp->link_config.orig_advertising &=
				~(ADVERTISED_Pause |
				  ADVERTISED_Asym_Pause);
			tp->link_config.orig_advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		/* Apply by restarting the hardware if it is running. */
		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
6aa20a22 10694
de6f31eb 10695static int tg3_get_sset_count(struct net_device *dev, int sset)
1da177e4 10696{
b9f2c044
JG
10697 switch (sset) {
10698 case ETH_SS_TEST:
10699 return TG3_NUM_TEST;
10700 case ETH_SS_STATS:
10701 return TG3_NUM_STATS;
10702 default:
10703 return -EOPNOTSUPP;
10704 }
4cafd3f5
MC
10705}
10706
de6f31eb 10707static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
1da177e4
LT
10708{
10709 switch (stringset) {
10710 case ETH_SS_STATS:
10711 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10712 break;
4cafd3f5
MC
10713 case ETH_SS_TEST:
10714 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10715 break;
1da177e4
LT
10716 default:
10717 WARN_ON(1); /* we need a WARN() */
10718 break;
10719 }
10720}
10721
/* ethtool port-identify callback: blink the link/traffic LEDs so the
 * physical port can be located.  Returning 1 for ETHTOOL_ID_ACTIVE
 * asks the ethtool core to call back with ID_ON/ID_OFF once per
 * second; ID_INACTIVE restores the normal LED configuration.
 */
static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* Force every speed/traffic LED on. */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		/* Override the LEDs to the off state. */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore the saved LED control value. */
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
10756
/* ethtool: copy the refreshed extended statistics into the caller's
 * u64 array (same layout as struct tg3_ethtool_stats).
 */
static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
10763
/* Read the device's Vital Product Data block into a freshly allocated
 * buffer, returning it (and its length via @vpdlen) or NULL on any
 * failure.  On EEPROM-style NVRAM the directory is searched for an
 * extended-VPD entry; if none is found (or the NVRAM is another type)
 * the default VPD offset/length are used.  Non-EEPROM devices read
 * the VPD through PCI config space instead.  Caller frees the buffer.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Scan the NVRAM directory for an extended VPD entry. */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Entry found: length is in words, and the next
			 * directory word holds the data's address.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	/* Fall back to the fixed VPD location. */
	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		/* pci_read_vpd() may return short reads; retry up to
		 * three times, treating timeouts/interrupts as "no
		 * progress" rather than hard errors.
		 */
		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
10839
/* Expected NVRAM image sizes (bytes) for the various firmware formats. */
#define NVRAM_TEST_SIZE			0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE		0x20
#define NVRAM_SELFBOOT_DATA_SIZE	0x1c

/* ethtool self-test: verify NVRAM contents.
 *
 * Determines the NVRAM format from the magic word, reads the expected
 * number of bytes, and validates the format-specific checksum:
 *  - selfboot FW images: simple byte sum must be zero (rev 2 skips the MBA
 *    word in the sum);
 *  - selfboot HW images: per-byte even-parity check over the parity bits
 *    packed at offsets 0, 8, and 16/17;
 *  - legacy images: CRC over the bootstrap block (checksum at 0x10) and the
 *    manufacturing block (0x74..0xfb, checksum at 0xfc), then the VPD
 *    read-only section's RV checksum keyword, if present.
 *
 * Returns 0 on pass (or when the format is recognized but not checkable),
 * -EIO on a failed read or checksum, -ENOMEM on allocation failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* Pick the number of bytes to read based on the image format. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			/* Unknown selfboot format: nothing to verify. */
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		/* Valid images sum (mod 256) to zero. */
		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes. */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 each carry 7 parity bits. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				/* Byte 16 carries 6 parity bits, byte 17
				 * carries 8 more.
				 */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			/* Parity bit must make the population count even. */
			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	/* Finally verify the VPD block's checksum keyword, if any. */
	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* Byte sum over everything up to and including the
			 * checksum byte must be zero.
			 */
			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
11028
ca43007a
MC
11029#define TG3_SERDES_TIMEOUT_SEC 2
11030#define TG3_COPPER_TIMEOUT_SEC 6
11031
11032static int tg3_test_link(struct tg3 *tp)
11033{
11034 int i, max;
11035
11036 if (!netif_running(tp->dev))
11037 return -ENODEV;
11038
f07e9af3 11039 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
ca43007a
MC
11040 max = TG3_SERDES_TIMEOUT_SEC;
11041 else
11042 max = TG3_COPPER_TIMEOUT_SEC;
11043
11044 for (i = 0; i < max; i++) {
11045 if (netif_carrier_ok(tp->dev))
11046 return 0;
11047
11048 if (msleep_interruptible(1000))
11049 break;
11050 }
11051
11052 return -EIO;
11053}
11054
/* Only test the commonly used registers */
/* ethtool self-test: register read/write test.
 *
 * For each applicable table entry, writes all-zeros and then all-ones
 * (masked) to the register and verifies that the read-only bits keep their
 * saved value and the read/write bits take the written value.  The original
 * register content is restored in every case, including on failure.
 * Returns 0 on pass, -EIO naming the failing offset otherwise.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
/* Entry-applicability flags: which ASIC families the entry applies to. */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;	/* bits that must hold their saved value */
		u32 write_mask;	/* bits that must accept writes */
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Sentinel terminating the table. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this ASIC family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	/* Restore the register we were poking when the test failed. */
	tw32(offset, save_val);
	return -EIO;
}
11275
7942e1db
MC
11276static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11277{
f71e1309 11278 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7942e1db
MC
11279 int i;
11280 u32 j;
11281
e9edda69 11282 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
7942e1db
MC
11283 for (j = 0; j < len; j += 4) {
11284 u32 val;
11285
11286 tg3_write_mem(tp, offset + j, test_pattern[i]);
11287 tg3_read_mem(tp, offset + j, &val);
11288 if (val != test_pattern[i])
11289 return -EIO;
11290 }
11291 }
11292 return 0;
11293}
11294
/* ethtool self-test: on-chip memory test.
 *
 * Selects the {offset, len} region table matching the ASIC family and runs
 * tg3_do_mem_test() over each region until the 0xffffffff sentinel.
 * Returns 0 on pass or the first region's error code.
 */
static int tg3_test_memory(struct tg3 *tp)
{
	/* Per-ASIC-family tables of testable memory regions; each list is
	 * terminated by an entry with offset 0xffffffff.
	 */
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Most specific family check first, oldest hardware last. */
	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
11363
/* Parameters of the canned TSO loopback frame below. */
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

/* Template copied into the loopback test frame right after the two MAC
 * addresses (see tg3_run_loopback): a 2-byte ethertype followed by what the
 * test code treats as an IP header at ETH_HLEN (tot_len is patched in at
 * runtime) and a TCP header whose checksum field may be cleared.
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
9f40dead 11386
/* Send one test frame (or one TSO burst) through the currently configured
 * loopback path and verify it arrives intact.
 *
 * @pktsz:        total frame length to transmit.
 * @tso_loopback: when true, build a TSO frame from tg3_tso_header and
 *                expect the hardware to segment it into multiple packets.
 *
 * The frame payload is a counting byte pattern which is checked byte-for-
 * byte on the receive side.  Returns 0 on success, -ENOMEM/-EIO on failure.
 * Assumes the caller has already placed the MAC/PHY in loopback mode.
 */
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	/* With RSS/TSS enabled, rx/tx traffic lands on napi[1]. */
	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	/* Destination MAC = our own address; 8 zero bytes follow. */
	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		/* Number of segments the hardware should produce. */
		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			/* HW TSO computes the TCP checksum itself. */
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		/* Encode the header length per the TSO hardware flavor. */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;
	}

	/* Counting byte pattern that the rx side re-verifies. */
	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Kick the hardware and flush the mailbox write. */
	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices. */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
	dev_kfree_skb(skb);

	/* All descriptors consumed and all packets received? */
	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Walk every received packet and validate it. */
	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			/* Frame must land on the expected rx ring. */
			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_skb = tpr->rx_std_buffers[desc_idx].skb;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		/* val continues the counting pattern across segments. */
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_skb->data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
11586
/* Per-mode failure bits stored into the ethtool test result words. */
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)

/* ethtool self-test: loopback tests.
 *
 * Runs standard/jumbo/TSO frame loopbacks through up to three paths —
 * MAC loopback (data[0]), internal PHY loopback (data[1]) and, when
 * @do_extlpbk is set, external PHY loopback (data[2]) — OR-ing the
 * TG3_*_LOOPBACK_FAILED bits into the respective result word.
 * EEE is temporarily masked off in tp->phy_flags for the duration.
 * Returns 0 if everything passed, -EIO otherwise.
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;

	/* Disable EEE for the duration of the test; restored at 'done'. */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[0] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
			data[0] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		/* Internal PHY loopback first. */
		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[1] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[1] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
			data[1] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[2] |= TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[2] |= TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
				data[2] |= TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	/* NOTE(review): reads data[2] even when !do_extlpbk — relies on the
	 * caller having zeroed the result array beforehand.
	 */
	err = (data[0] | data[1] | data[2]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
11701
/* ethtool self-test entry point (ethtool -t).
 *
 * Result slots: data[0] NVRAM, data[1] link, data[2] registers,
 * data[3] memory, data[4..6] loopback, data[7] interrupt.
 * NVRAM and link tests run online; the remaining tests require
 * ETH_TEST_FL_OFFLINE and halt/reset the chip around them, restarting the
 * hardware and NAPI path afterwards if the interface was running.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* Chip must be powered up before it can be poked; mark everything
	 * failed if that is impossible.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	/* Skip the link test with external loopback - a cable is plugged
	 * into the loopback fixture, not a link partner.
	 */
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip completely before register/memory tests. */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, &data[4], doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* Interrupt test runs unlocked - it needs the IRQ path. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[7] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
11789
/* net_device ioctl handler for MII register access.
 *
 * When phylib drives the PHY, the request is forwarded wholesale to
 * phy_mii_ioctl().  Otherwise SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG are
 * serviced directly via tg3_readphy()/tg3_writephy() under tp->lock.
 * SerDes devices have no MII PHY and fall through to -EOPNOTSUPP.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
11846
15f9850d
DM
/* ethtool .get_coalesce: report the interrupt coalescing parameters
 * currently cached in tp->coal.  Always succeeds.
 */
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
11854
d244c892
MC
11855static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11856{
11857 struct tg3 *tp = netdev_priv(dev);
11858 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11859 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11860
63c3a66f 11861 if (!tg3_flag(tp, 5705_PLUS)) {
d244c892
MC
11862 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11863 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11864 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11865 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11866 }
11867
11868 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11869 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11870 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11871 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11872 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11873 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11874 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11875 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11876 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11877 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11878 return -EINVAL;
11879
11880 /* No rx interrupts will be generated if both are zero */
11881 if ((ec->rx_coalesce_usecs == 0) &&
11882 (ec->rx_max_coalesced_frames == 0))
11883 return -EINVAL;
11884
11885 /* No tx interrupts will be generated if both are zero */
11886 if ((ec->tx_coalesce_usecs == 0) &&
11887 (ec->tx_max_coalesced_frames == 0))
11888 return -EINVAL;
11889
11890 /* Only copy relevant parameters, ignore all others. */
11891 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11892 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11893 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11894 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11895 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11896 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11897 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11898 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11899 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11900
11901 if (netif_running(dev)) {
11902 tg3_full_lock(tp, 0);
11903 __tg3_set_coalesce(tp, &tp->coal);
11904 tg3_full_unlock(tp);
11905 }
11906 return 0;
11907}
11908
/* ethtool operations table registered for tg3 network devices. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings = tg3_get_settings,
	.set_settings = tg3_set_settings,
	.get_drvinfo = tg3_get_drvinfo,
	.get_regs_len = tg3_get_regs_len,
	.get_regs = tg3_get_regs,
	.get_wol = tg3_get_wol,
	.set_wol = tg3_set_wol,
	.get_msglevel = tg3_get_msglevel,
	.set_msglevel = tg3_set_msglevel,
	.nway_reset = tg3_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = tg3_get_eeprom_len,
	.get_eeprom = tg3_get_eeprom,
	.set_eeprom = tg3_set_eeprom,
	.get_ringparam = tg3_get_ringparam,
	.set_ringparam = tg3_set_ringparam,
	.get_pauseparam = tg3_get_pauseparam,
	.set_pauseparam = tg3_set_pauseparam,
	.self_test = tg3_self_test,
	.get_strings = tg3_get_strings,
	.set_phys_id = tg3_set_phys_id,
	.get_ethtool_stats = tg3_get_ethtool_stats,
	.get_coalesce = tg3_get_coalesce,
	.set_coalesce = tg3_set_coalesce,
	.get_sset_count = tg3_get_sset_count,
};
11936
/* Determine the size of a SEEPROM part by probing for address wrap.
 * Leaves tp->nvram_size at the EEPROM_CHIP_SIZE default when the magic
 * signature is absent or a read fails.
 */
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	/* Bail out unless word 0 carries one of the recognized magic
	 * signatures (plain, firmware-selfboot or hardware-selfboot).
	 */
	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
6aa20a22 11970
1da177e4
LT
/* Determine the NVRAM size, preferring the size word the firmware
 * stores at offset 0xf0; falls back to EEPROM probing for selfboot
 * images and to a 512KB default otherwise.
 */
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	/* No size word present: assume the largest supported part. */
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
12003
/* Decode NVRAM_CFG1 for the original (pre-5752) NVRAM interface and
 * record the flash vendor, page size and buffering mode in *tp.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* No flash interface: drop compat bypass so EEPROM
		 * accesses go through the normal state machine.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		/* Other pre-5752 chips: buffered Atmel part assumed. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
12054
a1b950d5
MC
12055static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12056{
12057 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12058 case FLASH_5752PAGE_SIZE_256:
12059 tp->nvram_pagesize = 256;
12060 break;
12061 case FLASH_5752PAGE_SIZE_512:
12062 tp->nvram_pagesize = 512;
12063 break;
12064 case FLASH_5752PAGE_SIZE_1K:
12065 tp->nvram_pagesize = 1024;
12066 break;
12067 case FLASH_5752PAGE_SIZE_2K:
12068 tp->nvram_pagesize = 2048;
12069 break;
12070 case FLASH_5752PAGE_SIZE_4K:
12071 tp->nvram_pagesize = 4096;
12072 break;
12073 case FLASH_5752PAGE_SIZE_264:
12074 tp->nvram_pagesize = 264;
12075 break;
12076 case FLASH_5752PAGE_SIZE_528:
12077 tp->nvram_pagesize = 528;
12078 break;
12079 }
12080}
12081
/* Decode NVRAM_CFG1 on 5752 chips: vendor, buffering and page size. */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
12122
/* Decode NVRAM_CFG1 on 5755 chips.  When the TPM protection bit is
 * set, part of the device is reserved and the usable size shrinks.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
12178
/* Decode NVRAM_CFG1 on 5787/5784/5785 chips. */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM parts: route accesses through the state machine. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
12216
/* Decode NVRAM_CFG1 on 5761 chips.  Protected parts report their
 * usable size through the NVRAM_ADDR_LOCKOUT register; otherwise it
 * is derived from the vendor/device encoding.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		/* Usable size is whatever the lockout register allows. */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
12291
b5d3772c
MC
12292static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12293{
12294 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f 12295 tg3_flag_set(tp, NVRAM_BUFFERED);
b5d3772c
MC
12296 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12297}
12298
/* Decode NVRAM_CFG1 on 57780/57765 chips.  An unrecognized vendor
 * encoding marks the device as having no usable NVRAM.
 */
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size is encoded in the specific Atmel device id. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size is encoded in the specific ST device id. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

/* Decode NVRAM_CFG1 on 5717/5719 chips.  An unrecognized vendor
 * encoding marks the device as having no usable NVRAM.
 */
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12449
/* Decode NVRAM_CFG1 on 5720 chips using the pin-strap vendor field.
 * An unrecognized encoding marks the device as having no usable NVRAM.
 */
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		/* HD strap means the larger AT24C512-style page size. */
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size is encoded in the specific Atmel device id. */
		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size is encoded in the specific ST device id. */
		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12561
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* One-time NVRAM interface setup: reset the SEEPROM state machine,
 * enable accesses, then dispatch to the ASIC-specific decode routine
 * and size the part.  5700/5701 fall through to plain EEPROM sizing.
 */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		/* The NVRAM arbitration lock must be held around all of
		 * the per-chip probing below.
		 */
		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		/* Probe the size if the per-chip routine did not set it. */
		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
12627
/* Write @len bytes at @offset through the slow SEEPROM interface,
 * one 32-bit word at a time, polling for completion of each write.
 * Offset and length are assumed dword aligned.  Returns 0 or -EBUSY
 * if a word never signals completion.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll up to ~1 second for the write to complete. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
12676
12677/* offset and length are dword aligned */
12678static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12679 u8 *buf)
12680{
12681 int ret = 0;
12682 u32 pagesize = tp->nvram_pagesize;
12683 u32 pagemask = pagesize - 1;
12684 u32 nvram_cmd;
12685 u8 *tmp;
12686
12687 tmp = kmalloc(pagesize, GFP_KERNEL);
12688 if (tmp == NULL)
12689 return -ENOMEM;
12690
12691 while (len) {
12692 int j;
e6af301b 12693 u32 phy_addr, page_off, size;
1da177e4
LT
12694
12695 phy_addr = offset & ~pagemask;
6aa20a22 12696
1da177e4 12697 for (j = 0; j < pagesize; j += 4) {
a9dc529d
MC
12698 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12699 (__be32 *) (tmp + j));
12700 if (ret)
1da177e4
LT
12701 break;
12702 }
12703 if (ret)
12704 break;
12705
c6cdf436 12706 page_off = offset & pagemask;
1da177e4
LT
12707 size = pagesize;
12708 if (len < size)
12709 size = len;
12710
12711 len -= size;
12712
12713 memcpy(tmp + page_off, buf, size);
12714
12715 offset = offset + (pagesize - page_off);
12716
e6af301b 12717 tg3_enable_nvram_access(tp);
1da177e4
LT
12718
12719 /*
12720 * Before we can erase the flash page, we need
12721 * to issue a special "write enable" command.
12722 */
12723 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12724
12725 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12726 break;
12727
12728 /* Erase the target page */
12729 tw32(NVRAM_ADDR, phy_addr);
12730
12731 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12732 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12733
c6cdf436 12734 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
1da177e4
LT
12735 break;
12736
12737 /* Issue another write enable to start the write. */
12738 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12739
12740 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12741 break;
12742
12743 for (j = 0; j < pagesize; j += 4) {
b9fc7dc5 12744 __be32 data;
1da177e4 12745
b9fc7dc5 12746 data = *((__be32 *) (tmp + j));
a9dc529d 12747
b9fc7dc5 12748 tw32(NVRAM_WRDATA, be32_to_cpu(data));
1da177e4
LT
12749
12750 tw32(NVRAM_ADDR, phy_addr + j);
12751
12752 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12753 NVRAM_CMD_WR;
12754
12755 if (j == 0)
12756 nvram_cmd |= NVRAM_CMD_FIRST;
12757 else if (j == (pagesize - 4))
12758 nvram_cmd |= NVRAM_CMD_LAST;
12759
12760 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12761 break;
12762 }
12763 if (ret)
12764 break;
12765 }
12766
12767 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12768 tg3_nvram_exec_cmd(tp, nvram_cmd);
12769
12770 kfree(tmp);
12771
12772 return ret;
12773}
12774
12775/* offset and length are dword aligned */
12776static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12777 u8 *buf)
12778{
12779 int i, ret = 0;
12780
12781 for (i = 0; i < len; i += 4, offset += 4) {
b9fc7dc5
AV
12782 u32 page_off, phy_addr, nvram_cmd;
12783 __be32 data;
1da177e4
LT
12784
12785 memcpy(&data, buf + i, 4);
b9fc7dc5 12786 tw32(NVRAM_WRDATA, be32_to_cpu(data));
1da177e4 12787
c6cdf436 12788 page_off = offset % tp->nvram_pagesize;
1da177e4 12789
1820180b 12790 phy_addr = tg3_nvram_phys_addr(tp, offset);
1da177e4
LT
12791
12792 tw32(NVRAM_ADDR, phy_addr);
12793
12794 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12795
c6cdf436 12796 if (page_off == 0 || i == 0)
1da177e4 12797 nvram_cmd |= NVRAM_CMD_FIRST;
f6d9a256 12798 if (page_off == (tp->nvram_pagesize - 4))
1da177e4
LT
12799 nvram_cmd |= NVRAM_CMD_LAST;
12800
12801 if (i == (len - 4))
12802 nvram_cmd |= NVRAM_CMD_LAST;
12803
321d32a0 12804 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
63c3a66f 12805 !tg3_flag(tp, 5755_PLUS) &&
4c987487
MC
12806 (tp->nvram_jedecnum == JEDEC_ST) &&
12807 (nvram_cmd & NVRAM_CMD_FIRST)) {
1da177e4
LT
12808
12809 if ((ret = tg3_nvram_exec_cmd(tp,
12810 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12811 NVRAM_CMD_DONE)))
12812
12813 break;
12814 }
63c3a66f 12815 if (!tg3_flag(tp, FLASH)) {
1da177e4
LT
12816 /* We always do complete word writes to eeprom. */
12817 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12818 }
12819
12820 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12821 break;
12822 }
12823 return ret;
12824}
12825
12826/* offset and length are dword aligned */
12827static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12828{
12829 int ret;
12830
63c3a66f 12831 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
314fba34
MC
12832 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12833 ~GRC_LCLCTRL_GPIO_OUTPUT1);
1da177e4
LT
12834 udelay(40);
12835 }
12836
63c3a66f 12837 if (!tg3_flag(tp, NVRAM)) {
1da177e4 12838 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
859a5887 12839 } else {
1da177e4
LT
12840 u32 grc_mode;
12841
ec41c7df
MC
12842 ret = tg3_nvram_lock(tp);
12843 if (ret)
12844 return ret;
1da177e4 12845
e6af301b 12846 tg3_enable_nvram_access(tp);
63c3a66f 12847 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
1da177e4 12848 tw32(NVRAM_WRITE1, 0x406);
1da177e4
LT
12849
12850 grc_mode = tr32(GRC_MODE);
12851 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12852
63c3a66f 12853 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
1da177e4
LT
12854 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12855 buf);
859a5887 12856 } else {
1da177e4
LT
12857 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12858 buf);
12859 }
12860
12861 grc_mode = tr32(GRC_MODE);
12862 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12863
e6af301b 12864 tg3_disable_nvram_access(tp);
1da177e4
LT
12865 tg3_nvram_unlock(tp);
12866 }
12867
63c3a66f 12868 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
314fba34 12869 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1da177e4
LT
12870 udelay(40);
12871 }
12872
12873 return ret;
12874}
12875
/* Maps a PCI (subsystem vendor, subsystem device) pair to the PHY id
 * expected on that board.  Used by tg3_lookup_by_subsys() as a
 * fallback when neither the PHY id registers nor the EEPROM yield a
 * usable id; phy_id == 0 denotes a serdes (no copper PHY) board.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
12880
/* Hard-coded board table: PCI subsystem ids -> expected PHY id.
 * Entries with a phy_id of 0 are fiber/serdes boards with no copper
 * PHY.  Consulted only when the EEPROM carries no valid signature.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
12944
24daf2b0 12945static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
1da177e4
LT
12946{
12947 int i;
12948
12949 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12950 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12951 tp->pdev->subsystem_vendor) &&
12952 (subsys_id_to_phy_id[i].subsys_devid ==
12953 tp->pdev->subsystem_device))
12954 return &subsys_id_to_phy_id[i];
12955 }
12956 return NULL;
12957}
12958
/* Read the NIC's power-on configuration out of on-chip SRAM and
 * translate it into driver state: PHY id, serdes vs copper, LED mode,
 * EEPROM write protect, WOL capability/enable, ASF/APE enablement and
 * assorted chip workaround flags.
 * NOTE(review): the SRAM config block is presumably populated from
 * EEPROM by the bootcode -- confirm against firmware docs.
 * 5906 parts have no SRAM config block and are handled via the VCPU
 * shadow config register instead.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	/* 5906: no SRAM config block; use VCPU shadow registers. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 exists only on newer chips with a sane version
		 * field (0 < ver < 0x100).
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Reassemble the PHY id from the two SRAM halves. */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Dell 5700/5701 boards always use PHY_2 LED mode. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* Arima boards misreport write protect. */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		/* Serdes boards are only WOL capable if config says so. */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	/* Publish the final WOL state to the PM core. */
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
13158
b2a5c19c
MC
13159static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13160{
13161 int i;
13162 u32 val;
13163
13164 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13165 tw32(OTP_CTRL, cmd);
13166
13167 /* Wait for up to 1 ms for command to execute. */
13168 for (i = 0; i < 100; i++) {
13169 val = tr32(OTP_STATUS);
13170 if (val & OTP_STATUS_CMD_DONE)
13171 break;
13172 udelay(10);
13173 }
13174
13175 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13176}
13177
13178/* Read the gphy configuration from the OTP region of the chip. The gphy
13179 * configuration is a 32-bit value that straddles the alignment boundary.
13180 * We do two 32-bit reads and then shift and merge the results.
13181 */
13182static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13183{
13184 u32 bhalf_otp, thalf_otp;
13185
13186 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13187
13188 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13189 return 0;
13190
13191 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13192
13193 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13194 return 0;
13195
13196 thalf_otp = tr32(OTP_READ_DATA);
13197
13198 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13199
13200 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13201 return 0;
13202
13203 bhalf_otp = tr32(OTP_READ_DATA);
13204
13205 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13206}
13207
e256f8a3
MC
13208static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13209{
13210 u32 adv = ADVERTISED_Autoneg |
13211 ADVERTISED_Pause;
13212
13213 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13214 adv |= ADVERTISED_1000baseT_Half |
13215 ADVERTISED_1000baseT_Full;
13216
13217 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13218 adv |= ADVERTISED_100baseT_Half |
13219 ADVERTISED_100baseT_Full |
13220 ADVERTISED_10baseT_Half |
13221 ADVERTISED_10baseT_Full |
13222 ADVERTISED_TP;
13223 else
13224 adv |= ADVERTISED_FIBRE;
13225
13226 tp->link_config.advertising = adv;
13227 tp->link_config.speed = SPEED_INVALID;
13228 tp->link_config.duplex = DUPLEX_INVALID;
13229 tp->link_config.autoneg = AUTONEG_ENABLE;
13230 tp->link_config.active_speed = SPEED_INVALID;
13231 tp->link_config.active_duplex = DUPLEX_INVALID;
13232 tp->link_config.orig_speed = SPEED_INVALID;
13233 tp->link_config.orig_duplex = DUPLEX_INVALID;
13234 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13235}
13236
/* Identify the PHY attached to this NIC and set up initial link
 * configuration.  Resolution order for the PHY id: hardware PHYSID
 * registers (skipped when ASF/APE firmware owns the PHY), then the id
 * already read from EEPROM by tg3_get_eeprom_hw_cfg(), then the
 * hard-coded subsystem-id board table.  Returns 0 or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* Hand everything off to phylib when it manages this device. */
	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* phy_id == 0 in the table means serdes board. */
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Copper PHYs on these chip revs are EEE capable. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, mask;

		/* BMSR is read twice; link status latches low, so the
		 * second read reflects the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		/* If the PHY is not advertising all copper modes,
		 * reprogram it and restart autonegotiation.
		 */
		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* NOTE(review): DSP init is deliberately run twice for
		 * the 5401 -- presumably a hardware quirk; confirm.
		 */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
13350
/* Parse the PCI VPD block: extract the board part number into
 * tp->board_part_number and, on Dell (mfr id "1028") boards, the
 * vendor-specific firmware version into tp->fw_ver.  Falls back to a
 * device-id based part-number table when VPD is absent or unusable.
 */
static void __devinit tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i = 0;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	/* Locate the read-only VPD resource and bound-check it. */
	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	/* Dell boards (mfr id "1028") carry a firmware version in the
	 * VENDOR0 keyword; copy it into fw_ver before the part number.
	 */
	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	/* VPD had no part number: synthesize one from the device id. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
13459
9c8a620e
MC
13460static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13461{
13462 u32 val;
13463
e4f34110 13464 if (tg3_nvram_read(tp, offset, &val) ||
9c8a620e 13465 (val & 0xfc000000) != 0x0c000000 ||
e4f34110 13466 tg3_nvram_read(tp, offset + 4, &val) ||
9c8a620e
MC
13467 val != 0)
13468 return 0;
13469
13470 return 1;
13471}
13472
/* Append the bootcode version to tp->fw_ver.  Newer images embed a
 * 16-byte version string (copied verbatim); older images store a
 * packed major/minor pair which is formatted as "vM.mm".
 */
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* New-style image: 0x0c000000 signature followed by zero word. */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Need room for the full 16-byte version string. */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
13524
a6f6cb1c
MC
13525static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13526{
13527 u32 val, major, minor;
13528
13529 /* Use native endian representation */
13530 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13531 return;
13532
13533 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13534 TG3_NVM_HWSB_CFG1_MAJSFT;
13535 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13536 TG3_NVM_HWSB_CFG1_MINSFT;
13537
13538 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13539}
13540
/* Append the selfboot firmware version (" vM.mm" plus an optional
 * build letter) to tp->fw_ver.  The per-revision EDH word offset is
 * selected from the format/revision bits in @val.
 */
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* Each selfboot revision keeps the EDH word elsewhere. */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity bounds: minor is 2 digits, build maps to 'a'..'z'. */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
13595
/* Append the ASF management firmware version string to tp->fw_ver.
 * Walks the NVRAM directory for the ASFINI entry, validates the image
 * and copies up to 16 bytes of its version string, truncating at
 * TG3_VER_SIZE.
 */
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	/* Scan the NVRAM directory for an ASFINI entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 parts use a fixed image base address. */
	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	/* Copy up to 16 version bytes, clipping at the buffer end. */
	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
13647
7fd76445
MC
/* Append the APE (DASH/NCSI) firmware version to tp->fw_ver.
 *
 * Only applies when both the APE and ASF features are enabled.  The APE
 * shared-memory segment signature and firmware-ready status are checked
 * before trusting the version registers; on any mismatch the function
 * returns without touching tp->fw_ver.
 */
static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
		return;

	/* Verify the APE shared-memory segment is present and valid. */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	/* Don't report a version until the APE firmware says it is ready. */
	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	/* The feature register distinguishes NCSI firmware from DASH;
	 * remember NCSI capability in the device flags as a side effect.
	 */
	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
		tg3_flag_set(tp, APE_HAS_NCSI);
		fwtype = "NCSI";
	} else {
		fwtype = "DASH";
	}

	vlen = strlen(tp->fw_ver);

	/* Append " <type> vM.m.r.b" after whatever version text is already
	 * present, bounded to the remaining space in the buffer.
	 */
	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
13683
acd9c119
MC
/* Populate tp->fw_ver with the device firmware version string.
 *
 * Dispatches on the NVRAM magic value at offset 0 to the appropriate
 * format-specific reader (bootcode, self-boot, or hardware self-boot).
 * If VPD probing already filled in a version (fw_ver non-empty on entry),
 * the management-firmware readers are skipped so the VPD text is not
 * overwritten.  The buffer is always NUL-terminated on the paths that
 * reach 'done'.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	/* Non-empty on entry means tg3_read_vpd() already supplied text. */
	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	/* No NVRAM at all: the device runs self-boot firmware; tag it. */
	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	/* Select the version reader by NVRAM image format. */
	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);
	else
		return;

	/* VPD already provided the user-visible version; just terminate. */
	if (vpd_vers)
		goto done;

	/* Append management-firmware (DASH/NCSI or ASF) version info. */
	if (tg3_flag(tp, ENABLE_APE)) {
		if (tg3_flag(tp, ENABLE_ASF))
			tg3_read_dash_ver(tp);
	} else if (tg3_flag(tp, ENABLE_ASF)) {
		tg3_read_mgmtfw_ver(tp);
	}

done:
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
13722
7544b097
MC
13723static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13724
7cb32cf2
MC
13725static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13726{
63c3a66f 13727 if (tg3_flag(tp, LRG_PROD_RING_CAP))
de9f5230 13728 return TG3_RX_RET_MAX_SIZE_5717;
63c3a66f 13729 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
de9f5230 13730 return TG3_RX_RET_MAX_SIZE_5700;
7cb32cf2 13731 else
de9f5230 13732 return TG3_RX_RET_MAX_SIZE_5705;
7cb32cf2
MC
13733}
13734
4143470c 13735static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
895950c2
JP
13736 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13737 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13738 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13739 { },
13740};
13741
1da177e4
LT
13742static int __devinit tg3_get_invariants(struct tg3 *tp)
13743{
1da177e4 13744 u32 misc_ctrl_reg;
1da177e4
LT
13745 u32 pci_state_reg, grc_misc_cfg;
13746 u32 val;
13747 u16 pci_cmd;
5e7dfd0f 13748 int err;
1da177e4 13749
1da177e4
LT
13750 /* Force memory write invalidate off. If we leave it on,
13751 * then on 5700_BX chips we have to enable a workaround.
13752 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13753 * to match the cacheline size. The Broadcom driver have this
13754 * workaround but turns MWI off all the times so never uses
13755 * it. This seems to suggest that the workaround is insufficient.
13756 */
13757 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13758 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13759 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13760
16821285
MC
13761 /* Important! -- Make sure register accesses are byteswapped
13762 * correctly. Also, for those chips that require it, make
13763 * sure that indirect register accesses are enabled before
13764 * the first operation.
1da177e4
LT
13765 */
13766 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13767 &misc_ctrl_reg);
16821285
MC
13768 tp->misc_host_ctrl |= (misc_ctrl_reg &
13769 MISC_HOST_CTRL_CHIPREV);
13770 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13771 tp->misc_host_ctrl);
1da177e4
LT
13772
13773 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13774 MISC_HOST_CTRL_CHIPREV_SHIFT);
795d01c5
MC
13775 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13776 u32 prod_id_asic_rev;
13777
5001e2f6
MC
13778 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13779 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
d78b59f5
MC
13780 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13781 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
f6eb9b1f
MC
13782 pci_read_config_dword(tp->pdev,
13783 TG3PCI_GEN2_PRODID_ASICREV,
13784 &prod_id_asic_rev);
b703df6f
MC
13785 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13786 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13787 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13788 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13789 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13790 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13791 pci_read_config_dword(tp->pdev,
13792 TG3PCI_GEN15_PRODID_ASICREV,
13793 &prod_id_asic_rev);
f6eb9b1f
MC
13794 else
13795 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13796 &prod_id_asic_rev);
13797
321d32a0 13798 tp->pci_chip_rev_id = prod_id_asic_rev;
795d01c5 13799 }
1da177e4 13800
ff645bec
MC
13801 /* Wrong chip ID in 5752 A0. This code can be removed later
13802 * as A0 is not in production.
13803 */
13804 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13805 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13806
6892914f
MC
13807 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13808 * we need to disable memory and use config. cycles
13809 * only to access all registers. The 5702/03 chips
13810 * can mistakenly decode the special cycles from the
13811 * ICH chipsets as memory write cycles, causing corruption
13812 * of register and memory space. Only certain ICH bridges
13813 * will drive special cycles with non-zero data during the
13814 * address phase which can fall within the 5703's address
13815 * range. This is not an ICH bug as the PCI spec allows
13816 * non-zero address during special cycles. However, only
13817 * these ICH bridges are known to drive non-zero addresses
13818 * during special cycles.
13819 *
13820 * Since special cycles do not cross PCI bridges, we only
13821 * enable this workaround if the 5703 is on the secondary
13822 * bus of these ICH bridges.
13823 */
13824 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13825 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13826 static struct tg3_dev_id {
13827 u32 vendor;
13828 u32 device;
13829 u32 rev;
13830 } ich_chipsets[] = {
13831 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13832 PCI_ANY_ID },
13833 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13834 PCI_ANY_ID },
13835 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13836 0xa },
13837 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13838 PCI_ANY_ID },
13839 { },
13840 };
13841 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13842 struct pci_dev *bridge = NULL;
13843
13844 while (pci_id->vendor != 0) {
13845 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13846 bridge);
13847 if (!bridge) {
13848 pci_id++;
13849 continue;
13850 }
13851 if (pci_id->rev != PCI_ANY_ID) {
44c10138 13852 if (bridge->revision > pci_id->rev)
6892914f
MC
13853 continue;
13854 }
13855 if (bridge->subordinate &&
13856 (bridge->subordinate->number ==
13857 tp->pdev->bus->number)) {
63c3a66f 13858 tg3_flag_set(tp, ICH_WORKAROUND);
6892914f
MC
13859 pci_dev_put(bridge);
13860 break;
13861 }
13862 }
13863 }
13864
6ff6f81d 13865 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
41588ba1
MC
13866 static struct tg3_dev_id {
13867 u32 vendor;
13868 u32 device;
13869 } bridge_chipsets[] = {
13870 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13871 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13872 { },
13873 };
13874 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13875 struct pci_dev *bridge = NULL;
13876
13877 while (pci_id->vendor != 0) {
13878 bridge = pci_get_device(pci_id->vendor,
13879 pci_id->device,
13880 bridge);
13881 if (!bridge) {
13882 pci_id++;
13883 continue;
13884 }
13885 if (bridge->subordinate &&
13886 (bridge->subordinate->number <=
13887 tp->pdev->bus->number) &&
13888 (bridge->subordinate->subordinate >=
13889 tp->pdev->bus->number)) {
63c3a66f 13890 tg3_flag_set(tp, 5701_DMA_BUG);
41588ba1
MC
13891 pci_dev_put(bridge);
13892 break;
13893 }
13894 }
13895 }
13896
4a29cc2e
MC
13897 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13898 * DMA addresses > 40-bit. This bridge may have other additional
13899 * 57xx devices behind it in some 4-port NIC designs for example.
13900 * Any tg3 device found behind the bridge will also need the 40-bit
13901 * DMA workaround.
13902 */
a4e2b347
MC
13903 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13904 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
63c3a66f
JP
13905 tg3_flag_set(tp, 5780_CLASS);
13906 tg3_flag_set(tp, 40BIT_DMA_BUG);
4cf78e4f 13907 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
859a5887 13908 } else {
4a29cc2e
MC
13909 struct pci_dev *bridge = NULL;
13910
13911 do {
13912 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13913 PCI_DEVICE_ID_SERVERWORKS_EPB,
13914 bridge);
13915 if (bridge && bridge->subordinate &&
13916 (bridge->subordinate->number <=
13917 tp->pdev->bus->number) &&
13918 (bridge->subordinate->subordinate >=
13919 tp->pdev->bus->number)) {
63c3a66f 13920 tg3_flag_set(tp, 40BIT_DMA_BUG);
4a29cc2e
MC
13921 pci_dev_put(bridge);
13922 break;
13923 }
13924 } while (bridge);
13925 }
4cf78e4f 13926
f6eb9b1f 13927 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3a1e19d3 13928 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
7544b097
MC
13929 tp->pdev_peer = tg3_find_peer(tp);
13930
c885e824 13931 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
d78b59f5
MC
13932 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13933 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
63c3a66f 13934 tg3_flag_set(tp, 5717_PLUS);
0a58d668
MC
13935
13936 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
63c3a66f
JP
13937 tg3_flag(tp, 5717_PLUS))
13938 tg3_flag_set(tp, 57765_PLUS);
c885e824 13939
321d32a0
MC
13940 /* Intentionally exclude ASIC_REV_5906 */
13941 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d9ab5ad1 13942 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
d30cdd28 13943 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9936bcf6 13944 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
57e6983c 13945 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
f6eb9b1f 13946 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
63c3a66f
JP
13947 tg3_flag(tp, 57765_PLUS))
13948 tg3_flag_set(tp, 5755_PLUS);
321d32a0
MC
13949
13950 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13951 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
b5d3772c 13952 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
63c3a66f
JP
13953 tg3_flag(tp, 5755_PLUS) ||
13954 tg3_flag(tp, 5780_CLASS))
13955 tg3_flag_set(tp, 5750_PLUS);
6708e5cc 13956
6ff6f81d 13957 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
63c3a66f
JP
13958 tg3_flag(tp, 5750_PLUS))
13959 tg3_flag_set(tp, 5705_PLUS);
1b440c56 13960
507399f1 13961 /* Determine TSO capabilities */
a0512944 13962 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
4d163b75 13963 ; /* Do nothing. HW bug. */
63c3a66f
JP
13964 else if (tg3_flag(tp, 57765_PLUS))
13965 tg3_flag_set(tp, HW_TSO_3);
13966 else if (tg3_flag(tp, 5755_PLUS) ||
e849cdc3 13967 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
63c3a66f
JP
13968 tg3_flag_set(tp, HW_TSO_2);
13969 else if (tg3_flag(tp, 5750_PLUS)) {
13970 tg3_flag_set(tp, HW_TSO_1);
13971 tg3_flag_set(tp, TSO_BUG);
507399f1
MC
13972 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13973 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
63c3a66f 13974 tg3_flag_clear(tp, TSO_BUG);
507399f1
MC
13975 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13976 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13977 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
63c3a66f 13978 tg3_flag_set(tp, TSO_BUG);
507399f1
MC
13979 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13980 tp->fw_needed = FIRMWARE_TG3TSO5;
13981 else
13982 tp->fw_needed = FIRMWARE_TG3TSO;
13983 }
13984
dabc5c67 13985 /* Selectively allow TSO based on operating conditions */
6ff6f81d
MC
13986 if (tg3_flag(tp, HW_TSO_1) ||
13987 tg3_flag(tp, HW_TSO_2) ||
13988 tg3_flag(tp, HW_TSO_3) ||
dabc5c67
MC
13989 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13990 tg3_flag_set(tp, TSO_CAPABLE);
13991 else {
13992 tg3_flag_clear(tp, TSO_CAPABLE);
13993 tg3_flag_clear(tp, TSO_BUG);
13994 tp->fw_needed = NULL;
13995 }
13996
13997 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13998 tp->fw_needed = FIRMWARE_TG3;
13999
507399f1
MC
14000 tp->irq_max = 1;
14001
63c3a66f
JP
14002 if (tg3_flag(tp, 5750_PLUS)) {
14003 tg3_flag_set(tp, SUPPORT_MSI);
7544b097
MC
14004 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14005 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14006 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14007 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14008 tp->pdev_peer == tp->pdev))
63c3a66f 14009 tg3_flag_clear(tp, SUPPORT_MSI);
7544b097 14010
63c3a66f 14011 if (tg3_flag(tp, 5755_PLUS) ||
b5d3772c 14012 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
63c3a66f 14013 tg3_flag_set(tp, 1SHOT_MSI);
52c0fd83 14014 }
4f125f42 14015
63c3a66f
JP
14016 if (tg3_flag(tp, 57765_PLUS)) {
14017 tg3_flag_set(tp, SUPPORT_MSIX);
507399f1
MC
14018 tp->irq_max = TG3_IRQ_MAX_VECS;
14019 }
f6eb9b1f 14020 }
0e1406dd 14021
2ffcc981 14022 if (tg3_flag(tp, 5755_PLUS))
63c3a66f 14023 tg3_flag_set(tp, SHORT_DMA_BUG);
f6eb9b1f 14024
e31aa987
MC
14025 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14026 tg3_flag_set(tp, 4K_FIFO_LIMIT);
14027
63c3a66f
JP
14028 if (tg3_flag(tp, 5717_PLUS))
14029 tg3_flag_set(tp, LRG_PROD_RING_CAP);
de9f5230 14030
63c3a66f 14031 if (tg3_flag(tp, 57765_PLUS) &&
a0512944 14032 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
63c3a66f 14033 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
b703df6f 14034
63c3a66f
JP
14035 if (!tg3_flag(tp, 5705_PLUS) ||
14036 tg3_flag(tp, 5780_CLASS) ||
14037 tg3_flag(tp, USE_JUMBO_BDFLAG))
14038 tg3_flag_set(tp, JUMBO_CAPABLE);
0f893dc6 14039
52f4490c
MC
14040 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14041 &pci_state_reg);
14042
708ebb3a 14043 if (pci_is_pcie(tp->pdev)) {
5e7dfd0f
MC
14044 u16 lnkctl;
14045
63c3a66f 14046 tg3_flag_set(tp, PCI_EXPRESS);
5f5c51e3 14047
cf79003d 14048 tp->pcie_readrq = 4096;
d78b59f5
MC
14049 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14050 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
b4495ed8 14051 tp->pcie_readrq = 2048;
cf79003d
MC
14052
14053 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
5f5c51e3 14054
5e7dfd0f 14055 pci_read_config_word(tp->pdev,
708ebb3a 14056 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
5e7dfd0f
MC
14057 &lnkctl);
14058 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
7196cd6c
MC
14059 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14060 ASIC_REV_5906) {
63c3a66f 14061 tg3_flag_clear(tp, HW_TSO_2);
dabc5c67 14062 tg3_flag_clear(tp, TSO_CAPABLE);
7196cd6c 14063 }
5e7dfd0f 14064 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
321d32a0 14065 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9cf74ebb
MC
14066 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14067 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
63c3a66f 14068 tg3_flag_set(tp, CLKREQ_BUG);
614b0590 14069 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
63c3a66f 14070 tg3_flag_set(tp, L1PLLPD_EN);
c7835a77 14071 }
52f4490c 14072 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
708ebb3a
JM
14073 /* BCM5785 devices are effectively PCIe devices, and should
14074 * follow PCIe codepaths, but do not have a PCIe capabilities
14075 * section.
14076 */
63c3a66f
JP
14077 tg3_flag_set(tp, PCI_EXPRESS);
14078 } else if (!tg3_flag(tp, 5705_PLUS) ||
14079 tg3_flag(tp, 5780_CLASS)) {
52f4490c
MC
14080 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14081 if (!tp->pcix_cap) {
2445e461
MC
14082 dev_err(&tp->pdev->dev,
14083 "Cannot find PCI-X capability, aborting\n");
52f4490c
MC
14084 return -EIO;
14085 }
14086
14087 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
63c3a66f 14088 tg3_flag_set(tp, PCIX_MODE);
52f4490c 14089 }
1da177e4 14090
399de50b
MC
14091 /* If we have an AMD 762 or VIA K8T800 chipset, write
14092 * reordering to the mailbox registers done by the host
14093 * controller can cause major troubles. We read back from
14094 * every mailbox register write to force the writes to be
14095 * posted to the chip in order.
14096 */
4143470c 14097 if (pci_dev_present(tg3_write_reorder_chipsets) &&
63c3a66f
JP
14098 !tg3_flag(tp, PCI_EXPRESS))
14099 tg3_flag_set(tp, MBOX_WRITE_REORDER);
399de50b 14100
69fc4053
MC
14101 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14102 &tp->pci_cacheline_sz);
14103 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14104 &tp->pci_lat_timer);
1da177e4
LT
14105 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14106 tp->pci_lat_timer < 64) {
14107 tp->pci_lat_timer = 64;
69fc4053
MC
14108 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14109 tp->pci_lat_timer);
1da177e4
LT
14110 }
14111
16821285
MC
14112 /* Important! -- It is critical that the PCI-X hw workaround
14113 * situation is decided before the first MMIO register access.
14114 */
52f4490c
MC
14115 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14116 /* 5700 BX chips need to have their TX producer index
14117 * mailboxes written twice to workaround a bug.
14118 */
63c3a66f 14119 tg3_flag_set(tp, TXD_MBOX_HWBUG);
1da177e4 14120
52f4490c 14121 /* If we are in PCI-X mode, enable register write workaround.
1da177e4
LT
14122 *
14123 * The workaround is to use indirect register accesses
14124 * for all chip writes not to mailbox registers.
14125 */
63c3a66f 14126 if (tg3_flag(tp, PCIX_MODE)) {
1da177e4 14127 u32 pm_reg;
1da177e4 14128
63c3a66f 14129 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
1da177e4
LT
14130
14131 /* The chip can have it's power management PCI config
14132 * space registers clobbered due to this bug.
14133 * So explicitly force the chip into D0 here.
14134 */
9974a356
MC
14135 pci_read_config_dword(tp->pdev,
14136 tp->pm_cap + PCI_PM_CTRL,
1da177e4
LT
14137 &pm_reg);
14138 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14139 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9974a356
MC
14140 pci_write_config_dword(tp->pdev,
14141 tp->pm_cap + PCI_PM_CTRL,
1da177e4
LT
14142 pm_reg);
14143
14144 /* Also, force SERR#/PERR# in PCI command. */
14145 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14146 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14147 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14148 }
14149 }
14150
1da177e4 14151 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
63c3a66f 14152 tg3_flag_set(tp, PCI_HIGH_SPEED);
1da177e4 14153 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
63c3a66f 14154 tg3_flag_set(tp, PCI_32BIT);
1da177e4
LT
14155
14156 /* Chip-specific fixup from Broadcom driver */
14157 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14158 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14159 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14160 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14161 }
14162
1ee582d8 14163 /* Default fast path register access methods */
20094930 14164 tp->read32 = tg3_read32;
1ee582d8 14165 tp->write32 = tg3_write32;
09ee929c 14166 tp->read32_mbox = tg3_read32;
20094930 14167 tp->write32_mbox = tg3_write32;
1ee582d8
MC
14168 tp->write32_tx_mbox = tg3_write32;
14169 tp->write32_rx_mbox = tg3_write32;
14170
14171 /* Various workaround register access methods */
63c3a66f 14172 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
1ee582d8 14173 tp->write32 = tg3_write_indirect_reg32;
98efd8a6 14174 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
63c3a66f 14175 (tg3_flag(tp, PCI_EXPRESS) &&
98efd8a6
MC
14176 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14177 /*
14178 * Back to back register writes can cause problems on these
14179 * chips, the workaround is to read back all reg writes
14180 * except those to mailbox regs.
14181 *
14182 * See tg3_write_indirect_reg32().
14183 */
1ee582d8 14184 tp->write32 = tg3_write_flush_reg32;
98efd8a6
MC
14185 }
14186
63c3a66f 14187 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
1ee582d8 14188 tp->write32_tx_mbox = tg3_write32_tx_mbox;
63c3a66f 14189 if (tg3_flag(tp, MBOX_WRITE_REORDER))
1ee582d8
MC
14190 tp->write32_rx_mbox = tg3_write_flush_reg32;
14191 }
20094930 14192
63c3a66f 14193 if (tg3_flag(tp, ICH_WORKAROUND)) {
6892914f
MC
14194 tp->read32 = tg3_read_indirect_reg32;
14195 tp->write32 = tg3_write_indirect_reg32;
14196 tp->read32_mbox = tg3_read_indirect_mbox;
14197 tp->write32_mbox = tg3_write_indirect_mbox;
14198 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14199 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14200
14201 iounmap(tp->regs);
22abe310 14202 tp->regs = NULL;
6892914f
MC
14203
14204 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14205 pci_cmd &= ~PCI_COMMAND_MEMORY;
14206 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14207 }
b5d3772c
MC
14208 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14209 tp->read32_mbox = tg3_read32_mbox_5906;
14210 tp->write32_mbox = tg3_write32_mbox_5906;
14211 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14212 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14213 }
6892914f 14214
bbadf503 14215 if (tp->write32 == tg3_write_indirect_reg32 ||
63c3a66f 14216 (tg3_flag(tp, PCIX_MODE) &&
bbadf503 14217 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
f49639e6 14218 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
63c3a66f 14219 tg3_flag_set(tp, SRAM_USE_CONFIG);
bbadf503 14220
16821285
MC
14221 /* The memory arbiter has to be enabled in order for SRAM accesses
14222 * to succeed. Normally on powerup the tg3 chip firmware will make
14223 * sure it is enabled, but other entities such as system netboot
14224 * code might disable it.
14225 */
14226 val = tr32(MEMARB_MODE);
14227 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14228
69f11c99
MC
14229 if (tg3_flag(tp, PCIX_MODE)) {
14230 pci_read_config_dword(tp->pdev,
14231 tp->pcix_cap + PCI_X_STATUS, &val);
14232 tp->pci_fn = val & 0x7;
14233 } else {
14234 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14235 }
14236
7d0c41ef 14237 /* Get eeprom hw config before calling tg3_set_power_state().
63c3a66f 14238 * In particular, the TG3_FLAG_IS_NIC flag must be
7d0c41ef
MC
14239 * determined before calling tg3_set_power_state() so that
14240 * we know whether or not to switch out of Vaux power.
14241 * When the flag is set, it means that GPIO1 is used for eeprom
14242 * write protect and also implies that it is a LOM where GPIOs
14243 * are not used to switch power.
6aa20a22 14244 */
7d0c41ef
MC
14245 tg3_get_eeprom_hw_cfg(tp);
14246
63c3a66f 14247 if (tg3_flag(tp, ENABLE_APE)) {
0d3031d9
MC
14248 /* Allow reads and writes to the
14249 * APE register and memory space.
14250 */
14251 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
f92d9dc1
MC
14252 PCISTATE_ALLOW_APE_SHMEM_WR |
14253 PCISTATE_ALLOW_APE_PSPACE_WR;
0d3031d9
MC
14254 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14255 pci_state_reg);
c9cab24e
MC
14256
14257 tg3_ape_lock_init(tp);
0d3031d9
MC
14258 }
14259
9936bcf6 14260 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
57e6983c 14261 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
321d32a0 14262 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
f6eb9b1f 14263 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
63c3a66f
JP
14264 tg3_flag(tp, 57765_PLUS))
14265 tg3_flag_set(tp, CPMU_PRESENT);
d30cdd28 14266
16821285
MC
14267 /* Set up tp->grc_local_ctrl before calling
14268 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14269 * will bring 5700's external PHY out of reset.
314fba34
MC
14270 * It is also used as eeprom write protect on LOMs.
14271 */
14272 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
6ff6f81d 14273 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
63c3a66f 14274 tg3_flag(tp, EEPROM_WRITE_PROT))
314fba34
MC
14275 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14276 GRC_LCLCTRL_GPIO_OUTPUT1);
3e7d83bc
MC
14277 /* Unused GPIO3 must be driven as output on 5752 because there
14278 * are no pull-up resistors on unused GPIO pins.
14279 */
14280 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14281 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
314fba34 14282
321d32a0 14283 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
cb4ed1fd
MC
14284 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14285 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
af36e6b6
MC
14286 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14287
8d519ab2
MC
14288 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14289 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
5f0c4a3c
MC
14290 /* Turn off the debug UART. */
14291 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
63c3a66f 14292 if (tg3_flag(tp, IS_NIC))
5f0c4a3c
MC
14293 /* Keep VMain power. */
14294 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14295 GRC_LCLCTRL_GPIO_OUTPUT0;
14296 }
14297
16821285
MC
14298 /* Switch out of Vaux if it is a NIC */
14299 tg3_pwrsrc_switch_to_vmain(tp);
1da177e4 14300
1da177e4
LT
14301 /* Derive initial jumbo mode from MTU assigned in
14302 * ether_setup() via the alloc_etherdev() call
14303 */
63c3a66f
JP
14304 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14305 tg3_flag_set(tp, JUMBO_RING_ENABLE);
1da177e4
LT
14306
14307 /* Determine WakeOnLan speed to use. */
14308 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14309 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14310 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14311 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
63c3a66f 14312 tg3_flag_clear(tp, WOL_SPEED_100MB);
1da177e4 14313 } else {
63c3a66f 14314 tg3_flag_set(tp, WOL_SPEED_100MB);
1da177e4
LT
14315 }
14316
7f97a4bd 14317 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
f07e9af3 14318 tp->phy_flags |= TG3_PHYFLG_IS_FET;
7f97a4bd 14319
1da177e4 14320 /* A few boards don't want Ethernet@WireSpeed phy feature */
6ff6f81d
MC
14321 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14322 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
1da177e4 14323 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
747e8f8b 14324 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
f07e9af3
MC
14325 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14326 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14327 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
1da177e4
LT
14328
14329 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14330 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
f07e9af3 14331 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
1da177e4 14332 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
f07e9af3 14333 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
1da177e4 14334
63c3a66f 14335 if (tg3_flag(tp, 5705_PLUS) &&
f07e9af3 14336 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
321d32a0 14337 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
f6eb9b1f 14338 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
63c3a66f 14339 !tg3_flag(tp, 57765_PLUS)) {
c424cb24 14340 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28 14341 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9936bcf6
MC
14342 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14343 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
d4011ada
MC
14344 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14345 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
f07e9af3 14346 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
c1d2a196 14347 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
f07e9af3 14348 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
321d32a0 14349 } else
f07e9af3 14350 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
c424cb24 14351 }
1da177e4 14352
b2a5c19c
MC
14353 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14354 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14355 tp->phy_otp = tg3_read_otp_phycfg(tp);
14356 if (tp->phy_otp == 0)
14357 tp->phy_otp = TG3_OTP_DEFAULT;
14358 }
14359
63c3a66f 14360 if (tg3_flag(tp, CPMU_PRESENT))
8ef21428
MC
14361 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14362 else
14363 tp->mi_mode = MAC_MI_MODE_BASE;
14364
1da177e4 14365 tp->coalesce_mode = 0;
1da177e4
LT
14366 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14367 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14368 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14369
4d958473
MC
14370 /* Set these bits to enable statistics workaround. */
14371 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14372 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14373 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14374 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14375 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14376 }
14377
321d32a0
MC
14378 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14379 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
63c3a66f 14380 tg3_flag_set(tp, USE_PHYLIB);
57e6983c 14381
158d7abd
MC
14382 err = tg3_mdio_init(tp);
14383 if (err)
14384 return err;
1da177e4
LT
14385
14386 /* Initialize data/descriptor byte/word swapping. */
14387 val = tr32(GRC_MODE);
f2096f94
MC
14388 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14389 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14390 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14391 GRC_MODE_B2HRX_ENABLE |
14392 GRC_MODE_HTX2B_ENABLE |
14393 GRC_MODE_HOST_STACKUP);
14394 else
14395 val &= GRC_MODE_HOST_STACKUP;
14396
1da177e4
LT
14397 tw32(GRC_MODE, val | tp->grc_mode);
14398
14399 tg3_switch_clocks(tp);
14400
14401 /* Clear this out for sanity. */
14402 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14403
14404 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14405 &pci_state_reg);
14406 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
63c3a66f 14407 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
1da177e4
LT
14408 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14409
14410 if (chiprevid == CHIPREV_ID_5701_A0 ||
14411 chiprevid == CHIPREV_ID_5701_B0 ||
14412 chiprevid == CHIPREV_ID_5701_B2 ||
14413 chiprevid == CHIPREV_ID_5701_B5) {
14414 void __iomem *sram_base;
14415
14416 /* Write some dummy words into the SRAM status block
14417 * area, see if it reads back correctly. If the return
14418 * value is bad, force enable the PCIX workaround.
14419 */
14420 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14421
14422 writel(0x00000000, sram_base);
14423 writel(0x00000000, sram_base + 4);
14424 writel(0xffffffff, sram_base + 4);
14425 if (readl(sram_base) != 0x00000000)
63c3a66f 14426 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
1da177e4
LT
14427 }
14428 }
14429
14430 udelay(50);
14431 tg3_nvram_init(tp);
14432
14433 grc_misc_cfg = tr32(GRC_MISC_CFG);
14434 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14435
1da177e4
LT
14436 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14437 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14438 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
63c3a66f 14439 tg3_flag_set(tp, IS_5788);
1da177e4 14440
63c3a66f 14441 if (!tg3_flag(tp, IS_5788) &&
6ff6f81d 14442 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
63c3a66f
JP
14443 tg3_flag_set(tp, TAGGED_STATUS);
14444 if (tg3_flag(tp, TAGGED_STATUS)) {
fac9b83e
DM
14445 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14446 HOSTCC_MODE_CLRTICK_TXBD);
14447
14448 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14449 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14450 tp->misc_host_ctrl);
14451 }
14452
3bda1258 14453 /* Preserve the APE MAC_MODE bits */
63c3a66f 14454 if (tg3_flag(tp, ENABLE_APE))
d2394e6b 14455 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
3bda1258 14456 else
6e01b20b 14457 tp->mac_mode = 0;
3bda1258 14458
1da177e4
LT
14459 /* these are limited to 10/100 only */
14460 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14461 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14462 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14463 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14464 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14465 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14466 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14467 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14468 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
676917d4
MC
14469 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14470 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
321d32a0 14471 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
d1101142
MC
14472 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14473 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
f07e9af3
MC
14474 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14475 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
1da177e4
LT
14476
14477 err = tg3_phy_probe(tp);
14478 if (err) {
2445e461 14479 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
1da177e4 14480 /* ... but do not return immediately ... */
b02fd9e3 14481 tg3_mdio_fini(tp);
1da177e4
LT
14482 }
14483
184b8904 14484 tg3_read_vpd(tp);
c4e6575c 14485 tg3_read_fw_ver(tp);
1da177e4 14486
f07e9af3
MC
14487 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14488 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
1da177e4
LT
14489 } else {
14490 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
f07e9af3 14491 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
1da177e4 14492 else
f07e9af3 14493 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
1da177e4
LT
14494 }
14495
14496 /* 5700 {AX,BX} chips have a broken status block link
14497 * change bit implementation, so we must use the
14498 * status register in those cases.
14499 */
14500 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
63c3a66f 14501 tg3_flag_set(tp, USE_LINKCHG_REG);
1da177e4 14502 else
63c3a66f 14503 tg3_flag_clear(tp, USE_LINKCHG_REG);
1da177e4
LT
14504
14505 /* The led_ctrl is set during tg3_phy_probe, here we might
14506 * have to force the link status polling mechanism based
14507 * upon subsystem IDs.
14508 */
14509 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
007a880d 14510 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
f07e9af3
MC
14511 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14512 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
63c3a66f 14513 tg3_flag_set(tp, USE_LINKCHG_REG);
1da177e4
LT
14514 }
14515
14516 /* For all SERDES we poll the MAC status register. */
f07e9af3 14517 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
63c3a66f 14518 tg3_flag_set(tp, POLL_SERDES);
1da177e4 14519 else
63c3a66f 14520 tg3_flag_clear(tp, POLL_SERDES);
1da177e4 14521
bf933c80 14522 tp->rx_offset = NET_IP_ALIGN;
d2757fc4 14523 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
1da177e4 14524 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
63c3a66f 14525 tg3_flag(tp, PCIX_MODE)) {
bf933c80 14526 tp->rx_offset = 0;
d2757fc4 14527#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
9dc7a113 14528 tp->rx_copy_thresh = ~(u16)0;
d2757fc4
MC
14529#endif
14530 }
1da177e4 14531
2c49a44d
MC
14532 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14533 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
7cb32cf2
MC
14534 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14535
2c49a44d 14536 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
f92905de
MC
14537
14538 /* Increment the rx prod index on the rx std ring by at most
14539 * 8 for these chips to workaround hw errata.
14540 */
14541 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14542 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14543 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14544 tp->rx_std_max_post = 8;
14545
63c3a66f 14546 if (tg3_flag(tp, ASPM_WORKAROUND))
8ed5d97e
MC
14547 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14548 PCIE_PWR_MGMT_L1_THRESH_MSK;
14549
1da177e4
LT
14550 return err;
14551}
14552
49b6e95f 14553#ifdef CONFIG_SPARC
1da177e4
LT
/* On SPARC, try to obtain the MAC address from the OpenFirmware
 * "local-mac-address" property attached to this PCI device node.
 *
 * On success both dev->dev_addr and dev->perm_addr are filled in and
 * 0 is returned; -ENODEV means the property was absent or not a
 * 6-byte Ethernet address and the caller must fall back to other
 * sources.
 */
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		memcpy(dev->perm_addr, dev->dev_addr, 6);
		return 0;
	}
	return -ENODEV;
}
14570
14571static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14572{
14573 struct net_device *dev = tp->dev;
14574
14575 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
2ff43697 14576 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
1da177e4
LT
14577 return 0;
14578}
14579#endif
14580
/* Determine the device's MAC address, trying sources in decreasing
 * order of trust:
 *
 *   1. (SPARC only) the OpenFirmware device-tree property;
 *   2. the SRAM MAC address mailbox written by bootcode;
 *   3. NVRAM, at a chip/function-specific byte offset;
 *   4. the live MAC_ADDR_0 registers;
 *   5. (SPARC only) the machine IDPROM.
 *
 * Returns 0 with dev->dev_addr and dev->perm_addr populated, or
 * -EINVAL if no source yielded a valid unicast address.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Select the NVRAM offset of the MAC address for this chip and,
	 * on multi-function parts, for this PCI function.
	 */
	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox.  The 0x484b
	 * magic in the high half is the bootcode's "valid" signature.
	 */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* The address occupies the low 2 bytes of 'hi'
			 * and all 4 bytes of 'lo' (big-endian reads).
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
14656
59e6b434
DM
/* Goal values for tg3_calc_dma_bndry(): break DMA bursts on every
 * cacheline boundary, or only on multi-cacheline boundaries.
 */
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

/* Compute the DMA read/write boundary bits to merge into the
 * DMA_RW_CTRL value @val, based on the host cacheline size and the
 * bus type (PCI / PCI-X / PCI Express).  Returns @val with the
 * appropriate DMA_RWCTRL_*_BNDRY_* bits OR'd in; @val is returned
 * unchanged for chips where the boundary bits have no effect.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* A cacheline-size register of 0 means "unspecified"; assume
	 * the largest value so we never burst across a real boundary.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

	/* Architecture-specific policy: which platforms' PCI host
	 * bridges suffer from bursts that cross cacheline boundaries.
	 */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		/* PCIe offers only write-side boundary control. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
14800
1da177e4
LT
/* Run one DMA transaction through the chip's internal DMA engines
 * for diagnostic purposes.  A single buffer descriptor pointing at
 * @buf/@buf_dma (host memory, @size bytes) is written into the NIC's
 * SRAM descriptor pool and handed to the read-DMA engine
 * (@to_device != 0, host -> NIC) or the write-DMA engine
 * (@to_device == 0, NIC -> host).
 *
 * Returns 0 when the completion FIFO reports the descriptor within
 * the ~4ms poll window, -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the DMA engines and drain the completion FIFOs. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build the internal buffer descriptor; 0x2100 is the fixed
	 * NIC-side mbuf address used by this test.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM word by word through the
	 * PCI memory window registers.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick the appropriate DMA engine's work queue. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the completion FIFO for our descriptor (40 x 100us). */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
14880
/* Size of the host buffer used by tg3_test_dma(). */
#define TEST_BUFFER_SIZE	0x2000

/* Host bridges known to expose the 5700/5701 write-DMA bug even when
 * the DMA self-test passes; see tg3_test_dma().
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
14887
1da177e4
LT
/* Choose and program the chip's DMA read/write control settings
 * (tp->dma_rwctrl), then — on 5700/5701 only — run a host<->NIC DMA
 * loopback self-test to detect the write-DMA boundary bug and tighten
 * the write boundary if corruption is observed.
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be
 * allocated, or a negative error from the DMA test itself.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Baseline PCI read/write command codes, then merge in the
	 * cacheline-derived burst boundary bits.
	 */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704 repurpose the low nibble; keep it clear. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the loopback self-test below. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known ramp pattern. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it.  On the first corruption, tighten the write
		 * boundary to 16 bytes and retry the whole test; if it is
		 * already at 16, give up.
		 */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
15077
1da177e4
LT
/* Initialize tp->bufmgr_config with the buffer-manager watermarks
 * (mbuf and DMA descriptor low/high water marks, plus jumbo-frame
 * variants) appropriate for this chip generation: 57765+, 5705+,
 * or the original 570x family.
 */
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		/* 5906 has a smaller mbuf pool and needs lower marks. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	/* DMA descriptor watermarks are the same on all chips. */
	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
15133
15134static char * __devinit tg3_phy_string(struct tg3 *tp)
15135{
79eb6904
MC
15136 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15137 case TG3_PHY_ID_BCM5400: return "5400";
15138 case TG3_PHY_ID_BCM5401: return "5401";
15139 case TG3_PHY_ID_BCM5411: return "5411";
15140 case TG3_PHY_ID_BCM5701: return "5701";
15141 case TG3_PHY_ID_BCM5703: return "5703";
15142 case TG3_PHY_ID_BCM5704: return "5704";
15143 case TG3_PHY_ID_BCM5705: return "5705";
15144 case TG3_PHY_ID_BCM5750: return "5750";
15145 case TG3_PHY_ID_BCM5752: return "5752";
15146 case TG3_PHY_ID_BCM5714: return "5714";
15147 case TG3_PHY_ID_BCM5780: return "5780";
15148 case TG3_PHY_ID_BCM5755: return "5755";
15149 case TG3_PHY_ID_BCM5787: return "5787";
15150 case TG3_PHY_ID_BCM5784: return "5784";
15151 case TG3_PHY_ID_BCM5756: return "5722/5756";
15152 case TG3_PHY_ID_BCM5906: return "5906";
15153 case TG3_PHY_ID_BCM5761: return "5761";
15154 case TG3_PHY_ID_BCM5718C: return "5718C";
15155 case TG3_PHY_ID_BCM5718S: return "5718S";
15156 case TG3_PHY_ID_BCM57765: return "57765";
302b500b 15157 case TG3_PHY_ID_BCM5719C: return "5719C";
6418f2c1 15158 case TG3_PHY_ID_BCM5720C: return "5720C";
79eb6904 15159 case TG3_PHY_ID_BCM8002: return "8002/serdes";
1da177e4
LT
15160 case 0: return "serdes";
15161 default: return "unknown";
855e1111 15162 }
1da177e4
LT
15163}
15164
f9804ddb
MC
15165static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15166{
63c3a66f 15167 if (tg3_flag(tp, PCI_EXPRESS)) {
f9804ddb
MC
15168 strcpy(str, "PCI Express");
15169 return str;
63c3a66f 15170 } else if (tg3_flag(tp, PCIX_MODE)) {
f9804ddb
MC
15171 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15172
15173 strcpy(str, "PCIX:");
15174
15175 if ((clock_ctrl == 7) ||
15176 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15177 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15178 strcat(str, "133MHz");
15179 else if (clock_ctrl == 0)
15180 strcat(str, "33MHz");
15181 else if (clock_ctrl == 2)
15182 strcat(str, "50MHz");
15183 else if (clock_ctrl == 4)
15184 strcat(str, "66MHz");
15185 else if (clock_ctrl == 6)
15186 strcat(str, "100MHz");
f9804ddb
MC
15187 } else {
15188 strcpy(str, "PCI:");
63c3a66f 15189 if (tg3_flag(tp, PCI_HIGH_SPEED))
f9804ddb
MC
15190 strcat(str, "66MHz");
15191 else
15192 strcat(str, "33MHz");
15193 }
63c3a66f 15194 if (tg3_flag(tp, PCI_32BIT))
f9804ddb
MC
15195 strcat(str, ":32-bit");
15196 else
15197 strcat(str, ":64-bit");
15198 return str;
15199}
15200
/* Find the sibling PCI function of a dual-port tg3 device (e.g. the
 * other half of a 5704).  Returns the peer's pci_dev, or tp->pdev
 * itself when the device is configured single-port.  The returned
 * pointer is NOT reference-counted (see comment below).
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	/* Scan every function in our own slot for a device that is
	 * not ourselves.  References taken on non-matches are dropped
	 * immediately.  NOTE(review): if the loop exhausts, 'peer'
	 * still holds the (already-put) func-7 value or NULL — the
	 * code below relies on that value; do not reorder.
	 */
	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
15228
15f9850d
DM
15229static void __devinit tg3_init_coal(struct tg3 *tp)
15230{
15231 struct ethtool_coalesce *ec = &tp->coal;
15232
15233 memset(ec, 0, sizeof(*ec));
15234 ec->cmd = ETHTOOL_GCOALESCE;
15235 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15236 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15237 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15238 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15239 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15240 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15241 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15242 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15243 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15244
15245 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15246 HOSTCC_MODE_CLRTICK_TXBD)) {
15247 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15248 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15249 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15250 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15251 }
d244c892 15252
63c3a66f 15253 if (tg3_flag(tp, 5705_PLUS)) {
d244c892
MC
15254 ec->rx_coalesce_usecs_irq = 0;
15255 ec->tx_coalesce_usecs_irq = 0;
15256 ec->stats_block_coalesce_usecs = 0;
15257 }
15f9850d
DM
15258}
15259
7c7d64b8
SH
/* Net device operations table registered in tg3_init_one(); binds
 * the generic netdev entry points to this driver's handlers.
 */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
15277
1da177e4
LT
15278static int __devinit tg3_init_one(struct pci_dev *pdev,
15279 const struct pci_device_id *ent)
15280{
1da177e4
LT
15281 struct net_device *dev;
15282 struct tg3 *tp;
646c9edd
MC
15283 int i, err, pm_cap;
15284 u32 sndmbx, rcvmbx, intmbx;
f9804ddb 15285 char str[40];
72f2afb8 15286 u64 dma_mask, persist_dma_mask;
0da0606f 15287 u32 features = 0;
1da177e4 15288
05dbe005 15289 printk_once(KERN_INFO "%s\n", version);
1da177e4
LT
15290
15291 err = pci_enable_device(pdev);
15292 if (err) {
2445e461 15293 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
1da177e4
LT
15294 return err;
15295 }
15296
1da177e4
LT
15297 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15298 if (err) {
2445e461 15299 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
1da177e4
LT
15300 goto err_out_disable_pdev;
15301 }
15302
15303 pci_set_master(pdev);
15304
15305 /* Find power-management capability. */
15306 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15307 if (pm_cap == 0) {
2445e461
MC
15308 dev_err(&pdev->dev,
15309 "Cannot find Power Management capability, aborting\n");
1da177e4
LT
15310 err = -EIO;
15311 goto err_out_free_res;
15312 }
15313
16821285
MC
15314 err = pci_set_power_state(pdev, PCI_D0);
15315 if (err) {
15316 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15317 goto err_out_free_res;
15318 }
15319
fe5f5787 15320 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
1da177e4 15321 if (!dev) {
2445e461 15322 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
1da177e4 15323 err = -ENOMEM;
16821285 15324 goto err_out_power_down;
1da177e4
LT
15325 }
15326
1da177e4
LT
15327 SET_NETDEV_DEV(dev, &pdev->dev);
15328
1da177e4
LT
15329 tp = netdev_priv(dev);
15330 tp->pdev = pdev;
15331 tp->dev = dev;
15332 tp->pm_cap = pm_cap;
1da177e4
LT
15333 tp->rx_mode = TG3_DEF_RX_MODE;
15334 tp->tx_mode = TG3_DEF_TX_MODE;
8ef21428 15335
1da177e4
LT
15336 if (tg3_debug > 0)
15337 tp->msg_enable = tg3_debug;
15338 else
15339 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15340
15341 /* The word/byte swap controls here control register access byte
15342 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15343 * setting below.
15344 */
15345 tp->misc_host_ctrl =
15346 MISC_HOST_CTRL_MASK_PCI_INT |
15347 MISC_HOST_CTRL_WORD_SWAP |
15348 MISC_HOST_CTRL_INDIR_ACCESS |
15349 MISC_HOST_CTRL_PCISTATE_RW;
15350
15351 /* The NONFRM (non-frame) byte/word swap controls take effect
15352 * on descriptor entries, anything which isn't packet data.
15353 *
15354 * The StrongARM chips on the board (one for tx, one for rx)
15355 * are running in big-endian mode.
15356 */
15357 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15358 GRC_MODE_WSWAP_NONFRM_DATA);
15359#ifdef __BIG_ENDIAN
15360 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15361#endif
15362 spin_lock_init(&tp->lock);
1da177e4 15363 spin_lock_init(&tp->indirect_lock);
c4028958 15364 INIT_WORK(&tp->reset_task, tg3_reset_task);
1da177e4 15365
d5fe488a 15366 tp->regs = pci_ioremap_bar(pdev, BAR_0);
ab0049b4 15367 if (!tp->regs) {
ab96b241 15368 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
1da177e4
LT
15369 err = -ENOMEM;
15370 goto err_out_free_dev;
15371 }
15372
c9cab24e
MC
15373 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15374 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15375 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15376 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15377 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15378 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15379 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15380 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15381 tg3_flag_set(tp, ENABLE_APE);
15382 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15383 if (!tp->aperegs) {
15384 dev_err(&pdev->dev,
15385 "Cannot map APE registers, aborting\n");
15386 err = -ENOMEM;
15387 goto err_out_iounmap;
15388 }
15389 }
15390
1da177e4
LT
15391 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15392 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
1da177e4 15393
1da177e4 15394 dev->ethtool_ops = &tg3_ethtool_ops;
1da177e4 15395 dev->watchdog_timeo = TG3_TX_TIMEOUT;
2ffcc981 15396 dev->netdev_ops = &tg3_netdev_ops;
1da177e4 15397 dev->irq = pdev->irq;
1da177e4
LT
15398
15399 err = tg3_get_invariants(tp);
15400 if (err) {
ab96b241
MC
15401 dev_err(&pdev->dev,
15402 "Problem fetching invariants of chip, aborting\n");
c9cab24e 15403 goto err_out_apeunmap;
1da177e4
LT
15404 }
15405
4a29cc2e
MC
15406 /* The EPB bridge inside 5714, 5715, and 5780 and any
15407 * device behind the EPB cannot support DMA addresses > 40-bit.
72f2afb8
MC
15408 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15409 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15410 * do DMA address check in tg3_start_xmit().
15411 */
63c3a66f 15412 if (tg3_flag(tp, IS_5788))
284901a9 15413 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
63c3a66f 15414 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
50cf156a 15415 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
72f2afb8 15416#ifdef CONFIG_HIGHMEM
6a35528a 15417 dma_mask = DMA_BIT_MASK(64);
72f2afb8 15418#endif
4a29cc2e 15419 } else
6a35528a 15420 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
72f2afb8
MC
15421
15422 /* Configure DMA attributes. */
284901a9 15423 if (dma_mask > DMA_BIT_MASK(32)) {
72f2afb8
MC
15424 err = pci_set_dma_mask(pdev, dma_mask);
15425 if (!err) {
0da0606f 15426 features |= NETIF_F_HIGHDMA;
72f2afb8
MC
15427 err = pci_set_consistent_dma_mask(pdev,
15428 persist_dma_mask);
15429 if (err < 0) {
ab96b241
MC
15430 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15431 "DMA for consistent allocations\n");
c9cab24e 15432 goto err_out_apeunmap;
72f2afb8
MC
15433 }
15434 }
15435 }
284901a9
YH
15436 if (err || dma_mask == DMA_BIT_MASK(32)) {
15437 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
72f2afb8 15438 if (err) {
ab96b241
MC
15439 dev_err(&pdev->dev,
15440 "No usable DMA configuration, aborting\n");
c9cab24e 15441 goto err_out_apeunmap;
72f2afb8
MC
15442 }
15443 }
15444
fdfec172 15445 tg3_init_bufmgr_config(tp);
1da177e4 15446
0da0606f
MC
15447 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15448
15449 /* 5700 B0 chips do not support checksumming correctly due
15450 * to hardware bugs.
15451 */
15452 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15453 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15454
15455 if (tg3_flag(tp, 5755_PLUS))
15456 features |= NETIF_F_IPV6_CSUM;
15457 }
15458
4e3a7aaa
MC
15459 /* TSO is on by default on chips that support hardware TSO.
15460 * Firmware TSO on older chips gives lower performance, so it
15461 * is off by default, but can be enabled using ethtool.
15462 */
63c3a66f
JP
15463 if ((tg3_flag(tp, HW_TSO_1) ||
15464 tg3_flag(tp, HW_TSO_2) ||
15465 tg3_flag(tp, HW_TSO_3)) &&
0da0606f
MC
15466 (features & NETIF_F_IP_CSUM))
15467 features |= NETIF_F_TSO;
63c3a66f 15468 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
0da0606f
MC
15469 if (features & NETIF_F_IPV6_CSUM)
15470 features |= NETIF_F_TSO6;
63c3a66f 15471 if (tg3_flag(tp, HW_TSO_3) ||
e849cdc3 15472 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
57e6983c
MC
15473 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15474 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
63c3a66f 15475 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
dc668910 15476 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
0da0606f 15477 features |= NETIF_F_TSO_ECN;
b0026624 15478 }
1da177e4 15479
d542fe27
MC
15480 dev->features |= features;
15481 dev->vlan_features |= features;
15482
06c03c02
MB
15483 /*
15484 * Add loopback capability only for a subset of devices that support
15485 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
15486 * loopback for the remaining devices.
15487 */
15488 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15489 !tg3_flag(tp, CPMU_PRESENT))
15490 /* Add the loopback capability */
0da0606f
MC
15491 features |= NETIF_F_LOOPBACK;
15492
0da0606f 15493 dev->hw_features |= features;
06c03c02 15494
1da177e4 15495 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
63c3a66f 15496 !tg3_flag(tp, TSO_CAPABLE) &&
1da177e4 15497 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
63c3a66f 15498 tg3_flag_set(tp, MAX_RXPEND_64);
1da177e4
LT
15499 tp->rx_pending = 63;
15500 }
15501
1da177e4
LT
15502 err = tg3_get_device_address(tp);
15503 if (err) {
ab96b241
MC
15504 dev_err(&pdev->dev,
15505 "Could not obtain valid ethernet address, aborting\n");
c9cab24e 15506 goto err_out_apeunmap;
c88864df
MC
15507 }
15508
1da177e4
LT
15509 /*
15510 * Reset chip in case UNDI or EFI driver did not shutdown
15511 * DMA self test will enable WDMAC and we'll see (spurious)
15512 * pending DMA on the PCI bus at that point.
15513 */
15514 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15515 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
1da177e4 15516 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
944d980e 15517 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
15518 }
15519
15520 err = tg3_test_dma(tp);
15521 if (err) {
ab96b241 15522 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
c88864df 15523 goto err_out_apeunmap;
1da177e4
LT
15524 }
15525
78f90dcf
MC
15526 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15527 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15528 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
6fd45cb8 15529 for (i = 0; i < tp->irq_max; i++) {
78f90dcf
MC
15530 struct tg3_napi *tnapi = &tp->napi[i];
15531
15532 tnapi->tp = tp;
15533 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15534
15535 tnapi->int_mbox = intmbx;
15536 if (i < 4)
15537 intmbx += 0x8;
15538 else
15539 intmbx += 0x4;
15540
15541 tnapi->consmbox = rcvmbx;
15542 tnapi->prodmbox = sndmbx;
15543
66cfd1bd 15544 if (i)
78f90dcf 15545 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
66cfd1bd 15546 else
78f90dcf 15547 tnapi->coal_now = HOSTCC_MODE_NOW;
78f90dcf 15548
63c3a66f 15549 if (!tg3_flag(tp, SUPPORT_MSIX))
78f90dcf
MC
15550 break;
15551
15552 /*
15553 * If we support MSIX, we'll be using RSS. If we're using
15554 * RSS, the first vector only handles link interrupts and the
15555 * remaining vectors handle rx and tx interrupts. Reuse the
15556 * mailbox values for the next iteration. The values we setup
15557 * above are still useful for the single vectored mode.
15558 */
15559 if (!i)
15560 continue;
15561
15562 rcvmbx += 0x8;
15563
15564 if (sndmbx & 0x4)
15565 sndmbx -= 0x4;
15566 else
15567 sndmbx += 0xc;
15568 }
15569
15f9850d
DM
15570 tg3_init_coal(tp);
15571
c49a1561
MC
15572 pci_set_drvdata(pdev, dev);
15573
cd0d7228
MC
15574 if (tg3_flag(tp, 5717_PLUS)) {
15575 /* Resume a low-power mode */
15576 tg3_frob_aux_power(tp, false);
15577 }
15578
1da177e4
LT
15579 err = register_netdev(dev);
15580 if (err) {
ab96b241 15581 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
0d3031d9 15582 goto err_out_apeunmap;
1da177e4
LT
15583 }
15584
05dbe005
JP
15585 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15586 tp->board_part_number,
15587 tp->pci_chip_rev_id,
15588 tg3_bus_string(tp, str),
15589 dev->dev_addr);
1da177e4 15590
f07e9af3 15591 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
3f0e3ad7
MC
15592 struct phy_device *phydev;
15593 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
5129c3a3
MC
15594 netdev_info(dev,
15595 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
05dbe005 15596 phydev->drv->name, dev_name(&phydev->dev));
f07e9af3
MC
15597 } else {
15598 char *ethtype;
15599
15600 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15601 ethtype = "10/100Base-TX";
15602 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15603 ethtype = "1000Base-SX";
15604 else
15605 ethtype = "10/100/1000Base-T";
15606
5129c3a3 15607 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
47007831
MC
15608 "(WireSpeed[%d], EEE[%d])\n",
15609 tg3_phy_string(tp), ethtype,
15610 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15611 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
f07e9af3 15612 }
05dbe005
JP
15613
15614 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
dc668910 15615 (dev->features & NETIF_F_RXCSUM) != 0,
63c3a66f 15616 tg3_flag(tp, USE_LINKCHG_REG) != 0,
f07e9af3 15617 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
63c3a66f
JP
15618 tg3_flag(tp, ENABLE_ASF) != 0,
15619 tg3_flag(tp, TSO_CAPABLE) != 0);
05dbe005
JP
15620 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15621 tp->dma_rwctrl,
15622 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15623 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
1da177e4 15624
b45aa2f6
MC
15625 pci_save_state(pdev);
15626
1da177e4
LT
15627 return 0;
15628
0d3031d9
MC
15629err_out_apeunmap:
15630 if (tp->aperegs) {
15631 iounmap(tp->aperegs);
15632 tp->aperegs = NULL;
15633 }
15634
1da177e4 15635err_out_iounmap:
6892914f
MC
15636 if (tp->regs) {
15637 iounmap(tp->regs);
22abe310 15638 tp->regs = NULL;
6892914f 15639 }
1da177e4
LT
15640
15641err_out_free_dev:
15642 free_netdev(dev);
15643
16821285
MC
15644err_out_power_down:
15645 pci_set_power_state(pdev, PCI_D3hot);
15646
1da177e4
LT
15647err_out_free_res:
15648 pci_release_regions(pdev);
15649
15650err_out_disable_pdev:
15651 pci_disable_device(pdev);
15652 pci_set_drvdata(pdev, NULL);
15653 return err;
15654}
15655
15656static void __devexit tg3_remove_one(struct pci_dev *pdev)
15657{
15658 struct net_device *dev = pci_get_drvdata(pdev);
15659
15660 if (dev) {
15661 struct tg3 *tp = netdev_priv(dev);
15662
077f849d
JSR
15663 if (tp->fw)
15664 release_firmware(tp->fw);
15665
23f333a2 15666 cancel_work_sync(&tp->reset_task);
158d7abd 15667
63c3a66f 15668 if (!tg3_flag(tp, USE_PHYLIB)) {
b02fd9e3 15669 tg3_phy_fini(tp);
158d7abd 15670 tg3_mdio_fini(tp);
b02fd9e3 15671 }
158d7abd 15672
1da177e4 15673 unregister_netdev(dev);
0d3031d9
MC
15674 if (tp->aperegs) {
15675 iounmap(tp->aperegs);
15676 tp->aperegs = NULL;
15677 }
6892914f
MC
15678 if (tp->regs) {
15679 iounmap(tp->regs);
22abe310 15680 tp->regs = NULL;
6892914f 15681 }
1da177e4
LT
15682 free_netdev(dev);
15683 pci_release_regions(pdev);
15684 pci_disable_device(pdev);
15685 pci_set_drvdata(pdev, NULL);
15686 }
15687}
15688
aa6027ca 15689#ifdef CONFIG_PM_SLEEP
/* PM sleep callback: quiesce the device before the system suspends.
 *
 * Stops the phy, the tx/NAPI paths and the driver timer, halts the
 * chip and prepares it for low power.  If the power-down preparation
 * fails, the hardware is restarted so the interface keeps working,
 * and the error is still reported to the PM core.
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;

	/* Let a pending reset task finish before tearing things down. */
	flush_work_sync(&tp->reset_task);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		/* Power-down failed: bring the device back up so it
		 * remains usable, but still return the original error.
		 */
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		/* Restart the phy only if the hw restart succeeded. */
		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
15743
/* PM resume callback: bring the device back up after system sleep.
 * A no-op when the interface was not running at suspend time.
 */
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	/* Reinitialize the hardware from scratch. */
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	/* Re-arm the periodic driver timer stopped in tg3_suspend(). */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* Restart the phy only once the hardware is known good. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
15776
/* Wire the suspend/resume callbacks into a dev_pm_ops structure.
 * TG3_PM_OPS degrades to NULL when the kernel is built without
 * CONFIG_PM_SLEEP, so no PM callbacks are registered in that case.
 */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);

#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
c866b7ea 15785
b45aa2f6
MC
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.  Returns DISCONNECT for a
 * permanent failure, otherwise NEED_RESET so the PCI core
 * proceeds to tg3_io_slot_reset().
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);
	tg3_flag_clear(tp, RESTART_TIMER);

	/* Want to make sure that the reset task doesn't run */
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	/* NOTE(review): RESTART_TIMER is cleared a second time here,
	 * presumably in case the reset task re-set it before being
	 * cancelled -- confirm before removing either clear.
	 */
	tg3_flag_clear(tp, RESTART_TIMER);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
15837
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	/* Restore the config space saved at probe/suspend time, then
	 * re-save it so a future slot reset starts from this state.
	 */
	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	/* Interface was down: nothing more to restore. */
	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}
15881
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that its OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	/* Full hardware re-init after the slot reset. */
	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	/* Re-arm the periodic driver timer. */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
15921
/* PCI AER hooks: the PCI error-recovery core drives
 * error_detected -> slot_reset -> resume on this device.
 */
static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
15927
1da177e4
LT
/* PCI driver glue: device-id table, probe/remove entry points,
 * AER error handlers and the (optional) PM callback table.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};
15936
15937static int __init tg3_init(void)
15938{
29917620 15939 return pci_register_driver(&tg3_driver);
1da177e4
LT
15940}
15941
/* Module exit: detach the driver from the PCI core; the core calls
 * tg3_remove_one() for every bound device.
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);
This page took 5.857687 seconds and 5 git commands to generate.