/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		129
#define DRV_MODULE_VERSION	\
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"January 06, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
        (NETIF_MSG_DRV		| \
         NETIF_MSG_PROBE	| \
         NETIF_MSG_LINK		| \
         NETIF_MSG_TIMER	| \
         NETIF_MSG_IFDOWN	| \
         NETIF_MSG_IFUP		| \
         NETIF_MSG_RX_ERR	| \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
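/* NEXT_TX can wrap the ring index with a mask because TG3_TX_RING_SIZE
 * is a power of two; see the shifts-and-masks comment above
 * TG3_TX_RING_SIZE.
 */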
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]	= { "nvram test        (online) " },
        [TG3_LINK_TEST]		= { "link test         (online) " },
        [TG3_REGISTER_TEST]	= { "register test     (offline)" },
        [TG3_MEMORY_TEST]	= { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

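/* Indirect register access: the target offset is loaded into the
 * TG3PCI_REG_BASE_ADDR window in PCI config space and the data is
 * moved through TG3PCI_REG_DATA.  indirect_lock serializes users of
 * the shared window.
 */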
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

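/* Posted-write flush: reading the register back forces the preceding
 * write to reach the chip before we return.
 */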
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

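/* Mailbox access in indirect mode also goes through PCI config space;
 * the two producer-index mailboxes that have dedicated config-space
 * aliases are special-cased first.
 */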
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

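/* TX mailbox write with two workarounds: chips with the TXD mailbox
 * hardware bug need the value written twice, and hosts that can
 * reorder mailbox writes need a read-back to flush the write.
 */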
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

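/* NIC SRAM is reached through a sliding memory window: point
 * TG3PCI_MEM_WIN_BASE_ADDR at the target offset, move the data through
 * TG3PCI_MEM_WIN_DATA, then park the window back at zero.
 */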
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

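/* The APE (Application Processing Engine) runs management firmware
 * that shares the hardware with the host driver.  Access to shared
 * resources is arbitrated through per-resource request/grant semaphore
 * registers, handled by the lock helpers below.
 */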
static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver hasn't any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

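/* Pull data out of the APE scratchpad: for each chunk, post a
 * driver->APE scratchpad-read event, wait for the APE to stage the
 * chunk in the shared message buffer, then copy it out a word at
 * a time.
 */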
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                        TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

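/* Returns nonzero when the status block shows work (a link change
 * event, TX completions, or received packets) that the NAPI handler
 * still needs to process.
 */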
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

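/* Adjust the core clock setup via TG3PCI_CLOCK_CTRL.  Chips with a
 * CPMU and the 5780 class are skipped here; they presumably manage
 * this on their own.
 */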
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

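/* Clause-22 MII access through the MAC_MI_COM register: load the PHY
 * and register addresses, start the transaction, then poll MI_COM_BUSY
 * until the management interface finishes or PHY_BUSY_LOOPS expires.
 */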
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

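/* Clause-45 MMD registers are reached indirectly through the clause-22
 * MII_TG3_MMD_CTRL/MII_TG3_MMD_ADDRESS pair: select the MMD device,
 * write the register address, then transfer the data in no-increment
 * mode.
 */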
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
        u32 val;
        int err;

        err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

        if (err)
                return err;

        if (enable)
                val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
        else
                val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

        err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

        return err;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

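/* Program the MAC-side PHY configuration (LED modes and RGMII
 * signalling) on the 5785 to match whichever PHY was found on the
 * MDIO bus.
 */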
9c61d6bc 1376static void tg3_mdio_config_5785(struct tg3 *tp)
a9daf367
MC
1377{
1378 u32 val;
fcb389df 1379 struct phy_device *phydev;
a9daf367 1380
3f0e3ad7 1381 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
fcb389df 1382 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
6a443a0f
MC
1383 case PHY_ID_BCM50610:
1384 case PHY_ID_BCM50610M:
fcb389df
MC
1385 val = MAC_PHYCFG2_50610_LED_MODES;
1386 break;
6a443a0f 1387 case PHY_ID_BCMAC131:
fcb389df
MC
1388 val = MAC_PHYCFG2_AC131_LED_MODES;
1389 break;
6a443a0f 1390 case PHY_ID_RTL8211C:
fcb389df
MC
1391 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1392 break;
6a443a0f 1393 case PHY_ID_RTL8201E:
fcb389df
MC
1394 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1395 break;
1396 default:
a9daf367 1397 return;
fcb389df
MC
1398 }
1399
1400 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1401 tw32(MAC_PHYCFG2, val);
1402
1403 val = tr32(MAC_PHYCFG1);
bb85fbb6
MC
1404 val &= ~(MAC_PHYCFG1_RGMII_INT |
1405 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1406 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
fcb389df
MC
1407 tw32(MAC_PHYCFG1, val);
1408
1409 return;
1410 }
1411
63c3a66f 1412 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
fcb389df
MC
1413 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1414 MAC_PHYCFG2_FMODE_MASK_MASK |
1415 MAC_PHYCFG2_GMODE_MASK_MASK |
1416 MAC_PHYCFG2_ACT_MASK_MASK |
1417 MAC_PHYCFG2_QUAL_MASK_MASK |
1418 MAC_PHYCFG2_INBAND_ENABLE;
1419
1420 tw32(MAC_PHYCFG2, val);
a9daf367 1421
bb85fbb6
MC
1422 val = tr32(MAC_PHYCFG1);
1423 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1424 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
63c3a66f
JP
1425 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1426 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
a9daf367 1427 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
63c3a66f 1428 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
a9daf367
MC
1429 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1430 }
bb85fbb6
MC
1431 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1432 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1433 tw32(MAC_PHYCFG1, val);
a9daf367 1434
a9daf367
MC
1435 val = tr32(MAC_EXT_RGMII_MODE);
1436 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1437 MAC_RGMII_MODE_RX_QUALITY |
1438 MAC_RGMII_MODE_RX_ACTIVITY |
1439 MAC_RGMII_MODE_RX_ENG_DET |
1440 MAC_RGMII_MODE_TX_ENABLE |
1441 MAC_RGMII_MODE_TX_LOWPWR |
1442 MAC_RGMII_MODE_TX_RESET);
63c3a66f
JP
1443 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1444 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
a9daf367
MC
1445 val |= MAC_RGMII_MODE_RX_INT_B |
1446 MAC_RGMII_MODE_RX_QUALITY |
1447 MAC_RGMII_MODE_RX_ACTIVITY |
1448 MAC_RGMII_MODE_RX_ENG_DET;
63c3a66f 1449 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
a9daf367
MC
1450 val |= MAC_RGMII_MODE_TX_ENABLE |
1451 MAC_RGMII_MODE_TX_LOWPWR |
1452 MAC_RGMII_MODE_TX_RESET;
1453 }
1454 tw32(MAC_EXT_RGMII_MODE, val);
1455}
1456
158d7abd
MC
1457static void tg3_mdio_start(struct tg3 *tp)
1458{
158d7abd
MC
1459 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1460 tw32_f(MAC_MI_MODE, tp->mi_mode);
1461 udelay(80);
a9daf367 1462
63c3a66f 1463 if (tg3_flag(tp, MDIOBUS_INITED) &&
9ea4818d
MC
1464 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1465 tg3_mdio_config_5785(tp);
1466}
1467
1468static int tg3_mdio_init(struct tg3 *tp)
1469{
1470 int i;
1471 u32 reg;
1472 struct phy_device *phydev;
1473
63c3a66f 1474 if (tg3_flag(tp, 5717_PLUS)) {
9c7df915 1475 u32 is_serdes;
882e9793 1476
69f11c99 1477 tp->phy_addr = tp->pci_fn + 1;
882e9793 1478
d1ec96af
MC
1479 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1480 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1481 else
1482 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1483 TG3_CPMU_PHY_STRAP_IS_SERDES;
882e9793
MC
1484 if (is_serdes)
1485 tp->phy_addr += 7;
1486 } else
3f0e3ad7 1487 tp->phy_addr = TG3_PHY_MII_ADDR;
882e9793 1488
158d7abd
MC
1489 tg3_mdio_start(tp);
1490
63c3a66f 1491 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
158d7abd
MC
1492 return 0;
1493
298cf9be
LB
1494 tp->mdio_bus = mdiobus_alloc();
1495 if (tp->mdio_bus == NULL)
1496 return -ENOMEM;
158d7abd 1497
298cf9be
LB
1498 tp->mdio_bus->name = "tg3 mdio bus";
1499 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
158d7abd 1500 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
298cf9be
LB
1501 tp->mdio_bus->priv = tp;
1502 tp->mdio_bus->parent = &tp->pdev->dev;
1503 tp->mdio_bus->read = &tg3_mdio_read;
1504 tp->mdio_bus->write = &tg3_mdio_write;
1505 tp->mdio_bus->reset = &tg3_mdio_reset;
3f0e3ad7 1506 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
298cf9be 1507 tp->mdio_bus->irq = &tp->mdio_irq[0];
158d7abd
MC
1508
1509 for (i = 0; i < PHY_MAX_ADDR; i++)
298cf9be 1510 tp->mdio_bus->irq[i] = PHY_POLL;
158d7abd
MC
1511
1512 /* The bus registration will look for all the PHYs on the mdio bus.
1513 * Unfortunately, it does not ensure the PHY is powered up before
1514 * accessing the PHY ID registers. A chip reset is the
1515 * quickest way to bring the device back to an operational state..
1516 */
1517 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1518 tg3_bmcr_reset(tp);
1519
298cf9be 1520 i = mdiobus_register(tp->mdio_bus);
a9daf367 1521 if (i) {
ab96b241 1522 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
9c61d6bc 1523 mdiobus_free(tp->mdio_bus);
a9daf367
MC
1524 return i;
1525 }
158d7abd 1526
3f0e3ad7 1527 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
a9daf367 1528
9c61d6bc 1529 if (!phydev || !phydev->drv) {
ab96b241 1530 dev_warn(&tp->pdev->dev, "No PHY devices\n");
9c61d6bc
MC
1531 mdiobus_unregister(tp->mdio_bus);
1532 mdiobus_free(tp->mdio_bus);
1533 return -ENODEV;
1534 }
1535
1536 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
6a443a0f 1537 case PHY_ID_BCM57780:
321d32a0 1538 phydev->interface = PHY_INTERFACE_MODE_GMII;
c704dc23 1539 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
321d32a0 1540 break;
6a443a0f
MC
1541 case PHY_ID_BCM50610:
1542 case PHY_ID_BCM50610M:
32e5a8d6 1543 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
c704dc23 1544 PHY_BRCM_RX_REFCLK_UNUSED |
52fae083 1545 PHY_BRCM_DIS_TXCRXC_NOENRGY |
c704dc23 1546 PHY_BRCM_AUTO_PWRDWN_ENABLE;
63c3a66f 1547 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
a9daf367 1548 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
63c3a66f 1549 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
a9daf367 1550 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
63c3a66f 1551 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
a9daf367 1552 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
fcb389df 1553 /* fallthru */
6a443a0f 1554 case PHY_ID_RTL8211C:
fcb389df 1555 phydev->interface = PHY_INTERFACE_MODE_RGMII;
a9daf367 1556 break;
6a443a0f
MC
1557 case PHY_ID_RTL8201E:
1558 case PHY_ID_BCMAC131:
a9daf367 1559 phydev->interface = PHY_INTERFACE_MODE_MII;
cdd4e09d 1560 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
f07e9af3 1561 tp->phy_flags |= TG3_PHYFLG_IS_FET;
a9daf367
MC
1562 break;
1563 }
1564
63c3a66f 1565 tg3_flag_set(tp, MDIOBUS_INITED);
9c61d6bc
MC
1566
1567 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1568 tg3_mdio_config_5785(tp);
a9daf367
MC
1569
1570 return 0;
158d7abd
MC
1571}
1572
1573static void tg3_mdio_fini(struct tg3 *tp)
1574{
63c3a66f
JP
1575 if (tg3_flag(tp, MDIOBUS_INITED)) {
1576 tg3_flag_clear(tp, MDIOBUS_INITED);
298cf9be
LB
1577 mdiobus_unregister(tp->mdio_bus);
1578 mdiobus_free(tp->mdio_bus);
158d7abd
MC
1579 }
1580}
1581
4ba526ce
MC
1582/* tp->lock is held. */
1583static inline void tg3_generate_fw_event(struct tg3 *tp)
1584{
1585 u32 val;
1586
1587 val = tr32(GRC_RX_CPU_EVENT);
1588 val |= GRC_RX_CPU_DRIVER_EVENT;
1589 tw32_f(GRC_RX_CPU_EVENT, val);
1590
1591 tp->last_event_jiffies = jiffies;
1592}
1593
1594#define TG3_FW_EVENT_TIMEOUT_USEC 2500
1595
95e2869a
MC
1596/* tp->lock is held. */
1597static void tg3_wait_for_event_ack(struct tg3 *tp)
1598{
1599 int i;
4ba526ce
MC
1600 unsigned int delay_cnt;
1601 long time_remain;
1602
1603 /* If enough time has passed, no wait is necessary. */
1604 time_remain = (long)(tp->last_event_jiffies + 1 +
1605 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1606 (long)jiffies;
1607 if (time_remain < 0)
1608 return;
1609
1610 /* Check if we can shorten the wait time. */
1611 delay_cnt = jiffies_to_usecs(time_remain);
1612 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1613 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1614 delay_cnt = (delay_cnt >> 3) + 1;
95e2869a 1615
4ba526ce 1616 for (i = 0; i < delay_cnt; i++) {
95e2869a
MC
1617 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1618 break;
4ba526ce 1619 udelay(8);
95e2869a
MC
1620 }
1621}
1622
1623/* tp->lock is held. */
b28f389d 1624static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
95e2869a 1625{
b28f389d 1626 u32 reg, val;
95e2869a
MC
1627
1628 val = 0;
1629 if (!tg3_readphy(tp, MII_BMCR, &reg))
1630 val = reg << 16;
1631 if (!tg3_readphy(tp, MII_BMSR, &reg))
1632 val |= (reg & 0xffff);
b28f389d 1633 *data++ = val;
95e2869a
MC
1634
1635 val = 0;
1636 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1637 val = reg << 16;
1638 if (!tg3_readphy(tp, MII_LPA, &reg))
1639 val |= (reg & 0xffff);
b28f389d 1640 *data++ = val;
95e2869a
MC
1641
1642 val = 0;
f07e9af3 1643 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
95e2869a
MC
1644 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1645 val = reg << 16;
1646 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1647 val |= (reg & 0xffff);
1648 }
b28f389d 1649 *data++ = val;
95e2869a
MC
1650
1651 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1652 val = reg << 16;
1653 else
1654 val = 0;
b28f389d
MC
1655 *data++ = val;
1656}
1657
1658/* tp->lock is held. */
1659static void tg3_ump_link_report(struct tg3 *tp)
1660{
1661 u32 data[4];
1662
1663 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1664 return;
1665
1666 tg3_phy_gather_ump_data(tp, data);
1667
1668 tg3_wait_for_event_ack(tp);
1669
1670 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1671 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1672 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1673 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1674 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1675 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
95e2869a 1676
4ba526ce 1677 tg3_generate_fw_event(tp);
95e2869a
MC
1678}
1679
8d5a89b3
MC
1680/* tp->lock is held. */
1681static void tg3_stop_fw(struct tg3 *tp)
1682{
1683 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1684 /* Wait for RX cpu to ACK the previous event. */
1685 tg3_wait_for_event_ack(tp);
1686
1687 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1688
1689 tg3_generate_fw_event(tp);
1690
1691 /* Wait for RX cpu to ACK this event. */
1692 tg3_wait_for_event_ack(tp);
1693 }
1694}
1695
1696/* tp->lock is held. */
1697static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1698{
1699 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1700 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1701
1702 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1703 switch (kind) {
1704 case RESET_KIND_INIT:
1705 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1706 DRV_STATE_START);
1707 break;
1708
1709 case RESET_KIND_SHUTDOWN:
1710 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1711 DRV_STATE_UNLOAD);
1712 break;
1713
1714 case RESET_KIND_SUSPEND:
1715 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1716 DRV_STATE_SUSPEND);
1717 break;
1718
1719 default:
1720 break;
1721 }
1722 }
1723
1724 if (kind == RESET_KIND_INIT ||
1725 kind == RESET_KIND_SUSPEND)
1726 tg3_ape_driver_state_change(tp, kind);
1727}
1728
1729/* tp->lock is held. */
1730static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1731{
1732 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1733 switch (kind) {
1734 case RESET_KIND_INIT:
1735 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1736 DRV_STATE_START_DONE);
1737 break;
1738
1739 case RESET_KIND_SHUTDOWN:
1740 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1741 DRV_STATE_UNLOAD_DONE);
1742 break;
1743
1744 default:
1745 break;
1746 }
1747 }
1748
1749 if (kind == RESET_KIND_SHUTDOWN)
1750 tg3_ape_driver_state_change(tp, kind);
1751}
1752
1753/* tp->lock is held. */
1754static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1755{
1756 if (tg3_flag(tp, ENABLE_ASF)) {
1757 switch (kind) {
1758 case RESET_KIND_INIT:
1759 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1760 DRV_STATE_START);
1761 break;
1762
1763 case RESET_KIND_SHUTDOWN:
1764 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1765 DRV_STATE_UNLOAD);
1766 break;
1767
1768 case RESET_KIND_SUSPEND:
1769 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1770 DRV_STATE_SUSPEND);
1771 break;
1772
1773 default:
1774 break;
1775 }
1776 }
1777}
1778
1779static int tg3_poll_fw(struct tg3 *tp)
1780{
1781 int i;
1782 u32 val;
1783
1784 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1785 /* Wait up to 20ms for init done. */
1786 for (i = 0; i < 200; i++) {
1787 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1788 return 0;
1789 udelay(100);
1790 }
1791 return -ENODEV;
1792 }
1793
1794 /* Wait for firmware initialization to complete. */
1795 for (i = 0; i < 100000; i++) {
1796 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1797 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1798 break;
1799 udelay(10);
1800 }
1801
1802 /* Chip might not be fitted with firmware. Some Sun onboard
1803 * parts are configured like that. So don't signal the timeout
1804 * of the above loop as an error, but do report the lack of
1805 * running firmware once.
1806 */
1807 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1808 tg3_flag_set(tp, NO_FWARE_REPORTED);
1809
1810 netdev_info(tp->dev, "No firmware running\n");
1811 }
1812
1813 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1814 /* The 57765 A0 needs a little more
1815 * time to do some important work.
1816 */
1817 mdelay(10);
1818 }
1819
1820 return 0;
1821}
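/* Worst-case timing of the polls above: the 5906 VCPU loop is
 * 200 x 100 us = 20 ms, and the generic firmware-mailbox loop is
 * 100000 x 10 us, i.e. roughly one second before the driver
 * concludes that no firmware is running.
 */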
1822
1823static void tg3_link_report(struct tg3 *tp)
1824{
1825 if (!netif_carrier_ok(tp->dev)) {
05dbe005 1826 netif_info(tp, link, tp->dev, "Link is down\n");
1827 tg3_ump_link_report(tp);
1828 } else if (netif_msg_link(tp)) {
1829 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1830 (tp->link_config.active_speed == SPEED_1000 ?
1831 1000 :
1832 (tp->link_config.active_speed == SPEED_100 ?
1833 100 : 10)),
1834 (tp->link_config.active_duplex == DUPLEX_FULL ?
1835 "full" : "half"));
1836
1837 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1838 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1839 "on" : "off",
1840 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1841 "on" : "off");
1842
1843 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1844 netdev_info(tp->dev, "EEE is %s\n",
1845 tp->setlpicnt ? "enabled" : "disabled");
1846
1847 tg3_ump_link_report(tp);
1848 }
1849}
1850
1851static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1852{
1853 u16 miireg;
1854
e18ce346 1855 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
95e2869a 1856 miireg = ADVERTISE_1000XPAUSE;
e18ce346 1857 else if (flow_ctrl & FLOW_CTRL_TX)
95e2869a 1858 miireg = ADVERTISE_1000XPSE_ASYM;
e18ce346 1859 else if (flow_ctrl & FLOW_CTRL_RX)
1860 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1861 else
1862 miireg = 0;
1863
1864 return miireg;
1865}
1866
1867static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1868{
1869 u8 cap = 0;
1870
1871 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1872 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1873 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1874 if (lcladv & ADVERTISE_1000XPAUSE)
1875 cap = FLOW_CTRL_RX;
1876 if (rmtadv & ADVERTISE_1000XPAUSE)
e18ce346 1877 cap = FLOW_CTRL_TX;
1878 }
1879
1880 return cap;
1881}
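/* Example of the resolution above: if both ends advertise
 * ADVERTISE_1000XPAUSE, pause is enabled in both directions. If only
 * the asymmetric bit is common, the direction follows whichever side
 * also advertised the symmetric bit: lcladv yields FLOW_CTRL_RX,
 * rmtadv yields FLOW_CTRL_TX.
 */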
1882
f51f3562 1883static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
95e2869a 1884{
b02fd9e3 1885 u8 autoneg;
f51f3562 1886 u8 flowctrl = 0;
1887 u32 old_rx_mode = tp->rx_mode;
1888 u32 old_tx_mode = tp->tx_mode;
1889
63c3a66f 1890 if (tg3_flag(tp, USE_PHYLIB))
3f0e3ad7 1891 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1892 else
1893 autoneg = tp->link_config.autoneg;
1894
63c3a66f 1895 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
f07e9af3 1896 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
f51f3562 1897 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
95e2869a 1898 else
bc02ff95 1899 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1900 } else
1901 flowctrl = tp->link_config.flowctrl;
95e2869a 1902
f51f3562 1903 tp->link_config.active_flowctrl = flowctrl;
95e2869a 1904
e18ce346 1905 if (flowctrl & FLOW_CTRL_RX)
1906 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1907 else
1908 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1909
f51f3562 1910 if (old_rx_mode != tp->rx_mode)
95e2869a 1911 tw32_f(MAC_RX_MODE, tp->rx_mode);
95e2869a 1912
e18ce346 1913 if (flowctrl & FLOW_CTRL_TX)
1914 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1915 else
1916 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1917
f51f3562 1918 if (old_tx_mode != tp->tx_mode)
95e2869a 1919 tw32_f(MAC_TX_MODE, tp->tx_mode);
1920}
1921
1922static void tg3_adjust_link(struct net_device *dev)
1923{
1924 u8 oldflowctrl, linkmesg = 0;
1925 u32 mac_mode, lcl_adv, rmt_adv;
1926 struct tg3 *tp = netdev_priv(dev);
3f0e3ad7 1927 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
b02fd9e3 1928
24bb4fb6 1929 spin_lock_bh(&tp->lock);
1930
1931 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1932 MAC_MODE_HALF_DUPLEX);
1933
1934 oldflowctrl = tp->link_config.active_flowctrl;
1935
1936 if (phydev->link) {
1937 lcl_adv = 0;
1938 rmt_adv = 0;
1939
1940 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1941 mac_mode |= MAC_MODE_PORT_MODE_MII;
1942 else if (phydev->speed == SPEED_1000 ||
1943 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
b02fd9e3 1944 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1945 else
1946 mac_mode |= MAC_MODE_PORT_MODE_MII;
1947
1948 if (phydev->duplex == DUPLEX_HALF)
1949 mac_mode |= MAC_MODE_HALF_DUPLEX;
1950 else {
f88788f0 1951 lcl_adv = mii_advertise_flowctrl(
1952 tp->link_config.flowctrl);
1953
1954 if (phydev->pause)
1955 rmt_adv = LPA_PAUSE_CAP;
1956 if (phydev->asym_pause)
1957 rmt_adv |= LPA_PAUSE_ASYM;
1958 }
1959
1960 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1961 } else
1962 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1963
1964 if (mac_mode != tp->mac_mode) {
1965 tp->mac_mode = mac_mode;
1966 tw32_f(MAC_MODE, tp->mac_mode);
1967 udelay(40);
1968 }
1969
1970 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1971 if (phydev->speed == SPEED_10)
1972 tw32(MAC_MI_STAT,
1973 MAC_MI_STAT_10MBPS_MODE |
1974 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1975 else
1976 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1977 }
1978
1979 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1980 tw32(MAC_TX_LENGTHS,
1981 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1982 (6 << TX_LENGTHS_IPG_SHIFT) |
1983 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1984 else
1985 tw32(MAC_TX_LENGTHS,
1986 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1987 (6 << TX_LENGTHS_IPG_SHIFT) |
1988 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1989
34655ad6 1990 if (phydev->link != tp->old_link ||
1991 phydev->speed != tp->link_config.active_speed ||
1992 phydev->duplex != tp->link_config.active_duplex ||
1993 oldflowctrl != tp->link_config.active_flowctrl)
c6cdf436 1994 linkmesg = 1;
b02fd9e3 1995
34655ad6 1996 tp->old_link = phydev->link;
1997 tp->link_config.active_speed = phydev->speed;
1998 tp->link_config.active_duplex = phydev->duplex;
1999
24bb4fb6 2000 spin_unlock_bh(&tp->lock);
2001
2002 if (linkmesg)
2003 tg3_link_report(tp);
2004}
2005
2006static int tg3_phy_init(struct tg3 *tp)
2007{
2008 struct phy_device *phydev;
2009
f07e9af3 2010 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2011 return 0;
2012
2013 /* Bring the PHY back to a known state. */
2014 tg3_bmcr_reset(tp);
2015
3f0e3ad7 2016 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2017
2018 /* Attach the MAC to the PHY. */
2019 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2020 tg3_adjust_link, phydev->interface);
b02fd9e3 2021 if (IS_ERR(phydev)) {
ab96b241 2022 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2023 return PTR_ERR(phydev);
2024 }
2025
b02fd9e3 2026 /* Mask with MAC supported features. */
2027 switch (phydev->interface) {
2028 case PHY_INTERFACE_MODE_GMII:
2029 case PHY_INTERFACE_MODE_RGMII:
f07e9af3 2030 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2031 phydev->supported &= (PHY_GBIT_FEATURES |
2032 SUPPORTED_Pause |
2033 SUPPORTED_Asym_Pause);
2034 break;
2035 }
2036 /* fallthru */
2037 case PHY_INTERFACE_MODE_MII:
2038 phydev->supported &= (PHY_BASIC_FEATURES |
2039 SUPPORTED_Pause |
2040 SUPPORTED_Asym_Pause);
2041 break;
2042 default:
3f0e3ad7 2043 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2044 return -EINVAL;
2045 }
2046
f07e9af3 2047 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2048
2049 phydev->advertising = phydev->supported;
2050
2051 return 0;
2052}
2053
2054static void tg3_phy_start(struct tg3 *tp)
2055{
2056 struct phy_device *phydev;
2057
f07e9af3 2058 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2059 return;
2060
3f0e3ad7 2061 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
b02fd9e3 2062
2063 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2064 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2065 phydev->speed = tp->link_config.speed;
2066 phydev->duplex = tp->link_config.duplex;
2067 phydev->autoneg = tp->link_config.autoneg;
2068 phydev->advertising = tp->link_config.advertising;
2069 }
2070
2071 phy_start(phydev);
2072
2073 phy_start_aneg(phydev);
2074}
2075
2076static void tg3_phy_stop(struct tg3 *tp)
2077{
f07e9af3 2078 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2079 return;
2080
3f0e3ad7 2081 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2082}
2083
2084static void tg3_phy_fini(struct tg3 *tp)
2085{
f07e9af3 2086 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
3f0e3ad7 2087 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
f07e9af3 2088 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2089 }
2090}
2091
2092static int tg3_phy_set_extloopbk(struct tg3 *tp)
2093{
2094 int err;
2095 u32 val;
2096
2097 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2098 return 0;
2099
2100 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2101 /* Cannot do read-modify-write on 5401 */
2102 err = tg3_phy_auxctl_write(tp,
2103 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2104 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2105 0x4c20);
2106 goto done;
2107 }
2108
2109 err = tg3_phy_auxctl_read(tp,
2110 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2111 if (err)
2112 return err;
2113
2114 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2115 err = tg3_phy_auxctl_write(tp,
2116 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2117
2118done:
2119 return err;
2120}
2121
2122static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2123{
2124 u32 phytest;
2125
2126 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2127 u32 phy;
2128
2129 tg3_writephy(tp, MII_TG3_FET_TEST,
2130 phytest | MII_TG3_FET_SHADOW_EN);
2131 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2132 if (enable)
2133 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2134 else
2135 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2136 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2137 }
2138 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2139 }
2140}
2141
2142static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2143{
2144 u32 reg;
2145
2146 if (!tg3_flag(tp, 5705_PLUS) ||
2147 (tg3_flag(tp, 5717_PLUS) &&
f07e9af3 2148 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2149 return;
2150
f07e9af3 2151 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2152 tg3_phy_fet_toggle_apd(tp, enable);
2153 return;
2154 }
2155
2156 reg = MII_TG3_MISC_SHDW_WREN |
2157 MII_TG3_MISC_SHDW_SCR5_SEL |
2158 MII_TG3_MISC_SHDW_SCR5_LPED |
2159 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2160 MII_TG3_MISC_SHDW_SCR5_SDTL |
2161 MII_TG3_MISC_SHDW_SCR5_C125OE;
2162 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2163 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2164
2165 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2166
2167
2168 reg = MII_TG3_MISC_SHDW_WREN |
2169 MII_TG3_MISC_SHDW_APD_SEL |
2170 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2171 if (enable)
2172 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2173
2174 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2175}
2176
2177static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2178{
2179 u32 phy;
2180
63c3a66f 2181 if (!tg3_flag(tp, 5705_PLUS) ||
f07e9af3 2182 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2183 return;
2184
f07e9af3 2185 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2186 u32 ephy;
2187
2188 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2189 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2190
2191 tg3_writephy(tp, MII_TG3_FET_TEST,
2192 ephy | MII_TG3_FET_SHADOW_EN);
2193 if (!tg3_readphy(tp, reg, &phy)) {
9ef8ca99 2194 if (enable)
535ef6e1 2195 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
9ef8ca99 2196 else
2197 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2198 tg3_writephy(tp, reg, phy);
9ef8ca99 2199 }
535ef6e1 2200 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2201 }
2202 } else {
2203 int ret;
2204
2205 ret = tg3_phy_auxctl_read(tp,
2206 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2207 if (!ret) {
2208 if (enable)
2209 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2210 else
2211 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2212 tg3_phy_auxctl_write(tp,
2213 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2214 }
2215 }
2216}
2217
2218static void tg3_phy_set_wirespeed(struct tg3 *tp)
2219{
15ee95c3 2220 int ret;
2221 u32 val;
2222
f07e9af3 2223 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2224 return;
2225
2226 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2227 if (!ret)
2228 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2229 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2230}
2231
2232static void tg3_phy_apply_otp(struct tg3 *tp)
2233{
2234 u32 otp, phy;
2235
2236 if (!tp->phy_otp)
2237 return;
2238
2239 otp = tp->phy_otp;
2240
daf3ec68 2241 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
1d36ba45 2242 return;
2243
2244 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2245 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2246 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2247
2248 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2249 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2250 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2251
2252 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2253 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2254 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2255
2256 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2257 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2258
2259 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2260 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2261
2262 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2263 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2264 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2265
daf3ec68 2266 tg3_phy_toggle_auxctl_smdsp(tp, false);
2267}
2268
2269static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2270{
2271 u32 val;
2272
2273 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2274 return;
2275
2276 tp->setlpicnt = 0;
2277
2278 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2279 current_link_up == 1 &&
2280 tp->link_config.active_duplex == DUPLEX_FULL &&
2281 (tp->link_config.active_speed == SPEED_100 ||
2282 tp->link_config.active_speed == SPEED_1000)) {
2283 u32 eeectl;
2284
2285 if (tp->link_config.active_speed == SPEED_1000)
2286 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2287 else
2288 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2289
2290 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2291
2292 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2293 TG3_CL45_D7_EEERES_STAT, &val);
52b02d04 2294
2295 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2296 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2297 tp->setlpicnt = 2;
2298 }
2299
2300 if (!tp->setlpicnt) {
b715ce94 2301 if (current_link_up == 1 &&
daf3ec68 2302 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
b715ce94 2303 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
daf3ec68 2304 tg3_phy_toggle_auxctl_smdsp(tp, false);
2305 }
2306
2307 val = tr32(TG3_CPMU_EEE_MODE);
2308 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2309 }
2310}
2311
2312static void tg3_phy_eee_enable(struct tg3 *tp)
2313{
2314 u32 val;
2315
2316 if (tp->link_config.active_speed == SPEED_1000 &&
2317 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2318 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
55086ad9 2319 tg3_flag(tp, 57765_CLASS)) &&
daf3ec68 2320 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2321 val = MII_TG3_DSP_TAP26_ALNOKO |
2322 MII_TG3_DSP_TAP26_RMRXSTO;
2323 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
daf3ec68 2324 tg3_phy_toggle_auxctl_smdsp(tp, false);
2325 }
2326
2327 val = tr32(TG3_CPMU_EEE_MODE);
2328 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2329}
2330
2331static int tg3_wait_macro_done(struct tg3 *tp)
2332{
2333 int limit = 100;
2334
2335 while (limit--) {
2336 u32 tmp32;
2337
f08aa1a8 2338 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2339 if ((tmp32 & 0x1000) == 0)
2340 break;
2341 }
2342 }
d4675b52 2343 if (limit < 0)
2344 return -EBUSY;
2345
2346 return 0;
2347}
2348
2349static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2350{
2351 static const u32 test_pat[4][6] = {
2352 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2353 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2354 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2355 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2356 };
2357 int chan;
2358
2359 for (chan = 0; chan < 4; chan++) {
2360 int i;
2361
2362 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2363 (chan * 0x2000) | 0x0200);
f08aa1a8 2364 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2365
2366 for (i = 0; i < 6; i++)
2367 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2368 test_pat[chan][i]);
2369
f08aa1a8 2370 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2371 if (tg3_wait_macro_done(tp)) {
2372 *resetp = 1;
2373 return -EBUSY;
2374 }
2375
2376 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2377 (chan * 0x2000) | 0x0200);
f08aa1a8 2378 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2379 if (tg3_wait_macro_done(tp)) {
2380 *resetp = 1;
2381 return -EBUSY;
2382 }
2383
f08aa1a8 2384 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2385 if (tg3_wait_macro_done(tp)) {
2386 *resetp = 1;
2387 return -EBUSY;
2388 }
2389
2390 for (i = 0; i < 6; i += 2) {
2391 u32 low, high;
2392
2393 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2394 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2395 tg3_wait_macro_done(tp)) {
2396 *resetp = 1;
2397 return -EBUSY;
2398 }
2399 low &= 0x7fff;
2400 high &= 0x000f;
2401 if (low != test_pat[chan][i] ||
2402 high != test_pat[chan][i+1]) {
2403 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2404 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2405 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2406
2407 return -EBUSY;
2408 }
2409 }
2410 }
2411
2412 return 0;
2413}
2414
2415static int tg3_phy_reset_chanpat(struct tg3 *tp)
2416{
2417 int chan;
2418
2419 for (chan = 0; chan < 4; chan++) {
2420 int i;
2421
2422 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2423 (chan * 0x2000) | 0x0200);
f08aa1a8 2424 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2425 for (i = 0; i < 6; i++)
2426 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
f08aa1a8 2427 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2428 if (tg3_wait_macro_done(tp))
2429 return -EBUSY;
2430 }
2431
2432 return 0;
2433}
2434
2435static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2436{
2437 u32 reg32, phy9_orig;
2438 int retries, do_phy_reset, err;
2439
2440 retries = 10;
2441 do_phy_reset = 1;
2442 do {
2443 if (do_phy_reset) {
2444 err = tg3_bmcr_reset(tp);
2445 if (err)
2446 return err;
2447 do_phy_reset = 0;
2448 }
2449
2450 /* Disable transmitter and interrupt. */
2451 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2452 continue;
2453
2454 reg32 |= 0x3000;
2455 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2456
2457 /* Set full-duplex, 1000 Mbps. */
2458 tg3_writephy(tp, MII_BMCR,
221c5637 2459 BMCR_FULLDPLX | BMCR_SPEED1000);
2460
2461 /* Set to master mode. */
221c5637 2462 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2463 continue;
2464
2465 tg3_writephy(tp, MII_CTRL1000,
2466 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
1da177e4 2467
daf3ec68 2468 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2469 if (err)
2470 return err;
2471
2472 /* Block the PHY control access. */
6ee7c0a0 2473 tg3_phydsp_write(tp, 0x8005, 0x0800);
2474
2475 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2476 if (!err)
2477 break;
2478 } while (--retries);
2479
2480 err = tg3_phy_reset_chanpat(tp);
2481 if (err)
2482 return err;
2483
6ee7c0a0 2484 tg3_phydsp_write(tp, 0x8005, 0x0000);
2485
2486 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
f08aa1a8 2487 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
1da177e4 2488
daf3ec68 2489 tg3_phy_toggle_auxctl_smdsp(tp, false);
1da177e4 2490
221c5637 2491 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2492
2493 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2494 reg32 &= ~0x3000;
2495 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2496 } else if (!err)
2497 err = -EBUSY;
2498
2499 return err;
2500}
2501
2502static void tg3_carrier_on(struct tg3 *tp)
2503{
2504 netif_carrier_on(tp->dev);
2505 tp->link_up = true;
2506}
2507
2508static void tg3_carrier_off(struct tg3 *tp)
2509{
2510 netif_carrier_off(tp->dev);
2511 tp->link_up = false;
2512}
2513
2514/* This will unconditionally reset the tigon3 PHY to bring it
2515 * back to a known state.
2516 */
2517static int tg3_phy_reset(struct tg3 *tp)
2518{
f833c4c1 2519 u32 val, cpmuctrl;
2520 int err;
2521
60189ddf 2522 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2523 val = tr32(GRC_MISC_CFG);
2524 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2525 udelay(40);
2526 }
2527 err = tg3_readphy(tp, MII_BMSR, &val);
2528 err |= tg3_readphy(tp, MII_BMSR, &val);
2529 if (err != 0)
2530 return -EBUSY;
2531
2532 if (netif_running(tp->dev) && tp->link_up) {
2533 tg3_carrier_off(tp);
2534 tg3_link_report(tp);
2535 }
2536
2537 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2538 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2539 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2540 err = tg3_phy_reset_5703_4_5(tp);
2541 if (err)
2542 return err;
2543 goto out;
2544 }
2545
2546 cpmuctrl = 0;
2547 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2548 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2549 cpmuctrl = tr32(TG3_CPMU_CTRL);
2550 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2551 tw32(TG3_CPMU_CTRL,
2552 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2553 }
2554
2555 err = tg3_bmcr_reset(tp);
2556 if (err)
2557 return err;
2558
b2a5c19c 2559 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2560 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2561 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2562
2563 tw32(TG3_CPMU_CTRL, cpmuctrl);
2564 }
2565
2566 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2567 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2568 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2569 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2570 CPMU_LSPD_1000MB_MACCLK_12_5) {
2571 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2572 udelay(40);
2573 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2574 }
2575 }
2576
63c3a66f 2577 if (tg3_flag(tp, 5717_PLUS) &&
f07e9af3 2578 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2579 return 0;
2580
2581 tg3_phy_apply_otp(tp);
2582
f07e9af3 2583 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2584 tg3_phy_toggle_apd(tp, true);
2585 else
2586 tg3_phy_toggle_apd(tp, false);
2587
1da177e4 2588out:
1d36ba45 2589 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
daf3ec68 2590 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2591 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2592 tg3_phydsp_write(tp, 0x000a, 0x0323);
daf3ec68 2593 tg3_phy_toggle_auxctl_smdsp(tp, false);
1da177e4 2594 }
1d36ba45 2595
f07e9af3 2596 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2597 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2598 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
1da177e4 2599 }
1d36ba45 2600
f07e9af3 2601 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
daf3ec68 2602 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2603 tg3_phydsp_write(tp, 0x000a, 0x310b);
2604 tg3_phydsp_write(tp, 0x201f, 0x9506);
2605 tg3_phydsp_write(tp, 0x401f, 0x14e2);
daf3ec68 2606 tg3_phy_toggle_auxctl_smdsp(tp, false);
1d36ba45 2607 }
f07e9af3 2608 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
daf3ec68 2609 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2610 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2611 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2612 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2613 tg3_writephy(tp, MII_TG3_TEST1,
2614 MII_TG3_TEST1_TRIM_EN | 0x4);
2615 } else
2616 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2617
daf3ec68 2618 tg3_phy_toggle_auxctl_smdsp(tp, false);
1d36ba45 2619 }
c424cb24 2620 }
1d36ba45 2621
2622 /* Set the extended packet length bit (bit 14) on all chips that
2623 * support jumbo frames. */
79eb6904 2624 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1da177e4 2625 /* Cannot do read-modify-write on 5401 */
b4bd2929 2626 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
63c3a66f 2627 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
1da177e4 2628 /* Set bit 14 with read-modify-write to preserve other bits */
2629 err = tg3_phy_auxctl_read(tp,
2630 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2631 if (!err)
2632 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2633 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2634 }
2635
2636 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2637 * jumbo frames transmission.
2638 */
63c3a66f 2639 if (tg3_flag(tp, JUMBO_CAPABLE)) {
f833c4c1 2640 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
c6cdf436 2641 tg3_writephy(tp, MII_TG3_EXT_CTRL,
f833c4c1 2642 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2643 }
2644
715116a1 2645 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
715116a1 2646 /* adjust output voltage */
535ef6e1 2647 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2648 }
2649
2650 if (tp->pci_chip_rev_id == CHIPREV_ID_5762_A0)
2651 tg3_phydsp_write(tp, 0xffb, 0x4000);
2652
9ef8ca99 2653 tg3_phy_toggle_automdix(tp, 1);
2654 tg3_phy_set_wirespeed(tp);
2655 return 0;
2656}
2657
2658#define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2659#define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2660#define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2661 TG3_GPIO_MSG_NEED_VAUX)
2662#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2663 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2664 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2665 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2666 (TG3_GPIO_MSG_DRVR_PRES << 12))
2667
2668#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2669 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2670 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2671 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2672 (TG3_GPIO_MSG_NEED_VAUX << 12))
2673
2674static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2675{
2676 u32 status, shift;
2677
2678 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2679 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2680 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2681 else
2682 status = tr32(TG3_CPMU_DRV_STATUS);
2683
2684 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2685 status &= ~(TG3_GPIO_MSG_MASK << shift);
2686 status |= (newstat << shift);
2687
2688 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2689 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2690 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2691 else
2692 tw32(TG3_CPMU_DRV_STATUS, status);
2693
2694 return status >> TG3_APE_GPIO_MSG_SHIFT;
2695}
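/* Each PCI function owns one 4-bit field of the status word, so for
 * pci_fn == 2 the shift above is TG3_APE_GPIO_MSG_SHIFT + 8 and only
 * that function's DRVR_PRES/NEED_VAUX bits are replaced by newstat.
 */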
2696
2697static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2698{
2699 if (!tg3_flag(tp, IS_NIC))
2700 return 0;
2701
2702 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2703 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2704 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2705 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2706 return -EIO;
520b2756 2707
2708 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2709
2710 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2711 TG3_GRC_LCLCTL_PWRSW_DELAY);
2712
2713 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2714 } else {
2715 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2716 TG3_GRC_LCLCTL_PWRSW_DELAY);
2717 }
6f5c8f83 2718
2719 return 0;
2720}
2721
2722static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2723{
2724 u32 grc_local_ctrl;
2725
2726 if (!tg3_flag(tp, IS_NIC) ||
2727 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2728 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2729 return;
2730
2731 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2732
2733 tw32_wait_f(GRC_LOCAL_CTRL,
2734 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2735 TG3_GRC_LCLCTL_PWRSW_DELAY);
2736
2737 tw32_wait_f(GRC_LOCAL_CTRL,
2738 grc_local_ctrl,
2739 TG3_GRC_LCLCTL_PWRSW_DELAY);
2740
2741 tw32_wait_f(GRC_LOCAL_CTRL,
2742 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2743 TG3_GRC_LCLCTL_PWRSW_DELAY);
2744}
2745
2746static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2747{
2748 if (!tg3_flag(tp, IS_NIC))
2749 return;
2750
2751 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2752 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2753 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2754 (GRC_LCLCTRL_GPIO_OE0 |
2755 GRC_LCLCTRL_GPIO_OE1 |
2756 GRC_LCLCTRL_GPIO_OE2 |
2757 GRC_LCLCTRL_GPIO_OUTPUT0 |
2758 GRC_LCLCTRL_GPIO_OUTPUT1),
2759 TG3_GRC_LCLCTL_PWRSW_DELAY);
2760 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2761 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2762 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2763 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2764 GRC_LCLCTRL_GPIO_OE1 |
2765 GRC_LCLCTRL_GPIO_OE2 |
2766 GRC_LCLCTRL_GPIO_OUTPUT0 |
2767 GRC_LCLCTRL_GPIO_OUTPUT1 |
2768 tp->grc_local_ctrl;
2769 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2770 TG3_GRC_LCLCTL_PWRSW_DELAY);
2771
2772 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2773 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2774 TG3_GRC_LCLCTL_PWRSW_DELAY);
2775
2776 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2777 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2778 TG3_GRC_LCLCTL_PWRSW_DELAY);
2779 } else {
2780 u32 no_gpio2;
2781 u32 grc_local_ctrl = 0;
2782
2783 /* Workaround to prevent overdrawing Amps. */
2784 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2785 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2786 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2787 grc_local_ctrl,
2788 TG3_GRC_LCLCTL_PWRSW_DELAY);
2789 }
2790
2791 /* On 5753 and variants, GPIO2 cannot be used. */
2792 no_gpio2 = tp->nic_sram_data_cfg &
2793 NIC_SRAM_DATA_CFG_NO_GPIO2;
2794
2795 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2796 GRC_LCLCTRL_GPIO_OE1 |
2797 GRC_LCLCTRL_GPIO_OE2 |
2798 GRC_LCLCTRL_GPIO_OUTPUT1 |
2799 GRC_LCLCTRL_GPIO_OUTPUT2;
2800 if (no_gpio2) {
2801 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2802 GRC_LCLCTRL_GPIO_OUTPUT2);
2803 }
2804 tw32_wait_f(GRC_LOCAL_CTRL,
2805 tp->grc_local_ctrl | grc_local_ctrl,
2806 TG3_GRC_LCLCTL_PWRSW_DELAY);
2807
2808 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2809
2810 tw32_wait_f(GRC_LOCAL_CTRL,
2811 tp->grc_local_ctrl | grc_local_ctrl,
2812 TG3_GRC_LCLCTL_PWRSW_DELAY);
2813
2814 if (!no_gpio2) {
2815 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2816 tw32_wait_f(GRC_LOCAL_CTRL,
2817 tp->grc_local_ctrl | grc_local_ctrl,
2818 TG3_GRC_LCLCTL_PWRSW_DELAY);
2819 }
2820 }
2821}
2822
cd0d7228 2823static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2824{
2825 u32 msg = 0;
2826
2827 /* Serialize power state transitions */
2828 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2829 return;
2830
cd0d7228 2831 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2832 msg = TG3_GPIO_MSG_NEED_VAUX;
2833
2834 msg = tg3_set_function_status(tp, msg);
2835
2836 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2837 goto done;
6f5c8f83 2838
2839 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2840 tg3_pwrsrc_switch_to_vaux(tp);
2841 else
2842 tg3_pwrsrc_die_with_vmain(tp);
2843
2844done:
6f5c8f83 2845 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2846}
2847
cd0d7228 2848static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
1da177e4 2849{
683644b7 2850 bool need_vaux = false;
1da177e4 2851
334355aa 2852 /* The GPIOs do something completely different on 57765. */
55086ad9 2853 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2854 return;
2855
2856 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2857 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2858 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
cd0d7228
MC
2859 tg3_frob_aux_power_5717(tp, include_wol ?
2860 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
3a1e19d3
MC
2861 return;
2862 }
2863
2864 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2865 struct net_device *dev_peer;
2866
2867 dev_peer = pci_get_drvdata(tp->pdev_peer);
683644b7 2868
bc1c7567 2869 /* remove_one() may have been run on the peer. */
2870 if (dev_peer) {
2871 struct tg3 *tp_peer = netdev_priv(dev_peer);
2872
63c3a66f 2873 if (tg3_flag(tp_peer, INIT_COMPLETE))
2874 return;
2875
cd0d7228 2876 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
63c3a66f 2877 tg3_flag(tp_peer, ENABLE_ASF))
2878 need_vaux = true;
2879 }
2880 }
2881
2882 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2883 tg3_flag(tp, ENABLE_ASF))
2884 need_vaux = true;
2885
2886 if (need_vaux)
2887 tg3_pwrsrc_switch_to_vaux(tp);
2888 else
2889 tg3_pwrsrc_die_with_vmain(tp);
2890}
2891
2892static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2893{
2894 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2895 return 1;
79eb6904 2896 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2897 if (speed != SPEED_10)
2898 return 1;
2899 } else if (speed == SPEED_10)
2900 return 1;
2901
2902 return 0;
2903}
2904
0a459aac 2905static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
15c3b696 2906{
2907 u32 val;
2908
f07e9af3 2909 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2910 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2911 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2912 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2913
2914 sg_dig_ctrl |=
2915 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2916 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2917 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2918 }
3f7045c1 2919 return;
5129724a 2920 }
3f7045c1 2921
MC
2923 tg3_bmcr_reset(tp);
2924 val = tr32(GRC_MISC_CFG);
2925 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2926 udelay(40);
2927 return;
f07e9af3 2928 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2929 u32 phytest;
2930 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2931 u32 phy;
2932
2933 tg3_writephy(tp, MII_ADVERTISE, 0);
2934 tg3_writephy(tp, MII_BMCR,
2935 BMCR_ANENABLE | BMCR_ANRESTART);
2936
2937 tg3_writephy(tp, MII_TG3_FET_TEST,
2938 phytest | MII_TG3_FET_SHADOW_EN);
2939 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2940 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2941 tg3_writephy(tp,
2942 MII_TG3_FET_SHDW_AUXMODE4,
2943 phy);
2944 }
2945 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2946 }
2947 return;
0a459aac 2948 } else if (do_low_power) {
2949 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2950 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
0a459aac 2951
2952 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2953 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2954 MII_TG3_AUXCTL_PCTL_VREG_11V;
2955 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
715116a1 2956 }
3f7045c1 2957
2958 /* The PHY should not be powered down on some chips because
2959 * of bugs.
2960 */
2961 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2962 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2963 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2964 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2965 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2966 !tp->pci_fn))
15c3b696 2967 return;
ce057f01 2968
2969 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2970 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2971 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2972 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2973 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2974 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2975 }
2976
2977 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2978}
2979
2980/* tp->lock is held. */
2981static int tg3_nvram_lock(struct tg3 *tp)
2982{
63c3a66f 2983 if (tg3_flag(tp, NVRAM)) {
2984 int i;
2985
2986 if (tp->nvram_lock_cnt == 0) {
2987 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2988 for (i = 0; i < 8000; i++) {
2989 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2990 break;
2991 udelay(20);
2992 }
2993 if (i == 8000) {
2994 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2995 return -ENODEV;
2996 }
2997 }
2998 tp->nvram_lock_cnt++;
2999 }
3000 return 0;
3001}
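/* The lock is counted via nvram_lock_cnt, so nested lock/unlock
 * pairs are safe and only the outermost caller touches NVRAM_SWARB.
 * The acquisition loop above bounds the wait at 8000 x 20 us = 160 ms.
 */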
3002
3003/* tp->lock is held. */
3004static void tg3_nvram_unlock(struct tg3 *tp)
3005{
63c3a66f 3006 if (tg3_flag(tp, NVRAM)) {
ffbcfed4
MC
3007 if (tp->nvram_lock_cnt > 0)
3008 tp->nvram_lock_cnt--;
3009 if (tp->nvram_lock_cnt == 0)
3010 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3011 }
3012}
3013
3014/* tp->lock is held. */
3015static void tg3_enable_nvram_access(struct tg3 *tp)
3016{
63c3a66f 3017 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3018 u32 nvaccess = tr32(NVRAM_ACCESS);
3019
3020 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3021 }
3022}
3023
3024/* tp->lock is held. */
3025static void tg3_disable_nvram_access(struct tg3 *tp)
3026{
63c3a66f 3027 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3028 u32 nvaccess = tr32(NVRAM_ACCESS);
3029
3030 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3031 }
3032}
3033
3034static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3035 u32 offset, u32 *val)
3036{
3037 u32 tmp;
3038 int i;
3039
3040 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3041 return -EINVAL;
3042
3043 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3044 EEPROM_ADDR_DEVID_MASK |
3045 EEPROM_ADDR_READ);
3046 tw32(GRC_EEPROM_ADDR,
3047 tmp |
3048 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3049 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3050 EEPROM_ADDR_ADDR_MASK) |
3051 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3052
3053 for (i = 0; i < 1000; i++) {
3054 tmp = tr32(GRC_EEPROM_ADDR);
3055
3056 if (tmp & EEPROM_ADDR_COMPLETE)
3057 break;
3058 msleep(1);
3059 }
3060 if (!(tmp & EEPROM_ADDR_COMPLETE))
3061 return -EBUSY;
3062
3063 tmp = tr32(GRC_EEPROM_DATA);
3064
3065 /*
3066 * The data will always be opposite the native endian
3067 * format. Perform a blind byteswap to compensate.
3068 */
3069 *val = swab32(tmp);
3070
3071 return 0;
3072}
3073
3074#define NVRAM_CMD_TIMEOUT 10000
3075
3076static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3077{
3078 int i;
3079
3080 tw32(NVRAM_CMD, nvram_cmd);
3081 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3082 udelay(10);
3083 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3084 udelay(10);
3085 break;
3086 }
3087 }
3088
3089 if (i == NVRAM_CMD_TIMEOUT)
3090 return -EBUSY;
3091
3092 return 0;
3093}
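/* The poll above bounds each command at NVRAM_CMD_TIMEOUT iterations
 * of 10 us, i.e. about 100 ms before giving up with -EBUSY.
 */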
3094
3095static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3096{
3097 if (tg3_flag(tp, NVRAM) &&
3098 tg3_flag(tp, NVRAM_BUFFERED) &&
3099 tg3_flag(tp, FLASH) &&
3100 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3101 (tp->nvram_jedecnum == JEDEC_ATMEL))
3102
3103 addr = ((addr / tp->nvram_pagesize) <<
3104 ATMEL_AT45DB0X1B_PAGE_POS) +
3105 (addr % tp->nvram_pagesize);
3106
3107 return addr;
3108}
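/* Worked example of the translation above, assuming the 264-byte
 * pages and 9-bit page position used by these Atmel parts: for
 * addr = 1000, page = 1000 / 264 = 3 and offset = 1000 % 264 = 208,
 * so the physical address is (3 << 9) + 208 = 1744.
 */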
3109
3110static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3111{
3112 if (tg3_flag(tp, NVRAM) &&
3113 tg3_flag(tp, NVRAM_BUFFERED) &&
3114 tg3_flag(tp, FLASH) &&
3115 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3116 (tp->nvram_jedecnum == JEDEC_ATMEL))
3117
3118 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3119 tp->nvram_pagesize) +
3120 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3121
3122 return addr;
3123}
3124
3125/* NOTE: Data read in from NVRAM is byteswapped according to
3126 * the byteswapping settings for all other register accesses.
3127 * tg3 devices are BE devices, so on a BE machine, the data
3128 * returned will be exactly as it is seen in NVRAM. On a LE
3129 * machine, the 32-bit value will be byteswapped.
3130 */
3131static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3132{
3133 int ret;
3134
63c3a66f 3135 if (!tg3_flag(tp, NVRAM))
3136 return tg3_nvram_read_using_eeprom(tp, offset, val);
3137
3138 offset = tg3_nvram_phys_addr(tp, offset);
3139
3140 if (offset > NVRAM_ADDR_MSK)
3141 return -EINVAL;
3142
3143 ret = tg3_nvram_lock(tp);
3144 if (ret)
3145 return ret;
3146
3147 tg3_enable_nvram_access(tp);
3148
3149 tw32(NVRAM_ADDR, offset);
3150 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3151 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3152
3153 if (ret == 0)
e4f34110 3154 *val = tr32(NVRAM_RDDATA);
3155
3156 tg3_disable_nvram_access(tp);
3157
3158 tg3_nvram_unlock(tp);
3159
3160 return ret;
3161}
3162
3163/* Ensures NVRAM data is in bytestream format. */
3164static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3165{
3166 u32 v;
a9dc529d 3167 int res = tg3_nvram_read(tp, offset, &v);
ffbcfed4 3168 if (!res)
a9dc529d 3169 *val = cpu_to_be32(v);
3170 return res;
3171}
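/* Illustration: if NVRAM holds the bytes aa bb cc dd at 'offset',
 * tg3_nvram_read() returns the value 0xaabbccdd regardless of host
 * endianness, and the cpu_to_be32() above lays it out in memory as
 * aa bb cc dd again -- a byte stream, which is what callers that
 * treat NVRAM contents as raw bytes expect.
 */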
3172
3173static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3174 u32 offset, u32 len, u8 *buf)
3175{
3176 int i, j, rc = 0;
3177 u32 val;
3178
3179 for (i = 0; i < len; i += 4) {
3180 u32 addr;
3181 __be32 data;
3182
3183 addr = offset + i;
3184
3185 memcpy(&data, buf + i, 4);
3186
3187 /*
3188 * The SEEPROM interface expects the data to always be opposite
3189 * the native endian format. We accomplish this by reversing
3190 * all the operations that would have been performed on the
3191 * data from a call to tg3_nvram_read_be32().
3192 */
3193 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3194
3195 val = tr32(GRC_EEPROM_ADDR);
3196 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3197
3198 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3199 EEPROM_ADDR_READ);
3200 tw32(GRC_EEPROM_ADDR, val |
3201 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3202 (addr & EEPROM_ADDR_ADDR_MASK) |
3203 EEPROM_ADDR_START |
3204 EEPROM_ADDR_WRITE);
3205
3206 for (j = 0; j < 1000; j++) {
3207 val = tr32(GRC_EEPROM_ADDR);
3208
3209 if (val & EEPROM_ADDR_COMPLETE)
3210 break;
3211 msleep(1);
3212 }
3213 if (!(val & EEPROM_ADDR_COMPLETE)) {
3214 rc = -EBUSY;
3215 break;
3216 }
3217 }
3218
3219 return rc;
3220}
3221
3222/* offset and length are dword aligned */
3223static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3224 u8 *buf)
3225{
3226 int ret = 0;
3227 u32 pagesize = tp->nvram_pagesize;
3228 u32 pagemask = pagesize - 1;
3229 u32 nvram_cmd;
3230 u8 *tmp;
3231
3232 tmp = kmalloc(pagesize, GFP_KERNEL);
3233 if (tmp == NULL)
3234 return -ENOMEM;
3235
3236 while (len) {
3237 int j;
3238 u32 phy_addr, page_off, size;
3239
3240 phy_addr = offset & ~pagemask;
3241
3242 for (j = 0; j < pagesize; j += 4) {
3243 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3244 (__be32 *) (tmp + j));
3245 if (ret)
3246 break;
3247 }
3248 if (ret)
3249 break;
3250
3251 page_off = offset & pagemask;
3252 size = pagesize;
3253 if (len < size)
3254 size = len;
3255
3256 len -= size;
3257
3258 memcpy(tmp + page_off, buf, size);
3259
3260 offset = offset + (pagesize - page_off);
3261
3262 tg3_enable_nvram_access(tp);
3263
3264 /*
3265 * Before we can erase the flash page, we need
3266 * to issue a special "write enable" command.
3267 */
3268 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3269
3270 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3271 break;
3272
3273 /* Erase the target page */
3274 tw32(NVRAM_ADDR, phy_addr);
3275
3276 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3277 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3278
3279 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3280 break;
3281
3282 /* Issue another write enable to start the write. */
3283 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3284
3285 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3286 break;
3287
3288 for (j = 0; j < pagesize; j += 4) {
3289 __be32 data;
3290
3291 data = *((__be32 *) (tmp + j));
3292
3293 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3294
3295 tw32(NVRAM_ADDR, phy_addr + j);
3296
3297 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3298 NVRAM_CMD_WR;
3299
3300 if (j == 0)
3301 nvram_cmd |= NVRAM_CMD_FIRST;
3302 else if (j == (pagesize - 4))
3303 nvram_cmd |= NVRAM_CMD_LAST;
3304
3305 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3306 if (ret)
3307 break;
3308 }
3309 if (ret)
3310 break;
3311 }
3312
3313 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3314 tg3_nvram_exec_cmd(tp, nvram_cmd);
3315
3316 kfree(tmp);
3317
3318 return ret;
3319}
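/* The unbuffered path above is read-modify-write at flash page
 * granularity: read the whole page into 'tmp', merge in the new
 * bytes, issue a write enable, erase the page, then rewrite it word
 * by word with NVRAM_CMD_FIRST/NVRAM_CMD_LAST bracketing the burst.
 */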
3320
3321/* offset and length are dword aligned */
3322static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3323 u8 *buf)
3324{
3325 int i, ret = 0;
3326
3327 for (i = 0; i < len; i += 4, offset += 4) {
3328 u32 page_off, phy_addr, nvram_cmd;
3329 __be32 data;
3330
3331 memcpy(&data, buf + i, 4);
3332 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3333
3334 page_off = offset % tp->nvram_pagesize;
3335
3336 phy_addr = tg3_nvram_phys_addr(tp, offset);
3337
3338 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3339
3340 if (page_off == 0 || i == 0)
3341 nvram_cmd |= NVRAM_CMD_FIRST;
3342 if (page_off == (tp->nvram_pagesize - 4))
3343 nvram_cmd |= NVRAM_CMD_LAST;
3344
3345 if (i == (len - 4))
3346 nvram_cmd |= NVRAM_CMD_LAST;
3347
3348 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3349 !tg3_flag(tp, FLASH) ||
3350 !tg3_flag(tp, 57765_PLUS))
3351 tw32(NVRAM_ADDR, phy_addr);
3352
3353 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3354 !tg3_flag(tp, 5755_PLUS) &&
3355 (tp->nvram_jedecnum == JEDEC_ST) &&
3356 (nvram_cmd & NVRAM_CMD_FIRST)) {
3357 u32 cmd;
3358
3359 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3360 ret = tg3_nvram_exec_cmd(tp, cmd);
3361 if (ret)
3362 break;
3363 }
3364 if (!tg3_flag(tp, FLASH)) {
3365 /* We always do complete word writes to eeprom. */
3366 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3367 }
3368
3369 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3370 if (ret)
3371 break;
3372 }
3373 return ret;
3374}
3375
3376/* offset and length are dword aligned */
3377static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3378{
3379 int ret;
3380
3381 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3382 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3383 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3384 udelay(40);
3385 }
3386
3387 if (!tg3_flag(tp, NVRAM)) {
3388 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3389 } else {
3390 u32 grc_mode;
3391
3392 ret = tg3_nvram_lock(tp);
3393 if (ret)
3394 return ret;
3395
3396 tg3_enable_nvram_access(tp);
3397 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3398 tw32(NVRAM_WRITE1, 0x406);
3399
3400 grc_mode = tr32(GRC_MODE);
3401 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3402
3403 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3404 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3405 buf);
3406 } else {
3407 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3408 buf);
3409 }
3410
3411 grc_mode = tr32(GRC_MODE);
3412 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3413
3414 tg3_disable_nvram_access(tp);
3415 tg3_nvram_unlock(tp);
3416 }
3417
3418 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3419 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3420 udelay(40);
3421 }
3422
3423 return ret;
3424}
3425
3426#define RX_CPU_SCRATCH_BASE 0x30000
3427#define RX_CPU_SCRATCH_SIZE 0x04000
3428#define TX_CPU_SCRATCH_BASE 0x34000
3429#define TX_CPU_SCRATCH_SIZE 0x04000
3430
3431/* tp->lock is held. */
3432static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3433{
3434 int i;
3435
3436 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3437
3438 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3439 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3440
3441 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3442 return 0;
3443 }
3444 if (offset == RX_CPU_BASE) {
3445 for (i = 0; i < 10000; i++) {
3446 tw32(offset + CPU_STATE, 0xffffffff);
3447 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3448 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3449 break;
3450 }
3451
3452 tw32(offset + CPU_STATE, 0xffffffff);
3453 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3454 udelay(10);
3455 } else {
3456 for (i = 0; i < 10000; i++) {
3457 tw32(offset + CPU_STATE, 0xffffffff);
3458 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3459 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3460 break;
3461 }
3462 }
3463
3464 if (i >= 10000) {
3465 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3466 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3467 return -ENODEV;
3468 }
3469
3470 /* Clear firmware's nvram arbitration. */
3471 if (tg3_flag(tp, NVRAM))
3472 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3473 return 0;
3474}
3475
3476struct fw_info {
3477 unsigned int fw_base;
3478 unsigned int fw_len;
3479 const __be32 *fw_data;
3480};
3481
3482/* tp->lock is held. */
3483static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3484 u32 cpu_scratch_base, int cpu_scratch_size,
3485 struct fw_info *info)
3486{
3487 int err, lock_err, i;
3488 void (*write_op)(struct tg3 *, u32, u32);
3489
3490 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3491 netdev_err(tp->dev,
3492 "%s: Trying to load TX cpu firmware on a 5705-class chip\n",
3493 __func__);
3494 return -EINVAL;
3495 }
3496
3497 if (tg3_flag(tp, 5705_PLUS))
3498 write_op = tg3_write_mem;
3499 else
3500 write_op = tg3_write_indirect_reg32;
3501
3502 /* It is possible that bootcode is still loading at this point.
3503 * Get the nvram lock first before halting the cpu.
3504 */
3505 lock_err = tg3_nvram_lock(tp);
3506 err = tg3_halt_cpu(tp, cpu_base);
3507 if (!lock_err)
3508 tg3_nvram_unlock(tp);
3509 if (err)
3510 goto out;
3511
3512 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3513 write_op(tp, cpu_scratch_base + i, 0);
3514 tw32(cpu_base + CPU_STATE, 0xffffffff);
3515 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3516 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3517 write_op(tp, (cpu_scratch_base +
3518 (info->fw_base & 0xffff) +
3519 (i * sizeof(u32))),
3520 be32_to_cpu(info->fw_data[i]));
3521
3522 err = 0;
3523
3524out:
3525 return err;
3526}
3527
3528/* tp->lock is held. */
3529static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3530{
3531 struct fw_info info;
3532 const __be32 *fw_data;
3533 int err, i;
3534
3535 fw_data = (void *)tp->fw->data;
3536
3537 /* Firmware blob starts with version numbers, followed by
3538 * start address and length. We are setting complete length:
3539 * length = end_address_of_bss - start_address_of_text.
3540 * Remainder is the blob to be loaded contiguously
3541 * from start address. */
3542
3543 info.fw_base = be32_to_cpu(fw_data[1]);
3544 info.fw_len = tp->fw->size - 12;
3545 info.fw_data = &fw_data[3];
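 /* The 12 bytes skipped above are the three header words:
 * fw_data[0] holds version info, fw_data[1] the load address and
 * fw_data[2] the blob's own length word, which the driver ignores
 * in favor of the length computed from the file size.
 */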
3546
3547 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3548 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3549 &info);
3550 if (err)
3551 return err;
3552
3553 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3554 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3555 &info);
3556 if (err)
3557 return err;
3558
3559 /* Now startup only the RX cpu. */
3560 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3561 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3562
3563 for (i = 0; i < 5; i++) {
3564 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3565 break;
3566 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3567 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3568 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3569 udelay(1000);
3570 }
3571 if (i >= 5) {
3572 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3573 "should be %08x\n", __func__,
3574 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3575 return -ENODEV;
3576 }
3577 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3578 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3579
3580 return 0;
3581}
3582
3583/* tp->lock is held. */
3584static int tg3_load_tso_firmware(struct tg3 *tp)
3585{
3586 struct fw_info info;
3587 const __be32 *fw_data;
3588 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3589 int err, i;
3590
3591 if (tg3_flag(tp, HW_TSO_1) ||
3592 tg3_flag(tp, HW_TSO_2) ||
3593 tg3_flag(tp, HW_TSO_3))
3594 return 0;
3595
3596 fw_data = (void *)tp->fw->data;
3597
3598 /* Firmware blob starts with version numbers, followed by
3599 * start address and length. We are setting complete length:
3600 * length = end_address_of_bss - start_address_of_text.
3601 * Remainder is the blob to be loaded contiguously
3602 * from start address. */
3603
3604 info.fw_base = be32_to_cpu(fw_data[1]);
3605 cpu_scratch_size = tp->fw_len;
3606 info.fw_len = tp->fw->size - 12;
3607 info.fw_data = &fw_data[3];
3608
3609 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3610 cpu_base = RX_CPU_BASE;
3611 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3612 } else {
3613 cpu_base = TX_CPU_BASE;
3614 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3615 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3616 }
3617
3618 err = tg3_load_firmware_cpu(tp, cpu_base,
3619 cpu_scratch_base, cpu_scratch_size,
3620 &info);
3621 if (err)
3622 return err;
3623
3624 /* Now startup the cpu. */
3625 tw32(cpu_base + CPU_STATE, 0xffffffff);
3626 tw32_f(cpu_base + CPU_PC, info.fw_base);
3627
3628 for (i = 0; i < 5; i++) {
3629 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3630 break;
3631 tw32(cpu_base + CPU_STATE, 0xffffffff);
3632 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3633 tw32_f(cpu_base + CPU_PC, info.fw_base);
3634 udelay(1000);
3635 }
3636 if (i >= 5) {
3637 netdev_err(tp->dev,
3638 "%s fails to set CPU PC, is %08x should be %08x\n",
3639 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3640 return -ENODEV;
3641 }
3642 tw32(cpu_base + CPU_STATE, 0xffffffff);
3643 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3644 return 0;
3645}
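
/*
 * Minimal sketch of the "start CPU and verify PC" pattern repeated in
 * the two loaders above (hypothetical helper, not in the driver): write
 * the entry point, read it back, and retry a bounded number of times,
 * re-halting the CPU between attempts, before giving up.
 */
static int tg3_start_cpu_sketch(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;

	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, pc);

	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}
	if (i >= 5)
		return -ENODEV;

	/* Clear the HALT bit so the CPU starts executing. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE, 0x00000000);
	return 0;
}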
3646
3647
3f007891
MC
3648/* tp->lock is held. */
3649static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3650{
3651 u32 addr_high, addr_low;
3652 int i;
3653
3654 addr_high = ((tp->dev->dev_addr[0] << 8) |
3655 tp->dev->dev_addr[1]);
3656 addr_low = ((tp->dev->dev_addr[2] << 24) |
3657 (tp->dev->dev_addr[3] << 16) |
3658 (tp->dev->dev_addr[4] << 8) |
3659 (tp->dev->dev_addr[5] << 0));
3660 for (i = 0; i < 4; i++) {
3661 if (i == 1 && skip_mac_1)
3662 continue;
3663 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3664 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3665 }
3666
3667 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3668 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3669 for (i = 0; i < 12; i++) {
3670 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3671 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3672 }
3673 }
3674
3675 addr_high = (tp->dev->dev_addr[0] +
3676 tp->dev->dev_addr[1] +
3677 tp->dev->dev_addr[2] +
3678 tp->dev->dev_addr[3] +
3679 tp->dev->dev_addr[4] +
3680 tp->dev->dev_addr[5]) &
3681 TX_BACKOFF_SEED_MASK;
3682 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3683}
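
/*
 * Standalone sketch of the packing done in __tg3_set_mac_addr()
 * (hypothetical helper): the 6-byte station address becomes a 16-bit
 * "high" register (bytes 0-1) and a 32-bit "low" register (bytes 2-5),
 * and the byte sum seeds the transmit backoff generator.
 */
static void tg3_mac_addr_regs_sketch(const u8 *mac, u32 *hi, u32 *lo,
				     u32 *seed)
{
	*hi = (mac[0] << 8) | mac[1];
	*lo = (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5];
	*seed = (mac[0] + mac[1] + mac[2] + mac[3] + mac[4] + mac[5]) &
		TX_BACKOFF_SEED_MASK;
}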
3684
c866b7ea 3685static void tg3_enable_register_access(struct tg3 *tp)
1da177e4 3686{
c866b7ea
RW
3687 /*
3688 * Make sure register accesses (indirect or otherwise) will function
3689 * correctly.
1da177e4
LT
3690 */
3691 pci_write_config_dword(tp->pdev,
c866b7ea
RW
3692 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3693}
1da177e4 3694
c866b7ea
RW
3695static int tg3_power_up(struct tg3 *tp)
3696{
bed9829f 3697 int err;
8c6bda1a 3698
bed9829f 3699 tg3_enable_register_access(tp);
1da177e4 3700
bed9829f
MC
3701 err = pci_set_power_state(tp->pdev, PCI_D0);
3702 if (!err) {
3703 /* Switch out of Vaux if it is a NIC */
3704 tg3_pwrsrc_switch_to_vmain(tp);
3705 } else {
3706 netdev_err(tp->dev, "Transition to D0 failed\n");
3707 }
1da177e4 3708
bed9829f 3709 return err;
c866b7ea 3710}
1da177e4 3711
4b409522
MC
3712static int tg3_setup_phy(struct tg3 *, int);
3713
c866b7ea
RW
3714static int tg3_power_down_prepare(struct tg3 *tp)
3715{
3716 u32 misc_host_ctrl;
3717 bool device_should_wake, do_low_power;
3718
3719 tg3_enable_register_access(tp);
5e7dfd0f
MC
3720
3721 /* Restore the CLKREQ setting. */
0f49bfbd
JL
3722 if (tg3_flag(tp, CLKREQ_BUG))
3723 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3724 PCI_EXP_LNKCTL_CLKREQ_EN);
5e7dfd0f 3725
1da177e4
LT
3726 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3727 tw32(TG3PCI_MISC_HOST_CTRL,
3728 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3729
c866b7ea 3730 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
63c3a66f 3731 tg3_flag(tp, WOL_ENABLE);
05ac4cb7 3732
63c3a66f 3733 if (tg3_flag(tp, USE_PHYLIB)) {
0a459aac 3734 do_low_power = false;
f07e9af3 3735 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
80096068 3736 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
b02fd9e3 3737 struct phy_device *phydev;
0a459aac 3738 u32 phyid, advertising;
b02fd9e3 3739
3f0e3ad7 3740 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
b02fd9e3 3741
80096068 3742 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
b02fd9e3 3743
c6700ce2
MC
3744 tp->link_config.speed = phydev->speed;
3745 tp->link_config.duplex = phydev->duplex;
3746 tp->link_config.autoneg = phydev->autoneg;
3747 tp->link_config.advertising = phydev->advertising;
b02fd9e3
MC
3748
3749 advertising = ADVERTISED_TP |
3750 ADVERTISED_Pause |
3751 ADVERTISED_Autoneg |
3752 ADVERTISED_10baseT_Half;
3753
63c3a66f
JP
3754 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3755 if (tg3_flag(tp, WOL_SPEED_100MB))
b02fd9e3
MC
3756 advertising |=
3757 ADVERTISED_100baseT_Half |
3758 ADVERTISED_100baseT_Full |
3759 ADVERTISED_10baseT_Full;
3760 else
3761 advertising |= ADVERTISED_10baseT_Full;
3762 }
3763
3764 phydev->advertising = advertising;
3765
3766 phy_start_aneg(phydev);
0a459aac
MC
3767
3768 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
6a443a0f
MC
3769 if (phyid != PHY_ID_BCMAC131) {
3770 phyid &= PHY_BCM_OUI_MASK;
3771 if (phyid == PHY_BCM_OUI_1 ||
3772 phyid == PHY_BCM_OUI_2 ||
3773 phyid == PHY_BCM_OUI_3)
0a459aac
MC
3774 do_low_power = true;
3775 }
b02fd9e3 3776 }
dd477003 3777 } else {
2023276e 3778 do_low_power = true;
0a459aac 3779
c6700ce2 3780 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
80096068 3781 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
1da177e4 3782
2855b9fe 3783 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
dd477003 3784 tg3_setup_phy(tp, 0);
1da177e4
LT
3785 }
3786
b5d3772c
MC
3787 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3788 u32 val;
3789
3790 val = tr32(GRC_VCPU_EXT_CTRL);
3791 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
63c3a66f 3792 } else if (!tg3_flag(tp, ENABLE_ASF)) {
6921d201
MC
3793 int i;
3794 u32 val;
3795
3796 for (i = 0; i < 200; i++) {
3797 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3798 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3799 break;
3800 msleep(1);
3801 }
3802 }
63c3a66f 3803 if (tg3_flag(tp, WOL_CAP))
a85feb8c
GZ
3804 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3805 WOL_DRV_STATE_SHUTDOWN |
3806 WOL_DRV_WOL |
3807 WOL_SET_MAGIC_PKT);
6921d201 3808
05ac4cb7 3809 if (device_should_wake) {
1da177e4
LT
3810 u32 mac_mode;
3811
f07e9af3 3812 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
b4bd2929
MC
3813 if (do_low_power &&
3814 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3815 tg3_phy_auxctl_write(tp,
3816 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3817 MII_TG3_AUXCTL_PCTL_WOL_EN |
3818 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3819 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
dd477003
MC
3820 udelay(40);
3821 }
1da177e4 3822
f07e9af3 3823 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3f7045c1
MC
3824 mac_mode = MAC_MODE_PORT_MODE_GMII;
3825 else
3826 mac_mode = MAC_MODE_PORT_MODE_MII;
1da177e4 3827
e8f3f6ca
MC
3828 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3829 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3830 ASIC_REV_5700) {
63c3a66f 3831 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
e8f3f6ca
MC
3832 SPEED_100 : SPEED_10;
3833 if (tg3_5700_link_polarity(tp, speed))
3834 mac_mode |= MAC_MODE_LINK_POLARITY;
3835 else
3836 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3837 }
1da177e4
LT
3838 } else {
3839 mac_mode = MAC_MODE_PORT_MODE_TBI;
3840 }
3841
63c3a66f 3842 if (!tg3_flag(tp, 5750_PLUS))
1da177e4
LT
3843 tw32(MAC_LED_CTRL, tp->led_ctrl);
3844
05ac4cb7 3845 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
63c3a66f
JP
3846 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3847 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
05ac4cb7 3848 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
1da177e4 3849
63c3a66f 3850 if (tg3_flag(tp, ENABLE_APE))
d2394e6b
MC
3851 mac_mode |= MAC_MODE_APE_TX_EN |
3852 MAC_MODE_APE_RX_EN |
3853 MAC_MODE_TDE_ENABLE;
3bda1258 3854
1da177e4
LT
3855 tw32_f(MAC_MODE, mac_mode);
3856 udelay(100);
3857
3858 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3859 udelay(10);
3860 }
3861
63c3a66f 3862 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
1da177e4
LT
3863 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3864 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3865 u32 base_val;
3866
3867 base_val = tp->pci_clock_ctrl;
3868 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3869 CLOCK_CTRL_TXCLK_DISABLE);
3870
b401e9e2
MC
3871 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3872 CLOCK_CTRL_PWRDOWN_PLL133, 40);
63c3a66f
JP
3873 } else if (tg3_flag(tp, 5780_CLASS) ||
3874 tg3_flag(tp, CPMU_PRESENT) ||
6ff6f81d 3875 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
4cf78e4f 3876 /* do nothing */
63c3a66f 3877 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
1da177e4
LT
3878 u32 newbits1, newbits2;
3879
3880 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3881 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3882 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3883 CLOCK_CTRL_TXCLK_DISABLE |
3884 CLOCK_CTRL_ALTCLK);
3885 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
63c3a66f 3886 } else if (tg3_flag(tp, 5705_PLUS)) {
1da177e4
LT
3887 newbits1 = CLOCK_CTRL_625_CORE;
3888 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3889 } else {
3890 newbits1 = CLOCK_CTRL_ALTCLK;
3891 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3892 }
3893
b401e9e2
MC
3894 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3895 40);
1da177e4 3896
b401e9e2
MC
3897 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3898 40);
1da177e4 3899
63c3a66f 3900 if (!tg3_flag(tp, 5705_PLUS)) {
1da177e4
LT
3901 u32 newbits3;
3902
3903 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3904 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3905 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3906 CLOCK_CTRL_TXCLK_DISABLE |
3907 CLOCK_CTRL_44MHZ_CORE);
3908 } else {
3909 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3910 }
3911
b401e9e2
MC
3912 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3913 tp->pci_clock_ctrl | newbits3, 40);
1da177e4
LT
3914 }
3915 }
3916
63c3a66f 3917 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
0a459aac 3918 tg3_power_down_phy(tp, do_low_power);
6921d201 3919
cd0d7228 3920 tg3_frob_aux_power(tp, true);
1da177e4
LT
3921
3922 /* Workaround for unstable PLL clock */
3923 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3924 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3925 u32 val = tr32(0x7d00);
3926
3927 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3928 tw32(0x7d00, val);
63c3a66f 3929 if (!tg3_flag(tp, ENABLE_ASF)) {
ec41c7df
MC
3930 int err;
3931
3932 err = tg3_nvram_lock(tp);
1da177e4 3933 tg3_halt_cpu(tp, RX_CPU_BASE);
ec41c7df
MC
3934 if (!err)
3935 tg3_nvram_unlock(tp);
6921d201 3936 }
1da177e4
LT
3937 }
3938
bbadf503
MC
3939 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3940
c866b7ea
RW
3941 return 0;
3942}
12dac075 3943
c866b7ea
RW
3944static void tg3_power_down(struct tg3 *tp)
3945{
3946 tg3_power_down_prepare(tp);
1da177e4 3947
63c3a66f 3948 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
c866b7ea 3949 pci_set_power_state(tp->pdev, PCI_D3hot);
1da177e4
LT
3950}
3951
1da177e4
LT
3952static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3953{
3954 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3955 case MII_TG3_AUX_STAT_10HALF:
3956 *speed = SPEED_10;
3957 *duplex = DUPLEX_HALF;
3958 break;
3959
3960 case MII_TG3_AUX_STAT_10FULL:
3961 *speed = SPEED_10;
3962 *duplex = DUPLEX_FULL;
3963 break;
3964
3965 case MII_TG3_AUX_STAT_100HALF:
3966 *speed = SPEED_100;
3967 *duplex = DUPLEX_HALF;
3968 break;
3969
3970 case MII_TG3_AUX_STAT_100FULL:
3971 *speed = SPEED_100;
3972 *duplex = DUPLEX_FULL;
3973 break;
3974
3975 case MII_TG3_AUX_STAT_1000HALF:
3976 *speed = SPEED_1000;
3977 *duplex = DUPLEX_HALF;
3978 break;
3979
3980 case MII_TG3_AUX_STAT_1000FULL:
3981 *speed = SPEED_1000;
3982 *duplex = DUPLEX_FULL;
3983 break;
3984
3985 default:
f07e9af3 3986 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
715116a1
MC
3987 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3988 SPEED_10;
3989 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3990 DUPLEX_HALF;
3991 break;
3992 }
e740522e
MC
3993 *speed = SPEED_UNKNOWN;
3994 *duplex = DUPLEX_UNKNOWN;
1da177e4 3995 break;
855e1111 3996 }
1da177e4
LT
3997}
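
/*
 * Usage sketch with illustrative locals: decode the PHY aux status into
 * speed/duplex after link-up, as tg3_setup_copper_phy() does below.
 */
static void tg3_aux_stat_usage_sketch(struct tg3 *tp)
{
	u32 aux_stat;
	u16 speed;
	u8 duplex;

	tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
	tg3_aux_stat_to_speed_duplex(tp, aux_stat, &speed, &duplex);
}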
3998
42b64a45 3999static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
1da177e4 4000{
42b64a45
MC
4001 int err = 0;
4002 u32 val, new_adv;
1da177e4 4003
42b64a45 4004 new_adv = ADVERTISE_CSMA;
202ff1c2 4005 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
f88788f0 4006 new_adv |= mii_advertise_flowctrl(flowctrl);
1da177e4 4007
42b64a45
MC
4008 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4009 if (err)
4010 goto done;
ba4d07a8 4011
4f272096
MC
4012 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4013 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
ba4d07a8 4014
4f272096
MC
4015 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4016 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
4017 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
ba4d07a8 4018
4f272096
MC
4019 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4020 if (err)
4021 goto done;
4022 }
1da177e4 4023
42b64a45
MC
4024 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4025 goto done;
52b02d04 4026
42b64a45
MC
4027 tw32(TG3_CPMU_EEE_MODE,
4028 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
52b02d04 4029
daf3ec68 4030 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
42b64a45
MC
4031 if (!err) {
4032 u32 err2;
52b02d04 4033
b715ce94
MC
4034 val = 0;
4035 /* Advertise 100-BaseTX EEE ability */
4036 if (advertise & ADVERTISED_100baseT_Full)
4037 val |= MDIO_AN_EEE_ADV_100TX;
4038 /* Advertise 1000-BaseT EEE ability */
4039 if (advertise & ADVERTISED_1000baseT_Full)
4040 val |= MDIO_AN_EEE_ADV_1000T;
4041 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4042 if (err)
4043 val = 0;
4044
21a00ab2
MC
4045 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
4046 case ASIC_REV_5717:
4047 case ASIC_REV_57765:
55086ad9 4048 case ASIC_REV_57766:
21a00ab2 4049 case ASIC_REV_5719:
b715ce94
MC
4050 /* If we advertised any EEE modes above... */
4051 if (val)
4052 val = MII_TG3_DSP_TAP26_ALNOKO |
4053 MII_TG3_DSP_TAP26_RMRXSTO |
4054 MII_TG3_DSP_TAP26_OPCSINPT;
21a00ab2 4055 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
be671947
MC
4056 /* Fall through */
4057 case ASIC_REV_5720:
c65a17f4 4058 case ASIC_REV_5762:
be671947
MC
4059 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4060 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4061 MII_TG3_DSP_CH34TP2_HIBW01);
21a00ab2 4062 }
52b02d04 4063
daf3ec68 4064 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
42b64a45
MC
4065 if (!err)
4066 err = err2;
4067 }
4068
4069done:
4070 return err;
4071}
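
/*
 * Sketch of the advertisement conversions used above (example values;
 * not driver code): ethtool ADVERTISED_* bits are translated into MII
 * register bits before being written to MII_ADVERTISE/MII_CTRL1000.
 */
static u32 tg3_adv_conversion_sketch(void)
{
	u32 adv = ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full;
	u32 mii_adv;

	/* 10/100 bits land in MII_ADVERTISE (gigabit bits go to
	 * MII_CTRL1000 via ethtool_adv_to_mii_ctrl1000_t())...
	 */
	mii_adv = ethtool_adv_to_mii_adv_t(adv) & ADVERTISE_ALL;
	/* ...and symmetric flow control adds the pause capability bit. */
	mii_adv |= mii_advertise_flowctrl(FLOW_CTRL_TX | FLOW_CTRL_RX);

	return mii_adv;	/* == ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP */
}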
4072
4073static void tg3_phy_copper_begin(struct tg3 *tp)
4074{
d13ba512
MC
4075 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4076 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4077 u32 adv, fc;
4078
4079 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4080 adv = ADVERTISED_10baseT_Half |
4081 ADVERTISED_10baseT_Full;
4082 if (tg3_flag(tp, WOL_SPEED_100MB))
4083 adv |= ADVERTISED_100baseT_Half |
4084 ADVERTISED_100baseT_Full;
4085
4086 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
42b64a45 4087 } else {
d13ba512
MC
4088 adv = tp->link_config.advertising;
4089 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4090 adv &= ~(ADVERTISED_1000baseT_Half |
4091 ADVERTISED_1000baseT_Full);
4092
4093 fc = tp->link_config.flowctrl;
52b02d04 4094 }
52b02d04 4095
d13ba512 4096 tg3_phy_autoneg_cfg(tp, adv, fc);
52b02d04 4097
d13ba512
MC
4098 tg3_writephy(tp, MII_BMCR,
4099 BMCR_ANENABLE | BMCR_ANRESTART);
4100 } else {
4101 int i;
1da177e4
LT
4102 u32 bmcr, orig_bmcr;
4103
4104 tp->link_config.active_speed = tp->link_config.speed;
4105 tp->link_config.active_duplex = tp->link_config.duplex;
4106
4107 bmcr = 0;
4108 switch (tp->link_config.speed) {
4109 default:
4110 case SPEED_10:
4111 break;
4112
4113 case SPEED_100:
4114 bmcr |= BMCR_SPEED100;
4115 break;
4116
4117 case SPEED_1000:
221c5637 4118 bmcr |= BMCR_SPEED1000;
1da177e4 4119 break;
855e1111 4120 }
1da177e4
LT
4121
4122 if (tp->link_config.duplex == DUPLEX_FULL)
4123 bmcr |= BMCR_FULLDPLX;
4124
4125 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4126 (bmcr != orig_bmcr)) {
4127 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4128 for (i = 0; i < 1500; i++) {
4129 u32 tmp;
4130
4131 udelay(10);
4132 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4133 tg3_readphy(tp, MII_BMSR, &tmp))
4134 continue;
4135 if (!(tmp & BMSR_LSTATUS)) {
4136 udelay(40);
4137 break;
4138 }
4139 }
4140 tg3_writephy(tp, MII_BMCR, bmcr);
4141 udelay(40);
4142 }
1da177e4
LT
4143 }
4144}
4145
4146static int tg3_init_5401phy_dsp(struct tg3 *tp)
4147{
4148 int err;
4149
4150 /* Turn off tap power management. */
4151 /* Set Extended packet length bit */
b4bd2929 4152 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
1da177e4 4153
6ee7c0a0
MC
4154 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4155 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4156 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4157 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4158 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
1da177e4
LT
4159
4160 udelay(40);
4161
4162 return err;
4163}
4164
e2bf73e7 4165static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
1da177e4 4166{
e2bf73e7 4167 u32 advmsk, tgtadv, advertising;
3600d918 4168
e2bf73e7
MC
4169 advertising = tp->link_config.advertising;
4170 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
1da177e4 4171
e2bf73e7
MC
4172 advmsk = ADVERTISE_ALL;
4173 if (tp->link_config.active_duplex == DUPLEX_FULL) {
f88788f0 4174 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
e2bf73e7
MC
4175 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4176 }
1da177e4 4177
e2bf73e7
MC
4178 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4179 return false;
4180
4181 if ((*lcladv & advmsk) != tgtadv)
4182 return false;
b99d2a57 4183
f07e9af3 4184 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1da177e4
LT
4185 u32 tg3_ctrl;
4186
e2bf73e7 4187 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
3600d918 4188
221c5637 4189 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
e2bf73e7 4190 return false;
1da177e4 4191
3198e07f
MC
4192 if (tgtadv &&
4193 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4194 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4195 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4196 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4197 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4198 } else {
4199 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4200 }
4201
e2bf73e7
MC
4202 if (tg3_ctrl != tgtadv)
4203 return false;
ef167e27
MC
4204 }
4205
e2bf73e7 4206 return true;
ef167e27
MC
4207}
4208
859edb26
MC
4209static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4210{
4211 u32 lpeth = 0;
4212
4213 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4214 u32 val;
4215
4216 if (tg3_readphy(tp, MII_STAT1000, &val))
4217 return false;
4218
4219 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4220 }
4221
4222 if (tg3_readphy(tp, MII_LPA, rmtadv))
4223 return false;
4224
4225 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4226 tp->link_config.rmt_adv = lpeth;
4227
4228 return true;
4229}
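
/*
 * Sketch with illustrative register values: how the raw link-partner
 * registers read in tg3_phy_copper_fetch_rmtadv() translate into the
 * ethtool bits stored in tp->link_config.rmt_adv.
 */
static u32 tg3_rmtadv_conversion_sketch(void)
{
	u32 stat1000 = LPA_1000FULL;            /* from MII_STAT1000 */
	u32 lpa = LPA_100FULL | LPA_PAUSE_CAP;  /* from MII_LPA */

	/* Result: ADVERTISED_1000baseT_Full | ADVERTISED_100baseT_Full |
	 *         ADVERTISED_Pause
	 */
	return mii_stat1000_to_ethtool_lpa_t(stat1000) |
	       mii_lpa_to_ethtool_lpa_t(lpa);
}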
4230
f4a46d1f
NNS
4231static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4232{
4233 if (curr_link_up != tp->link_up) {
4234 if (curr_link_up) {
4235 tg3_carrier_on(tp);
4236 } else {
4237 tg3_carrier_off(tp);
4238 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4239 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4240 }
4241
4242 tg3_link_report(tp);
4243 return true;
4244 }
4245
4246 return false;
4247}
4248
1da177e4
LT
4249static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4250{
4251 int current_link_up;
f833c4c1 4252 u32 bmsr, val;
ef167e27 4253 u32 lcl_adv, rmt_adv;
1da177e4
LT
4254 u16 current_speed;
4255 u8 current_duplex;
4256 int i, err;
4257
4258 tw32(MAC_EVENT, 0);
4259
4260 tw32_f(MAC_STATUS,
4261 (MAC_STATUS_SYNC_CHANGED |
4262 MAC_STATUS_CFG_CHANGED |
4263 MAC_STATUS_MI_COMPLETION |
4264 MAC_STATUS_LNKSTATE_CHANGED));
4265 udelay(40);
4266
8ef21428
MC
4267 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4268 tw32_f(MAC_MI_MODE,
4269 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4270 udelay(80);
4271 }
1da177e4 4272
b4bd2929 4273 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
1da177e4
LT
4274
4275 /* Some third-party PHYs need to be reset on link going
4276 * down.
4277 */
4278 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4279 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4280 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
f4a46d1f 4281 tp->link_up) {
1da177e4
LT
4282 tg3_readphy(tp, MII_BMSR, &bmsr);
4283 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4284 !(bmsr & BMSR_LSTATUS))
4285 force_reset = 1;
4286 }
4287 if (force_reset)
4288 tg3_phy_reset(tp);
4289
79eb6904 4290 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1da177e4
LT
4291 tg3_readphy(tp, MII_BMSR, &bmsr);
4292 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
63c3a66f 4293 !tg3_flag(tp, INIT_COMPLETE))
1da177e4
LT
4294 bmsr = 0;
4295
4296 if (!(bmsr & BMSR_LSTATUS)) {
4297 err = tg3_init_5401phy_dsp(tp);
4298 if (err)
4299 return err;
4300
4301 tg3_readphy(tp, MII_BMSR, &bmsr);
4302 for (i = 0; i < 1000; i++) {
4303 udelay(10);
4304 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4305 (bmsr & BMSR_LSTATUS)) {
4306 udelay(40);
4307 break;
4308 }
4309 }
4310
79eb6904
MC
4311 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4312 TG3_PHY_REV_BCM5401_B0 &&
1da177e4
LT
4313 !(bmsr & BMSR_LSTATUS) &&
4314 tp->link_config.active_speed == SPEED_1000) {
4315 err = tg3_phy_reset(tp);
4316 if (!err)
4317 err = tg3_init_5401phy_dsp(tp);
4318 if (err)
4319 return err;
4320 }
4321 }
4322 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4323 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4324 /* 5701 {A0,B0} CRC bug workaround */
4325 tg3_writephy(tp, 0x15, 0x0a75);
f08aa1a8
MC
4326 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4327 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4328 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
1da177e4
LT
4329 }
4330
4331 /* Clear pending interrupts... */
f833c4c1
MC
4332 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4333 tg3_readphy(tp, MII_TG3_ISTAT, &val);
1da177e4 4334
f07e9af3 4335 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
1da177e4 4336 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
f07e9af3 4337 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
1da177e4
LT
4338 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4339
4340 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4341 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4342 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4343 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4344 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4345 else
4346 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4347 }
4348
4349 current_link_up = 0;
e740522e
MC
4350 current_speed = SPEED_UNKNOWN;
4351 current_duplex = DUPLEX_UNKNOWN;
e348c5e7 4352 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
859edb26 4353 tp->link_config.rmt_adv = 0;
1da177e4 4354
f07e9af3 4355 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
15ee95c3
MC
4356 err = tg3_phy_auxctl_read(tp,
4357 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4358 &val);
4359 if (!err && !(val & (1 << 10))) {
b4bd2929
MC
4360 tg3_phy_auxctl_write(tp,
4361 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4362 val | (1 << 10));
1da177e4
LT
4363 goto relink;
4364 }
4365 }
4366
4367 bmsr = 0;
4368 for (i = 0; i < 100; i++) {
4369 tg3_readphy(tp, MII_BMSR, &bmsr);
4370 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4371 (bmsr & BMSR_LSTATUS))
4372 break;
4373 udelay(40);
4374 }
4375
4376 if (bmsr & BMSR_LSTATUS) {
4377 u32 aux_stat, bmcr;
4378
4379 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4380 for (i = 0; i < 2000; i++) {
4381 udelay(10);
4382 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4383 aux_stat)
4384 break;
4385 }
4386
4387 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4388 &current_speed,
4389 &current_duplex);
4390
4391 bmcr = 0;
4392 for (i = 0; i < 200; i++) {
4393 tg3_readphy(tp, MII_BMCR, &bmcr);
4394 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4395 continue;
4396 if (bmcr && bmcr != 0x7fff)
4397 break;
4398 udelay(10);
4399 }
4400
ef167e27
MC
4401 lcl_adv = 0;
4402 rmt_adv = 0;
1da177e4 4403
ef167e27
MC
4404 tp->link_config.active_speed = current_speed;
4405 tp->link_config.active_duplex = current_duplex;
4406
4407 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4408 if ((bmcr & BMCR_ANENABLE) &&
e2bf73e7 4409 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
859edb26 4410 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
e2bf73e7 4411 current_link_up = 1;
1da177e4
LT
4412 } else {
4413 if (!(bmcr & BMCR_ANENABLE) &&
4414 tp->link_config.speed == current_speed &&
ef167e27
MC
4415 tp->link_config.duplex == current_duplex &&
4416 tp->link_config.flowctrl ==
4417 tp->link_config.active_flowctrl) {
1da177e4 4418 current_link_up = 1;
1da177e4
LT
4419 }
4420 }
4421
ef167e27 4422 if (current_link_up == 1 &&
e348c5e7
MC
4423 tp->link_config.active_duplex == DUPLEX_FULL) {
4424 u32 reg, bit;
4425
4426 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4427 reg = MII_TG3_FET_GEN_STAT;
4428 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4429 } else {
4430 reg = MII_TG3_EXT_STAT;
4431 bit = MII_TG3_EXT_STAT_MDIX;
4432 }
4433
4434 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4435 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4436
ef167e27 4437 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
e348c5e7 4438 }
1da177e4
LT
4439 }
4440
1da177e4 4441relink:
80096068 4442 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
1da177e4
LT
4443 tg3_phy_copper_begin(tp);
4444
f833c4c1 4445 tg3_readphy(tp, MII_BMSR, &bmsr);
06c03c02
MB
4446 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4447 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
1da177e4
LT
4448 current_link_up = 1;
4449 }
4450
4451 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4452 if (current_link_up == 1) {
4453 if (tp->link_config.active_speed == SPEED_100 ||
4454 tp->link_config.active_speed == SPEED_10)
4455 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4456 else
4457 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
f07e9af3 4458 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7f97a4bd
MC
4459 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4460 else
1da177e4
LT
4461 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4462
4463 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4464 if (tp->link_config.active_duplex == DUPLEX_HALF)
4465 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4466
1da177e4 4467 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
e8f3f6ca
MC
4468 if (current_link_up == 1 &&
4469 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
1da177e4 4470 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
e8f3f6ca
MC
4471 else
4472 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1da177e4
LT
4473 }
4474
4475 /* ??? Without this setting Netgear GA302T PHY does not
4476 * ??? send/receive packets...
4477 */
79eb6904 4478 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
1da177e4
LT
4479 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4480 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4481 tw32_f(MAC_MI_MODE, tp->mi_mode);
4482 udelay(80);
4483 }
4484
4485 tw32_f(MAC_MODE, tp->mac_mode);
4486 udelay(40);
4487
52b02d04
MC
4488 tg3_phy_eee_adjust(tp, current_link_up);
4489
63c3a66f 4490 if (tg3_flag(tp, USE_LINKCHG_REG)) {
1da177e4
LT
4491 /* Polled via timer. */
4492 tw32_f(MAC_EVENT, 0);
4493 } else {
4494 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4495 }
4496 udelay(40);
4497
4498 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4499 current_link_up == 1 &&
4500 tp->link_config.active_speed == SPEED_1000 &&
63c3a66f 4501 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
1da177e4
LT
4502 udelay(120);
4503 tw32_f(MAC_STATUS,
4504 (MAC_STATUS_SYNC_CHANGED |
4505 MAC_STATUS_CFG_CHANGED));
4506 udelay(40);
4507 tg3_write_mem(tp,
4508 NIC_SRAM_FIRMWARE_MBOX,
4509 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4510 }
4511
5e7dfd0f 4512 /* Prevent send BD corruption. */
63c3a66f 4513 if (tg3_flag(tp, CLKREQ_BUG)) {
5e7dfd0f
MC
4514 if (tp->link_config.active_speed == SPEED_100 ||
4515 tp->link_config.active_speed == SPEED_10)
0f49bfbd
JL
4516 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4517 PCI_EXP_LNKCTL_CLKREQ_EN);
5e7dfd0f 4518 else
0f49bfbd
JL
4519 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4520 PCI_EXP_LNKCTL_CLKREQ_EN);
5e7dfd0f
MC
4521 }
4522
f4a46d1f 4523 tg3_test_and_report_link_chg(tp, current_link_up);
1da177e4
LT
4524
4525 return 0;
4526}
4527
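/* Software autonegotiation state machine for fiber links where the MAC
 * cannot autonegotiate in hardware: the config code words exchanged
 * during 1000BASE-X autonegotiation are tracked here while the driver
 * walks the arbitration states defined below.
 */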
4528struct tg3_fiber_aneginfo {
4529 int state;
4530#define ANEG_STATE_UNKNOWN 0
4531#define ANEG_STATE_AN_ENABLE 1
4532#define ANEG_STATE_RESTART_INIT 2
4533#define ANEG_STATE_RESTART 3
4534#define ANEG_STATE_DISABLE_LINK_OK 4
4535#define ANEG_STATE_ABILITY_DETECT_INIT 5
4536#define ANEG_STATE_ABILITY_DETECT 6
4537#define ANEG_STATE_ACK_DETECT_INIT 7
4538#define ANEG_STATE_ACK_DETECT 8
4539#define ANEG_STATE_COMPLETE_ACK_INIT 9
4540#define ANEG_STATE_COMPLETE_ACK 10
4541#define ANEG_STATE_IDLE_DETECT_INIT 11
4542#define ANEG_STATE_IDLE_DETECT 12
4543#define ANEG_STATE_LINK_OK 13
4544#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4545#define ANEG_STATE_NEXT_PAGE_WAIT 15
4546
4547 u32 flags;
4548#define MR_AN_ENABLE 0x00000001
4549#define MR_RESTART_AN 0x00000002
4550#define MR_AN_COMPLETE 0x00000004
4551#define MR_PAGE_RX 0x00000008
4552#define MR_NP_LOADED 0x00000010
4553#define MR_TOGGLE_TX 0x00000020
4554#define MR_LP_ADV_FULL_DUPLEX 0x00000040
4555#define MR_LP_ADV_HALF_DUPLEX 0x00000080
4556#define MR_LP_ADV_SYM_PAUSE 0x00000100
4557#define MR_LP_ADV_ASYM_PAUSE 0x00000200
4558#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4559#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4560#define MR_LP_ADV_NEXT_PAGE 0x00001000
4561#define MR_TOGGLE_RX 0x00002000
4562#define MR_NP_RX 0x00004000
4563
4564#define MR_LINK_OK 0x80000000
4565
4566 unsigned long link_time, cur_time;
4567
4568 u32 ability_match_cfg;
4569 int ability_match_count;
4570
4571 char ability_match, idle_match, ack_match;
4572
4573 u32 txconfig, rxconfig;
4574#define ANEG_CFG_NP 0x00000080
4575#define ANEG_CFG_ACK 0x00000040
4576#define ANEG_CFG_RF2 0x00000020
4577#define ANEG_CFG_RF1 0x00000010
4578#define ANEG_CFG_PS2 0x00000001
4579#define ANEG_CFG_PS1 0x00008000
4580#define ANEG_CFG_HD 0x00004000
4581#define ANEG_CFG_FD 0x00002000
4582#define ANEG_CFG_INVAL 0x00001f06
4583
4584};
4585#define ANEG_OK 0
4586#define ANEG_DONE 1
4587#define ANEG_TIMER_ENAB 2
4588#define ANEG_FAILED -1
4589
4590#define ANEG_STATE_SETTLE_TIME 10000
4591
4592static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4593 struct tg3_fiber_aneginfo *ap)
4594{
5be73b47 4595 u16 flowctrl;
1da177e4
LT
4596 unsigned long delta;
4597 u32 rx_cfg_reg;
4598 int ret;
4599
4600 if (ap->state == ANEG_STATE_UNKNOWN) {
4601 ap->rxconfig = 0;
4602 ap->link_time = 0;
4603 ap->cur_time = 0;
4604 ap->ability_match_cfg = 0;
4605 ap->ability_match_count = 0;
4606 ap->ability_match = 0;
4607 ap->idle_match = 0;
4608 ap->ack_match = 0;
4609 }
4610 ap->cur_time++;
4611
4612 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4613 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4614
4615 if (rx_cfg_reg != ap->ability_match_cfg) {
4616 ap->ability_match_cfg = rx_cfg_reg;
4617 ap->ability_match = 0;
4618 ap->ability_match_count = 0;
4619 } else {
4620 if (++ap->ability_match_count > 1) {
4621 ap->ability_match = 1;
4622 ap->ability_match_cfg = rx_cfg_reg;
4623 }
4624 }
4625 if (rx_cfg_reg & ANEG_CFG_ACK)
4626 ap->ack_match = 1;
4627 else
4628 ap->ack_match = 0;
4629
4630 ap->idle_match = 0;
4631 } else {
4632 ap->idle_match = 1;
4633 ap->ability_match_cfg = 0;
4634 ap->ability_match_count = 0;
4635 ap->ability_match = 0;
4636 ap->ack_match = 0;
4637
4638 rx_cfg_reg = 0;
4639 }
4640
4641 ap->rxconfig = rx_cfg_reg;
4642 ret = ANEG_OK;
4643
33f401ae 4644 switch (ap->state) {
1da177e4
LT
4645 case ANEG_STATE_UNKNOWN:
4646 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4647 ap->state = ANEG_STATE_AN_ENABLE;
4648
4649 /* fallthru */
4650 case ANEG_STATE_AN_ENABLE:
4651 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4652 if (ap->flags & MR_AN_ENABLE) {
4653 ap->link_time = 0;
4654 ap->cur_time = 0;
4655 ap->ability_match_cfg = 0;
4656 ap->ability_match_count = 0;
4657 ap->ability_match = 0;
4658 ap->idle_match = 0;
4659 ap->ack_match = 0;
4660
4661 ap->state = ANEG_STATE_RESTART_INIT;
4662 } else {
4663 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4664 }
4665 break;
4666
4667 case ANEG_STATE_RESTART_INIT:
4668 ap->link_time = ap->cur_time;
4669 ap->flags &= ~(MR_NP_LOADED);
4670 ap->txconfig = 0;
4671 tw32(MAC_TX_AUTO_NEG, 0);
4672 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4673 tw32_f(MAC_MODE, tp->mac_mode);
4674 udelay(40);
4675
4676 ret = ANEG_TIMER_ENAB;
4677 ap->state = ANEG_STATE_RESTART;
4678
4679 /* fallthru */
4680 case ANEG_STATE_RESTART:
4681 delta = ap->cur_time - ap->link_time;
859a5887 4682 if (delta > ANEG_STATE_SETTLE_TIME)
1da177e4 4683 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
859a5887 4684 else
1da177e4 4685 ret = ANEG_TIMER_ENAB;
1da177e4
LT
4686 break;
4687
4688 case ANEG_STATE_DISABLE_LINK_OK:
4689 ret = ANEG_DONE;
4690 break;
4691
4692 case ANEG_STATE_ABILITY_DETECT_INIT:
4693 ap->flags &= ~(MR_TOGGLE_TX);
5be73b47
MC
4694 ap->txconfig = ANEG_CFG_FD;
4695 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4696 if (flowctrl & ADVERTISE_1000XPAUSE)
4697 ap->txconfig |= ANEG_CFG_PS1;
4698 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4699 ap->txconfig |= ANEG_CFG_PS2;
1da177e4
LT
4700 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4701 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4702 tw32_f(MAC_MODE, tp->mac_mode);
4703 udelay(40);
4704
4705 ap->state = ANEG_STATE_ABILITY_DETECT;
4706 break;
4707
4708 case ANEG_STATE_ABILITY_DETECT:
859a5887 4709 if (ap->ability_match != 0 && ap->rxconfig != 0)
1da177e4 4710 ap->state = ANEG_STATE_ACK_DETECT_INIT;
1da177e4
LT
4711 break;
4712
4713 case ANEG_STATE_ACK_DETECT_INIT:
4714 ap->txconfig |= ANEG_CFG_ACK;
4715 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4716 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4717 tw32_f(MAC_MODE, tp->mac_mode);
4718 udelay(40);
4719
4720 ap->state = ANEG_STATE_ACK_DETECT;
4721
4722 /* fallthru */
4723 case ANEG_STATE_ACK_DETECT:
4724 if (ap->ack_match != 0) {
4725 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4726 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4727 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4728 } else {
4729 ap->state = ANEG_STATE_AN_ENABLE;
4730 }
4731 } else if (ap->ability_match != 0 &&
4732 ap->rxconfig == 0) {
4733 ap->state = ANEG_STATE_AN_ENABLE;
4734 }
4735 break;
4736
4737 case ANEG_STATE_COMPLETE_ACK_INIT:
4738 if (ap->rxconfig & ANEG_CFG_INVAL) {
4739 ret = ANEG_FAILED;
4740 break;
4741 }
4742 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4743 MR_LP_ADV_HALF_DUPLEX |
4744 MR_LP_ADV_SYM_PAUSE |
4745 MR_LP_ADV_ASYM_PAUSE |
4746 MR_LP_ADV_REMOTE_FAULT1 |
4747 MR_LP_ADV_REMOTE_FAULT2 |
4748 MR_LP_ADV_NEXT_PAGE |
4749 MR_TOGGLE_RX |
4750 MR_NP_RX);
4751 if (ap->rxconfig & ANEG_CFG_FD)
4752 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4753 if (ap->rxconfig & ANEG_CFG_HD)
4754 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4755 if (ap->rxconfig & ANEG_CFG_PS1)
4756 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4757 if (ap->rxconfig & ANEG_CFG_PS2)
4758 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4759 if (ap->rxconfig & ANEG_CFG_RF1)
4760 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4761 if (ap->rxconfig & ANEG_CFG_RF2)
4762 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4763 if (ap->rxconfig & ANEG_CFG_NP)
4764 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4765
4766 ap->link_time = ap->cur_time;
4767
4768 ap->flags ^= (MR_TOGGLE_TX);
4769 if (ap->rxconfig & 0x0008)
4770 ap->flags |= MR_TOGGLE_RX;
4771 if (ap->rxconfig & ANEG_CFG_NP)
4772 ap->flags |= MR_NP_RX;
4773 ap->flags |= MR_PAGE_RX;
4774
4775 ap->state = ANEG_STATE_COMPLETE_ACK;
4776 ret = ANEG_TIMER_ENAB;
4777 break;
4778
4779 case ANEG_STATE_COMPLETE_ACK:
4780 if (ap->ability_match != 0 &&
4781 ap->rxconfig == 0) {
4782 ap->state = ANEG_STATE_AN_ENABLE;
4783 break;
4784 }
4785 delta = ap->cur_time - ap->link_time;
4786 if (delta > ANEG_STATE_SETTLE_TIME) {
4787 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4788 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4789 } else {
4790 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4791 !(ap->flags & MR_NP_RX)) {
4792 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4793 } else {
4794 ret = ANEG_FAILED;
4795 }
4796 }
4797 }
4798 break;
4799
4800 case ANEG_STATE_IDLE_DETECT_INIT:
4801 ap->link_time = ap->cur_time;
4802 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4803 tw32_f(MAC_MODE, tp->mac_mode);
4804 udelay(40);
4805
4806 ap->state = ANEG_STATE_IDLE_DETECT;
4807 ret = ANEG_TIMER_ENAB;
4808 break;
4809
4810 case ANEG_STATE_IDLE_DETECT:
4811 if (ap->ability_match != 0 &&
4812 ap->rxconfig == 0) {
4813 ap->state = ANEG_STATE_AN_ENABLE;
4814 break;
4815 }
4816 delta = ap->cur_time - ap->link_time;
4817 if (delta > ANEG_STATE_SETTLE_TIME) {
4818 /* XXX another gem from the Broadcom driver :( */
4819 ap->state = ANEG_STATE_LINK_OK;
4820 }
4821 break;
4822
4823 case ANEG_STATE_LINK_OK:
4824 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4825 ret = ANEG_DONE;
4826 break;
4827
4828 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4829 /* ??? unimplemented */
4830 break;
4831
4832 case ANEG_STATE_NEXT_PAGE_WAIT:
4833 /* ??? unimplemented */
4834 break;
4835
4836 default:
4837 ret = ANEG_FAILED;
4838 break;
855e1111 4839 }
1da177e4
LT
4840
4841 return ret;
4842}
4843
5be73b47 4844static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
1da177e4
LT
4845{
4846 int res = 0;
4847 struct tg3_fiber_aneginfo aninfo;
4848 int status = ANEG_FAILED;
4849 unsigned int tick;
4850 u32 tmp;
4851
4852 tw32_f(MAC_TX_AUTO_NEG, 0);
4853
4854 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4855 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4856 udelay(40);
4857
4858 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4859 udelay(40);
4860
4861 memset(&aninfo, 0, sizeof(aninfo));
4862 aninfo.flags |= MR_AN_ENABLE;
4863 aninfo.state = ANEG_STATE_UNKNOWN;
4864 aninfo.cur_time = 0;
4865 tick = 0;
4866 while (++tick < 195000) {
4867 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4868 if (status == ANEG_DONE || status == ANEG_FAILED)
4869 break;
4870
4871 udelay(1);
4872 }
4873
4874 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4875 tw32_f(MAC_MODE, tp->mac_mode);
4876 udelay(40);
4877
5be73b47
MC
4878 *txflags = aninfo.txconfig;
4879 *rxflags = aninfo.flags;
1da177e4
LT
4880
4881 if (status == ANEG_DONE &&
4882 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4883 MR_LP_ADV_FULL_DUPLEX)))
4884 res = 1;
4885
4886 return res;
4887}
4888
4889static void tg3_init_bcm8002(struct tg3 *tp)
4890{
4891 u32 mac_status = tr32(MAC_STATUS);
4892 int i;
4893
4894 /* Reset when initializing for the first time or when we have a link. */
63c3a66f 4895 if (tg3_flag(tp, INIT_COMPLETE) &&
1da177e4
LT
4896 !(mac_status & MAC_STATUS_PCS_SYNCED))
4897 return;
4898
4899 /* Set PLL lock range. */
4900 tg3_writephy(tp, 0x16, 0x8007);
4901
4902 /* SW reset */
4903 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4904
4905 /* Wait for reset to complete. */
4906 /* XXX schedule_timeout() ... */
4907 for (i = 0; i < 500; i++)
4908 udelay(10);
4909
4910 /* Config mode; select PMA/Ch 1 regs. */
4911 tg3_writephy(tp, 0x10, 0x8411);
4912
4913 /* Enable auto-lock and comdet, select txclk for tx. */
4914 tg3_writephy(tp, 0x11, 0x0a10);
4915
4916 tg3_writephy(tp, 0x18, 0x00a0);
4917 tg3_writephy(tp, 0x16, 0x41ff);
4918
4919 /* Assert and deassert POR. */
4920 tg3_writephy(tp, 0x13, 0x0400);
4921 udelay(40);
4922 tg3_writephy(tp, 0x13, 0x0000);
4923
4924 tg3_writephy(tp, 0x11, 0x0a50);
4925 udelay(40);
4926 tg3_writephy(tp, 0x11, 0x0a10);
4927
4928 /* Wait for signal to stabilize */
4929 /* XXX schedule_timeout() ... */
4930 for (i = 0; i < 15000; i++)
4931 udelay(10);
4932
4933 /* Deselect the channel register so we can read the PHYID
4934 * later.
4935 */
4936 tg3_writephy(tp, 0x10, 0x8011);
4937}
4938
4939static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4940{
82cd3d11 4941 u16 flowctrl;
1da177e4
LT
4942 u32 sg_dig_ctrl, sg_dig_status;
4943 u32 serdes_cfg, expected_sg_dig_ctrl;
4944 int workaround, port_a;
4945 int current_link_up;
4946
4947 serdes_cfg = 0;
4948 expected_sg_dig_ctrl = 0;
4949 workaround = 0;
4950 port_a = 1;
4951 current_link_up = 0;
4952
4953 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4954 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4955 workaround = 1;
4956 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4957 port_a = 0;
4958
4959 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4960 /* preserve bits 20-23 for voltage regulator */
4961 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4962 }
4963
4964 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4965
4966 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
c98f6e3b 4967 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
1da177e4
LT
4968 if (workaround) {
4969 u32 val = serdes_cfg;
4970
4971 if (port_a)
4972 val |= 0xc010000;
4973 else
4974 val |= 0x4010000;
4975 tw32_f(MAC_SERDES_CFG, val);
4976 }
c98f6e3b
MC
4977
4978 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
1da177e4
LT
4979 }
4980 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4981 tg3_setup_flow_control(tp, 0, 0);
4982 current_link_up = 1;
4983 }
4984 goto out;
4985 }
4986
4987 /* Want auto-negotiation. */
c98f6e3b 4988 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
1da177e4 4989
82cd3d11
MC
4990 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4991 if (flowctrl & ADVERTISE_1000XPAUSE)
4992 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4993 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4994 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
1da177e4
LT
4995
4996 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
f07e9af3 4997 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3d3ebe74
MC
4998 tp->serdes_counter &&
4999 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5000 MAC_STATUS_RCVD_CFG)) ==
5001 MAC_STATUS_PCS_SYNCED)) {
5002 tp->serdes_counter--;
5003 current_link_up = 1;
5004 goto out;
5005 }
5006restart_autoneg:
1da177e4
LT
5007 if (workaround)
5008 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
c98f6e3b 5009 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
1da177e4
LT
5010 udelay(5);
5011 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5012
3d3ebe74 5013 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
f07e9af3 5014 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
1da177e4
LT
5015 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5016 MAC_STATUS_SIGNAL_DET)) {
3d3ebe74 5017 sg_dig_status = tr32(SG_DIG_STATUS);
1da177e4
LT
5018 mac_status = tr32(MAC_STATUS);
5019
c98f6e3b 5020 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
1da177e4 5021 (mac_status & MAC_STATUS_PCS_SYNCED)) {
82cd3d11
MC
5022 u32 local_adv = 0, remote_adv = 0;
5023
5024 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5025 local_adv |= ADVERTISE_1000XPAUSE;
5026 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5027 local_adv |= ADVERTISE_1000XPSE_ASYM;
1da177e4 5028
c98f6e3b 5029 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
82cd3d11 5030 remote_adv |= LPA_1000XPAUSE;
c98f6e3b 5031 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
82cd3d11 5032 remote_adv |= LPA_1000XPAUSE_ASYM;
1da177e4 5033
859edb26
MC
5034 tp->link_config.rmt_adv =
5035 mii_adv_to_ethtool_adv_x(remote_adv);
5036
1da177e4
LT
5037 tg3_setup_flow_control(tp, local_adv, remote_adv);
5038 current_link_up = 1;
3d3ebe74 5039 tp->serdes_counter = 0;
f07e9af3 5040 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
c98f6e3b 5041 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3d3ebe74
MC
5042 if (tp->serdes_counter)
5043 tp->serdes_counter--;
1da177e4
LT
5044 else {
5045 if (workaround) {
5046 u32 val = serdes_cfg;
5047
5048 if (port_a)
5049 val |= 0xc010000;
5050 else
5051 val |= 0x4010000;
5052
5053 tw32_f(MAC_SERDES_CFG, val);
5054 }
5055
c98f6e3b 5056 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
1da177e4
LT
5057 udelay(40);
5058
5059 /* Link parallel detection - link is up
5060 * only if we have PCS_SYNC and not
5061 * receiving config code words */
5062 mac_status = tr32(MAC_STATUS);
5063 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5064 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5065 tg3_setup_flow_control(tp, 0, 0);
5066 current_link_up = 1;
f07e9af3
MC
5067 tp->phy_flags |=
5068 TG3_PHYFLG_PARALLEL_DETECT;
3d3ebe74
MC
5069 tp->serdes_counter =
5070 SERDES_PARALLEL_DET_TIMEOUT;
5071 } else
5072 goto restart_autoneg;
1da177e4
LT
5073 }
5074 }
3d3ebe74
MC
5075 } else {
5076 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
f07e9af3 5077 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
1da177e4
LT
5078 }
5079
5080out:
5081 return current_link_up;
5082}
5083
5084static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5085{
5086 int current_link_up = 0;
5087
5cf64b8a 5088 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
1da177e4 5089 goto out;
1da177e4
LT
5090
5091 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5be73b47 5092 u32 txflags, rxflags;
1da177e4 5093 int i;
6aa20a22 5094
5be73b47
MC
5095 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5096 u32 local_adv = 0, remote_adv = 0;
1da177e4 5097
5be73b47
MC
5098 if (txflags & ANEG_CFG_PS1)
5099 local_adv |= ADVERTISE_1000XPAUSE;
5100 if (txflags & ANEG_CFG_PS2)
5101 local_adv |= ADVERTISE_1000XPSE_ASYM;
5102
5103 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5104 remote_adv |= LPA_1000XPAUSE;
5105 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5106 remote_adv |= LPA_1000XPAUSE_ASYM;
1da177e4 5107
859edb26
MC
5108 tp->link_config.rmt_adv =
5109 mii_adv_to_ethtool_adv_x(remote_adv);
5110
1da177e4
LT
5111 tg3_setup_flow_control(tp, local_adv, remote_adv);
5112
1da177e4
LT
5113 current_link_up = 1;
5114 }
5115 for (i = 0; i < 30; i++) {
5116 udelay(20);
5117 tw32_f(MAC_STATUS,
5118 (MAC_STATUS_SYNC_CHANGED |
5119 MAC_STATUS_CFG_CHANGED));
5120 udelay(40);
5121 if ((tr32(MAC_STATUS) &
5122 (MAC_STATUS_SYNC_CHANGED |
5123 MAC_STATUS_CFG_CHANGED)) == 0)
5124 break;
5125 }
5126
5127 mac_status = tr32(MAC_STATUS);
5128 if (current_link_up == 0 &&
5129 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5130 !(mac_status & MAC_STATUS_RCVD_CFG))
5131 current_link_up = 1;
5132 } else {
5be73b47
MC
5133 tg3_setup_flow_control(tp, 0, 0);
5134
1da177e4
LT
5135 /* Forcing 1000FD link up. */
5136 current_link_up = 1;
1da177e4
LT
5137
5138 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5139 udelay(40);
e8f3f6ca
MC
5140
5141 tw32_f(MAC_MODE, tp->mac_mode);
5142 udelay(40);
1da177e4
LT
5143 }
5144
5145out:
5146 return current_link_up;
5147}
5148
5149static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5150{
5151 u32 orig_pause_cfg;
5152 u16 orig_active_speed;
5153 u8 orig_active_duplex;
5154 u32 mac_status;
5155 int current_link_up;
5156 int i;
5157
8d018621 5158 orig_pause_cfg = tp->link_config.active_flowctrl;
1da177e4
LT
5159 orig_active_speed = tp->link_config.active_speed;
5160 orig_active_duplex = tp->link_config.active_duplex;
5161
63c3a66f 5162 if (!tg3_flag(tp, HW_AUTONEG) &&
f4a46d1f 5163 tp->link_up &&
63c3a66f 5164 tg3_flag(tp, INIT_COMPLETE)) {
1da177e4
LT
5165 mac_status = tr32(MAC_STATUS);
5166 mac_status &= (MAC_STATUS_PCS_SYNCED |
5167 MAC_STATUS_SIGNAL_DET |
5168 MAC_STATUS_CFG_CHANGED |
5169 MAC_STATUS_RCVD_CFG);
5170 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5171 MAC_STATUS_SIGNAL_DET)) {
5172 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5173 MAC_STATUS_CFG_CHANGED));
5174 return 0;
5175 }
5176 }
5177
5178 tw32_f(MAC_TX_AUTO_NEG, 0);
5179
5180 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5181 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5182 tw32_f(MAC_MODE, tp->mac_mode);
5183 udelay(40);
5184
79eb6904 5185 if (tp->phy_id == TG3_PHY_ID_BCM8002)
1da177e4
LT
5186 tg3_init_bcm8002(tp);
5187
5188 /* Enable link change event even when serdes polling. */
5189 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5190 udelay(40);
5191
5192 current_link_up = 0;
859edb26 5193 tp->link_config.rmt_adv = 0;
1da177e4
LT
5194 mac_status = tr32(MAC_STATUS);
5195
63c3a66f 5196 if (tg3_flag(tp, HW_AUTONEG))
1da177e4
LT
5197 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5198 else
5199 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5200
898a56f8 5201 tp->napi[0].hw_status->status =
1da177e4 5202 (SD_STATUS_UPDATED |
898a56f8 5203 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
1da177e4
LT
5204
5205 for (i = 0; i < 100; i++) {
5206 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5207 MAC_STATUS_CFG_CHANGED));
5208 udelay(5);
5209 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3d3ebe74
MC
5210 MAC_STATUS_CFG_CHANGED |
5211 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
1da177e4
LT
5212 break;
5213 }
5214
5215 mac_status = tr32(MAC_STATUS);
5216 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5217 current_link_up = 0;
3d3ebe74
MC
5218 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5219 tp->serdes_counter == 0) {
1da177e4
LT
5220 tw32_f(MAC_MODE, (tp->mac_mode |
5221 MAC_MODE_SEND_CONFIGS));
5222 udelay(1);
5223 tw32_f(MAC_MODE, tp->mac_mode);
5224 }
5225 }
5226
5227 if (current_link_up == 1) {
5228 tp->link_config.active_speed = SPEED_1000;
5229 tp->link_config.active_duplex = DUPLEX_FULL;
5230 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5231 LED_CTRL_LNKLED_OVERRIDE |
5232 LED_CTRL_1000MBPS_ON));
5233 } else {
e740522e
MC
5234 tp->link_config.active_speed = SPEED_UNKNOWN;
5235 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
1da177e4
LT
5236 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5237 LED_CTRL_LNKLED_OVERRIDE |
5238 LED_CTRL_TRAFFIC_OVERRIDE));
5239 }
5240
f4a46d1f 5241 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
8d018621 5242 u32 now_pause_cfg = tp->link_config.active_flowctrl;
1da177e4
LT
5243 if (orig_pause_cfg != now_pause_cfg ||
5244 orig_active_speed != tp->link_config.active_speed ||
5245 orig_active_duplex != tp->link_config.active_duplex)
5246 tg3_link_report(tp);
5247 }
5248
5249 return 0;
5250}
5251
747e8f8b
MC
5252static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5253{
5254 int current_link_up, err = 0;
5255 u32 bmsr, bmcr;
5256 u16 current_speed;
5257 u8 current_duplex;
ef167e27 5258 u32 local_adv, remote_adv;
747e8f8b
MC
5259
5260 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5261 tw32_f(MAC_MODE, tp->mac_mode);
5262 udelay(40);
5263
5264 tw32(MAC_EVENT, 0);
5265
5266 tw32_f(MAC_STATUS,
5267 (MAC_STATUS_SYNC_CHANGED |
5268 MAC_STATUS_CFG_CHANGED |
5269 MAC_STATUS_MI_COMPLETION |
5270 MAC_STATUS_LNKSTATE_CHANGED));
5271 udelay(40);
5272
5273 if (force_reset)
5274 tg3_phy_reset(tp);
5275
5276 current_link_up = 0;
e740522e
MC
5277 current_speed = SPEED_UNKNOWN;
5278 current_duplex = DUPLEX_UNKNOWN;
859edb26 5279 tp->link_config.rmt_adv = 0;
747e8f8b
MC
5280
5281 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5282 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
5283 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5284 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5285 bmsr |= BMSR_LSTATUS;
5286 else
5287 bmsr &= ~BMSR_LSTATUS;
5288 }
747e8f8b
MC
5289
5290 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5291
5292 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
f07e9af3 5293 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
747e8f8b
MC
5294 /* do nothing, just check for link up at the end */
5295 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
28011cf1 5296 u32 adv, newadv;
747e8f8b
MC
5297
5298 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
28011cf1
MC
5299 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5300 ADVERTISE_1000XPAUSE |
5301 ADVERTISE_1000XPSE_ASYM |
5302 ADVERTISE_SLCT);
747e8f8b 5303
28011cf1 5304 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
37f07023 5305 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
747e8f8b 5306
28011cf1
MC
5307 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5308 tg3_writephy(tp, MII_ADVERTISE, newadv);
747e8f8b
MC
5309 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5310 tg3_writephy(tp, MII_BMCR, bmcr);
5311
5312 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3d3ebe74 5313 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
f07e9af3 5314 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
747e8f8b
MC
5315
5316 return err;
5317 }
5318 } else {
5319 u32 new_bmcr;
5320
5321 bmcr &= ~BMCR_SPEED1000;
5322 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5323
5324 if (tp->link_config.duplex == DUPLEX_FULL)
5325 new_bmcr |= BMCR_FULLDPLX;
5326
5327 if (new_bmcr != bmcr) {
5328 /* BMCR_SPEED1000 is a reserved bit that needs
5329 * to be set on write.
5330 */
5331 new_bmcr |= BMCR_SPEED1000;
5332
5333 /* Force a linkdown */
f4a46d1f 5334 if (tp->link_up) {
747e8f8b
MC
5335 u32 adv;
5336
5337 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5338 adv &= ~(ADVERTISE_1000XFULL |
5339 ADVERTISE_1000XHALF |
5340 ADVERTISE_SLCT);
5341 tg3_writephy(tp, MII_ADVERTISE, adv);
5342 tg3_writephy(tp, MII_BMCR, bmcr |
5343 BMCR_ANRESTART |
5344 BMCR_ANENABLE);
5345 udelay(10);
f4a46d1f 5346 tg3_carrier_off(tp);
747e8f8b
MC
5347 }
5348 tg3_writephy(tp, MII_BMCR, new_bmcr);
5349 bmcr = new_bmcr;
5350 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5351 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
5352 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5353 ASIC_REV_5714) {
5354 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5355 bmsr |= BMSR_LSTATUS;
5356 else
5357 bmsr &= ~BMSR_LSTATUS;
5358 }
f07e9af3 5359 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
747e8f8b
MC
5360 }
5361 }
5362
5363 if (bmsr & BMSR_LSTATUS) {
5364 current_speed = SPEED_1000;
5365 current_link_up = 1;
5366 if (bmcr & BMCR_FULLDPLX)
5367 current_duplex = DUPLEX_FULL;
5368 else
5369 current_duplex = DUPLEX_HALF;
5370
ef167e27
MC
5371 local_adv = 0;
5372 remote_adv = 0;
5373
747e8f8b 5374 if (bmcr & BMCR_ANENABLE) {
ef167e27 5375 u32 common;
747e8f8b
MC
5376
5377 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5378 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5379 common = local_adv & remote_adv;
5380 if (common & (ADVERTISE_1000XHALF |
5381 ADVERTISE_1000XFULL)) {
5382 if (common & ADVERTISE_1000XFULL)
5383 current_duplex = DUPLEX_FULL;
5384 else
5385 current_duplex = DUPLEX_HALF;
859edb26
MC
5386
5387 tp->link_config.rmt_adv =
5388 mii_adv_to_ethtool_adv_x(remote_adv);
63c3a66f 5389 } else if (!tg3_flag(tp, 5780_CLASS)) {
57d8b880 5390 /* Link is up via parallel detect */
859a5887 5391 } else {
747e8f8b 5392 current_link_up = 0;
859a5887 5393 }
747e8f8b
MC
5394 }
5395 }
5396
ef167e27
MC
5397 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5398 tg3_setup_flow_control(tp, local_adv, remote_adv);
5399
747e8f8b
MC
5400 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5401 if (tp->link_config.active_duplex == DUPLEX_HALF)
5402 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5403
5404 tw32_f(MAC_MODE, tp->mac_mode);
5405 udelay(40);
5406
5407 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5408
5409 tp->link_config.active_speed = current_speed;
5410 tp->link_config.active_duplex = current_duplex;
5411
f4a46d1f 5412 tg3_test_and_report_link_chg(tp, current_link_up);
747e8f8b
MC
5413 return err;
5414}
5415
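/* Parallel detection: when the link partner sends no config code words
 * but signal is present, the function below forces the link up at
 * 1000/full, and re-enables autonegotiation as soon as code words are
 * seen again.
 */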
5416static void tg3_serdes_parallel_detect(struct tg3 *tp)
5417{
3d3ebe74 5418 if (tp->serdes_counter) {
747e8f8b 5419 /* Give autoneg time to complete. */
3d3ebe74 5420 tp->serdes_counter--;
747e8f8b
MC
5421 return;
5422 }
c6cdf436 5423
f4a46d1f 5424 if (!tp->link_up &&
747e8f8b
MC
5425 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5426 u32 bmcr;
5427
5428 tg3_readphy(tp, MII_BMCR, &bmcr);
5429 if (bmcr & BMCR_ANENABLE) {
5430 u32 phy1, phy2;
5431
5432 /* Select shadow register 0x1f */
f08aa1a8
MC
5433 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5434 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
747e8f8b
MC
5435
5436 /* Select expansion interrupt status register */
f08aa1a8
MC
5437 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5438 MII_TG3_DSP_EXP1_INT_STAT);
5439 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5440 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
747e8f8b
MC
5441
5442 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5443 /* We have signal detect and not receiving
5444 * config code words, link is up by parallel
5445 * detection.
5446 */
5447
5448 bmcr &= ~BMCR_ANENABLE;
5449 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5450 tg3_writephy(tp, MII_BMCR, bmcr);
f07e9af3 5451 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
747e8f8b
MC
5452 }
5453 }
f4a46d1f 5454 } else if (tp->link_up &&
859a5887 5455 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
f07e9af3 5456 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
747e8f8b
MC
5457 u32 phy2;
5458
5459 /* Select expansion interrupt status register */
f08aa1a8
MC
5460 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5461 MII_TG3_DSP_EXP1_INT_STAT);
5462 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
747e8f8b
MC
5463 if (phy2 & 0x20) {
5464 u32 bmcr;
5465
5466 /* Config code words received, turn on autoneg. */
5467 tg3_readphy(tp, MII_BMCR, &bmcr);
5468 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5469
f07e9af3 5470 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
747e8f8b
MC
5471
5472 }
5473 }
5474}
5475
5476 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5477 {
5478 u32 val;
5479 int err;
5480
5481 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5482 err = tg3_setup_fiber_phy(tp, force_reset);
5483 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5484 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5485 else
5486 err = tg3_setup_copper_phy(tp, force_reset);
5487
5488 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5489 u32 scale;
5490
5491 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5492 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5493 scale = 65;
5494 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5495 scale = 6;
5496 else
5497 scale = 12;
5498
5499 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5500 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5501 tw32(GRC_MISC_CFG, val);
5502 }
5503
5504 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5505 (6 << TX_LENGTHS_IPG_SHIFT);
5506 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
5507 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
5508 val |= tr32(MAC_TX_LENGTHS) &
5509 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5510 TX_LENGTHS_CNT_DWN_VAL_MSK);
5511
5512 if (tp->link_config.active_speed == SPEED_1000 &&
5513 tp->link_config.active_duplex == DUPLEX_HALF)
5514 tw32(MAC_TX_LENGTHS, val |
5515 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5516 else
5517 tw32(MAC_TX_LENGTHS, val |
5518 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5519
5520 if (!tg3_flag(tp, 5705_PLUS)) {
5521 if (tp->link_up) {
5522 tw32(HOSTCC_STAT_COAL_TICKS,
5523 tp->coal.stats_block_coalesce_usecs);
5524 } else {
5525 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5526 }
5527 }
5528
5529 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5530 val = tr32(PCIE_PWR_MGMT_THRESH);
5531 if (!tp->link_up)
5532 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5533 tp->pwrmgmt_thresh;
5534 else
5535 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5536 tw32(PCIE_PWR_MGMT_THRESH, val);
5537 }
5538
5539 return err;
5540}
5541
5542/* tp->lock must be held */
5543static u64 tg3_refclk_read(struct tg3 *tp)
5544{
5545 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5546 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5547}
5548
5549/* tp->lock must be held */
5550static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5551{
5552 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5553 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5554 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5555 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5556}
5557
5558static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5559static inline void tg3_full_unlock(struct tg3 *tp);
5560static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5561{
5562 struct tg3 *tp = netdev_priv(dev);
5563
5564 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5565 SOF_TIMESTAMPING_RX_SOFTWARE |
5566 SOF_TIMESTAMPING_SOFTWARE |
5567 SOF_TIMESTAMPING_TX_HARDWARE |
5568 SOF_TIMESTAMPING_RX_HARDWARE |
5569 SOF_TIMESTAMPING_RAW_HARDWARE;
5570
5571 if (tp->ptp_clock)
5572 info->phc_index = ptp_clock_index(tp->ptp_clock);
5573 else
5574 info->phc_index = -1;
5575
5576 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5577
5578 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5579 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5580 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5581 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5582 return 0;
5583}
5584
5585static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5586{
5587 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5588 bool neg_adj = false;
5589 u32 correction = 0;
5590
5591 if (ppb < 0) {
5592 neg_adj = true;
5593 ppb = -ppb;
5594 }
5595
5596 /* Frequency adjustment is performed using hardware with a 24 bit
5597 * accumulator and a programmable correction value. On each clk, the
5598 * correction value gets added to the accumulator and when it
5599 * overflows, the time counter is incremented/decremented.
5600 *
5601 * So conversion from ppb to correction value is
5602 * ppb * (1 << 24) / 1000000000
5603 */
5604 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5605 TG3_EAV_REF_CLK_CORRECT_MASK;
5606
5607 tg3_full_lock(tp, 0);
5608
5609 if (correction)
5610 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5611 TG3_EAV_REF_CLK_CORRECT_EN |
5612 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5613 else
5614 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5615
5616 tg3_full_unlock(tp);
5617
5618 return 0;
5619}
5620
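/* Worked example for the ppb-to-correction conversion above (a sketch,
 * illustrative only, not driver code): for ppb = 120000 (+120 ppm),
 * correction = 120000 * 2^24 / 10^9 ~= 2013, so the 24-bit hardware
 * accumulator overflows roughly once every 2^24 / 2013 ~= 8334 clocks,
 * nudging the time counter by one count each overflow.
 */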
5621static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5622{
5623 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5624
5625 tg3_full_lock(tp, 0);
5626 tp->ptp_adjust += delta;
5627 tg3_full_unlock(tp);
5628
5629 return 0;
5630}
5631
5632static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5633{
5634 u64 ns;
5635 u32 remainder;
5636 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5637
5638 tg3_full_lock(tp, 0);
5639 ns = tg3_refclk_read(tp);
5640 ns += tp->ptp_adjust;
5641 tg3_full_unlock(tp);
5642
5643 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5644 ts->tv_nsec = remainder;
5645
5646 return 0;
5647}
5648
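/* Example of the gettime split above (illustrative only): for
 * ns = 1500000042, div_u64_rem() yields tv_sec = 1 with the remainder
 * of ns / 10^9, i.e. tv_nsec = 500000042.
 */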
5649static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5650 const struct timespec *ts)
5651{
5652 u64 ns;
5653 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5654
5655 ns = timespec_to_ns(ts);
5656
5657 tg3_full_lock(tp, 0);
5658 tg3_refclk_write(tp, ns);
5659 tp->ptp_adjust = 0;
5660 tg3_full_unlock(tp);
5661
5662 return 0;
5663}
5664
5665static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5666 struct ptp_clock_request *rq, int on)
5667{
5668 return -EOPNOTSUPP;
5669}
5670
5671static const struct ptp_clock_info tg3_ptp_caps = {
5672 .owner = THIS_MODULE,
5673 .name = "tg3 clock",
5674 .max_adj = 250000000,
5675 .n_alarm = 0,
5676 .n_ext_ts = 0,
5677 .n_per_out = 0,
5678 .pps = 0,
5679 .adjfreq = tg3_ptp_adjfreq,
5680 .adjtime = tg3_ptp_adjtime,
5681 .gettime = tg3_ptp_gettime,
5682 .settime = tg3_ptp_settime,
5683 .enable = tg3_ptp_enable,
5684};
5685
5686static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5687 struct skb_shared_hwtstamps *timestamp)
5688{
5689 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5690 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5691 tp->ptp_adjust);
5692}
5693
5694/* tp->lock must be held */
5695static void tg3_ptp_init(struct tg3 *tp)
5696{
5697 if (!tg3_flag(tp, PTP_CAPABLE))
5698 return;
5699
5700 /* Initialize the hardware clock to the system time. */
5701 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5702 tp->ptp_adjust = 0;
5703 tp->ptp_info = tg3_ptp_caps;
5704}
5705
5706/* tp->lock must be held */
5707static void tg3_ptp_resume(struct tg3 *tp)
5708{
5709 if (!tg3_flag(tp, PTP_CAPABLE))
5710 return;
5711
5712 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5713 tp->ptp_adjust = 0;
5714}
5715
5716static void tg3_ptp_fini(struct tg3 *tp)
5717{
5718 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5719 return;
5720
5721 ptp_clock_unregister(tp->ptp_clock);
5722 tp->ptp_clock = NULL;
5723 tp->ptp_adjust = 0;
5724}
5725
5726static inline int tg3_irq_sync(struct tg3 *tp)
5727{
5728 return tp->irq_sync;
5729}
5730
5731static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5732{
5733 int i;
5734
5735 dst = (u32 *)((u8 *)dst + off);
5736 for (i = 0; i < len; i += sizeof(u32))
5737 *dst++ = tr32(off + i);
5738}
5739
5740static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5741{
5742 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5743 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5744 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5745 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5746 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5747 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5748 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5749 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5750 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5751 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5752 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5753 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5754 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5755 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5756 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5757 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5758 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5759 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5760 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5761
5762 if (tg3_flag(tp, SUPPORT_MSIX))
5763 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5764
5765 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5766 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5767 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5768 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5769 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5770 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5771 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5772 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5773
5774 if (!tg3_flag(tp, 5705_PLUS)) {
5775 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5776 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5777 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5778 }
5779
5780 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5781 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5782 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5783 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5784 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5785
5786 if (tg3_flag(tp, NVRAM))
5787 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5788}
5789
5790static void tg3_dump_state(struct tg3 *tp)
5791{
5792 int i;
5793 u32 *regs;
5794
5795 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5796 if (!regs) {
5797 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5798 return;
5799 }
5800
5801 if (tg3_flag(tp, PCI_EXPRESS)) {
5802 /* Read up to but not including private PCI registers */
5803 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5804 regs[i / sizeof(u32)] = tr32(i);
5805 } else
5806 tg3_dump_legacy_regs(tp, regs);
5807
5808 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5809 if (!regs[i + 0] && !regs[i + 1] &&
5810 !regs[i + 2] && !regs[i + 3])
5811 continue;
5812
5813 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5814 i * 4,
5815 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5816 }
5817
5818 kfree(regs);
5819
5820 for (i = 0; i < tp->irq_cnt; i++) {
5821 struct tg3_napi *tnapi = &tp->napi[i];
5822
5823 /* SW status block */
5824 netdev_err(tp->dev,
5825 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5826 i,
5827 tnapi->hw_status->status,
5828 tnapi->hw_status->status_tag,
5829 tnapi->hw_status->rx_jumbo_consumer,
5830 tnapi->hw_status->rx_consumer,
5831 tnapi->hw_status->rx_mini_consumer,
5832 tnapi->hw_status->idx[0].rx_producer,
5833 tnapi->hw_status->idx[0].tx_consumer);
5834
5835 netdev_err(tp->dev,
5836 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5837 i,
5838 tnapi->last_tag, tnapi->last_irq_tag,
5839 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5840 tnapi->rx_rcb_ptr,
5841 tnapi->prodring.rx_std_prod_idx,
5842 tnapi->prodring.rx_std_cons_idx,
5843 tnapi->prodring.rx_jmb_prod_idx,
5844 tnapi->prodring.rx_jmb_cons_idx);
5845 }
5846}
5847
5848/* This is called whenever we suspect that the system chipset is re-
5849 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5850 * is bogus tx completions. We try to recover by setting the
5851 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5852 * in the workqueue.
5853 */
5854static void tg3_tx_recover(struct tg3 *tp)
5855{
5856 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5857 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5858
5859 netdev_warn(tp->dev,
5860 "The system may be re-ordering memory-mapped I/O "
5861 "cycles to the network device, attempting to recover. "
5862 "Please report the problem to the driver maintainer "
5863 "and include system chipset information.\n");
5864
5865 spin_lock(&tp->lock);
5866 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5867 spin_unlock(&tp->lock);
5868}
5869
5870 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5871 {
5872 /* Tell compiler to fetch tx indices from memory. */
5873 barrier();
5874 return tnapi->tx_pending -
5875 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5876}
5877
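/* Worked example of the ring math above (illustrative only): with
 * TG3_TX_RING_SIZE = 512, tx_prod = 5 and tx_cons = 510, the masked
 * difference (5 - 510) & 511 = 7 descriptors are in flight, so a
 * tx_pending of 511 leaves 504 free slots. The mask keeps the
 * subtraction correct across u32 wraparound of the indices.
 */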
5878/* Tigon3 never reports partial packet sends. So we do not
5879 * need special logic to handle SKBs that have not had all
5880 * of their frags sent yet, like SunGEM does.
5881 */
5882 static void tg3_tx(struct tg3_napi *tnapi)
5883 {
5884 struct tg3 *tp = tnapi->tp;
5885 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5886 u32 sw_idx = tnapi->tx_cons;
5887 struct netdev_queue *txq;
5888 int index = tnapi - tp->napi;
5889 unsigned int pkts_compl = 0, bytes_compl = 0;
5890
5891 if (tg3_flag(tp, ENABLE_TSS))
5892 index--;
5893
5894 txq = netdev_get_tx_queue(tp->dev, index);
5895
5896 while (sw_idx != hw_idx) {
5897 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5898 struct sk_buff *skb = ri->skb;
5899 int i, tx_bug = 0;
5900
5901 if (unlikely(skb == NULL)) {
5902 tg3_tx_recover(tp);
5903 return;
5904 }
5905
5906 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
5907 struct skb_shared_hwtstamps timestamp;
5908 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
5909 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
5910
5911 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
5912
5913 skb_tstamp_tx(skb, &timestamp);
5914 }
5915
5916 pci_unmap_single(tp->pdev,
5917 dma_unmap_addr(ri, mapping),
5918 skb_headlen(skb),
5919 PCI_DMA_TODEVICE);
5920
5921 ri->skb = NULL;
5922
5923 while (ri->fragmented) {
5924 ri->fragmented = false;
5925 sw_idx = NEXT_TX(sw_idx);
5926 ri = &tnapi->tx_buffers[sw_idx];
5927 }
5928
5929 sw_idx = NEXT_TX(sw_idx);
5930
5931 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5932 ri = &tnapi->tx_buffers[sw_idx];
5933 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5934 tx_bug = 1;
5935
5936 pci_unmap_page(tp->pdev,
5937 dma_unmap_addr(ri, mapping),
5938 skb_frag_size(&skb_shinfo(skb)->frags[i]),
5939 PCI_DMA_TODEVICE);
5940
5941 while (ri->fragmented) {
5942 ri->fragmented = false;
5943 sw_idx = NEXT_TX(sw_idx);
5944 ri = &tnapi->tx_buffers[sw_idx];
5945 }
5946
5947 sw_idx = NEXT_TX(sw_idx);
5948 }
5949
5950 pkts_compl++;
5951 bytes_compl += skb->len;
5952
5953 dev_kfree_skb(skb);
5954
5955 if (unlikely(tx_bug)) {
5956 tg3_tx_recover(tp);
5957 return;
5958 }
5959 }
5960
5961 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5962
5963 tnapi->tx_cons = sw_idx;
5964
5965 /* Need to make the tx_cons update visible to tg3_start_xmit()
5966 * before checking for netif_queue_stopped(). Without the
5967 * memory barrier, there is a small possibility that tg3_start_xmit()
5968 * will miss it and cause the queue to be stopped forever.
5969 */
5970 smp_mb();
5971
5972 if (unlikely(netif_tx_queue_stopped(txq) &&
5973 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5974 __netif_tx_lock(txq, smp_processor_id());
5975 if (netif_tx_queue_stopped(txq) &&
5976 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5977 netif_tx_wake_queue(txq);
5978 __netif_tx_unlock(txq);
5979 }
5980}
5981
5982static void tg3_frag_free(bool is_frag, void *data)
5983{
5984 if (is_frag)
5985 put_page(virt_to_head_page(data));
5986 else
5987 kfree(data);
5988}
5989
5990 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5991 {
5992 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5993 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5994
5995 if (!ri->data)
5996 return;
5997
5998 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5999 map_sz, PCI_DMA_FROMDEVICE);
6000 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6001 ri->data = NULL;
6002}
6003
6004
6005/* Returns size of skb allocated or < 0 on error.
6006 *
6007 * We only need to fill in the address because the other members
6008 * of the RX descriptor are invariant, see tg3_init_rings.
6009 *
6010 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6011 * posting buffers we only dirty the first cache line of the RX
6012 * descriptor (containing the address). Whereas for the RX status
6013 * buffers the cpu only reads the last cacheline of the RX descriptor
6014 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6015 */
6016 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6017 u32 opaque_key, u32 dest_idx_unmasked,
6018 unsigned int *frag_size)
6019{
6020 struct tg3_rx_buffer_desc *desc;
6021 struct ring_info *map;
6022 u8 *data;
6023 dma_addr_t mapping;
6024 int skb_size, data_size, dest_idx;
6025
6026 switch (opaque_key) {
6027 case RXD_OPAQUE_RING_STD:
6028 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6029 desc = &tpr->rx_std[dest_idx];
6030 map = &tpr->rx_std_buffers[dest_idx];
6031 data_size = tp->rx_pkt_map_sz;
6032 break;
6033
6034 case RXD_OPAQUE_RING_JUMBO:
6035 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6036 desc = &tpr->rx_jmb[dest_idx].std;
6037 map = &tpr->rx_jmb_buffers[dest_idx];
6038 data_size = TG3_RX_JMB_MAP_SZ;
6039 break;
6040
6041 default:
6042 return -EINVAL;
6043 }
6044
6045 /* Do not overwrite any of the map or rp information
6046 * until we are sure we can commit to a new buffer.
6047 *
6048 * Callers depend upon this behavior and assume that
6049 * we leave everything unchanged if we fail.
6050 */
6051 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6052 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6053 if (skb_size <= PAGE_SIZE) {
6054 data = netdev_alloc_frag(skb_size);
6055 *frag_size = skb_size;
6056 } else {
6057 data = kmalloc(skb_size, GFP_ATOMIC);
6058 *frag_size = 0;
6059 }
6060 if (!data)
6061 return -ENOMEM;
6062
6063 mapping = pci_map_single(tp->pdev,
6064 data + TG3_RX_OFFSET(tp),
6065 data_size,
6066 PCI_DMA_FROMDEVICE);
6067 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6068 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6069 return -EIO;
6070 }
6071
6072 map->data = data;
6073 dma_unmap_addr_set(map, mapping, mapping);
6074
6075 desc->addr_hi = ((u64)mapping >> 32);
6076 desc->addr_lo = ((u64)mapping & 0xffffffff);
6077
6078 return data_size;
6079}
6080
6081/* We only need to move over in the address because the other
6082 * members of the RX descriptor are invariant. See notes above
6083 * tg3_alloc_rx_data for full details.
6084 */
6085static void tg3_recycle_rx(struct tg3_napi *tnapi,
6086 struct tg3_rx_prodring_set *dpr,
6087 u32 opaque_key, int src_idx,
6088 u32 dest_idx_unmasked)
6089 {
6090 struct tg3 *tp = tnapi->tp;
6091 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6092 struct ring_info *src_map, *dest_map;
6093 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6094 int dest_idx;
6095
6096 switch (opaque_key) {
6097 case RXD_OPAQUE_RING_STD:
6098 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6099 dest_desc = &dpr->rx_std[dest_idx];
6100 dest_map = &dpr->rx_std_buffers[dest_idx];
6101 src_desc = &spr->rx_std[src_idx];
6102 src_map = &spr->rx_std_buffers[src_idx];
6103 break;
6104
6105 case RXD_OPAQUE_RING_JUMBO:
6106 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6107 dest_desc = &dpr->rx_jmb[dest_idx].std;
6108 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6109 src_desc = &spr->rx_jmb[src_idx].std;
6110 src_map = &spr->rx_jmb_buffers[src_idx];
6111 break;
6112
6113 default:
6114 return;
6115 }
6116
6117 dest_map->data = src_map->data;
6118 dma_unmap_addr_set(dest_map, mapping,
6119 dma_unmap_addr(src_map, mapping));
6120 dest_desc->addr_hi = src_desc->addr_hi;
6121 dest_desc->addr_lo = src_desc->addr_lo;
6122
6123 /* Ensure that the update to the skb happens after the physical
6124 * addresses have been transferred to the new BD location.
6125 */
6126 smp_wmb();
6127
6128 src_map->data = NULL;
6129}
6130
6131/* The RX ring scheme is composed of multiple rings which post fresh
6132 * buffers to the chip, and one special ring the chip uses to report
6133 * status back to the host.
6134 *
6135 * The special ring reports the status of received packets to the
6136 * host. The chip does not write into the original descriptor the
6137 * RX buffer was obtained from. The chip simply takes the original
6138 * descriptor as provided by the host, updates the status and length
6139 * field, then writes this into the next status ring entry.
6140 *
6141 * Each ring the host uses to post buffers to the chip is described
6142 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6143 * it is first placed into the on-chip ram. When the packet's length
6144 * is known, it walks down the TG3_BDINFO entries to select the ring.
6145 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6146 * which is within the range of the new packet's length is chosen.
6147 *
6148 * The "separate ring for rx status" scheme may sound queer, but it makes
6149 * sense from a cache coherency perspective. If only the host writes
6150 * to the buffer post rings, and only the chip writes to the rx status
6151 * rings, then cache lines never move beyond shared-modified state.
6152 * If both the host and chip were to write into the same ring, cache line
6153 * eviction could occur since both entities want it in an exclusive state.
6154 */
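/* Illustrative sketch (not part of the driver): how the receive path
 * below decomposes a status-ring entry's opaque cookie to locate the
 * original producer-ring buffer. RXD_OPAQUE_RING_MASK and
 * RXD_OPAQUE_INDEX_MASK are the real driver masks; this helper itself
 * is hypothetical and only exists to show the decode.
 */
static inline void tg3_example_decode_opaque(u32 opaque, u32 *ring, u32 *idx)
{
	*ring = opaque & RXD_OPAQUE_RING_MASK;	/* STD vs. JUMBO ring */
	*idx = opaque & RXD_OPAQUE_INDEX_MASK;	/* slot in that ring */
}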
6155 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6156 {
6157 struct tg3 *tp = tnapi->tp;
6158 u32 work_mask, rx_std_posted = 0;
6159 u32 std_prod_idx, jmb_prod_idx;
6160 u32 sw_idx = tnapi->rx_rcb_ptr;
6161 u16 hw_idx;
6162 int received;
6163 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6164
6165 hw_idx = *(tnapi->rx_rcb_prod_idx);
6166 /*
6167 * We need to order the read of hw_idx and the read of
6168 * the opaque cookie.
6169 */
6170 rmb();
6171 work_mask = 0;
6172 received = 0;
6173 std_prod_idx = tpr->rx_std_prod_idx;
6174 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6175 while (sw_idx != hw_idx && budget > 0) {
6176 struct ring_info *ri;
6177 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6178 unsigned int len;
6179 struct sk_buff *skb;
6180 dma_addr_t dma_addr;
6181 u32 opaque_key, desc_idx, *post_ptr;
6182 u8 *data;
6183 u64 tstamp = 0;
6184
6185 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6186 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6187 if (opaque_key == RXD_OPAQUE_RING_STD) {
8fea32b9 6188 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4e5e4f0d 6189 dma_addr = dma_unmap_addr(ri, mapping);
9205fd9c 6190 data = ri->data;
4361935a 6191 post_ptr = &std_prod_idx;
f92905de 6192 rx_std_posted++;
1da177e4 6193 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
8fea32b9 6194 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4e5e4f0d 6195 dma_addr = dma_unmap_addr(ri, mapping);
9205fd9c 6196 data = ri->data;
4361935a 6197 post_ptr = &jmb_prod_idx;
21f581a5 6198 } else
1da177e4 6199 goto next_pkt_nopost;
1da177e4
LT
6200
6201 work_mask |= opaque_key;
6202
6203 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6204 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6205 drop_it:
6206 tg3_recycle_rx(tnapi, tpr, opaque_key,
6207 desc_idx, *post_ptr);
6208 drop_it_no_recycle:
6209 /* Other statistics kept track of by card. */
6210 tp->rx_dropped++;
6211 goto next_pkt;
6212 }
6213
6214 prefetch(data + TG3_RX_OFFSET(tp));
6215 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6216 ETH_FCS_LEN;
6217
6218 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6219 RXD_FLAG_PTPSTAT_PTPV1 ||
6220 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6221 RXD_FLAG_PTPSTAT_PTPV2) {
6222 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6223 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6224 }
6225
6226 if (len > TG3_RX_COPY_THRESH(tp)) {
6227 int skb_size;
6228 unsigned int frag_size;
6229
6230 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6231 *post_ptr, &frag_size);
6232 if (skb_size < 0)
6233 goto drop_it;
6234
6235 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6236 PCI_DMA_FROMDEVICE);
6237
6238 skb = build_skb(data, frag_size);
6239 if (!skb) {
6240 tg3_frag_free(frag_size != 0, data);
6241 goto drop_it_no_recycle;
6242 }
6243 skb_reserve(skb, TG3_RX_OFFSET(tp));
6244 /* Ensure that the update to the data happens
6245 * after the usage of the old DMA mapping.
6246 */
6247 smp_wmb();
6248
6249 ri->data = NULL;
6250
6251 } else {
6252 tg3_recycle_rx(tnapi, tpr, opaque_key,
6253 desc_idx, *post_ptr);
6254
6255 skb = netdev_alloc_skb(tp->dev,
6256 len + TG3_RAW_IP_ALIGN);
6257 if (skb == NULL)
6258 goto drop_it_no_recycle;
6259
6260 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6261 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6262 memcpy(skb->data,
6263 data + TG3_RX_OFFSET(tp),
6264 len);
6265 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6266 }
6267
6268 skb_put(skb, len);
6269 if (tstamp)
6270 tg3_hwclock_to_timestamp(tp, tstamp,
6271 skb_hwtstamps(skb));
6272
6273 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6274 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6275 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6276 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6277 skb->ip_summed = CHECKSUM_UNNECESSARY;
6278 else
6279 skb_checksum_none_assert(skb);
6280
6281 skb->protocol = eth_type_trans(skb, tp->dev);
6282
6283 if (len > (tp->dev->mtu + ETH_HLEN) &&
6284 skb->protocol != htons(ETH_P_8021Q)) {
6285 dev_kfree_skb(skb);
6286 goto drop_it_no_recycle;
6287 }
6288
6289 if (desc->type_flags & RXD_FLAG_VLAN &&
6290 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6291 __vlan_hwaccel_put_tag(skb,
6292 desc->err_vlan & RXD_VLAN_MASK);
6293
6294 napi_gro_receive(&tnapi->napi, skb);
6295
6296 received++;
6297 budget--;
6298
6299next_pkt:
6300 (*post_ptr)++;
6301
6302 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6303 tpr->rx_std_prod_idx = std_prod_idx &
6304 tp->rx_std_ring_mask;
6305 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6306 tpr->rx_std_prod_idx);
6307 work_mask &= ~RXD_OPAQUE_RING_STD;
6308 rx_std_posted = 0;
6309 }
6310 next_pkt_nopost:
6311 sw_idx++;
6312 sw_idx &= tp->rx_ret_ring_mask;
6313
6314 /* Refresh hw_idx to see if there is new work */
6315 if (sw_idx == hw_idx) {
6316 hw_idx = *(tnapi->rx_rcb_prod_idx);
6317 rmb();
6318 }
6319 }
6320
6321 /* ACK the status ring. */
6322 tnapi->rx_rcb_ptr = sw_idx;
6323 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6324
6325 /* Refill RX ring(s). */
6326 if (!tg3_flag(tp, ENABLE_RSS)) {
6327 /* Sync BD data before updating mailbox */
6328 wmb();
6329
6330 if (work_mask & RXD_OPAQUE_RING_STD) {
6331 tpr->rx_std_prod_idx = std_prod_idx &
6332 tp->rx_std_ring_mask;
6333 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6334 tpr->rx_std_prod_idx);
6335 }
6336 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6337 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6338 tp->rx_jmb_ring_mask;
6339 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6340 tpr->rx_jmb_prod_idx);
6341 }
6342 mmiowb();
6343 } else if (work_mask) {
6344 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6345 * updated before the producer indices can be updated.
6346 */
6347 smp_wmb();
6348
6349 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6350 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6351
6352 if (tnapi != &tp->napi[1]) {
6353 tp->rx_refill = true;
6354 napi_schedule(&tp->napi[1].napi);
6355 }
6356 }
6357
6358 return received;
6359}
6360
6361 static void tg3_poll_link(struct tg3 *tp)
6362 {
6363 /* handle link change and other phy events */
6364 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6365 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6366
6367 if (sblk->status & SD_STATUS_LINK_CHG) {
6368 sblk->status = SD_STATUS_UPDATED |
6369 (sblk->status & ~SD_STATUS_LINK_CHG);
6370 spin_lock(&tp->lock);
6371 if (tg3_flag(tp, USE_PHYLIB)) {
6372 tw32_f(MAC_STATUS,
6373 (MAC_STATUS_SYNC_CHANGED |
6374 MAC_STATUS_CFG_CHANGED |
6375 MAC_STATUS_MI_COMPLETION |
6376 MAC_STATUS_LNKSTATE_CHANGED));
6377 udelay(40);
6378 } else
6379 tg3_setup_phy(tp, 0);
6380 spin_unlock(&tp->lock);
6381 }
6382 }
6383}
6384
6385static int tg3_rx_prodring_xfer(struct tg3 *tp,
6386 struct tg3_rx_prodring_set *dpr,
6387 struct tg3_rx_prodring_set *spr)
6388{
6389 u32 si, di, cpycnt, src_prod_idx;
6390 int i, err = 0;
6391
6392 while (1) {
6393 src_prod_idx = spr->rx_std_prod_idx;
6394
6395 /* Make sure updates to the rx_std_buffers[] entries and the
6396 * standard producer index are seen in the correct order.
6397 */
6398 smp_rmb();
6399
6400 if (spr->rx_std_cons_idx == src_prod_idx)
6401 break;
6402
6403 if (spr->rx_std_cons_idx < src_prod_idx)
6404 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6405 else
6406 cpycnt = tp->rx_std_ring_mask + 1 -
6407 spr->rx_std_cons_idx;
6408
6409 cpycnt = min(cpycnt,
6410 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6411
6412 si = spr->rx_std_cons_idx;
6413 di = dpr->rx_std_prod_idx;
6414
6415 for (i = di; i < di + cpycnt; i++) {
6416 if (dpr->rx_std_buffers[i].data) {
6417 cpycnt = i - di;
6418 err = -ENOSPC;
6419 break;
6420 }
6421 }
6422
6423 if (!cpycnt)
6424 break;
6425
6426 /* Ensure that updates to the rx_std_buffers ring and the
6427 * shadowed hardware producer ring from tg3_recycle_skb() are
6428 * ordered correctly WRT the skb check above.
6429 */
6430 smp_rmb();
6431
6432 memcpy(&dpr->rx_std_buffers[di],
6433 &spr->rx_std_buffers[si],
6434 cpycnt * sizeof(struct ring_info));
6435
6436 for (i = 0; i < cpycnt; i++, di++, si++) {
6437 struct tg3_rx_buffer_desc *sbd, *dbd;
6438 sbd = &spr->rx_std[si];
6439 dbd = &dpr->rx_std[di];
6440 dbd->addr_hi = sbd->addr_hi;
6441 dbd->addr_lo = sbd->addr_lo;
6442 }
6443
6444 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6445 tp->rx_std_ring_mask;
6446 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6447 tp->rx_std_ring_mask;
6448 }
6449
6450 while (1) {
6451 src_prod_idx = spr->rx_jmb_prod_idx;
6452
6453 /* Make sure updates to the rx_jmb_buffers[] entries and
6454 * the jumbo producer index are seen in the correct order.
6455 */
6456 smp_rmb();
6457
6458 if (spr->rx_jmb_cons_idx == src_prod_idx)
6459 break;
6460
6461 if (spr->rx_jmb_cons_idx < src_prod_idx)
6462 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6463 else
2c49a44d
MC
6464 cpycnt = tp->rx_jmb_ring_mask + 1 -
6465 spr->rx_jmb_cons_idx;
6466
6467 cpycnt = min(cpycnt,
6468 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6469
6470 si = spr->rx_jmb_cons_idx;
6471 di = dpr->rx_jmb_prod_idx;
6472
6473 for (i = di; i < di + cpycnt; i++) {
6474 if (dpr->rx_jmb_buffers[i].data) {
6475 cpycnt = i - di;
6476 err = -ENOSPC;
6477 break;
6478 }
6479 }
6480
6481 if (!cpycnt)
6482 break;
6483
6484 /* Ensure that updates to the rx_jmb_buffers ring and the
6485 * shadowed hardware producer ring from tg3_recycle_skb() are
6486 * ordered correctly WRT the skb check above.
6487 */
6488 smp_rmb();
6489
6490 memcpy(&dpr->rx_jmb_buffers[di],
6491 &spr->rx_jmb_buffers[si],
6492 cpycnt * sizeof(struct ring_info));
6493
6494 for (i = 0; i < cpycnt; i++, di++, si++) {
6495 struct tg3_rx_buffer_desc *sbd, *dbd;
6496 sbd = &spr->rx_jmb[si].std;
6497 dbd = &dpr->rx_jmb[di].std;
6498 dbd->addr_hi = sbd->addr_hi;
6499 dbd->addr_lo = sbd->addr_lo;
6500 }
6501
6502 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6503 tp->rx_jmb_ring_mask;
6504 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6505 tp->rx_jmb_ring_mask;
6506 }
6507
6508 return err;
6509}
6510
6511static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6512{
6513 struct tg3 *tp = tnapi->tp;
6514
6515 /* run TX completion thread */
6516 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6517 tg3_tx(tnapi);
6518 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6519 return work_done;
6520 }
6521
6522 if (!tnapi->rx_rcb_prod_idx)
6523 return work_done;
6524
6525 /* run RX thread, within the bounds set by NAPI.
6526 * All RX "locking" is done by ensuring outside
6527 * code synchronizes with tg3->napi.poll()
6528 */
6529 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6530 work_done += tg3_rx(tnapi, budget - work_done);
6531
6532 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6533 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6534 int i, err = 0;
6535 u32 std_prod_idx = dpr->rx_std_prod_idx;
6536 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6537
6538 tp->rx_refill = false;
6539 for (i = 1; i <= tp->rxq_cnt; i++)
6540 err |= tg3_rx_prodring_xfer(tp, dpr,
6541 &tp->napi[i].prodring);
6542
6543 wmb();
6544
6545 if (std_prod_idx != dpr->rx_std_prod_idx)
6546 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6547 dpr->rx_std_prod_idx);
6548
6549 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6550 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6551 dpr->rx_jmb_prod_idx);
6552
6553 mmiowb();
6554
6555 if (err)
6556 tw32_f(HOSTCC_MODE, tp->coal_now);
6557 }
6558
6559 return work_done;
6560}
6561
6562static inline void tg3_reset_task_schedule(struct tg3 *tp)
6563{
6564 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6565 schedule_work(&tp->reset_task);
6566}
6567
6568static inline void tg3_reset_task_cancel(struct tg3 *tp)
6569{
6570 cancel_work_sync(&tp->reset_task);
6571 tg3_flag_clear(tp, RESET_TASK_PENDING);
6572 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6573}
6574
6575static int tg3_poll_msix(struct napi_struct *napi, int budget)
6576{
6577 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6578 struct tg3 *tp = tnapi->tp;
6579 int work_done = 0;
6580 struct tg3_hw_status *sblk = tnapi->hw_status;
6581
6582 while (1) {
6583 work_done = tg3_poll_work(tnapi, work_done, budget);
6584
6585 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6586 goto tx_recovery;
6587
6588 if (unlikely(work_done >= budget))
6589 break;
6590
6591 /* tp->last_tag is used in tg3_int_reenable() below
6592 * to tell the hw how much work has been processed,
6593 * so we must read it before checking for more work.
6594 */
6595 tnapi->last_tag = sblk->status_tag;
6596 tnapi->last_irq_tag = tnapi->last_tag;
6597 rmb();
6598
6599 /* check for RX/TX work to do */
6600 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6601 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6602
6603 /* This test here is not race free, but will reduce
6604 * the number of interrupts by looping again.
6605 */
6606 if (tnapi == &tp->napi[1] && tp->rx_refill)
6607 continue;
6608
35f2d7d0
MC
6609 napi_complete(napi);
6610 /* Reenable interrupts. */
6611 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6612
6613 /* This test here is synchronized by napi_schedule()
6614 * and napi_complete() to close the race condition.
6615 */
6616 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6617 tw32(HOSTCC_MODE, tp->coalesce_mode |
6618 HOSTCC_MODE_ENABLE |
6619 tnapi->coal_now);
6620 }
6621 mmiowb();
6622 break;
6623 }
6624 }
6625
6626 return work_done;
6627
6628tx_recovery:
6629 /* work_done is guaranteed to be less than budget. */
6630 napi_complete(napi);
6631 tg3_reset_task_schedule(tp);
6632 return work_done;
6633}
6634
6635static void tg3_process_error(struct tg3 *tp)
6636{
6637 u32 val;
6638 bool real_error = false;
6639
6640 if (tg3_flag(tp, ERROR_PROCESSED))
6641 return;
6642
6643 /* Check Flow Attention register */
6644 val = tr32(HOSTCC_FLOW_ATTN);
6645 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6646 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6647 real_error = true;
6648 }
6649
6650 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6651 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6652 real_error = true;
6653 }
6654
6655 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6656 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6657 real_error = true;
6658 }
6659
6660 if (!real_error)
6661 return;
6662
6663 tg3_dump_state(tp);
6664
6665 tg3_flag_set(tp, ERROR_PROCESSED);
6666 tg3_reset_task_schedule(tp);
6667}
6668
6669static int tg3_poll(struct napi_struct *napi, int budget)
6670{
6671 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6672 struct tg3 *tp = tnapi->tp;
6673 int work_done = 0;
6674 struct tg3_hw_status *sblk = tnapi->hw_status;
6675
6676 while (1) {
6677 if (sblk->status & SD_STATUS_ERROR)
6678 tg3_process_error(tp);
6679
6680 tg3_poll_link(tp);
6681
6682 work_done = tg3_poll_work(tnapi, work_done, budget);
6683
6684 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6685 goto tx_recovery;
6686
6687 if (unlikely(work_done >= budget))
6688 break;
6689
63c3a66f 6690 if (tg3_flag(tp, TAGGED_STATUS)) {
17375d25 6691 /* tp->last_tag is used in tg3_int_reenable() below
4fd7ab59
MC
6692 * to tell the hw how much work has been processed,
6693 * so we must read it before checking for more work.
6694 */
6695 tnapi->last_tag = sblk->status_tag;
6696 tnapi->last_irq_tag = tnapi->last_tag;
6697 rmb();
6698 } else
6699 sblk->status &= ~SD_STATUS_UPDATED;
6700
6701 if (likely(!tg3_has_work(tnapi))) {
6702 napi_complete(napi);
6703 tg3_int_reenable(tnapi);
6704 break;
6705 }
6706 }
6707
6708 return work_done;
6709
6710tx_recovery:
6711 /* work_done is guaranteed to be less than budget. */
6712 napi_complete(napi);
6713 tg3_reset_task_schedule(tp);
6714 return work_done;
6715}
6716
6717static void tg3_napi_disable(struct tg3 *tp)
6718{
6719 int i;
6720
6721 for (i = tp->irq_cnt - 1; i >= 0; i--)
6722 napi_disable(&tp->napi[i].napi);
6723}
6724
6725static void tg3_napi_enable(struct tg3 *tp)
6726{
6727 int i;
6728
6729 for (i = 0; i < tp->irq_cnt; i++)
6730 napi_enable(&tp->napi[i].napi);
6731}
6732
6733static void tg3_napi_init(struct tg3 *tp)
6734{
6735 int i;
6736
6737 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6738 for (i = 1; i < tp->irq_cnt; i++)
6739 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6740}
6741
6742static void tg3_napi_fini(struct tg3 *tp)
6743{
6744 int i;
6745
6746 for (i = 0; i < tp->irq_cnt; i++)
6747 netif_napi_del(&tp->napi[i].napi);
6748}
6749
6750static inline void tg3_netif_stop(struct tg3 *tp)
6751{
6752 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6753 tg3_napi_disable(tp);
6754 netif_carrier_off(tp->dev);
6755 netif_tx_disable(tp->dev);
6756}
6757
6758 /* tp->lock must be held */
6759static inline void tg3_netif_start(struct tg3 *tp)
6760{
6761 tg3_ptp_resume(tp);
6762
6763 /* NOTE: unconditional netif_tx_wake_all_queues is only
6764 * appropriate so long as all callers are assured to
6765 * have free tx slots (such as after tg3_init_hw)
6766 */
6767 netif_tx_wake_all_queues(tp->dev);
6768
6769 if (tp->link_up)
6770 netif_carrier_on(tp->dev);
6771
6772 tg3_napi_enable(tp);
6773 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6774 tg3_enable_ints(tp);
6775}
6776
6777static void tg3_irq_quiesce(struct tg3 *tp)
6778{
6779 int i;
6780
6781 BUG_ON(tp->irq_sync);
6782
6783 tp->irq_sync = 1;
6784 smp_mb();
6785
6786 for (i = 0; i < tp->irq_cnt; i++)
6787 synchronize_irq(tp->napi[i].irq_vec);
6788}
6789
6790/* Fully shutdown all tg3 driver activity elsewhere in the system.
6791 * If irq_sync is non-zero, then the IRQ handler must be synchronized
6792 * with as well. Most of the time, this is not necessary except when
6793 * shutting down the device.
6794 */
6795static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6796{
6797 spin_lock_bh(&tp->lock);
6798 if (irq_sync)
6799 tg3_irq_quiesce(tp);
6800}
6801
6802static inline void tg3_full_unlock(struct tg3 *tp)
6803{
6804 spin_unlock_bh(&tp->lock);
6805}
6806
6807/* One-shot MSI handler - Chip automatically disables interrupt
6808 * after sending MSI so driver doesn't have to do it.
6809 */
7d12e780 6810static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
fcfa0a32 6811{
09943a18
MC
6812 struct tg3_napi *tnapi = dev_id;
6813 struct tg3 *tp = tnapi->tp;
fcfa0a32 6814
898a56f8 6815 prefetch(tnapi->hw_status);
0c1d0e2b
MC
6816 if (tnapi->rx_rcb)
6817 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
fcfa0a32
MC
6818
6819 if (likely(!tg3_irq_sync(tp)))
09943a18 6820 napi_schedule(&tnapi->napi);
fcfa0a32
MC
6821
6822 return IRQ_HANDLED;
6823}
6824
88b06bc2
MC
6825/* MSI ISR - No need to check for interrupt sharing and no need to
6826 * flush status block and interrupt mailbox. PCI ordering rules
6827 * guarantee that MSI will arrive after the status block.
6828 */
7d12e780 6829static irqreturn_t tg3_msi(int irq, void *dev_id)
88b06bc2 6830{
09943a18
MC
6831 struct tg3_napi *tnapi = dev_id;
6832 struct tg3 *tp = tnapi->tp;
88b06bc2 6833
898a56f8 6834 prefetch(tnapi->hw_status);
0c1d0e2b
MC
6835 if (tnapi->rx_rcb)
6836 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
88b06bc2 6837 /*
fac9b83e 6838 * Writing any value to intr-mbox-0 clears PCI INTA# and
88b06bc2 6839 * chip-internal interrupt pending events.
fac9b83e 6840 * Writing non-zero to intr-mbox-0 additional tells the
88b06bc2
MC
6841 * NIC to stop sending us irqs, engaging "in-intr-handler"
6842 * event coalescing.
6843 */
5b39de91 6844 tw32_mailbox(tnapi->int_mbox, 0x00000001);
61487480 6845 if (likely(!tg3_irq_sync(tp)))
09943a18 6846 napi_schedule(&tnapi->napi);
61487480 6847
88b06bc2
MC
6848 return IRQ_RETVAL(1);
6849}
6850
7d12e780 6851static irqreturn_t tg3_interrupt(int irq, void *dev_id)
1da177e4 6852{
09943a18
MC
6853 struct tg3_napi *tnapi = dev_id;
6854 struct tg3 *tp = tnapi->tp;
898a56f8 6855 struct tg3_hw_status *sblk = tnapi->hw_status;
1da177e4
LT
6856 unsigned int handled = 1;
6857
1da177e4
LT
6858 /* In INTx mode, it is possible for the interrupt to arrive at
6859 * the CPU before the status block posted prior to the interrupt.
6860 * Reading the PCI State register will confirm whether the
6861 * interrupt is ours and will flush the status block.
6862 */
6863 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6864 if (tg3_flag(tp, CHIP_RESETTING) ||
6865 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6866 handled = 0;
6867 goto out;
6868 }
6869 }
6870
6871 /*
6872 * Writing any value to intr-mbox-0 clears PCI INTA# and
6873 * chip-internal interrupt pending events.
6874 * Writing non-zero to intr-mbox-0 additionally tells the
6875 * NIC to stop sending us irqs, engaging "in-intr-handler"
6876 * event coalescing.
6877 *
6878 * Flush the mailbox to de-assert the IRQ immediately to prevent
6879 * spurious interrupts. The flush impacts performance but
6880 * excessive spurious interrupts can be worse in some cases.
6881 */
6882 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6883 if (tg3_irq_sync(tp))
6884 goto out;
6885 sblk->status &= ~SD_STATUS_UPDATED;
6886 if (likely(tg3_has_work(tnapi))) {
6887 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6888 napi_schedule(&tnapi->napi);
6889 } else {
6890 /* No work, shared interrupt perhaps? re-enable
6891 * interrupts, and flush that PCI write
6892 */
6893 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6894 0x00000000);
6895 }
6896 out:
6897 return IRQ_RETVAL(handled);
6898}
6899
6900 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6901 {
6902 struct tg3_napi *tnapi = dev_id;
6903 struct tg3 *tp = tnapi->tp;
6904 struct tg3_hw_status *sblk = tnapi->hw_status;
6905 unsigned int handled = 1;
6906
6907 /* In INTx mode, it is possible for the interrupt to arrive at
6908 * the CPU before the status block posted prior to the interrupt.
6909 * Reading the PCI State register will confirm whether the
6910 * interrupt is ours and will flush the status block.
6911 */
6912 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6913 if (tg3_flag(tp, CHIP_RESETTING) ||
6914 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6915 handled = 0;
6916 goto out;
6917 }
6918 }
6919
6920 /*
6921 * writing any value to intr-mbox-0 clears PCI INTA# and
6922 * chip-internal interrupt pending events.
6923 * writing non-zero to intr-mbox-0 additionally tells the
6924 * NIC to stop sending us irqs, engaging "in-intr-handler"
6925 * event coalescing.
6926 *
6927 * Flush the mailbox to de-assert the IRQ immediately to prevent
6928 * spurious interrupts. The flush impacts performance but
6929 * excessive spurious interrupts can be worse in some cases.
6930 */
6931 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6932
6933 /*
6934 * In a shared interrupt configuration, sometimes other devices'
6935 * interrupts will scream. We record the current status tag here
6936 * so that the above check can report that the screaming interrupts
6937 * are unhandled. Eventually they will be silenced.
6938 */
6939 tnapi->last_irq_tag = sblk->status_tag;
6940
6941 if (tg3_irq_sync(tp))
6942 goto out;
6943
6944 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6945
6946 napi_schedule(&tnapi->napi);
6947
6948 out:
6949 return IRQ_RETVAL(handled);
6950}
6951
6952 /* ISR for interrupt test */
6953 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6954 {
6955 struct tg3_napi *tnapi = dev_id;
6956 struct tg3 *tp = tnapi->tp;
6957 struct tg3_hw_status *sblk = tnapi->hw_status;
6958
6959 if ((sblk->status & SD_STATUS_UPDATED) ||
6960 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6961 tg3_disable_ints(tp);
6962 return IRQ_RETVAL(1);
6963 }
6964 return IRQ_RETVAL(0);
6965}
6966
6967#ifdef CONFIG_NET_POLL_CONTROLLER
6968static void tg3_poll_controller(struct net_device *dev)
6969{
6970 int i;
6971 struct tg3 *tp = netdev_priv(dev);
6972
6973 if (tg3_irq_sync(tp))
6974 return;
6975
6976 for (i = 0; i < tp->irq_cnt; i++)
6977 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6978}
6979#endif
6980
6981static void tg3_tx_timeout(struct net_device *dev)
6982{
6983 struct tg3 *tp = netdev_priv(dev);
6984
6985 if (netif_msg_tx_err(tp)) {
6986 netdev_err(dev, "transmit timed out, resetting\n");
6987 tg3_dump_state(tp);
6988 }
6989
6990 tg3_reset_task_schedule(tp);
6991}
6992
6993/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6994static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6995{
6996 u32 base = (u32) mapping & 0xffffffff;
6997
6998 return (base > 0xffffdcc0) && (base + len + 8 < base);
6999}
7000
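/* Worked example for the 4GB-boundary test above (illustrative only):
 * base = 0xffffe000, len = 0x2000. Then base + len + 8 = 0x100000008,
 * which truncates to 0x8 in 32 bits and is therefore less than base,
 * so the buffer straddles a 4GB boundary and the test returns true.
 */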
7001/* Test for DMA addresses > 40-bit */
7002static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7003 int len)
7004{
7005#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7006 if (tg3_flag(tp, 40BIT_DMA_BUG))
7007 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7008 return 0;
7009#else
7010 return 0;
7011#endif
7012}
7013
7014 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7015 dma_addr_t mapping, u32 len, u32 flags,
7016 u32 mss, u32 vlan)
7017 {
7018 txbd->addr_hi = ((u64) mapping >> 32);
7019 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7020 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7021 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7022 }
7023
7024 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7025 dma_addr_t map, u32 len, u32 flags,
7026 u32 mss, u32 vlan)
7027{
7028 struct tg3 *tp = tnapi->tp;
7029 bool hwbug = false;
7030
7031 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7032 hwbug = true;
7033
7034 if (tg3_4g_overflow_test(map, len))
7035 hwbug = true;
7036
7037 if (tg3_40bit_overflow_test(tp, map, len))
7038 hwbug = true;
7039
7040 if (tp->dma_limit) {
7041 u32 prvidx = *entry;
7042 u32 tmp_flag = flags & ~TXD_FLAG_END;
7043 while (len > tp->dma_limit && *budget) {
7044 u32 frag_len = tp->dma_limit;
7045 len -= tp->dma_limit;
7046
7047 /* Avoid the 8byte DMA problem */
7048 if (len <= 8) {
7049 len += tp->dma_limit / 2;
7050 frag_len = tp->dma_limit / 2;
7051 }
7052
7053 tnapi->tx_buffers[*entry].fragmented = true;
7054
7055 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7056 frag_len, tmp_flag, mss, vlan);
7057 *budget -= 1;
7058 prvidx = *entry;
7059 *entry = NEXT_TX(*entry);
7060
7061 map += frag_len;
7062 }
7063
7064 if (len) {
7065 if (*budget) {
7066 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7067 len, flags, mss, vlan);
7068 *budget -= 1;
7069 *entry = NEXT_TX(*entry);
7070 } else {
7071 hwbug = true;
7072 tnapi->tx_buffers[prvidx].fragmented = false;
7073 }
7074 }
7075 } else {
7076 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7077 len, flags, mss, vlan);
7078 *entry = NEXT_TX(*entry);
7079 }
7080
7081 return hwbug;
7082}
7083
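/* Worked example of the splitting logic above (illustrative only):
 * with tp->dma_limit = 4096 and len = 4100, a plain split would leave
 * a 4-byte tail, tripping the 8-byte DMA bug. The loop instead emits
 * a 2048-byte fragment (dma_limit / 2), leaving 2052 bytes for the
 * final descriptor, so no emitted fragment is ever 8 bytes or less.
 */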
7084 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7085{
7086 int i;
7087 struct sk_buff *skb;
7088 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7089
7090 skb = txb->skb;
7091 txb->skb = NULL;
7092
7093 pci_unmap_single(tnapi->tp->pdev,
7094 dma_unmap_addr(txb, mapping),
7095 skb_headlen(skb),
7096 PCI_DMA_TODEVICE);
7097
7098 while (txb->fragmented) {
7099 txb->fragmented = false;
7100 entry = NEXT_TX(entry);
7101 txb = &tnapi->tx_buffers[entry];
7102 }
7103
7104 for (i = 0; i <= last; i++) {
7105 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7106
7107 entry = NEXT_TX(entry);
7108 txb = &tnapi->tx_buffers[entry];
7109
7110 pci_unmap_page(tnapi->tp->pdev,
7111 dma_unmap_addr(txb, mapping),
7112 skb_frag_size(frag), PCI_DMA_TODEVICE);
7113
7114 while (txb->fragmented) {
7115 txb->fragmented = false;
7116 entry = NEXT_TX(entry);
7117 txb = &tnapi->tx_buffers[entry];
7118 }
7119 }
7120}
7121
7122 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7123 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7124 struct sk_buff **pskb,
7125 u32 *entry, u32 *budget,
7126 u32 base_flags, u32 mss, u32 vlan)
7127 {
7128 struct tg3 *tp = tnapi->tp;
7129 struct sk_buff *new_skb, *skb = *pskb;
7130 dma_addr_t new_addr = 0;
7131 int ret = 0;
7132
7133 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
7134 new_skb = skb_copy(skb, GFP_ATOMIC);
7135 else {
7136 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7137
7138 new_skb = skb_copy_expand(skb,
7139 skb_headroom(skb) + more_headroom,
7140 skb_tailroom(skb), GFP_ATOMIC);
7141 }
7142
7143 if (!new_skb) {
7144 ret = -1;
7145 } else {
7146 /* New SKB is guaranteed to be linear. */
7147 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7148 PCI_DMA_TODEVICE);
7149 /* Make sure the mapping succeeded */
7150 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7151 dev_kfree_skb(new_skb);
7152 ret = -1;
7153 } else {
7154 u32 save_entry = *entry;
7155
7156 base_flags |= TXD_FLAG_END;
7157
7158 tnapi->tx_buffers[*entry].skb = new_skb;
7159 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7160 mapping, new_addr);
7161
7162 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7163 new_skb->len, base_flags,
7164 mss, vlan)) {
7165 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7166 dev_kfree_skb(new_skb);
7167 ret = -1;
7168 }
7169 }
7170 }
7171
7172 dev_kfree_skb(skb);
7173 *pskb = new_skb;
7174 return ret;
7175}
7176
7177 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7178
7179/* Use GSO to workaround a rare TSO bug that may be triggered when the
7180 * TSO header is greater than 80 bytes.
7181 */
7182static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7183{
7184 struct sk_buff *segs, *nskb;
7185 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7186
7187 /* Estimate the number of fragments in the worst case */
7188 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7189 netif_stop_queue(tp->dev);
7190
7191 /* netif_tx_stop_queue() must be done before checking
7192 * the tx index in tg3_tx_avail() below, because in
7193 * tg3_tx(), we update tx index before checking for
7194 * netif_tx_queue_stopped().
7195 */
7196 smp_mb();
f3f3f27e 7197 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7f62ad5d
MC
7198 return NETDEV_TX_BUSY;
7199
7200 netif_wake_queue(tp->dev);
52c0fd83
MC
7201 }
7202
7203 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
801678c5 7204 if (IS_ERR(segs))
52c0fd83
MC
7205 goto tg3_tso_bug_end;
7206
7207 do {
7208 nskb = segs;
7209 segs = segs->next;
7210 nskb->next = NULL;
2ffcc981 7211 tg3_start_xmit(nskb, tp->dev);
52c0fd83
MC
7212 } while (segs);
7213
7214tg3_tso_bug_end:
7215 dev_kfree_skb(skb);
7216
7217 return NETDEV_TX_OK;
7218}
52c0fd83 7219
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

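	/* Build the descriptor flags: checksum offload first; the TSO
	 * fields below depend on which hardware TSO generation the chip
	 * implements.
	 */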
	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

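	/* Note: for the HW_TSO_2/3 generations above, the header length is
	 * folded into spare bits of the mss/base_flags descriptor fields,
	 * presumably so the hardware can replicate the headers for each
	 * generated segment.
	 */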
	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

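	/* Map the linear head of the skb for DMA; each page fragment is
	 * mapped separately in the loop below.
	 */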
	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

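	/* A descriptor tripped one of the DMA errata (or the descriptor
	 * budget ran out); unmap everything queued so far and retry with
	 * a linearized copy of the skb.
	 */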
	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}

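/* Toggle internal MAC loopback by rewriting MAC_MODE.  Used by the
 * NETIF_F_LOOPBACK handling in tg3_set_loopback() below.
 */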
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}

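/* Force the PHY into loopback at the given speed.  With @extlpbk the
 * loop is set up externally instead of via BMCR_LOOPBACK; returns -EIO
 * if that external setup fails.
 */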
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}

static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}

static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}

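/* Free all rx buffers attached to a producer ring set.  For per-vector
 * ring sets only the cons..prod window is walked; napi[0]'s set (the
 * real hardware prodring) is drained in full.
 */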
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}

/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}

static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}

static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}

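/* The helpers below manage the per-vector host memory: tx descriptor
 * rings plus their tx_buffers bookkeeping arrays on the tx side, and
 * producer ring sets plus rx return (completion) rings on the rx side.
 */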
static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}

static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts.  Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
					    TG3_TX_RING_SIZE, GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}

static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}

static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}

8024
8025/*
8026 * Must not be invoked with interrupt sources disabled and
8027 * the hardware shutdown down. Can sleep.
8028 */
8029static int tg3_alloc_consistent(struct tg3 *tp)
8030{
f77a6a8e 8031 int i;
898a56f8 8032
4bae65c8
MC
8033 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8034 sizeof(struct tg3_hw_stats),
8035 &tp->stats_mapping,
8036 GFP_KERNEL);
f77a6a8e 8037 if (!tp->hw_stats)
1da177e4
LT
8038 goto err_out;
8039
f77a6a8e 8040 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
1da177e4 8041
f77a6a8e
MC
8042 for (i = 0; i < tp->irq_cnt; i++) {
8043 struct tg3_napi *tnapi = &tp->napi[i];
8d9d7cfc 8044 struct tg3_hw_status *sblk;
1da177e4 8045
4bae65c8
MC
8046 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8047 TG3_HW_STATUS_SIZE,
8048 &tnapi->status_mapping,
8049 GFP_KERNEL);
f77a6a8e
MC
8050 if (!tnapi->hw_status)
8051 goto err_out;
898a56f8 8052
f77a6a8e 8053 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8d9d7cfc
MC
8054 sblk = tnapi->hw_status;
8055
49a359e3 8056 if (tg3_flag(tp, ENABLE_RSS)) {
86449944 8057 u16 *prodptr = NULL;
8fea32b9 8058
49a359e3
MC
8059 /*
8060 * When RSS is enabled, the status block format changes
8061 * slightly. The "rx_jumbo_consumer", "reserved",
8062 * and "rx_mini_consumer" members get mapped to the
8063 * other three rx return ring producer indexes.
8064 */
8065 switch (i) {
8066 case 1:
8067 prodptr = &sblk->idx[0].rx_producer;
8068 break;
8069 case 2:
8070 prodptr = &sblk->rx_jumbo_consumer;
8071 break;
8072 case 3:
8073 prodptr = &sblk->reserved;
8074 break;
8075 case 4:
8076 prodptr = &sblk->rx_mini_consumer;
f891ea16
MC
8077 break;
8078 }
49a359e3
MC
8079 tnapi->rx_rcb_prod_idx = prodptr;
8080 } else {
8d9d7cfc 8081 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8d9d7cfc 8082 }
f77a6a8e 8083 }
1da177e4 8084
49a359e3
MC
8085 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8086 goto err_out;
8087
1da177e4
LT
8088 return 0;
8089
8090err_out:
8091 tg3_free_consistent(tp);
8092 return -ENOMEM;
8093}
8094
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}

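/* tg3_abort_hw() below quiesces the chip: it stops the rx and tx DMA
 * engines and state machines in dependency order via tg3_stop_block()
 * and finally clears all host status blocks.  @silent suppresses the
 * per-block timeout messages.
 */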
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}

/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}

/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}

/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
					   PCI_EXP_DEVSTA_CED |
					   PCI_EXP_DEVSTA_NFED |
					   PCI_EXP_DEVSTA_FED |
					   PCI_EXP_DEVSTA_URD);
	}

ee6a99b5 8427 tg3_restore_pci_state(tp);
1da177e4 8428
63c3a66f
JP
8429 tg3_flag_clear(tp, CHIP_RESETTING);
8430 tg3_flag_clear(tp, ERROR_PROCESSED);
d18edcb2 8431
ee6a99b5 8432 val = 0;
63c3a66f 8433 if (tg3_flag(tp, 5780_CLASS))
4cf78e4f 8434 val = tr32(MEMARB_MODE);
ee6a99b5 8435 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
1da177e4
LT
8436
8437 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
8438 tg3_stop_fw(tp);
8439 tw32(0x5000, 0x400);
8440 }
8441
8442 tw32(GRC_MODE, tp->grc_mode);
8443
8444 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
ab0049b4 8445 val = tr32(0xc4);
1da177e4
LT
8446
8447 tw32(0xc4, val | (1 << 15));
8448 }
8449
8450 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8451 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8452 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8453 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8454 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8455 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8456 }
8457
f07e9af3 8458 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9e975cc2 8459 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
d2394e6b 8460 val = tp->mac_mode;
f07e9af3 8461 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9e975cc2 8462 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
d2394e6b 8463 val = tp->mac_mode;
1da177e4 8464 } else
d2394e6b
MC
8465 val = 0;
8466
8467 tw32_f(MAC_MODE, val);
1da177e4
LT
8468 udelay(40);
8469
77b483f1
MC
8470 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8471
7a6f4369
MC
8472 err = tg3_poll_fw(tp);
8473 if (err)
8474 return err;
1da177e4 8475
0a9140cf
MC
8476 tg3_mdio_start(tp);
8477
63c3a66f 8478 if (tg3_flag(tp, PCI_EXPRESS) &&
f6eb9b1f
MC
8479 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8480 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
63c3a66f 8481 !tg3_flag(tp, 57765_PLUS)) {
ab0049b4 8482 val = tr32(0x7c00);
1da177e4
LT
8483
8484 tw32(0x7c00, val | (1 << 25));
8485 }
8486
d78b59f5
MC
8487 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8488 val = tr32(TG3_CPMU_CLCK_ORIDE);
8489 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8490 }
8491
1da177e4 8492 /* Reprobe ASF enable state. */
63c3a66f
JP
8493 tg3_flag_clear(tp, ENABLE_ASF);
8494 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
1da177e4
LT
8495 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8496 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8497 u32 nic_cfg;
8498
8499 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8500 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
63c3a66f 8501 tg3_flag_set(tp, ENABLE_ASF);
4ba526ce 8502 tp->last_event_jiffies = jiffies;
63c3a66f
JP
8503 if (tg3_flag(tp, 5750_PLUS))
8504 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
1da177e4
LT
8505 }
8506 }
8507
8508 return 0;
8509}
8510
static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);

/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	if (err)
		return err;

	return 0;
}

static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0, skip_mac_1 = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}

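/* Write one TG3_BDINFO block into NIC SRAM: the 64-bit host ring
 * address, the maxlen/flags word and, on pre-5705 chips, the NIC-side
 * ring address.
 */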
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}

static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

		for (; i < tp->txq_cnt; i++) {
			u32 reg;

			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}

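/* The rx mirror of tg3_coal_tx_init() above: program the legacy HOSTCC
 * rx coalescing registers and/or the per-vector register sets when RSS
 * is enabled, zeroing the thresholds of any unused vectors.
 */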
static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
		limit--;
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	for (; i < limit; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}

static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!tp->link_up)
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}

/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

8732
8733 /* Zero mailbox registers. */
63c3a66f 8734 if (tg3_flag(tp, SUPPORT_MSIX)) {
6fd45cb8 8735 for (i = 1; i < tp->irq_max; i++) {
f77a6a8e
MC
8736 tp->napi[i].tx_prod = 0;
8737 tp->napi[i].tx_cons = 0;
63c3a66f 8738 if (tg3_flag(tp, ENABLE_TSS))
c2353a32 8739 tw32_mailbox(tp->napi[i].prodmbox, 0);
f77a6a8e
MC
8740 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8741 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7f230735 8742 tp->napi[i].chk_msi_cnt = 0;
0e6cf6a9
MC
8743 tp->napi[i].last_rx_cons = 0;
8744 tp->napi[i].last_tx_cons = 0;
f77a6a8e 8745 }
63c3a66f 8746 if (!tg3_flag(tp, ENABLE_TSS))
c2353a32 8747 tw32_mailbox(tp->napi[0].prodmbox, 0);
f77a6a8e
MC
8748 } else {
8749 tp->napi[0].tx_prod = 0;
8750 tp->napi[0].tx_cons = 0;
8751 tw32_mailbox(tp->napi[0].prodmbox, 0);
8752 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8753 }
2d31ecaf
MC
8754
8755 /* Make sure the NIC-based send BD rings are disabled. */
63c3a66f 8756 if (!tg3_flag(tp, 5705_PLUS)) {
2d31ecaf
MC
8757 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8758 for (i = 0; i < 16; i++)
8759 tw32_tx_mbox(mbox + i * 8, 0);
8760 }
8761
8762 txrcb = NIC_SRAM_SEND_RCB;
8763 rxrcb = NIC_SRAM_RCV_RET_RCB;
8764
8765 /* Clear status block in ram. */
8766 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8767
8768 /* Set status block DMA address */
8769 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8770 ((u64) tnapi->status_mapping >> 32));
8771 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8772 ((u64) tnapi->status_mapping & 0xffffffff));
8773
f77a6a8e
MC
8774 if (tnapi->tx_ring) {
8775 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8776 (TG3_TX_RING_SIZE <<
8777 BDINFO_FLAGS_MAXLEN_SHIFT),
8778 NIC_SRAM_TX_BUFFER_DESC);
8779 txrcb += TG3_BDINFO_SIZE;
8780 }
8781
8782 if (tnapi->rx_rcb) {
8783 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7cb32cf2
MC
8784 (tp->rx_ret_ring_mask + 1) <<
8785 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
f77a6a8e
MC
8786 rxrcb += TG3_BDINFO_SIZE;
8787 }
8788
8789 stblk = HOSTCC_STATBLCK_RING1;
2d31ecaf 8790
f77a6a8e
MC
8791 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8792 u64 mapping = (u64)tnapi->status_mapping;
8793 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8794 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8795
8796 /* Clear status block in ram. */
8797 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8798
19cfaecc
MC
8799 if (tnapi->tx_ring) {
8800 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8801 (TG3_TX_RING_SIZE <<
8802 BDINFO_FLAGS_MAXLEN_SHIFT),
8803 NIC_SRAM_TX_BUFFER_DESC);
8804 txrcb += TG3_BDINFO_SIZE;
8805 }
f77a6a8e
MC
8806
8807 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7cb32cf2 8808 ((tp->rx_ret_ring_mask + 1) <<
f77a6a8e
MC
8809 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8810
8811 stblk += 8;
f77a6a8e
MC
8812 rxrcb += TG3_BDINFO_SIZE;
8813 }
2d31ecaf
MC
8814}
8815
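/* Choose rx BD replenish thresholds: the NIC-side threshold is capped
 * by the chip's internal BD cache size, while the host-side one scales
 * with the configured ring length (pending/8, at least 1).
 */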
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}

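/* calc_crc() below is a plain bit-serial CRC-32 (reflected polynomial
 * 0xedb88320, the Ethernet CRC); __tg3_set_rx_mode() hashes each
 * multicast address with it to pick one of 128 hash-table bits.
 */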
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}

static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}

static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}

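/* RSS indirection table helpers: build a default spread over @qcnt rx
 * queues, revalidate the table when the queue count changes, and pack
 * the 4-bit entries eight-per-register into MAC_RSS_INDIR_TBL_0.
 */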
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}

static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}

90415477 8972static void tg3_rss_write_indir_tbl(struct tg3 *tp)
bcebcc46
MC
8973{
8974 int i = 0;
8975 u32 reg = MAC_RSS_INDIR_TBL_0;
8976
8977 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8978 u32 val = tp->rss_ind_tbl[i];
8979 i++;
8980 for (; i % 8; i++) {
8981 val <<= 4;
8982 val |= tp->rss_ind_tbl[i];
8983 }
8984 tw32(reg, val);
8985 reg += 4;
8986 }
8987}
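/* Worked example of the packing loop above: eight 4-bit entries go
 * into each 32-bit register, first entry in the most significant
 * nibble, so table entries {0, 1, 2, 3, 0, 1, 2, 3} are written as
 * 0x01230123.
 */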
8988
1da177e4 8989/* tp->lock is held. */
8e7a22e3 8990static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
8991{
8992 u32 val, rdmac_mode;
8993 int i, err, limit;
8fea32b9 8994 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
1da177e4
LT
8995
8996 tg3_disable_ints(tp);
8997
8998 tg3_stop_fw(tp);
8999
9000 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9001
63c3a66f 9002 if (tg3_flag(tp, INIT_COMPLETE))
e6de8ad1 9003 tg3_abort_hw(tp, 1);
1da177e4 9004
699c0193
MC
9005 /* Enable MAC control of LPI */
9006 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
c65a17f4
MC
9007 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9008 TG3_CPMU_EEE_LNKIDL_UART_IDL;
9009 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
9010 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9011
9012 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
699c0193
MC
9013
9014 tw32_f(TG3_CPMU_EEE_CTRL,
9015 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9016
a386b901
MC
9017 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9018 TG3_CPMU_EEEMD_LPI_IN_TX |
9019 TG3_CPMU_EEEMD_LPI_IN_RX |
9020 TG3_CPMU_EEEMD_EEE_ENABLE;
9021
9022 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
9023 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9024
63c3a66f 9025 if (tg3_flag(tp, ENABLE_APE))
a386b901
MC
9026 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9027
9028 tw32_f(TG3_CPMU_EEE_MODE, val);
9029
9030 tw32_f(TG3_CPMU_EEE_DBTMR1,
9031 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9032 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9033
9034 tw32_f(TG3_CPMU_EEE_DBTMR2,
d7f2ab20 9035 TG3_CPMU_DBTMR2_APE_TX_2047US |
a386b901 9036 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
699c0193
MC
9037 }
9038
603f1173 9039 if (reset_phy)
d4d2c558
MC
9040 tg3_phy_reset(tp);
9041
1da177e4
LT
9042 err = tg3_chip_reset(tp);
9043 if (err)
9044 return err;
9045
9046 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9047
bcb37f6c 9048 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
d30cdd28
MC
9049 val = tr32(TG3_CPMU_CTRL);
9050 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9051 tw32(TG3_CPMU_CTRL, val);
9acb961e
MC
9052
9053 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9054 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9055 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9056 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9057
9058 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9059 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9060 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9061 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9062
9063 val = tr32(TG3_CPMU_HST_ACC);
9064 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9065 val |= CPMU_HST_ACC_MACCLK_6_25;
9066 tw32(TG3_CPMU_HST_ACC, val);
d30cdd28
MC
9067 }
9068
33466d93
MC
9069 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
9070 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9071 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9072 PCIE_PWR_MGMT_L1_THRESH_4MS;
9073 tw32(PCIE_PWR_MGMT_THRESH, val);
521e6b90
MC
9074
9075 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9076 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9077
9078 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
33466d93 9079
f40386c8
MC
9080 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9081 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
255ca311
MC
9082 }
9083
63c3a66f 9084 if (tg3_flag(tp, L1PLLPD_EN)) {
614b0590
MC
9085 u32 grc_mode = tr32(GRC_MODE);
9086
9087 /* Access the lower 1K of PL PCIE block registers. */
9088 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9089 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9090
9091 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9092 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9093 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9094
9095 tw32(GRC_MODE, grc_mode);
9096 }
9097
55086ad9 9098 if (tg3_flag(tp, 57765_CLASS)) {
5093eedc
MC
9099 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
9100 u32 grc_mode = tr32(GRC_MODE);
cea46462 9101
5093eedc
MC
9102 /* Access the lower 1K of PL PCIE block registers. */
9103 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9104 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
cea46462 9105
5093eedc
MC
9106 val = tr32(TG3_PCIE_TLDLPL_PORT +
9107 TG3_PCIE_PL_LO_PHYCTL5);
9108 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9109 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
cea46462 9110
5093eedc
MC
9111 tw32(GRC_MODE, grc_mode);
9112 }
a977dbe8 9113
1ff30a59
MC
9114 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
9115 u32 grc_mode = tr32(GRC_MODE);
9116
9117 /* Access the lower 1K of DL PCIE block registers. */
9118 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9119 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9120
9121 val = tr32(TG3_PCIE_TLDLPL_PORT +
9122 TG3_PCIE_DL_LO_FTSMAX);
9123 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9124 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9125 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9126
9127 tw32(GRC_MODE, grc_mode);
9128 }
9129
a977dbe8
MC
9130 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9131 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9132 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9133 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
cea46462
MC
9134 }
9135
1da177e4
LT
9136 /* This works around an issue with Athlon chipsets on
9137 * B3 tigon3 silicon. This bit has no effect on any
9138 * other revision. But do not set this on PCI Express
795d01c5 9139 * chips and don't even touch the clocks if the CPMU is present.
1da177e4 9140 */
63c3a66f
JP
9141 if (!tg3_flag(tp, CPMU_PRESENT)) {
9142 if (!tg3_flag(tp, PCI_EXPRESS))
795d01c5
MC
9143 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9144 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9145 }
1da177e4
LT
9146
9147 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
63c3a66f 9148 tg3_flag(tp, PCIX_MODE)) {
1da177e4
LT
9149 val = tr32(TG3PCI_PCISTATE);
9150 val |= PCISTATE_RETRY_SAME_DMA;
9151 tw32(TG3PCI_PCISTATE, val);
9152 }
9153
63c3a66f 9154 if (tg3_flag(tp, ENABLE_APE)) {
0d3031d9
MC
9155 /* Allow reads and writes to the
9156 * APE register and memory space.
9157 */
9158 val = tr32(TG3PCI_PCISTATE);
9159 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
f92d9dc1
MC
9160 PCISTATE_ALLOW_APE_SHMEM_WR |
9161 PCISTATE_ALLOW_APE_PSPACE_WR;
0d3031d9
MC
9162 tw32(TG3PCI_PCISTATE, val);
9163 }
9164
1da177e4
LT
9165 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
9166 /* Enable some hw fixes. */
9167 val = tr32(TG3PCI_MSI_DATA);
9168 val |= (1 << 26) | (1 << 28) | (1 << 29);
9169 tw32(TG3PCI_MSI_DATA, val);
9170 }
9171
9172 /* Descriptor ring init may access the
9173 * NIC SRAM area to set up the TX descriptors, so we
9174 * can only do this after the hardware has been
9175 * successfully reset.
9176 */
32d8c572
MC
9177 err = tg3_init_rings(tp);
9178 if (err)
9179 return err;
1da177e4 9180
63c3a66f 9181 if (tg3_flag(tp, 57765_PLUS)) {
cbf9ca6c
MC
9182 val = tr32(TG3PCI_DMA_RW_CTRL) &
9183 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
1a319025
MC
9184 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
9185 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
55086ad9 9186 if (!tg3_flag(tp, 57765_CLASS) &&
c65a17f4
MC
9187 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9188 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5762)
0aebff48 9189 val |= DMA_RWCTRL_TAGGED_STAT_WA;
cbf9ca6c
MC
9190 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9191 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
9192 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
d30cdd28
MC
9193 /* This value is determined during the probe time DMA
9194 * engine test, tg3_test_dma.
9195 */
9196 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9197 }
1da177e4
LT
9198
9199 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9200 GRC_MODE_4X_NIC_SEND_RINGS |
9201 GRC_MODE_NO_TX_PHDR_CSUM |
9202 GRC_MODE_NO_RX_PHDR_CSUM);
9203 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
d2d746f8
MC
9204
9205 /* Pseudo-header checksum is done by hardware logic and not
9206 * the offload processors, so make the chip do the pseudo-
9207 * header checksums on receive. For transmit it is more
9208 * convenient to do the pseudo-header checksum in software
9209 * as Linux does that on transmit for us in all cases.
9210 */
9211 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
1da177e4 9212
fb4ce8ad
MC
9213 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9214 if (tp->rxptpctl)
9215 tw32(TG3_RX_PTP_CTL,
9216 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9217
9218 if (tg3_flag(tp, PTP_CAPABLE))
9219 val |= GRC_MODE_TIME_SYNC_ENABLE;
9220
9221 tw32(GRC_MODE, tp->grc_mode | val);
1da177e4
LT
9222
9223 /* Set up the timer prescaler register. The clock is always 66 MHz. */
9224 val = tr32(GRC_MISC_CFG);
9225 val &= ~0xff;
9226 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9227 tw32(GRC_MISC_CFG, val);
9228
9229 /* Initialize MBUF/DESC pool. */
63c3a66f 9230 if (tg3_flag(tp, 5750_PLUS)) {
1da177e4
LT
9231 /* Do nothing. */
9232 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
9233 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9234 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9235 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9236 else
9237 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9238 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9239 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
63c3a66f 9240 } else if (tg3_flag(tp, TSO_CAPABLE)) {
1da177e4
LT
9241 int fw_len;
9242
077f849d 9243 fw_len = tp->fw_len;
1da177e4
LT
9244 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9245 tw32(BUFMGR_MB_POOL_ADDR,
9246 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9247 tw32(BUFMGR_MB_POOL_SIZE,
9248 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9249 }
1da177e4 9250
0f893dc6 9251 if (tp->dev->mtu <= ETH_DATA_LEN) {
1da177e4
LT
9252 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9253 tp->bufmgr_config.mbuf_read_dma_low_water);
9254 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9255 tp->bufmgr_config.mbuf_mac_rx_low_water);
9256 tw32(BUFMGR_MB_HIGH_WATER,
9257 tp->bufmgr_config.mbuf_high_water);
9258 } else {
9259 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9260 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9261 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9262 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9263 tw32(BUFMGR_MB_HIGH_WATER,
9264 tp->bufmgr_config.mbuf_high_water_jumbo);
9265 }
9266 tw32(BUFMGR_DMA_LOW_WATER,
9267 tp->bufmgr_config.dma_low_water);
9268 tw32(BUFMGR_DMA_HIGH_WATER,
9269 tp->bufmgr_config.dma_high_water);
9270
d309a46e
MC
9271 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9272 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
9273 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
4d958473
MC
9274 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9275 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9276 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
9277 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
d309a46e 9278 tw32(BUFMGR_MODE, val);
1da177e4
LT
9279 for (i = 0; i < 2000; i++) {
9280 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9281 break;
9282 udelay(10);
9283 }
9284 if (i >= 2000) {
05dbe005 9285 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
1da177e4
LT
9286 return -ENODEV;
9287 }
9288
eb07a940
MC
9289 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
9290 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
b5d3772c 9291
eb07a940 9292 tg3_setup_rxbd_thresholds(tp);
1da177e4
LT
9293
9294 /* Initialize TG3_BDINFO's at:
9295 * RCVDBDI_STD_BD: standard eth size rx ring
9296 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9297 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9298 *
9299 * like so:
9300 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9301 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9302 * ring attribute flags
9303 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9304 *
9305 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9306 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9307 *
9308 * The size of each ring is fixed in the firmware, but the location is
9309 * configurable.
9310 */
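	/* Layout of one TG3_BDINFO block as implied by the offsets used
	 * below (illustrative; the driver addresses these fields through
	 * register offsets rather than a C struct):
	 *
	 *	+0x0: host ring DMA address, high 32 bits
	 *	+0x4: host ring DMA address, low 32 bits
	 *	+0x8: (rx max buffer size << 16) | ring attribute flags
	 *	+0xc: descriptor location in NIC SRAM
	 */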
9311 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
21f581a5 9312 ((u64) tpr->rx_std_mapping >> 32));
1da177e4 9313 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
21f581a5 9314 ((u64) tpr->rx_std_mapping & 0xffffffff));
63c3a66f 9315 if (!tg3_flag(tp, 5717_PLUS))
87668d35
MC
9316 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9317 NIC_SRAM_RX_BUFFER_DESC);
1da177e4 9318
fdb72b38 9319 /* Disable the mini ring */
63c3a66f 9320 if (!tg3_flag(tp, 5705_PLUS))
1da177e4
LT
9321 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9322 BDINFO_FLAGS_DISABLED);
9323
fdb72b38
MC
9324 /* Program the jumbo buffer descriptor ring control
9325 * blocks on those devices that have them.
9326 */
a0512944 9327 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
63c3a66f 9328 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
1da177e4 9329
63c3a66f 9330 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
1da177e4 9331 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
21f581a5 9332 ((u64) tpr->rx_jmb_mapping >> 32));
1da177e4 9333 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
21f581a5 9334 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
de9f5230
MC
9335 val = TG3_RX_JMB_RING_SIZE(tp) <<
9336 BDINFO_FLAGS_MAXLEN_SHIFT;
1da177e4 9337 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
de9f5230 9338 val | BDINFO_FLAGS_USE_EXT_RECV);
63c3a66f 9339 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
c65a17f4
MC
9340 tg3_flag(tp, 57765_CLASS) ||
9341 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
87668d35
MC
9342 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9343 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
1da177e4
LT
9344 } else {
9345 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9346 BDINFO_FLAGS_DISABLED);
9347 }
9348
63c3a66f 9349 if (tg3_flag(tp, 57765_PLUS)) {
fa6b2aae 9350 val = TG3_RX_STD_RING_SIZE(tp);
7cb32cf2
MC
9351 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9352 val |= (TG3_RX_STD_DMA_SZ << 2);
9353 } else
04380d40 9354 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
fdb72b38 9355 } else
de9f5230 9356 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
fdb72b38
MC
9357
9358 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
1da177e4 9359
411da640 9360 tpr->rx_std_prod_idx = tp->rx_pending;
66711e66 9361 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
1da177e4 9362
63c3a66f
JP
9363 tpr->rx_jmb_prod_idx =
9364 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
66711e66 9365 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
1da177e4 9366
2d31ecaf
MC
9367 tg3_rings_reset(tp);
9368
1da177e4 9369 /* Initialize MAC address and backoff seed. */
986e0aeb 9370 __tg3_set_mac_addr(tp, 0);
1da177e4
LT
9371
9372 /* MTU + ethernet header + FCS + optional VLAN tag */
f7b493e0
MC
9373 tw32(MAC_RX_MTU_SIZE,
9374 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
1da177e4
LT
9375
9376 /* The slot time is changed by tg3_setup_phy if we
9377 * run at gigabit with half duplex.
9378 */
f2096f94
MC
9379 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9380 (6 << TX_LENGTHS_IPG_SHIFT) |
9381 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9382
c65a17f4
MC
9383 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9384 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
f2096f94
MC
9385 val |= tr32(MAC_TX_LENGTHS) &
9386 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9387 TX_LENGTHS_CNT_DWN_VAL_MSK);
9388
9389 tw32(MAC_TX_LENGTHS, val);
1da177e4
LT
9390
9391 /* Receive rules. */
9392 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9393 tw32(RCVLPC_CONFIG, 0x0181);
9394
9395 /* Calculate the RDMAC_MODE setting early; we need it to determine
9396 * the RCVLPC_STATE_ENABLE mask.
9397 */
9398 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9399 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9400 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9401 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9402 RDMAC_MODE_LNGREAD_ENAB);
85e94ced 9403
deabaac8 9404 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
0339e4e3
MC
9405 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9406
57e6983c 9407 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
321d32a0
MC
9408 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9409 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
d30cdd28
MC
9410 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9411 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9412 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9413
c5908939
MC
9414 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9415 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
63c3a66f 9416 if (tg3_flag(tp, TSO_CAPABLE) &&
c13e3713 9417 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1da177e4
LT
9418 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9419 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
63c3a66f 9420 !tg3_flag(tp, IS_5788)) {
1da177e4
LT
9421 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9422 }
9423 }
9424
63c3a66f 9425 if (tg3_flag(tp, PCI_EXPRESS))
85e94ced
MC
9426 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9427
63c3a66f
JP
9428 if (tg3_flag(tp, HW_TSO_1) ||
9429 tg3_flag(tp, HW_TSO_2) ||
9430 tg3_flag(tp, HW_TSO_3))
027455ad
MC
9431 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9432
108a6c16 9433 if (tg3_flag(tp, 57765_PLUS) ||
e849cdc3 9434 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
027455ad
MC
9435 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9436 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
1da177e4 9437
c65a17f4
MC
9438 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9439 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
f2096f94
MC
9440 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9441
41a8a7ee
MC
9442 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9443 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9444 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9445 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
63c3a66f 9446 tg3_flag(tp, 57765_PLUS)) {
c65a17f4
MC
9447 u32 tgtreg;
9448
9449 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9450 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9451 else
9452 tgtreg = TG3_RDMA_RSRVCTRL_REG;
9453
9454 val = tr32(tgtreg);
9455 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9456 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
b4495ed8
MC
9457 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9458 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9459 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9460 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9461 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9462 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
b75cc0e4 9463 }
c65a17f4 9464 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
41a8a7ee
MC
9465 }
9466
d78b59f5 9467 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
c65a17f4
MC
9468 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9469 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9470 u32 tgtreg;
9471
9472 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9473 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9474 else
9475 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9476
9477 val = tr32(tgtreg);
9478 tw32(tgtreg, val |
d309a46e
MC
9479 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9480 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9481 }
9482
1da177e4 9483 /* Receive/send statistics. */
63c3a66f 9484 if (tg3_flag(tp, 5750_PLUS)) {
1661394e
MC
9485 val = tr32(RCVLPC_STATS_ENABLE);
9486 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9487 tw32(RCVLPC_STATS_ENABLE, val);
9488 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
63c3a66f 9489 tg3_flag(tp, TSO_CAPABLE)) {
1da177e4
LT
9490 val = tr32(RCVLPC_STATS_ENABLE);
9491 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9492 tw32(RCVLPC_STATS_ENABLE, val);
9493 } else {
9494 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9495 }
9496 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9497 tw32(SNDDATAI_STATSENAB, 0xffffff);
9498 tw32(SNDDATAI_STATSCTRL,
9499 (SNDDATAI_SCTRL_ENABLE |
9500 SNDDATAI_SCTRL_FASTUPD));
9501
9502 /* Setup host coalescing engine. */
9503 tw32(HOSTCC_MODE, 0);
9504 for (i = 0; i < 2000; i++) {
9505 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9506 break;
9507 udelay(10);
9508 }
9509
d244c892 9510 __tg3_set_coalesce(tp, &tp->coal);
1da177e4 9511
63c3a66f 9512 if (!tg3_flag(tp, 5705_PLUS)) {
1da177e4
LT
9513 /* Status/statistics block address. See tg3_timer,
9514 * the tg3_periodic_fetch_stats call there, and
9515 * tg3_get_stats to see how this works for 5705/5750 chips.
9516 */
1da177e4
LT
9517 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9518 ((u64) tp->stats_mapping >> 32));
9519 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9520 ((u64) tp->stats_mapping & 0xffffffff));
9521 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
2d31ecaf 9522
1da177e4 9523 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
2d31ecaf
MC
9524
9525 /* Clear statistics and status block memory areas */
9526 for (i = NIC_SRAM_STATS_BLK;
9527 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9528 i += sizeof(u32)) {
9529 tg3_write_mem(tp, i, 0);
9530 udelay(40);
9531 }
1da177e4
LT
9532 }
9533
9534 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9535
9536 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9537 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
63c3a66f 9538 if (!tg3_flag(tp, 5705_PLUS))
1da177e4
LT
9539 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9540
f07e9af3
MC
9541 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9542 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
c94e3941
MC
9543 /* reset to prevent losing 1st rx packet intermittently */
9544 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9545 udelay(10);
9546 }
9547
3bda1258 9548 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9e975cc2
MC
9549 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9550 MAC_MODE_FHDE_ENABLE;
9551 if (tg3_flag(tp, ENABLE_APE))
9552 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
63c3a66f 9553 if (!tg3_flag(tp, 5705_PLUS) &&
f07e9af3 9554 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
e8f3f6ca
MC
9555 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9556 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1da177e4
LT
9557 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9558 udelay(40);
9559
314fba34 9560 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
63c3a66f 9561 * If TG3_FLAG_IS_NIC is zero, we should read the
314fba34
MC
9562 * register to preserve the GPIO settings for LOMs. The GPIOs,
9563 * whether used as inputs or outputs, are set by boot code after
9564 * reset.
9565 */
63c3a66f 9566 if (!tg3_flag(tp, IS_NIC)) {
314fba34
MC
9567 u32 gpio_mask;
9568
9d26e213
MC
9569 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9570 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9571 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
3e7d83bc
MC
9572
9573 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9574 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9575 GRC_LCLCTRL_GPIO_OUTPUT3;
9576
af36e6b6
MC
9577 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9578 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9579
aaf84465 9580 tp->grc_local_ctrl &= ~gpio_mask;
314fba34
MC
9581 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9582
9583 /* GPIO1 must be driven high for eeprom write protect */
63c3a66f 9584 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9d26e213
MC
9585 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9586 GRC_LCLCTRL_GPIO_OUTPUT1);
314fba34 9587 }
1da177e4
LT
9588 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9589 udelay(100);
9590
c3b5003b 9591 if (tg3_flag(tp, USING_MSIX)) {
baf8a94a 9592 val = tr32(MSGINT_MODE);
c3b5003b
MC
9593 val |= MSGINT_MODE_ENABLE;
9594 if (tp->irq_cnt > 1)
9595 val |= MSGINT_MODE_MULTIVEC_EN;
5b39de91
MC
9596 if (!tg3_flag(tp, 1SHOT_MSI))
9597 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
baf8a94a
MC
9598 tw32(MSGINT_MODE, val);
9599 }
9600
63c3a66f 9601 if (!tg3_flag(tp, 5705_PLUS)) {
1da177e4
LT
9602 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9603 udelay(40);
9604 }
9605
9606 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9607 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9608 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9609 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9610 WDMAC_MODE_LNGREAD_ENAB);
9611
c5908939
MC
9612 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9613 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
63c3a66f 9614 if (tg3_flag(tp, TSO_CAPABLE) &&
1da177e4
LT
9615 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9616 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9617 /* nothing */
9618 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
63c3a66f 9619 !tg3_flag(tp, IS_5788)) {
1da177e4
LT
9620 val |= WDMAC_MODE_RX_ACCEL;
9621 }
9622 }
9623
d9ab5ad1 9624 /* Enable host coalescing bug fix */
63c3a66f 9625 if (tg3_flag(tp, 5755_PLUS))
f51f3562 9626 val |= WDMAC_MODE_STATUS_TAG_FIX;
d9ab5ad1 9627
788a035e
MC
9628 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9629 val |= WDMAC_MODE_BURST_ALL_DATA;
9630
1da177e4
LT
9631 tw32_f(WDMAC_MODE, val);
9632 udelay(40);
9633
63c3a66f 9634 if (tg3_flag(tp, PCIX_MODE)) {
9974a356
MC
9635 u16 pcix_cmd;
9636
9637 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9638 &pcix_cmd);
1da177e4 9639 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9974a356
MC
9640 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9641 pcix_cmd |= PCI_X_CMD_READ_2K;
1da177e4 9642 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9974a356
MC
9643 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9644 pcix_cmd |= PCI_X_CMD_READ_2K;
1da177e4 9645 }
9974a356
MC
9646 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9647 pcix_cmd);
1da177e4
LT
9648 }
9649
9650 tw32_f(RDMAC_MODE, rdmac_mode);
9651 udelay(40);
9652
091f0ea3
MC
9653 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9654 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9655 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9656 break;
9657 }
9658 if (i < TG3_NUM_RDMA_CHANNELS) {
9659 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9660 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9661 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9662 tg3_flag_set(tp, 5719_RDMA_BUG);
9663 }
9664 }
9665
1da177e4 9666 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
63c3a66f 9667 if (!tg3_flag(tp, 5705_PLUS))
1da177e4 9668 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9936bcf6
MC
9669
9670 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9671 tw32(SNDDATAC_MODE,
9672 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9673 else
9674 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9675
1da177e4
LT
9676 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9677 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7cb32cf2 9678 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
63c3a66f 9679 if (tg3_flag(tp, LRG_PROD_RING_CAP))
7cb32cf2
MC
9680 val |= RCVDBDI_MODE_LRG_RING_SZ;
9681 tw32(RCVDBDI_MODE, val);
1da177e4 9682 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
63c3a66f
JP
9683 if (tg3_flag(tp, HW_TSO_1) ||
9684 tg3_flag(tp, HW_TSO_2) ||
9685 tg3_flag(tp, HW_TSO_3))
1da177e4 9686 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
baf8a94a 9687 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
63c3a66f 9688 if (tg3_flag(tp, ENABLE_TSS))
baf8a94a
MC
9689 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9690 tw32(SNDBDI_MODE, val);
1da177e4
LT
9691 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9692
9693 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9694 err = tg3_load_5701_a0_firmware_fix(tp);
9695 if (err)
9696 return err;
9697 }
9698
63c3a66f 9699 if (tg3_flag(tp, TSO_CAPABLE)) {
1da177e4
LT
9700 err = tg3_load_tso_firmware(tp);
9701 if (err)
9702 return err;
9703 }
1da177e4
LT
9704
9705 tp->tx_mode = TX_MODE_ENABLE;
f2096f94 9706
63c3a66f 9707 if (tg3_flag(tp, 5755_PLUS) ||
b1d05210
MC
9708 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9709 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
f2096f94 9710
c65a17f4
MC
9711 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9712 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
f2096f94
MC
9713 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9714 tp->tx_mode &= ~val;
9715 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9716 }
9717
1da177e4
LT
9718 tw32_f(MAC_TX_MODE, tp->tx_mode);
9719 udelay(100);
9720
63c3a66f 9721 if (tg3_flag(tp, ENABLE_RSS)) {
bcebcc46 9722 tg3_rss_write_indir_tbl(tp);
baf8a94a
MC
9723
9724 /* Setup the "secret" hash key. */
9725 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9726 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9727 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9728 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9729 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9730 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9731 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9732 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9733 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9734 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9735 }
9736
1da177e4 9737 tp->rx_mode = RX_MODE_ENABLE;
63c3a66f 9738 if (tg3_flag(tp, 5755_PLUS))
af36e6b6
MC
9739 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9740
63c3a66f 9741 if (tg3_flag(tp, ENABLE_RSS))
baf8a94a
MC
9742 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9743 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9744 RX_MODE_RSS_IPV6_HASH_EN |
9745 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9746 RX_MODE_RSS_IPV4_HASH_EN |
9747 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9748
1da177e4
LT
9749 tw32_f(MAC_RX_MODE, tp->rx_mode);
9750 udelay(10);
9751
1da177e4
LT
9752 tw32(MAC_LED_CTRL, tp->led_ctrl);
9753
9754 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
f07e9af3 9755 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
1da177e4
LT
9756 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9757 udelay(10);
9758 }
9759 tw32_f(MAC_RX_MODE, tp->rx_mode);
9760 udelay(10);
9761
f07e9af3 9762 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
1da177e4 9763 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
f07e9af3 9764 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
1da177e4
LT
9765 /* Set drive transmission level to 1.2V */
9766 /* only if the signal pre-emphasis bit is not set */
9767 val = tr32(MAC_SERDES_CFG);
9768 val &= 0xfffff000;
9769 val |= 0x880;
9770 tw32(MAC_SERDES_CFG, val);
9771 }
9772 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9773 tw32(MAC_SERDES_CFG, 0x616000);
9774 }
9775
9776 /* Prevent chip from dropping frames when flow control
9777 * is enabled.
9778 */
55086ad9 9779 if (tg3_flag(tp, 57765_CLASS))
666bc831
MC
9780 val = 1;
9781 else
9782 val = 2;
9783 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
1da177e4
LT
9784
9785 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
f07e9af3 9786 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
1da177e4 9787 /* Use hardware link auto-negotiation */
63c3a66f 9788 tg3_flag_set(tp, HW_AUTONEG);
1da177e4
LT
9789 }
9790
f07e9af3 9791 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6ff6f81d 9792 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
d4d2c558
MC
9793 u32 tmp;
9794
9795 tmp = tr32(SERDES_RX_CTRL);
9796 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9797 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9798 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9799 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9800 }
9801
63c3a66f 9802 if (!tg3_flag(tp, USE_PHYLIB)) {
c6700ce2 9803 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
80096068 9804 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1da177e4 9805
dd477003
MC
9806 err = tg3_setup_phy(tp, 0);
9807 if (err)
9808 return err;
1da177e4 9809
f07e9af3
MC
9810 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9811 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
dd477003
MC
9812 u32 tmp;
9813
9814 /* Clear CRC stats. */
9815 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9816 tg3_writephy(tp, MII_TG3_TEST1,
9817 tmp | MII_TG3_TEST1_CRC_EN);
f08aa1a8 9818 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
dd477003 9819 }
1da177e4
LT
9820 }
9821 }
9822
9823 __tg3_set_rx_mode(tp->dev);
9824
9825 /* Initialize receive rules. */
9826 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9827 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9828 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9829 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9830
63c3a66f 9831 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
1da177e4
LT
9832 limit = 8;
9833 else
9834 limit = 16;
63c3a66f 9835 if (tg3_flag(tp, ENABLE_ASF))
1da177e4
LT
9836 limit -= 4;
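	/* The cases below intentionally fall through: entering the switch
	 * at `limit' zeroes every implemented receive rule beyond the two
	 * programmed above, while rules 2 and 3 are deliberately skipped
	 * (note the commented-out writes).
	 */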
9837 switch (limit) {
9838 case 16:
9839 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9840 case 15:
9841 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9842 case 14:
9843 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9844 case 13:
9845 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9846 case 12:
9847 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9848 case 11:
9849 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9850 case 10:
9851 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9852 case 9:
9853 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9854 case 8:
9855 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9856 case 7:
9857 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9858 case 6:
9859 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9860 case 5:
9861 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9862 case 4:
9863 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9864 case 3:
9865 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9866 case 2:
9867 case 1:
9868
9869 default:
9870 break;
855e1111 9871 }
1da177e4 9872
63c3a66f 9873 if (tg3_flag(tp, ENABLE_APE))
9ce768ea
MC
9874 /* Write our heartbeat update interval to APE. */
9875 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9876 APE_HOST_HEARTBEAT_INT_DISABLE);
0d3031d9 9877
1da177e4
LT
9878 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9879
1da177e4
LT
9880 return 0;
9881}
9882
9883/* Called at device open time to get the chip ready for
9884 * packet processing. Invoked with tp->lock held.
9885 */
8e7a22e3 9886static int tg3_init_hw(struct tg3 *tp, int reset_phy)
1da177e4 9887{
1da177e4
LT
9888 tg3_switch_clocks(tp);
9889
9890 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9891
2f751b67 9892 return tg3_reset_hw(tp, reset_phy);
1da177e4
LT
9893}
9894
aed93e0b
MC
9895static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9896{
9897 int i;
9898
9899 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9900 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9901
9902 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9903 off += len;
9904
9905 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9906 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9907 memset(ocir, 0, TG3_OCIR_LEN);
9908 }
9909}
9910
9911/* sysfs attributes for hwmon */
9912static ssize_t tg3_show_temp(struct device *dev,
9913 struct device_attribute *devattr, char *buf)
9914{
9915 struct pci_dev *pdev = to_pci_dev(dev);
9916 struct net_device *netdev = pci_get_drvdata(pdev);
9917 struct tg3 *tp = netdev_priv(netdev);
9918 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9919 u32 temperature;
9920
9921 spin_lock_bh(&tp->lock);
9922 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9923 sizeof(temperature));
9924 spin_unlock_bh(&tp->lock);
9925 return sprintf(buf, "%u\n", temperature);
9926}
9927
9928
9929static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
9930 TG3_TEMP_SENSOR_OFFSET);
9931static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
9932 TG3_TEMP_CAUTION_OFFSET);
9933static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
9934 TG3_TEMP_MAX_OFFSET);
9935
9936static struct attribute *tg3_attributes[] = {
9937 &sensor_dev_attr_temp1_input.dev_attr.attr,
9938 &sensor_dev_attr_temp1_crit.dev_attr.attr,
9939 &sensor_dev_attr_temp1_max.dev_attr.attr,
9940 NULL
9941};
9942
9943static const struct attribute_group tg3_group = {
9944 .attrs = tg3_attributes,
9945};
9946
aed93e0b
MC
9947static void tg3_hwmon_close(struct tg3 *tp)
9948{
aed93e0b
MC
9949 if (tp->hwmon_dev) {
9950 hwmon_device_unregister(tp->hwmon_dev);
9951 tp->hwmon_dev = NULL;
9952 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9953 }
aed93e0b
MC
9954}
9955
9956static void tg3_hwmon_open(struct tg3 *tp)
9957{
aed93e0b
MC
9958 int i, err;
9959 u32 size = 0;
9960 struct pci_dev *pdev = tp->pdev;
9961 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9962
9963 tg3_sd_scan_scratchpad(tp, ocirs);
9964
9965 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9966 if (!ocirs[i].src_data_length)
9967 continue;
9968
9969 size += ocirs[i].src_hdr_length;
9970 size += ocirs[i].src_data_length;
9971 }
9972
9973 if (!size)
9974 return;
9975
9976 /* Register hwmon sysfs hooks */
9977 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9978 if (err) {
9979 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9980 return;
9981 }
9982
9983 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9984 if (IS_ERR(tp->hwmon_dev)) {
9985 tp->hwmon_dev = NULL;
9986 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9987 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9988 }
aed93e0b
MC
9989}
9990
9991
1da177e4
LT
9992#define TG3_STAT_ADD32(PSTAT, REG) \
9993do { u32 __val = tr32(REG); \
9994 (PSTAT)->low += __val; \
9995 if ((PSTAT)->low < __val) \
9996 (PSTAT)->high += 1; \
9997} while (0)
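/* The carry detection above relies on unsigned wraparound: after the
 * 32-bit add, (PSTAT)->low < __val exactly when the add overflowed.
 * E.g. with low = 0xfffffffe and __val = 3, low becomes 1 (< 3), so
 * high is bumped and the 64-bit counter stays correct.
 */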
9998
9999static void tg3_periodic_fetch_stats(struct tg3 *tp)
10000{
10001 struct tg3_hw_stats *sp = tp->hw_stats;
10002
f4a46d1f 10003 if (!tp->link_up)
1da177e4
LT
10004 return;
10005
10006 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10007 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10008 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10009 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10010 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10011 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10012 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10013 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10014 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10015 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10016 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10017 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10018 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
091f0ea3
MC
10019 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10020 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10021 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10022 u32 val;
10023
10024 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10025 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10026 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10027 tg3_flag_clear(tp, 5719_RDMA_BUG);
10028 }
1da177e4
LT
10029
10030 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10031 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10032 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10033 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10034 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10035 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10036 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10037 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10038 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10039 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10040 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10041 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10042 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10043 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
463d305b
MC
10044
10045 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
310050fa
MC
10046 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
10047 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
10048 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
4d958473
MC
10049 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10050 } else {
10051 u32 val = tr32(HOSTCC_FLOW_ATTN);
10052 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10053 if (val) {
10054 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10055 sp->rx_discards.low += val;
10056 if (sp->rx_discards.low < val)
10057 sp->rx_discards.high += 1;
10058 }
10059 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10060 }
463d305b 10061 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
1da177e4
LT
10062}
10063
0e6cf6a9
MC
10064static void tg3_chk_missed_msi(struct tg3 *tp)
10065{
10066 u32 i;
10067
10068 for (i = 0; i < tp->irq_cnt; i++) {
10069 struct tg3_napi *tnapi = &tp->napi[i];
10070
10071 if (tg3_has_work(tnapi)) {
10072 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10073 tnapi->last_tx_cons == tnapi->tx_cons) {
10074 if (tnapi->chk_msi_cnt < 1) {
10075 tnapi->chk_msi_cnt++;
10076 return;
10077 }
7f230735 10078 tg3_msi(0, tnapi);
0e6cf6a9
MC
10079 }
10080 }
10081 tnapi->chk_msi_cnt = 0;
10082 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10083 tnapi->last_tx_cons = tnapi->tx_cons;
10084 }
10085}
10086
1da177e4
LT
10087static void tg3_timer(unsigned long __opaque)
10088{
10089 struct tg3 *tp = (struct tg3 *) __opaque;
1da177e4 10090
5b190624 10091 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
f475f163
MC
10092 goto restart_timer;
10093
f47c11ee 10094 spin_lock(&tp->lock);
1da177e4 10095
0e6cf6a9 10096 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
55086ad9 10097 tg3_flag(tp, 57765_CLASS))
0e6cf6a9
MC
10098 tg3_chk_missed_msi(tp);
10099
63c3a66f 10100 if (!tg3_flag(tp, TAGGED_STATUS)) {
fac9b83e
DM
10101 /* All of this garbage is because, when using non-tagged
10102 * IRQ status, the mailbox/status_block protocol the chip
10103 * uses with the CPU is race prone.
10104 */
898a56f8 10105 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
fac9b83e
DM
10106 tw32(GRC_LOCAL_CTRL,
10107 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10108 } else {
10109 tw32(HOSTCC_MODE, tp->coalesce_mode |
fd2ce37f 10110 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
fac9b83e 10111 }
1da177e4 10112
fac9b83e 10113 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
f47c11ee 10114 spin_unlock(&tp->lock);
db219973 10115 tg3_reset_task_schedule(tp);
5b190624 10116 goto restart_timer;
fac9b83e 10117 }
1da177e4
LT
10118 }
10119
1da177e4
LT
10120 /* This part only runs once per second. */
10121 if (!--tp->timer_counter) {
63c3a66f 10122 if (tg3_flag(tp, 5705_PLUS))
fac9b83e
DM
10123 tg3_periodic_fetch_stats(tp);
10124
b0c5943f
MC
10125 if (tp->setlpicnt && !--tp->setlpicnt)
10126 tg3_phy_eee_enable(tp);
52b02d04 10127
63c3a66f 10128 if (tg3_flag(tp, USE_LINKCHG_REG)) {
1da177e4
LT
10129 u32 mac_stat;
10130 int phy_event;
10131
10132 mac_stat = tr32(MAC_STATUS);
10133
10134 phy_event = 0;
f07e9af3 10135 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
1da177e4
LT
10136 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10137 phy_event = 1;
10138 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10139 phy_event = 1;
10140
10141 if (phy_event)
10142 tg3_setup_phy(tp, 0);
63c3a66f 10143 } else if (tg3_flag(tp, POLL_SERDES)) {
1da177e4
LT
10144 u32 mac_stat = tr32(MAC_STATUS);
10145 int need_setup = 0;
10146
f4a46d1f 10147 if (tp->link_up &&
1da177e4
LT
10148 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10149 need_setup = 1;
10150 }
f4a46d1f 10151 if (!tp->link_up &&
1da177e4
LT
10152 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10153 MAC_STATUS_SIGNAL_DET))) {
10154 need_setup = 1;
10155 }
10156 if (need_setup) {
3d3ebe74
MC
10157 if (!tp->serdes_counter) {
10158 tw32_f(MAC_MODE,
10159 (tp->mac_mode &
10160 ~MAC_MODE_PORT_MODE_MASK));
10161 udelay(40);
10162 tw32_f(MAC_MODE, tp->mac_mode);
10163 udelay(40);
10164 }
1da177e4
LT
10165 tg3_setup_phy(tp, 0);
10166 }
f07e9af3 10167 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
63c3a66f 10168 tg3_flag(tp, 5780_CLASS)) {
747e8f8b 10169 tg3_serdes_parallel_detect(tp);
57d8b880 10170 }
1da177e4
LT
10171
10172 tp->timer_counter = tp->timer_multiplier;
10173 }
10174
130b8e4d
MC
10175 /* Heartbeat is only sent once every 2 seconds.
10176 *
10177 * The heartbeat is to tell the ASF firmware that the host
10178 * driver is still alive. In the event that the OS crashes,
10179 * ASF needs to reset the hardware to free up the FIFO space
10180 * that may be filled with rx packets destined for the host.
10181 * If the FIFO is full, ASF will no longer function properly.
10182 *
10183 * Unintended resets have been reported on real time kernels
10184 * where the timer doesn't run on time. Netpoll has the
10185 * same problem.
10186 *
10187 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10188 * to check the ring condition when the heartbeat is expiring
10189 * before doing the reset. This will prevent most unintended
10190 * resets.
10191 */
1da177e4 10192 if (!--tp->asf_counter) {
63c3a66f 10193 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7c5026aa
MC
10194 tg3_wait_for_event_ack(tp);
10195
bbadf503 10196 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
130b8e4d 10197 FWCMD_NICDRV_ALIVE3);
bbadf503 10198 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
c6cdf436
MC
10199 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10200 TG3_FW_UPDATE_TIMEOUT_SEC);
4ba526ce
MC
10201
10202 tg3_generate_fw_event(tp);
1da177e4
LT
10203 }
10204 tp->asf_counter = tp->asf_multiplier;
10205 }
10206
f47c11ee 10207 spin_unlock(&tp->lock);
1da177e4 10208
f475f163 10209restart_timer:
1da177e4
LT
10210 tp->timer.expires = jiffies + tp->timer_offset;
10211 add_timer(&tp->timer);
10212}
10213
229b1ad1 10214static void tg3_timer_init(struct tg3 *tp)
21f7638e
MC
10215{
10216 if (tg3_flag(tp, TAGGED_STATUS) &&
10217 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
10218 !tg3_flag(tp, 57765_CLASS))
10219 tp->timer_offset = HZ;
10220 else
10221 tp->timer_offset = HZ / 10;
10222
10223 BUG_ON(tp->timer_offset > HZ);
10224
10225 tp->timer_multiplier = (HZ / tp->timer_offset);
10226 tp->asf_multiplier = (HZ / tp->timer_offset) *
10227 TG3_FW_UPDATE_FREQ_SEC;
10228
10229 init_timer(&tp->timer);
10230 tp->timer.data = (unsigned long) tp;
10231 tp->timer.function = tg3_timer;
10232}
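/* Example of the arithmetic above: with HZ = 1000 on a tagged-status
 * chip, timer_offset = HZ and timer_multiplier = 1, so the timer fires
 * once a second; otherwise timer_offset = HZ / 10 gives a 100 ms tick
 * and timer_multiplier = 10, so the "once per second" block in
 * tg3_timer() still runs every tenth invocation.
 */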
10233
10234static void tg3_timer_start(struct tg3 *tp)
10235{
10236 tp->asf_counter = tp->asf_multiplier;
10237 tp->timer_counter = tp->timer_multiplier;
10238
10239 tp->timer.expires = jiffies + tp->timer_offset;
10240 add_timer(&tp->timer);
10241}
10242
10243static void tg3_timer_stop(struct tg3 *tp)
10244{
10245 del_timer_sync(&tp->timer);
10246}
10247
10248/* Restart hardware after configuration changes, self-test, etc.
10249 * Invoked with tp->lock held.
10250 */
10251static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10252 __releases(tp->lock)
10253 __acquires(tp->lock)
10254{
10255 int err;
10256
10257 err = tg3_init_hw(tp, reset_phy);
10258 if (err) {
10259 netdev_err(tp->dev,
10260 "Failed to re-initialize device, aborting\n");
10261 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10262 tg3_full_unlock(tp);
10263 tg3_timer_stop(tp);
10264 tp->irq_sync = 0;
10265 tg3_napi_enable(tp);
10266 dev_close(tp->dev);
10267 tg3_full_lock(tp, 0);
10268 }
10269 return err;
10270}
10271
10272static void tg3_reset_task(struct work_struct *work)
10273{
10274 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10275 int err;
10276
10277 tg3_full_lock(tp, 0);
10278
10279 if (!netif_running(tp->dev)) {
10280 tg3_flag_clear(tp, RESET_TASK_PENDING);
10281 tg3_full_unlock(tp);
10282 return;
10283 }
10284
10285 tg3_full_unlock(tp);
10286
10287 tg3_phy_stop(tp);
10288
10289 tg3_netif_stop(tp);
10290
10291 tg3_full_lock(tp, 1);
10292
10293 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10294 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10295 tp->write32_rx_mbox = tg3_write_flush_reg32;
10296 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10297 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10298 }
10299
10300 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10301 err = tg3_init_hw(tp, 1);
10302 if (err)
10303 goto out;
10304
10305 tg3_netif_start(tp);
10306
10307out:
10308 tg3_full_unlock(tp);
10309
10310 if (!err)
10311 tg3_phy_start(tp);
10312
10313 tg3_flag_clear(tp, RESET_TASK_PENDING);
10314}
10315
4f125f42 10316static int tg3_request_irq(struct tg3 *tp, int irq_num)
fcfa0a32 10317{
7d12e780 10318 irq_handler_t fn;
fcfa0a32 10319 unsigned long flags;
4f125f42
MC
10320 char *name;
10321 struct tg3_napi *tnapi = &tp->napi[irq_num];
10322
10323 if (tp->irq_cnt == 1)
10324 name = tp->dev->name;
10325 else {
10326 name = &tnapi->irq_lbl[0];
10327 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10328 name[IFNAMSIZ-1] = 0;
10329 }
fcfa0a32 10330
63c3a66f 10331 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
fcfa0a32 10332 fn = tg3_msi;
63c3a66f 10333 if (tg3_flag(tp, 1SHOT_MSI))
fcfa0a32 10334 fn = tg3_msi_1shot;
ab392d2d 10335 flags = 0;
fcfa0a32
MC
10336 } else {
10337 fn = tg3_interrupt;
63c3a66f 10338 if (tg3_flag(tp, TAGGED_STATUS))
fcfa0a32 10339 fn = tg3_interrupt_tagged;
ab392d2d 10340 flags = IRQF_SHARED;
fcfa0a32 10341 }
4f125f42
MC
10342
10343 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
fcfa0a32
MC
10344}
10345
7938109f
MC
10346static int tg3_test_interrupt(struct tg3 *tp)
10347{
09943a18 10348 struct tg3_napi *tnapi = &tp->napi[0];
7938109f 10349 struct net_device *dev = tp->dev;
b16250e3 10350 int err, i, intr_ok = 0;
f6eb9b1f 10351 u32 val;
7938109f 10352
d4bc3927
MC
10353 if (!netif_running(dev))
10354 return -ENODEV;
10355
7938109f
MC
10356 tg3_disable_ints(tp);
10357
4f125f42 10358 free_irq(tnapi->irq_vec, tnapi);
7938109f 10359
f6eb9b1f
MC
10360 /*
10362 * Turn off MSI one-shot mode.  Otherwise this test has no
10363 * way to observe whether the interrupt was delivered.
10363 */
3aa1cdf8 10364 if (tg3_flag(tp, 57765_PLUS)) {
f6eb9b1f
MC
10365 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10366 tw32(MSGINT_MODE, val);
10367 }
10368
4f125f42 10369 err = request_irq(tnapi->irq_vec, tg3_test_isr,
f274fd9a 10370 IRQF_SHARED, dev->name, tnapi);
7938109f
MC
10371 if (err)
10372 return err;
10373
898a56f8 10374 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
7938109f
MC
10375 tg3_enable_ints(tp);
10376
10377 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
fd2ce37f 10378 tnapi->coal_now);
7938109f
MC
10379
10380 for (i = 0; i < 5; i++) {
b16250e3
MC
10381 u32 int_mbox, misc_host_ctrl;
10382
898a56f8 10383 int_mbox = tr32_mailbox(tnapi->int_mbox);
b16250e3
MC
10384 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10385
10386 if ((int_mbox != 0) ||
10387 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10388 intr_ok = 1;
7938109f 10389 break;
b16250e3
MC
10390 }
10391
3aa1cdf8
MC
10392 if (tg3_flag(tp, 57765_PLUS) &&
10393 tnapi->hw_status->status_tag != tnapi->last_tag)
10394 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10395
7938109f
MC
10396 msleep(10);
10397 }
10398
10399 tg3_disable_ints(tp);
10400
4f125f42 10401 free_irq(tnapi->irq_vec, tnapi);
6aa20a22 10402
4f125f42 10403 err = tg3_request_irq(tp, 0);
7938109f
MC
10404
10405 if (err)
10406 return err;
10407
f6eb9b1f
MC
10408 if (intr_ok) {
10409 /* Re-enable MSI one-shot mode. */
5b39de91 10410 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
f6eb9b1f
MC
10411 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10412 tw32(MSGINT_MODE, val);
10413 }
7938109f 10414 return 0;
f6eb9b1f 10415 }
7938109f
MC
10416
10417 return -EIO;
10418}
10419
10420/* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
10421 * successfully restored.
10422 */
10423static int tg3_test_msi(struct tg3 *tp)
10424{
7938109f
MC
10425 int err;
10426 u16 pci_cmd;
10427
63c3a66f 10428 if (!tg3_flag(tp, USING_MSI))
7938109f
MC
10429 return 0;
10430
10431 /* Turn off SERR reporting in case MSI terminates with Master
10432 * Abort.
10433 */
10434 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10435 pci_write_config_word(tp->pdev, PCI_COMMAND,
10436 pci_cmd & ~PCI_COMMAND_SERR);
10437
10438 err = tg3_test_interrupt(tp);
10439
10440 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10441
10442 if (!err)
10443 return 0;
10444
10445 /* other failures */
10446 if (err != -EIO)
10447 return err;
10448
10449 /* MSI test failed, go back to INTx mode */
5129c3a3
MC
10450 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10451 "to INTx mode. Please report this failure to the PCI "
10452 "maintainer and include system chipset information\n");
7938109f 10453
4f125f42 10454 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
09943a18 10455
7938109f
MC
10456 pci_disable_msi(tp->pdev);
10457
63c3a66f 10458 tg3_flag_clear(tp, USING_MSI);
dc8bf1b1 10459 tp->napi[0].irq_vec = tp->pdev->irq;
7938109f 10460
4f125f42 10461 err = tg3_request_irq(tp, 0);
7938109f
MC
10462 if (err)
10463 return err;
10464
10465 /* Need to reset the chip because the MSI cycle may have terminated
10466 * with Master Abort.
10467 */
f47c11ee 10468 tg3_full_lock(tp, 1);
7938109f 10469
944d980e 10470 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8e7a22e3 10471 err = tg3_init_hw(tp, 1);
7938109f 10472
f47c11ee 10473 tg3_full_unlock(tp);
7938109f
MC
10474
10475 if (err)
4f125f42 10476 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
7938109f
MC
10477
10478 return err;
10479}
10480
9e9fd12d
MC
10481static int tg3_request_firmware(struct tg3 *tp)
10482{
10483 const __be32 *fw_data;
10484
10485 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
05dbe005
JP
10486 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10487 tp->fw_needed);
9e9fd12d
MC
10488 return -ENOENT;
10489 }
10490
10491 fw_data = (void *)tp->fw->data;
10492
10493 /* The firmware blob starts with version numbers, followed by
10494 * the start address and the _full_ length including BSS sections
10495 * (which must be longer than the actual data, of course).
10496 */
10497
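	/* Header layout inferred from the indices used below (an
	 * assumption drawn from this function, not from firmware
	 * documentation): fw_data[0] = version, fw_data[1] = start
	 * address, fw_data[2] = full image length including BSS.
	 */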
10498 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
10499 if (tp->fw_len < (tp->fw->size - 12)) {
05dbe005
JP
10500 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10501 tp->fw_len, tp->fw_needed);
9e9fd12d
MC
10502 release_firmware(tp->fw);
10503 tp->fw = NULL;
10504 return -EINVAL;
10505 }
10506
10507 /* We no longer need firmware; we have it. */
10508 tp->fw_needed = NULL;
10509 return 0;
10510}
10511
9102426a 10512static u32 tg3_irq_count(struct tg3 *tp)
679563f4 10513{
9102426a 10514 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
679563f4 10515
9102426a 10516 if (irq_cnt > 1) {
c3b5003b
MC
10517 /* We want as many rx rings enabled as there are CPUs.
10518 * In multiqueue MSI-X mode, the first MSI-X vector
10519 * only deals with link interrupts, etc., so we add
10520 * one to the number of vectors we are requesting.
10521 */
9102426a 10522 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
c3b5003b 10523 }
679563f4 10524
9102426a
MC
10525 return irq_cnt;
10526}
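/* E.g. (hypothetical counts): with rxq_cnt = 4 and txq_cnt = 1,
 * irq_cnt = min(4 + 1, tp->irq_max); the extra vector is the
 * link/status vector described in the comment above.
 */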
10527
10528static bool tg3_enable_msix(struct tg3 *tp)
10529{
10530 int i, rc;
86449944 10531 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
9102426a 10532
0968169c
MC
10533 tp->txq_cnt = tp->txq_req;
10534 tp->rxq_cnt = tp->rxq_req;
10535 if (!tp->rxq_cnt)
10536 tp->rxq_cnt = netif_get_num_default_rss_queues();
9102426a
MC
10537 if (tp->rxq_cnt > tp->rxq_max)
10538 tp->rxq_cnt = tp->rxq_max;
cf6d6ea6
MC
10539
10540 /* Disable multiple TX rings by default. Simple round-robin hardware
10541 * scheduling of the TX rings can cause starvation of rings with
10542 * small packets when other rings have TSO or jumbo packets.
10543 */
10544 if (!tp->txq_req)
10545 tp->txq_cnt = 1;
9102426a
MC
10546
10547 tp->irq_cnt = tg3_irq_count(tp);
10548
679563f4
MC
10549 for (i = 0; i < tp->irq_max; i++) {
10550 msix_ent[i].entry = i;
10551 msix_ent[i].vector = 0;
10552 }
10553
10554 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
2430b031
MC
10555 if (rc < 0) {
10556 return false;
10557 } else if (rc != 0) {
679563f4
MC
10558 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10559 return false;
05dbe005
JP
10560 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10561 tp->irq_cnt, rc);
679563f4 10562 tp->irq_cnt = rc;
49a359e3 10563 tp->rxq_cnt = max(rc - 1, 1);
9102426a
MC
10564 if (tp->txq_cnt)
10565 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
679563f4
MC
10566 }
10567
10568 for (i = 0; i < tp->irq_max; i++)
10569 tp->napi[i].irq_vec = msix_ent[i].vector;
10570
49a359e3 10571 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
2ddaad39
BH
10572 pci_disable_msix(tp->pdev);
10573 return false;
10574 }
b92b9040 10575
9102426a
MC
10576 if (tp->irq_cnt == 1)
10577 return true;
d78b59f5 10578
9102426a
MC
10579 tg3_flag_set(tp, ENABLE_RSS);
10580
10581 if (tp->txq_cnt > 1)
10582 tg3_flag_set(tp, ENABLE_TSS);
10583
10584 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
2430b031 10585
679563f4
MC
10586 return true;
10587}
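/* Minimal sketch of the pci_enable_msix() retry idiom used above,
 * assuming the pre-3.14 semantics where a positive return value is
 * the number of vectors the platform could actually provide. The
 * function name is illustrative, not part of the driver.
 */
static bool sketch_enable_msix(struct pci_dev *pdev,
			       struct msix_entry *ent, int want)
{
	int rc = pci_enable_msix(pdev, ent, want);

	if (rc < 0)		/* hard failure: fall back to MSI/INTx */
		return false;
	if (rc > 0 &&		/* fewer vectors offered: retry with rc */
	    pci_enable_msix(pdev, ent, rc))
		return false;
	return true;		/* rc == 0, or the retry succeeded */
}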
10588
07b0173c
MC
10589static void tg3_ints_init(struct tg3 *tp)
10590{
63c3a66f
JP
10591 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10592 !tg3_flag(tp, TAGGED_STATUS)) {
07b0173c
MC
10593 /* All MSI-supporting chips should support tagged
10594 * status. Assert that this is the case.
10595 */
5129c3a3
MC
10596 netdev_warn(tp->dev,
10597 "MSI without TAGGED_STATUS? Not using MSI\n");
679563f4 10598 goto defcfg;
07b0173c 10599 }
4f125f42 10600
63c3a66f
JP
10601 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10602 tg3_flag_set(tp, USING_MSIX);
10603 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10604 tg3_flag_set(tp, USING_MSI);
679563f4 10605
63c3a66f 10606 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
679563f4 10607 u32 msi_mode = tr32(MSGINT_MODE);
63c3a66f 10608 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
baf8a94a 10609 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
5b39de91
MC
10610 if (!tg3_flag(tp, 1SHOT_MSI))
10611 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
679563f4
MC
10612 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10613 }
10614defcfg:
63c3a66f 10615 if (!tg3_flag(tp, USING_MSIX)) {
679563f4
MC
10616 tp->irq_cnt = 1;
10617 tp->napi[0].irq_vec = tp->pdev->irq;
49a359e3
MC
10618 }
10619
10620 if (tp->irq_cnt == 1) {
10621 tp->txq_cnt = 1;
10622 tp->rxq_cnt = 1;
2ddaad39 10623 netif_set_real_num_tx_queues(tp->dev, 1);
85407885 10624 netif_set_real_num_rx_queues(tp->dev, 1);
679563f4 10625 }
07b0173c
MC
10626}
10627
10628static void tg3_ints_fini(struct tg3 *tp)
10629{
63c3a66f 10630 if (tg3_flag(tp, USING_MSIX))
679563f4 10631 pci_disable_msix(tp->pdev);
63c3a66f 10632 else if (tg3_flag(tp, USING_MSI))
679563f4 10633 pci_disable_msi(tp->pdev);
63c3a66f
JP
10634 tg3_flag_clear(tp, USING_MSI);
10635 tg3_flag_clear(tp, USING_MSIX);
10636 tg3_flag_clear(tp, ENABLE_RSS);
10637 tg3_flag_clear(tp, ENABLE_TSS);
07b0173c
MC
10638}
10639
be947307
MC
10640static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10641 bool init)
1da177e4 10642{
d8f4cd38 10643 struct net_device *dev = tp->dev;
4f125f42 10644 int i, err;
1da177e4 10645
679563f4
MC
10646 /*
10647 * Set up interrupts first so we know how
10648 * many NAPI resources to allocate
10649 */
10650 tg3_ints_init(tp);
10651
90415477 10652 tg3_rss_check_indir_tbl(tp);
bcebcc46 10653
1da177e4
LT
10654 /* The placement of this call is tied
10655 * to the setup and use of Host TX descriptors.
10656 */
10657 err = tg3_alloc_consistent(tp);
10658 if (err)
679563f4 10659 goto err_out1;
88b06bc2 10660
66cfd1bd
MC
10661 tg3_napi_init(tp);
10662
fed97810 10663 tg3_napi_enable(tp);
1da177e4 10664
4f125f42
MC
10665 for (i = 0; i < tp->irq_cnt; i++) {
10666 struct tg3_napi *tnapi = &tp->napi[i];
10667 err = tg3_request_irq(tp, i);
10668 if (err) {
5bc09186
MC
10669 for (i--; i >= 0; i--) {
10670 tnapi = &tp->napi[i];
4f125f42 10671 free_irq(tnapi->irq_vec, tnapi);
5bc09186
MC
10672 }
10673 goto err_out2;
4f125f42
MC
10674 }
10675 }
1da177e4 10676
f47c11ee 10677 tg3_full_lock(tp, 0);
1da177e4 10678
d8f4cd38 10679 err = tg3_init_hw(tp, reset_phy);
1da177e4 10680 if (err) {
944d980e 10681 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4 10682 tg3_free_rings(tp);
1da177e4
LT
10683 }
10684
f47c11ee 10685 tg3_full_unlock(tp);
1da177e4 10686
07b0173c 10687 if (err)
679563f4 10688 goto err_out3;
1da177e4 10689
d8f4cd38 10690 if (test_irq && tg3_flag(tp, USING_MSI)) {
7938109f 10691 err = tg3_test_msi(tp);
fac9b83e 10692
7938109f 10693 if (err) {
f47c11ee 10694 tg3_full_lock(tp, 0);
944d980e 10695 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7938109f 10696 tg3_free_rings(tp);
f47c11ee 10697 tg3_full_unlock(tp);
7938109f 10698
679563f4 10699 goto err_out2;
7938109f 10700 }
fcfa0a32 10701
63c3a66f 10702 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
f6eb9b1f 10703 u32 val = tr32(PCIE_TRANSACTION_CFG);
fcfa0a32 10704
f6eb9b1f
MC
10705 tw32(PCIE_TRANSACTION_CFG,
10706 val | PCIE_TRANS_CFG_1SHOT_MSI);
fcfa0a32 10707 }
7938109f
MC
10708 }
10709
b02fd9e3
MC
10710 tg3_phy_start(tp);
10711
aed93e0b
MC
10712 tg3_hwmon_open(tp);
10713
f47c11ee 10714 tg3_full_lock(tp, 0);
1da177e4 10715
21f7638e 10716 tg3_timer_start(tp);
63c3a66f 10717 tg3_flag_set(tp, INIT_COMPLETE);
1da177e4
LT
10718 tg3_enable_ints(tp);
10719
be947307
MC
10720 if (init)
10721 tg3_ptp_init(tp);
10722 else
10723 tg3_ptp_resume(tp);
10724
10725
f47c11ee 10726 tg3_full_unlock(tp);
1da177e4 10727
fe5f5787 10728 netif_tx_start_all_queues(dev);
1da177e4 10729
06c03c02
MB
10730 /*
10731 * Reset the loopback feature if it was turned on while the device was
10732 * down; make sure that it is installed properly now.
10733 */
10734 if (dev->features & NETIF_F_LOOPBACK)
10735 tg3_set_loopback(dev, dev->features);
10736
1da177e4 10737 return 0;
07b0173c 10738
679563f4 10739err_out3:
4f125f42
MC
10740 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10741 struct tg3_napi *tnapi = &tp->napi[i];
10742 free_irq(tnapi->irq_vec, tnapi);
10743 }
07b0173c 10744
679563f4 10745err_out2:
fed97810 10746 tg3_napi_disable(tp);
66cfd1bd 10747 tg3_napi_fini(tp);
07b0173c 10748 tg3_free_consistent(tp);
679563f4
MC
10749
10750err_out1:
10751 tg3_ints_fini(tp);
d8f4cd38 10752
07b0173c 10753 return err;
1da177e4
LT
10754}
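/* The error labels above unwind strictly in reverse order of setup:
 * err_out3 frees the per-vector IRQs, err_out2 disables NAPI and
 * releases the DMA-consistent rings, and err_out1 backs out the
 * MSI/MSI-X configuration. tg3_stop() below performs the same
 * teardown on the normal path.
 */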
10755
65138594 10756static void tg3_stop(struct tg3 *tp)
1da177e4 10757{
4f125f42 10758 int i;
1da177e4 10759
db219973 10760 tg3_reset_task_cancel(tp);
bd473da3 10761 tg3_netif_stop(tp);
1da177e4 10762
21f7638e 10763 tg3_timer_stop(tp);
1da177e4 10764
aed93e0b
MC
10765 tg3_hwmon_close(tp);
10766
24bb4fb6
MC
10767 tg3_phy_stop(tp);
10768
f47c11ee 10769 tg3_full_lock(tp, 1);
1da177e4
LT
10770
10771 tg3_disable_ints(tp);
10772
944d980e 10773 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4 10774 tg3_free_rings(tp);
63c3a66f 10775 tg3_flag_clear(tp, INIT_COMPLETE);
1da177e4 10776
f47c11ee 10777 tg3_full_unlock(tp);
1da177e4 10778
4f125f42
MC
10779 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10780 struct tg3_napi *tnapi = &tp->napi[i];
10781 free_irq(tnapi->irq_vec, tnapi);
10782 }
07b0173c
MC
10783
10784 tg3_ints_fini(tp);
1da177e4 10785
66cfd1bd
MC
10786 tg3_napi_fini(tp);
10787
1da177e4 10788 tg3_free_consistent(tp);
65138594
MC
10789}
10790
d8f4cd38
MC
10791static int tg3_open(struct net_device *dev)
10792{
10793 struct tg3 *tp = netdev_priv(dev);
10794 int err;
10795
10796 if (tp->fw_needed) {
10797 err = tg3_request_firmware(tp);
10798 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10799 if (err)
10800 return err;
10801 } else if (err) {
10802 netdev_warn(tp->dev, "TSO capability disabled\n");
10803 tg3_flag_clear(tp, TSO_CAPABLE);
10804 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10805 netdev_notice(tp->dev, "TSO capability restored\n");
10806 tg3_flag_set(tp, TSO_CAPABLE);
10807 }
10808 }
10809
f4a46d1f 10810 tg3_carrier_off(tp);
d8f4cd38
MC
10811
10812 err = tg3_power_up(tp);
10813 if (err)
10814 return err;
10815
10816 tg3_full_lock(tp, 0);
10817
10818 tg3_disable_ints(tp);
10819 tg3_flag_clear(tp, INIT_COMPLETE);
10820
10821 tg3_full_unlock(tp);
10822
be947307 10823 err = tg3_start(tp, true, true, true);
d8f4cd38
MC
10824 if (err) {
10825 tg3_frob_aux_power(tp, false);
10826 pci_set_power_state(tp->pdev, PCI_D3hot);
10827 }
be947307 10828
7d41e49a
MC
10829 if (tg3_flag(tp, PTP_CAPABLE)) {
10830 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
10831 &tp->pdev->dev);
10832 if (IS_ERR(tp->ptp_clock))
10833 tp->ptp_clock = NULL;
10834 }
10835
07b0173c 10836 return err;
1da177e4
LT
10837}
10838
1da177e4
LT
10839static int tg3_close(struct net_device *dev)
10840{
10841 struct tg3 *tp = netdev_priv(dev);
10842
be947307
MC
10843 tg3_ptp_fini(tp);
10844
65138594 10845 tg3_stop(tp);
1da177e4 10846
92feeabf
MC
10847 /* Clear stats across close / open calls */
10848 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10849 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
1da177e4 10850
c866b7ea 10851 tg3_power_down(tp);
bc1c7567 10852
f4a46d1f 10853 tg3_carrier_off(tp);
bc1c7567 10854
1da177e4
LT
10855 return 0;
10856}
10857
511d2224 10858static inline u64 get_stat64(tg3_stat64_t *val)
816f8b86
SB
10859{
10860 return ((u64)val->high << 32) | ((u64)val->low);
10861}
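/* Example: high = 0x00000001, low = 0xfffffffe combines to
 * 0x1fffffffe; the chip exports each 64-bit statistic as two 32-bit
 * halves, which get_stat64() above reassembles.
 */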
10862
65ec698d 10863static u64 tg3_calc_crc_errors(struct tg3 *tp)
1da177e4
LT
10864{
10865 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10866
f07e9af3 10867 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
1da177e4
LT
10868 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10869 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1da177e4
LT
10870 u32 val;
10871
569a5df8
MC
10872 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10873 tg3_writephy(tp, MII_TG3_TEST1,
10874 val | MII_TG3_TEST1_CRC_EN);
f08aa1a8 10875 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
1da177e4
LT
10876 } else
10877 val = 0;
1da177e4
LT
10878
10879 tp->phy_crc_errors += val;
10880
10881 return tp->phy_crc_errors;
10882 }
10883
10884 return get_stat64(&hw_stats->rx_fcs_errors);
10885}
10886
10887#define ESTAT_ADD(member) \
10888 estats->member = old_estats->member + \
511d2224 10889 get_stat64(&hw_stats->member)
1da177e4 10890
65ec698d 10891static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
1da177e4 10892{
1da177e4
LT
10893 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10894 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10895
1da177e4
LT
10896 ESTAT_ADD(rx_octets);
10897 ESTAT_ADD(rx_fragments);
10898 ESTAT_ADD(rx_ucast_packets);
10899 ESTAT_ADD(rx_mcast_packets);
10900 ESTAT_ADD(rx_bcast_packets);
10901 ESTAT_ADD(rx_fcs_errors);
10902 ESTAT_ADD(rx_align_errors);
10903 ESTAT_ADD(rx_xon_pause_rcvd);
10904 ESTAT_ADD(rx_xoff_pause_rcvd);
10905 ESTAT_ADD(rx_mac_ctrl_rcvd);
10906 ESTAT_ADD(rx_xoff_entered);
10907 ESTAT_ADD(rx_frame_too_long_errors);
10908 ESTAT_ADD(rx_jabbers);
10909 ESTAT_ADD(rx_undersize_packets);
10910 ESTAT_ADD(rx_in_length_errors);
10911 ESTAT_ADD(rx_out_length_errors);
10912 ESTAT_ADD(rx_64_or_less_octet_packets);
10913 ESTAT_ADD(rx_65_to_127_octet_packets);
10914 ESTAT_ADD(rx_128_to_255_octet_packets);
10915 ESTAT_ADD(rx_256_to_511_octet_packets);
10916 ESTAT_ADD(rx_512_to_1023_octet_packets);
10917 ESTAT_ADD(rx_1024_to_1522_octet_packets);
10918 ESTAT_ADD(rx_1523_to_2047_octet_packets);
10919 ESTAT_ADD(rx_2048_to_4095_octet_packets);
10920 ESTAT_ADD(rx_4096_to_8191_octet_packets);
10921 ESTAT_ADD(rx_8192_to_9022_octet_packets);
10922
10923 ESTAT_ADD(tx_octets);
10924 ESTAT_ADD(tx_collisions);
10925 ESTAT_ADD(tx_xon_sent);
10926 ESTAT_ADD(tx_xoff_sent);
10927 ESTAT_ADD(tx_flow_control);
10928 ESTAT_ADD(tx_mac_errors);
10929 ESTAT_ADD(tx_single_collisions);
10930 ESTAT_ADD(tx_mult_collisions);
10931 ESTAT_ADD(tx_deferred);
10932 ESTAT_ADD(tx_excessive_collisions);
10933 ESTAT_ADD(tx_late_collisions);
10934 ESTAT_ADD(tx_collide_2times);
10935 ESTAT_ADD(tx_collide_3times);
10936 ESTAT_ADD(tx_collide_4times);
10937 ESTAT_ADD(tx_collide_5times);
10938 ESTAT_ADD(tx_collide_6times);
10939 ESTAT_ADD(tx_collide_7times);
10940 ESTAT_ADD(tx_collide_8times);
10941 ESTAT_ADD(tx_collide_9times);
10942 ESTAT_ADD(tx_collide_10times);
10943 ESTAT_ADD(tx_collide_11times);
10944 ESTAT_ADD(tx_collide_12times);
10945 ESTAT_ADD(tx_collide_13times);
10946 ESTAT_ADD(tx_collide_14times);
10947 ESTAT_ADD(tx_collide_15times);
10948 ESTAT_ADD(tx_ucast_packets);
10949 ESTAT_ADD(tx_mcast_packets);
10950 ESTAT_ADD(tx_bcast_packets);
10951 ESTAT_ADD(tx_carrier_sense_errors);
10952 ESTAT_ADD(tx_discards);
10953 ESTAT_ADD(tx_errors);
10954
10955 ESTAT_ADD(dma_writeq_full);
10956 ESTAT_ADD(dma_write_prioq_full);
10957 ESTAT_ADD(rxbds_empty);
10958 ESTAT_ADD(rx_discards);
10959 ESTAT_ADD(rx_errors);
10960 ESTAT_ADD(rx_threshold_hit);
10961
10962 ESTAT_ADD(dma_readq_full);
10963 ESTAT_ADD(dma_read_prioq_full);
10964 ESTAT_ADD(tx_comp_queue_full);
10965
10966 ESTAT_ADD(ring_set_send_prod_index);
10967 ESTAT_ADD(ring_status_update);
10968 ESTAT_ADD(nic_irqs);
10969 ESTAT_ADD(nic_avoided_irqs);
10970 ESTAT_ADD(nic_tx_threshold_hit);
10971
4452d099 10972 ESTAT_ADD(mbuf_lwm_thresh_hit);
1da177e4
LT
10973}
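/* ESTAT_ADD() folds the previously saved snapshot into the live
 * hardware counters, e.g. for rx_octets:
 *
 *	estats->rx_octets = tp->estats_prev.rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 *
 * so the reported totals keep counting across events that clear the
 * hardware statistics block (estats_prev itself is only zeroed across
 * close/open, as the comment in tg3_close() above notes).
 */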
10974
65ec698d 10975static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
1da177e4 10976{
511d2224 10977 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
1da177e4
LT
10978 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10979
1da177e4
LT
10980 stats->rx_packets = old_stats->rx_packets +
10981 get_stat64(&hw_stats->rx_ucast_packets) +
10982 get_stat64(&hw_stats->rx_mcast_packets) +
10983 get_stat64(&hw_stats->rx_bcast_packets);
6aa20a22 10984
1da177e4
LT
10985 stats->tx_packets = old_stats->tx_packets +
10986 get_stat64(&hw_stats->tx_ucast_packets) +
10987 get_stat64(&hw_stats->tx_mcast_packets) +
10988 get_stat64(&hw_stats->tx_bcast_packets);
10989
10990 stats->rx_bytes = old_stats->rx_bytes +
10991 get_stat64(&hw_stats->rx_octets);
10992 stats->tx_bytes = old_stats->tx_bytes +
10993 get_stat64(&hw_stats->tx_octets);
10994
10995 stats->rx_errors = old_stats->rx_errors +
4f63b877 10996 get_stat64(&hw_stats->rx_errors);
1da177e4
LT
10997 stats->tx_errors = old_stats->tx_errors +
10998 get_stat64(&hw_stats->tx_errors) +
10999 get_stat64(&hw_stats->tx_mac_errors) +
11000 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11001 get_stat64(&hw_stats->tx_discards);
11002
11003 stats->multicast = old_stats->multicast +
11004 get_stat64(&hw_stats->rx_mcast_packets);
11005 stats->collisions = old_stats->collisions +
11006 get_stat64(&hw_stats->tx_collisions);
11007
11008 stats->rx_length_errors = old_stats->rx_length_errors +
11009 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11010 get_stat64(&hw_stats->rx_undersize_packets);
11011
11012 stats->rx_over_errors = old_stats->rx_over_errors +
11013 get_stat64(&hw_stats->rxbds_empty);
11014 stats->rx_frame_errors = old_stats->rx_frame_errors +
11015 get_stat64(&hw_stats->rx_align_errors);
11016 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11017 get_stat64(&hw_stats->tx_discards);
11018 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11019 get_stat64(&hw_stats->tx_carrier_sense_errors);
11020
11021 stats->rx_crc_errors = old_stats->rx_crc_errors +
65ec698d 11022 tg3_calc_crc_errors(tp);
1da177e4 11023
4f63b877
JL
11024 stats->rx_missed_errors = old_stats->rx_missed_errors +
11025 get_stat64(&hw_stats->rx_discards);
11026
b0057c51 11027 stats->rx_dropped = tp->rx_dropped;
48855432 11028 stats->tx_dropped = tp->tx_dropped;
1da177e4
LT
11029}
11030
1da177e4
LT
11031static int tg3_get_regs_len(struct net_device *dev)
11032{
97bd8e49 11033 return TG3_REG_BLK_SIZE;
1da177e4
LT
11034}
11035
11036static void tg3_get_regs(struct net_device *dev,
11037 struct ethtool_regs *regs, void *_p)
11038{
1da177e4 11039 struct tg3 *tp = netdev_priv(dev);
1da177e4
LT
11040
11041 regs->version = 0;
11042
97bd8e49 11043 memset(_p, 0, TG3_REG_BLK_SIZE);
1da177e4 11044
80096068 11045 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
bc1c7567
MC
11046 return;
11047
f47c11ee 11048 tg3_full_lock(tp, 0);
1da177e4 11049
97bd8e49 11050 tg3_dump_legacy_regs(tp, (u32 *)_p);
1da177e4 11051
f47c11ee 11052 tg3_full_unlock(tp);
1da177e4
LT
11053}
11054
11055static int tg3_get_eeprom_len(struct net_device *dev)
11056{
11057 struct tg3 *tp = netdev_priv(dev);
11058
11059 return tp->nvram_size;
11060}
11061
1da177e4
LT
11062static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11063{
11064 struct tg3 *tp = netdev_priv(dev);
11065 int ret;
11066 u8 *pd;
b9fc7dc5 11067 u32 i, offset, len, b_offset, b_count;
a9dc529d 11068 __be32 val;
1da177e4 11069
63c3a66f 11070 if (tg3_flag(tp, NO_NVRAM))
df259d8c
MC
11071 return -EINVAL;
11072
80096068 11073 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
bc1c7567
MC
11074 return -EAGAIN;
11075
1da177e4
LT
11076 offset = eeprom->offset;
11077 len = eeprom->len;
11078 eeprom->len = 0;
11079
11080 eeprom->magic = TG3_EEPROM_MAGIC;
11081
11082 if (offset & 3) {
11083 /* adjustments to start on required 4 byte boundary */
11084 b_offset = offset & 3;
11085 b_count = 4 - b_offset;
11086 if (b_count > len) {
11087 /* i.e. offset=1 len=2 */
11088 b_count = len;
11089 }
a9dc529d 11090 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
1da177e4
LT
11091 if (ret)
11092 return ret;
be98da6a 11093 memcpy(data, ((char *)&val) + b_offset, b_count);
1da177e4
LT
11094 len -= b_count;
11095 offset += b_count;
c6cdf436 11096 eeprom->len += b_count;
1da177e4
LT
11097 }
11098
25985edc 11099 /* read bytes up to the last 4 byte boundary */
1da177e4
LT
11100 pd = &data[eeprom->len];
11101 for (i = 0; i < (len - (len & 3)); i += 4) {
a9dc529d 11102 ret = tg3_nvram_read_be32(tp, offset + i, &val);
1da177e4
LT
11103 if (ret) {
11104 eeprom->len += i;
11105 return ret;
11106 }
1da177e4
LT
11107 memcpy(pd + i, &val, 4);
11108 }
11109 eeprom->len += i;
11110
11111 if (len & 3) {
11112 /* read last bytes not ending on 4 byte boundary */
11113 pd = &data[eeprom->len];
11114 b_count = len & 3;
11115 b_offset = offset + len - b_count;
a9dc529d 11116 ret = tg3_nvram_read_be32(tp, b_offset, &val);
1da177e4
LT
11117 if (ret)
11118 return ret;
b9fc7dc5 11119 memcpy(pd, &val, b_count);
1da177e4
LT
11120 eeprom->len += b_count;
11121 }
11122 return 0;
11123}
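/* Worked example of the head fix-up above, for the offset = 1,
 * len = 2 case the inline comment mentions: b_offset = 1 and
 * b_count = 4 - 1 = 3 is clamped to len = 2, so one aligned word is
 * read at offset 0 and bytes 1..2 of it are copied out; len then
 * drops to 0 and the aligned-middle and tail loops do nothing.
 */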
11124
1da177e4
LT
11125static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11126{
11127 struct tg3 *tp = netdev_priv(dev);
11128 int ret;
b9fc7dc5 11129 u32 offset, len, b_offset, odd_len;
1da177e4 11130 u8 *buf;
a9dc529d 11131 __be32 start, end;
1da177e4 11132
80096068 11133 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
bc1c7567
MC
11134 return -EAGAIN;
11135
63c3a66f 11136 if (tg3_flag(tp, NO_NVRAM) ||
df259d8c 11137 eeprom->magic != TG3_EEPROM_MAGIC)
1da177e4
LT
11138 return -EINVAL;
11139
11140 offset = eeprom->offset;
11141 len = eeprom->len;
11142
11143 if ((b_offset = (offset & 3))) {
11144 /* adjustments to start on required 4 byte boundary */
a9dc529d 11145 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
1da177e4
LT
11146 if (ret)
11147 return ret;
1da177e4
LT
11148 len += b_offset;
11149 offset &= ~3;
1c8594b4
MC
11150 if (len < 4)
11151 len = 4;
1da177e4
LT
11152 }
11153
11154 odd_len = 0;
1c8594b4 11155 if (len & 3) {
1da177e4
LT
11156 /* adjustments to end on required 4 byte boundary */
11157 odd_len = 1;
11158 len = (len + 3) & ~3;
a9dc529d 11159 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
1da177e4
LT
11160 if (ret)
11161 return ret;
1da177e4
LT
11162 }
11163
11164 buf = data;
11165 if (b_offset || odd_len) {
11166 buf = kmalloc(len, GFP_KERNEL);
ab0049b4 11167 if (!buf)
1da177e4
LT
11168 return -ENOMEM;
11169 if (b_offset)
11170 memcpy(buf, &start, 4);
11171 if (odd_len)
11172 memcpy(buf+len-4, &end, 4);
11173 memcpy(buf + b_offset, data, eeprom->len);
11174 }
11175
11176 ret = tg3_nvram_write_block(tp, offset, len, buf);
11177
11178 if (buf != data)
11179 kfree(buf);
11180
11181 return ret;
11182}
11183
11184static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11185{
b02fd9e3
MC
11186 struct tg3 *tp = netdev_priv(dev);
11187
63c3a66f 11188 if (tg3_flag(tp, USE_PHYLIB)) {
3f0e3ad7 11189 struct phy_device *phydev;
f07e9af3 11190 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
b02fd9e3 11191 return -EAGAIN;
3f0e3ad7
MC
11192 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11193 return phy_ethtool_gset(phydev, cmd);
b02fd9e3 11194 }
6aa20a22 11195
1da177e4
LT
11196 cmd->supported = (SUPPORTED_Autoneg);
11197
f07e9af3 11198 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
1da177e4
LT
11199 cmd->supported |= (SUPPORTED_1000baseT_Half |
11200 SUPPORTED_1000baseT_Full);
11201
f07e9af3 11202 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
1da177e4
LT
11203 cmd->supported |= (SUPPORTED_100baseT_Half |
11204 SUPPORTED_100baseT_Full |
11205 SUPPORTED_10baseT_Half |
11206 SUPPORTED_10baseT_Full |
3bebab59 11207 SUPPORTED_TP);
ef348144
KK
11208 cmd->port = PORT_TP;
11209 } else {
1da177e4 11210 cmd->supported |= SUPPORTED_FIBRE;
ef348144
KK
11211 cmd->port = PORT_FIBRE;
11212 }
6aa20a22 11213
1da177e4 11214 cmd->advertising = tp->link_config.advertising;
5bb09778
MC
11215 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11216 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11217 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11218 cmd->advertising |= ADVERTISED_Pause;
11219 } else {
11220 cmd->advertising |= ADVERTISED_Pause |
11221 ADVERTISED_Asym_Pause;
11222 }
11223 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11224 cmd->advertising |= ADVERTISED_Asym_Pause;
11225 }
11226 }
f4a46d1f 11227 if (netif_running(dev) && tp->link_up) {
70739497 11228 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
1da177e4 11229 cmd->duplex = tp->link_config.active_duplex;
859edb26 11230 cmd->lp_advertising = tp->link_config.rmt_adv;
e348c5e7
MC
11231 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11232 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11233 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11234 else
11235 cmd->eth_tp_mdix = ETH_TP_MDI;
11236 }
64c22182 11237 } else {
e740522e
MC
11238 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11239 cmd->duplex = DUPLEX_UNKNOWN;
e348c5e7 11240 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
1da177e4 11241 }
882e9793 11242 cmd->phy_address = tp->phy_addr;
7e5856bd 11243 cmd->transceiver = XCVR_INTERNAL;
1da177e4
LT
11244 cmd->autoneg = tp->link_config.autoneg;
11245 cmd->maxtxpkt = 0;
11246 cmd->maxrxpkt = 0;
11247 return 0;
11248}
6aa20a22 11249
1da177e4
LT
11250static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11251{
11252 struct tg3 *tp = netdev_priv(dev);
25db0338 11253 u32 speed = ethtool_cmd_speed(cmd);
6aa20a22 11254
63c3a66f 11255 if (tg3_flag(tp, USE_PHYLIB)) {
3f0e3ad7 11256 struct phy_device *phydev;
f07e9af3 11257 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
b02fd9e3 11258 return -EAGAIN;
3f0e3ad7
MC
11259 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11260 return phy_ethtool_sset(phydev, cmd);
b02fd9e3
MC
11261 }
11262
7e5856bd
MC
11263 if (cmd->autoneg != AUTONEG_ENABLE &&
11264 cmd->autoneg != AUTONEG_DISABLE)
37ff238d 11265 return -EINVAL;
7e5856bd
MC
11266
11267 if (cmd->autoneg == AUTONEG_DISABLE &&
11268 cmd->duplex != DUPLEX_FULL &&
11269 cmd->duplex != DUPLEX_HALF)
37ff238d 11270 return -EINVAL;
1da177e4 11271
7e5856bd
MC
11272 if (cmd->autoneg == AUTONEG_ENABLE) {
11273 u32 mask = ADVERTISED_Autoneg |
11274 ADVERTISED_Pause |
11275 ADVERTISED_Asym_Pause;
11276
f07e9af3 11277 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
7e5856bd
MC
11278 mask |= ADVERTISED_1000baseT_Half |
11279 ADVERTISED_1000baseT_Full;
11280
f07e9af3 11281 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
7e5856bd
MC
11282 mask |= ADVERTISED_100baseT_Half |
11283 ADVERTISED_100baseT_Full |
11284 ADVERTISED_10baseT_Half |
11285 ADVERTISED_10baseT_Full |
11286 ADVERTISED_TP;
11287 else
11288 mask |= ADVERTISED_FIBRE;
11289
11290 if (cmd->advertising & ~mask)
11291 return -EINVAL;
11292
11293 mask &= (ADVERTISED_1000baseT_Half |
11294 ADVERTISED_1000baseT_Full |
11295 ADVERTISED_100baseT_Half |
11296 ADVERTISED_100baseT_Full |
11297 ADVERTISED_10baseT_Half |
11298 ADVERTISED_10baseT_Full);
11299
11300 cmd->advertising &= mask;
11301 } else {
f07e9af3 11302 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
25db0338 11303 if (speed != SPEED_1000)
7e5856bd
MC
11304 return -EINVAL;
11305
11306 if (cmd->duplex != DUPLEX_FULL)
11307 return -EINVAL;
11308 } else {
25db0338
DD
11309 if (speed != SPEED_100 &&
11310 speed != SPEED_10)
7e5856bd
MC
11311 return -EINVAL;
11312 }
11313 }
11314
f47c11ee 11315 tg3_full_lock(tp, 0);
1da177e4
LT
11316
11317 tp->link_config.autoneg = cmd->autoneg;
11318 if (cmd->autoneg == AUTONEG_ENABLE) {
405d8e5c
AG
11319 tp->link_config.advertising = (cmd->advertising |
11320 ADVERTISED_Autoneg);
e740522e
MC
11321 tp->link_config.speed = SPEED_UNKNOWN;
11322 tp->link_config.duplex = DUPLEX_UNKNOWN;
1da177e4
LT
11323 } else {
11324 tp->link_config.advertising = 0;
25db0338 11325 tp->link_config.speed = speed;
1da177e4 11326 tp->link_config.duplex = cmd->duplex;
b02fd9e3 11327 }
6aa20a22 11328
1da177e4
LT
11329 if (netif_running(dev))
11330 tg3_setup_phy(tp, 1);
11331
f47c11ee 11332 tg3_full_unlock(tp);
6aa20a22 11333
1da177e4
LT
11334 return 0;
11335}
6aa20a22 11336
1da177e4
LT
11337static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11338{
11339 struct tg3 *tp = netdev_priv(dev);
6aa20a22 11340
68aad78c
RJ
11341 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11342 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11343 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11344 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
1da177e4 11345}
6aa20a22 11346
1da177e4
LT
11347static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11348{
11349 struct tg3 *tp = netdev_priv(dev);
6aa20a22 11350
63c3a66f 11351 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
a85feb8c
GZ
11352 wol->supported = WAKE_MAGIC;
11353 else
11354 wol->supported = 0;
1da177e4 11355 wol->wolopts = 0;
63c3a66f 11356 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
1da177e4
LT
11357 wol->wolopts = WAKE_MAGIC;
11358 memset(&wol->sopass, 0, sizeof(wol->sopass));
11359}
6aa20a22 11360
1da177e4
LT
11361static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11362{
11363 struct tg3 *tp = netdev_priv(dev);
12dac075 11364 struct device *dp = &tp->pdev->dev;
6aa20a22 11365
1da177e4
LT
11366 if (wol->wolopts & ~WAKE_MAGIC)
11367 return -EINVAL;
11368 if ((wol->wolopts & WAKE_MAGIC) &&
63c3a66f 11369 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
1da177e4 11370 return -EINVAL;
6aa20a22 11371
f2dc0d18
RW
11372 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11373
f47c11ee 11374 spin_lock_bh(&tp->lock);
f2dc0d18 11375 if (device_may_wakeup(dp))
63c3a66f 11376 tg3_flag_set(tp, WOL_ENABLE);
f2dc0d18 11377 else
63c3a66f 11378 tg3_flag_clear(tp, WOL_ENABLE);
f47c11ee 11379 spin_unlock_bh(&tp->lock);
6aa20a22 11380
1da177e4
LT
11381 return 0;
11382}
6aa20a22 11383
1da177e4
LT
11384static u32 tg3_get_msglevel(struct net_device *dev)
11385{
11386 struct tg3 *tp = netdev_priv(dev);
11387 return tp->msg_enable;
11388}
6aa20a22 11389
1da177e4
LT
11390static void tg3_set_msglevel(struct net_device *dev, u32 value)
11391{
11392 struct tg3 *tp = netdev_priv(dev);
11393 tp->msg_enable = value;
11394}
6aa20a22 11395
1da177e4
LT
11396static int tg3_nway_reset(struct net_device *dev)
11397{
11398 struct tg3 *tp = netdev_priv(dev);
1da177e4 11399 int r;
6aa20a22 11400
1da177e4
LT
11401 if (!netif_running(dev))
11402 return -EAGAIN;
11403
f07e9af3 11404 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
c94e3941
MC
11405 return -EINVAL;
11406
63c3a66f 11407 if (tg3_flag(tp, USE_PHYLIB)) {
f07e9af3 11408 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
b02fd9e3 11409 return -EAGAIN;
3f0e3ad7 11410 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
b02fd9e3
MC
11411 } else {
11412 u32 bmcr;
11413
11414 spin_lock_bh(&tp->lock);
11415 r = -EINVAL;
11416 tg3_readphy(tp, MII_BMCR, &bmcr);
11417 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11418 ((bmcr & BMCR_ANENABLE) ||
f07e9af3 11419 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
b02fd9e3
MC
11420 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11421 BMCR_ANENABLE);
11422 r = 0;
11423 }
11424 spin_unlock_bh(&tp->lock);
1da177e4 11425 }
6aa20a22 11426
1da177e4
LT
11427 return r;
11428}
6aa20a22 11429
1da177e4
LT
11430static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11431{
11432 struct tg3 *tp = netdev_priv(dev);
6aa20a22 11433
2c49a44d 11434 ering->rx_max_pending = tp->rx_std_ring_mask;
63c3a66f 11435 if (tg3_flag(tp, JUMBO_RING_ENABLE))
2c49a44d 11436 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
4f81c32b
MC
11437 else
11438 ering->rx_jumbo_max_pending = 0;
11439
11440 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
1da177e4
LT
11441
11442 ering->rx_pending = tp->rx_pending;
63c3a66f 11443 if (tg3_flag(tp, JUMBO_RING_ENABLE))
4f81c32b
MC
11444 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11445 else
11446 ering->rx_jumbo_pending = 0;
11447
f3f3f27e 11448 ering->tx_pending = tp->napi[0].tx_pending;
1da177e4 11449}
6aa20a22 11450
1da177e4
LT
11451static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11452{
11453 struct tg3 *tp = netdev_priv(dev);
646c9edd 11454 int i, irq_sync = 0, err = 0;
6aa20a22 11455
2c49a44d
MC
11456 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11457 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
bc3a9254
MC
11458 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11459 (ering->tx_pending <= MAX_SKB_FRAGS) ||
63c3a66f 11460 (tg3_flag(tp, TSO_BUG) &&
bc3a9254 11461 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
1da177e4 11462 return -EINVAL;
6aa20a22 11463
bbe832c0 11464 if (netif_running(dev)) {
b02fd9e3 11465 tg3_phy_stop(tp);
1da177e4 11466 tg3_netif_stop(tp);
bbe832c0
MC
11467 irq_sync = 1;
11468 }
1da177e4 11469
bbe832c0 11470 tg3_full_lock(tp, irq_sync);
6aa20a22 11471
1da177e4
LT
11472 tp->rx_pending = ering->rx_pending;
11473
63c3a66f 11474 if (tg3_flag(tp, MAX_RXPEND_64) &&
1da177e4
LT
11475 tp->rx_pending > 63)
11476 tp->rx_pending = 63;
11477 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
646c9edd 11478
6fd45cb8 11479 for (i = 0; i < tp->irq_max; i++)
646c9edd 11480 tp->napi[i].tx_pending = ering->tx_pending;
1da177e4
LT
11481
11482 if (netif_running(dev)) {
944d980e 11483 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
b9ec6c1b
MC
11484 err = tg3_restart_hw(tp, 1);
11485 if (!err)
11486 tg3_netif_start(tp);
1da177e4
LT
11487 }
11488
f47c11ee 11489 tg3_full_unlock(tp);
6aa20a22 11490
b02fd9e3
MC
11491 if (irq_sync && !err)
11492 tg3_phy_start(tp);
11493
b9ec6c1b 11494 return err;
1da177e4 11495}
6aa20a22 11496
1da177e4
LT
11497static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11498{
11499 struct tg3 *tp = netdev_priv(dev);
6aa20a22 11500
63c3a66f 11501 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
8d018621 11502
4a2db503 11503 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
8d018621
MC
11504 epause->rx_pause = 1;
11505 else
11506 epause->rx_pause = 0;
11507
4a2db503 11508 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
8d018621
MC
11509 epause->tx_pause = 1;
11510 else
11511 epause->tx_pause = 0;
1da177e4 11512}
6aa20a22 11513
1da177e4
LT
11514static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11515{
11516 struct tg3 *tp = netdev_priv(dev);
b02fd9e3 11517 int err = 0;
6aa20a22 11518
63c3a66f 11519 if (tg3_flag(tp, USE_PHYLIB)) {
2712168f
MC
11520 u32 newadv;
11521 struct phy_device *phydev;
1da177e4 11522
2712168f 11523 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
f47c11ee 11524
2712168f
MC
11525 if (!(phydev->supported & SUPPORTED_Pause) ||
11526 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
2259dca3 11527 (epause->rx_pause != epause->tx_pause)))
2712168f 11528 return -EINVAL;
1da177e4 11529
2712168f
MC
11530 tp->link_config.flowctrl = 0;
11531 if (epause->rx_pause) {
11532 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11533
11534 if (epause->tx_pause) {
11535 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11536 newadv = ADVERTISED_Pause;
b02fd9e3 11537 } else
2712168f
MC
11538 newadv = ADVERTISED_Pause |
11539 ADVERTISED_Asym_Pause;
11540 } else if (epause->tx_pause) {
11541 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11542 newadv = ADVERTISED_Asym_Pause;
11543 } else
11544 newadv = 0;
11545
11546 if (epause->autoneg)
63c3a66f 11547 tg3_flag_set(tp, PAUSE_AUTONEG);
2712168f 11548 else
63c3a66f 11549 tg3_flag_clear(tp, PAUSE_AUTONEG);
2712168f 11550
f07e9af3 11551 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2712168f
MC
11552 u32 oldadv = phydev->advertising &
11553 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11554 if (oldadv != newadv) {
11555 phydev->advertising &=
11556 ~(ADVERTISED_Pause |
11557 ADVERTISED_Asym_Pause);
11558 phydev->advertising |= newadv;
11559 if (phydev->autoneg) {
11560 /*
11561 * Always renegotiate the link to
11562 * inform our link partner of our
11563 * flow control settings, even if the
11564 * flow control is forced. Let
11565 * tg3_adjust_link() do the final
11566 * flow control setup.
11567 */
11568 return phy_start_aneg(phydev);
b02fd9e3 11569 }
b02fd9e3 11570 }
b02fd9e3 11571
2712168f 11572 if (!epause->autoneg)
b02fd9e3 11573 tg3_setup_flow_control(tp, 0, 0);
2712168f 11574 } else {
c6700ce2 11575 tp->link_config.advertising &=
2712168f
MC
11576 ~(ADVERTISED_Pause |
11577 ADVERTISED_Asym_Pause);
c6700ce2 11578 tp->link_config.advertising |= newadv;
b02fd9e3
MC
11579 }
11580 } else {
11581 int irq_sync = 0;
11582
11583 if (netif_running(dev)) {
11584 tg3_netif_stop(tp);
11585 irq_sync = 1;
11586 }
11587
11588 tg3_full_lock(tp, irq_sync);
11589
11590 if (epause->autoneg)
63c3a66f 11591 tg3_flag_set(tp, PAUSE_AUTONEG);
b02fd9e3 11592 else
63c3a66f 11593 tg3_flag_clear(tp, PAUSE_AUTONEG);
b02fd9e3 11594 if (epause->rx_pause)
e18ce346 11595 tp->link_config.flowctrl |= FLOW_CTRL_RX;
b02fd9e3 11596 else
e18ce346 11597 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
b02fd9e3 11598 if (epause->tx_pause)
e18ce346 11599 tp->link_config.flowctrl |= FLOW_CTRL_TX;
b02fd9e3 11600 else
e18ce346 11601 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
b02fd9e3
MC
11602
11603 if (netif_running(dev)) {
11604 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11605 err = tg3_restart_hw(tp, 1);
11606 if (!err)
11607 tg3_netif_start(tp);
11608 }
11609
11610 tg3_full_unlock(tp);
11611 }
6aa20a22 11612
b9ec6c1b 11613 return err;
1da177e4 11614}
6aa20a22 11615
de6f31eb 11616static int tg3_get_sset_count(struct net_device *dev, int sset)
1da177e4 11617{
b9f2c044
JG
11618 switch (sset) {
11619 case ETH_SS_TEST:
11620 return TG3_NUM_TEST;
11621 case ETH_SS_STATS:
11622 return TG3_NUM_STATS;
11623 default:
11624 return -EOPNOTSUPP;
11625 }
4cafd3f5
MC
11626}
11627
90415477
MC
11628static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11629 u32 *rules __always_unused)
11630{
11631 struct tg3 *tp = netdev_priv(dev);
11632
11633 if (!tg3_flag(tp, SUPPORT_MSIX))
11634 return -EOPNOTSUPP;
11635
11636 switch (info->cmd) {
11637 case ETHTOOL_GRXRINGS:
11638 if (netif_running(tp->dev)) {
9102426a 11639 info->data = tp->rxq_cnt;
90415477
MC
11640 } else {
11641 info->data = num_online_cpus();
9102426a
MC
11642 if (info->data > TG3_RSS_MAX_NUM_QS)
11643 info->data = TG3_RSS_MAX_NUM_QS;
90415477
MC
11644 }
11645
11646 /* The first interrupt vector only
11647 * handles link interrupts.
11648 */
11649 info->data -= 1;
11650 return 0;
11651
11652 default:
11653 return -EOPNOTSUPP;
11654 }
11655}
11656
11657static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11658{
11659 u32 size = 0;
11660 struct tg3 *tp = netdev_priv(dev);
11661
11662 if (tg3_flag(tp, SUPPORT_MSIX))
11663 size = TG3_RSS_INDIR_TBL_SIZE;
11664
11665 return size;
11666}
11667
11668static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11669{
11670 struct tg3 *tp = netdev_priv(dev);
11671 int i;
11672
11673 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11674 indir[i] = tp->rss_ind_tbl[i];
11675
11676 return 0;
11677}
11678
11679static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11680{
11681 struct tg3 *tp = netdev_priv(dev);
11682 size_t i;
11683
11684 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11685 tp->rss_ind_tbl[i] = indir[i];
11686
11687 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11688 return 0;
11689
11690 /* It is legal to write the indirection
11691 * table while the device is running.
11692 */
11693 tg3_full_lock(tp, 0);
11694 tg3_rss_write_indir_tbl(tp);
11695 tg3_full_unlock(tp);
11696
11697 return 0;
11698}
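/* A sketch of how the table written above is consumed, assuming the
 * usual RSS scheme (the function name is illustrative, not a tg3
 * interface): the hardware hashes each flow, then uses the hash to
 * index the indirection table, which names the destination RX ring.
 */
static u32 sketch_rss_pick_ring(const u32 *indir_tbl, u32 flow_hash)
{
	return indir_tbl[flow_hash % TG3_RSS_INDIR_TBL_SIZE];
}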
11699
0968169c
MC
11700static void tg3_get_channels(struct net_device *dev,
11701 struct ethtool_channels *channel)
11702{
11703 struct tg3 *tp = netdev_priv(dev);
11704 u32 deflt_qs = netif_get_num_default_rss_queues();
11705
11706 channel->max_rx = tp->rxq_max;
11707 channel->max_tx = tp->txq_max;
11708
11709 if (netif_running(dev)) {
11710 channel->rx_count = tp->rxq_cnt;
11711 channel->tx_count = tp->txq_cnt;
11712 } else {
11713 if (tp->rxq_req)
11714 channel->rx_count = tp->rxq_req;
11715 else
11716 channel->rx_count = min(deflt_qs, tp->rxq_max);
11717
11718 if (tp->txq_req)
11719 channel->tx_count = tp->txq_req;
11720 else
11721 channel->tx_count = min(deflt_qs, tp->txq_max);
11722 }
11723}
11724
11725static int tg3_set_channels(struct net_device *dev,
11726 struct ethtool_channels *channel)
11727{
11728 struct tg3 *tp = netdev_priv(dev);
11729
11730 if (!tg3_flag(tp, SUPPORT_MSIX))
11731 return -EOPNOTSUPP;
11732
11733 if (channel->rx_count > tp->rxq_max ||
11734 channel->tx_count > tp->txq_max)
11735 return -EINVAL;
11736
11737 tp->rxq_req = channel->rx_count;
11738 tp->txq_req = channel->tx_count;
11739
11740 if (!netif_running(dev))
11741 return 0;
11742
11743 tg3_stop(tp);
11744
f4a46d1f 11745 tg3_carrier_off(tp);
0968169c 11746
be947307 11747 tg3_start(tp, true, false, false);
0968169c
MC
11748
11749 return 0;
11750}
11751
de6f31eb 11752static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
1da177e4
LT
11753{
11754 switch (stringset) {
11755 case ETH_SS_STATS:
11756 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11757 break;
4cafd3f5
MC
11758 case ETH_SS_TEST:
11759 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11760 break;
1da177e4
LT
11761 default:
11762 WARN_ON(1); /* we need a WARN() */
11763 break;
11764 }
11765}
11766
81b8709c 11767static int tg3_set_phys_id(struct net_device *dev,
11768 enum ethtool_phys_id_state state)
4009a93d
MC
11769{
11770 struct tg3 *tp = netdev_priv(dev);
4009a93d
MC
11771
11772 if (!netif_running(tp->dev))
11773 return -EAGAIN;
11774
81b8709c 11775 switch (state) {
11776 case ETHTOOL_ID_ACTIVE:
fce55922 11777 return 1; /* cycle on/off once per second */
4009a93d 11778
81b8709c 11779 case ETHTOOL_ID_ON:
11780 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11781 LED_CTRL_1000MBPS_ON |
11782 LED_CTRL_100MBPS_ON |
11783 LED_CTRL_10MBPS_ON |
11784 LED_CTRL_TRAFFIC_OVERRIDE |
11785 LED_CTRL_TRAFFIC_BLINK |
11786 LED_CTRL_TRAFFIC_LED);
11787 break;
6aa20a22 11788
81b8709c 11789 case ETHTOOL_ID_OFF:
11790 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11791 LED_CTRL_TRAFFIC_OVERRIDE);
11792 break;
4009a93d 11793
81b8709c 11794 case ETHTOOL_ID_INACTIVE:
11795 tw32(MAC_LED_CTRL, tp->led_ctrl);
11796 break;
4009a93d 11797 }
81b8709c 11798
4009a93d
MC
11799 return 0;
11800}
11801
de6f31eb 11802static void tg3_get_ethtool_stats(struct net_device *dev,
1da177e4
LT
11803 struct ethtool_stats *estats, u64 *tmp_stats)
11804{
11805 struct tg3 *tp = netdev_priv(dev);
0e6c9da3 11806
b546e46f
MC
11807 if (tp->hw_stats)
11808 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11809 else
11810 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
1da177e4
LT
11811}
11812
535a490e 11813static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
c3e94500
MC
11814{
11815 int i;
11816 __be32 *buf;
11817 u32 offset = 0, len = 0;
11818 u32 magic, val;
11819
63c3a66f 11820 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
c3e94500
MC
11821 return NULL;
11822
11823 if (magic == TG3_EEPROM_MAGIC) {
11824 for (offset = TG3_NVM_DIR_START;
11825 offset < TG3_NVM_DIR_END;
11826 offset += TG3_NVM_DIRENT_SIZE) {
11827 if (tg3_nvram_read(tp, offset, &val))
11828 return NULL;
11829
11830 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11831 TG3_NVM_DIRTYPE_EXTVPD)
11832 break;
11833 }
11834
11835 if (offset != TG3_NVM_DIR_END) {
11836 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11837 if (tg3_nvram_read(tp, offset + 4, &offset))
11838 return NULL;
11839
11840 offset = tg3_nvram_logical_addr(tp, offset);
11841 }
11842 }
11843
11844 if (!offset || !len) {
11845 offset = TG3_NVM_VPD_OFF;
11846 len = TG3_NVM_VPD_LEN;
11847 }
11848
11849 buf = kmalloc(len, GFP_KERNEL);
11850 if (buf == NULL)
11851 return NULL;
11852
11853 if (magic == TG3_EEPROM_MAGIC) {
11854 for (i = 0; i < len; i += 4) {
11855 /* The data is in little-endian format in NVRAM.
11856 * Use the big-endian read routines to preserve
11857 * the byte order as it exists in NVRAM.
11858 */
11859 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11860 goto error;
11861 }
11862 } else {
11863 u8 *ptr;
11864 ssize_t cnt;
11865 unsigned int pos = 0;
11866
11867 ptr = (u8 *)&buf[0];
11868 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11869 cnt = pci_read_vpd(tp->pdev, pos,
11870 len - pos, ptr);
11871 if (cnt == -ETIMEDOUT || cnt == -EINTR)
11872 cnt = 0;
11873 else if (cnt < 0)
11874 goto error;
11875 }
11876 if (pos != len)
11877 goto error;
11878 }
11879
535a490e
MC
11880 *vpdlen = len;
11881
c3e94500
MC
11882 return buf;
11883
11884error:
11885 kfree(buf);
11886 return NULL;
11887}
11888
566f86ad 11889#define NVRAM_TEST_SIZE 0x100
a5767dec
MC
11890#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
11891#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
11892#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
727a6d9f
MC
11893#define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
11894#define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
bda18faf 11895#define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
b16250e3
MC
11896#define NVRAM_SELFBOOT_HW_SIZE 0x20
11897#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
566f86ad
MC
11898
11899static int tg3_test_nvram(struct tg3 *tp)
11900{
535a490e 11901 u32 csum, magic, len;
a9dc529d 11902 __be32 *buf;
ab0049b4 11903 int i, j, k, err = 0, size;
566f86ad 11904
63c3a66f 11905 if (tg3_flag(tp, NO_NVRAM))
df259d8c
MC
11906 return 0;
11907
e4f34110 11908 if (tg3_nvram_read(tp, 0, &magic) != 0)
1b27777a
MC
11909 return -EIO;
11910
1b27777a
MC
11911 if (magic == TG3_EEPROM_MAGIC)
11912 size = NVRAM_TEST_SIZE;
b16250e3 11913 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
a5767dec
MC
11914 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11915 TG3_EEPROM_SB_FORMAT_1) {
11916 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11917 case TG3_EEPROM_SB_REVISION_0:
11918 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11919 break;
11920 case TG3_EEPROM_SB_REVISION_2:
11921 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11922 break;
11923 case TG3_EEPROM_SB_REVISION_3:
11924 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11925 break;
727a6d9f
MC
11926 case TG3_EEPROM_SB_REVISION_4:
11927 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11928 break;
11929 case TG3_EEPROM_SB_REVISION_5:
11930 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11931 break;
11932 case TG3_EEPROM_SB_REVISION_6:
11933 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11934 break;
a5767dec 11935 default:
727a6d9f 11936 return -EIO;
a5767dec
MC
11937 }
11938 } else
1b27777a 11939 return 0;
b16250e3
MC
11940 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11941 size = NVRAM_SELFBOOT_HW_SIZE;
11942 else
1b27777a
MC
11943 return -EIO;
11944
11945 buf = kmalloc(size, GFP_KERNEL);
566f86ad
MC
11946 if (buf == NULL)
11947 return -ENOMEM;
11948
1b27777a
MC
11949 err = -EIO;
11950 for (i = 0, j = 0; i < size; i += 4, j++) {
a9dc529d
MC
11951 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11952 if (err)
566f86ad 11953 break;
566f86ad 11954 }
1b27777a 11955 if (i < size)
566f86ad
MC
11956 goto out;
11957
1b27777a 11958 /* Selfboot format */
a9dc529d 11959 magic = be32_to_cpu(buf[0]);
b9fc7dc5 11960 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
b16250e3 11961 TG3_EEPROM_MAGIC_FW) {
1b27777a
MC
11962 u8 *buf8 = (u8 *) buf, csum8 = 0;
11963
b9fc7dc5 11964 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
a5767dec
MC
11965 TG3_EEPROM_SB_REVISION_2) {
11966 /* For rev 2, the csum doesn't include the MBA. */
11967 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11968 csum8 += buf8[i];
11969 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11970 csum8 += buf8[i];
11971 } else {
11972 for (i = 0; i < size; i++)
11973 csum8 += buf8[i];
11974 }
1b27777a 11975
ad96b485
AB
11976 if (csum8 == 0) {
11977 err = 0;
11978 goto out;
11979 }
11980
11981 err = -EIO;
11982 goto out;
1b27777a 11983 }
566f86ad 11984
b9fc7dc5 11985 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
b16250e3
MC
11986 TG3_EEPROM_MAGIC_HW) {
11987 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
a9dc529d 11988 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
b16250e3 11989 u8 *buf8 = (u8 *) buf;
b16250e3
MC
11990
11991 /* Separate the parity bits and the data bytes. */
11992 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11993 if ((i == 0) || (i == 8)) {
11994 int l;
11995 u8 msk;
11996
11997 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11998 parity[k++] = buf8[i] & msk;
11999 i++;
859a5887 12000 } else if (i == 16) {
b16250e3
MC
12001 int l;
12002 u8 msk;
12003
12004 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12005 parity[k++] = buf8[i] & msk;
12006 i++;
12007
12008 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12009 parity[k++] = buf8[i] & msk;
12010 i++;
12011 }
12012 data[j++] = buf8[i];
12013 }
12014
12015 err = -EIO;
12016 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12017 u8 hw8 = hweight8(data[i]);
12018
12019 if ((hw8 & 0x1) && parity[i])
12020 goto out;
12021 else if (!(hw8 & 0x1) && !parity[i])
12022 goto out;
12023 }
12024 err = 0;
12025 goto out;
12026 }
12027
01c3a392
MC
12028 err = -EIO;
12029
566f86ad
MC
12030 /* Bootstrap checksum at offset 0x10 */
12031 csum = calc_crc((unsigned char *) buf, 0x10);
01c3a392 12032 if (csum != le32_to_cpu(buf[0x10/4]))
566f86ad
MC
12033 goto out;
12034
12035 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12036 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
01c3a392 12037 if (csum != le32_to_cpu(buf[0xfc/4]))
a9dc529d 12038 goto out;
566f86ad 12039
c3e94500
MC
12040 kfree(buf);
12041
535a490e 12042 buf = tg3_vpd_readblock(tp, &len);
c3e94500
MC
12043 if (!buf)
12044 return -ENOMEM;
d4894f3e 12045
535a490e 12046 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
d4894f3e
MC
12047 if (i > 0) {
12048 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12049 if (j < 0)
12050 goto out;
12051
535a490e 12052 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
d4894f3e
MC
12053 goto out;
12054
12055 i += PCI_VPD_LRDT_TAG_SIZE;
12056 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12057 PCI_VPD_RO_KEYWORD_CHKSUM);
12058 if (j > 0) {
12059 u8 csum8 = 0;
12060
12061 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12062
12063 for (i = 0; i <= j; i++)
12064 csum8 += ((u8 *)buf)[i];
12065
12066 if (csum8)
12067 goto out;
12068 }
12069 }
12070
566f86ad
MC
12071 err = 0;
12072
12073out:
12074 kfree(buf);
12075 return err;
12076}
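/* The legacy-format checks above, in summary: a CRC of the first 0x10
 * bytes must match the word stored at offset 0x10, and a CRC of the
 * 0x88-byte manufacturing block at 0x74 must match the word at 0xfc;
 * the VPD block is then verified by summing every byte from the start
 * of the VPD image through the RV keyword's checksum byte, which must
 * yield 0.
 */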
12077
ca43007a
MC
12078#define TG3_SERDES_TIMEOUT_SEC 2
12079#define TG3_COPPER_TIMEOUT_SEC 6
12080
12081static int tg3_test_link(struct tg3 *tp)
12082{
12083 int i, max;
12084
12085 if (!netif_running(tp->dev))
12086 return -ENODEV;
12087
f07e9af3 12088 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
ca43007a
MC
12089 max = TG3_SERDES_TIMEOUT_SEC;
12090 else
12091 max = TG3_COPPER_TIMEOUT_SEC;
12092
12093 for (i = 0; i < max; i++) {
f4a46d1f 12094 if (tp->link_up)
ca43007a
MC
12095 return 0;
12096
12097 if (msleep_interruptible(1000))
12098 break;
12099 }
12100
12101 return -EIO;
12102}
12103
a71116d1 12104/* Only test the commonly used registers */
30ca3e37 12105static int tg3_test_registers(struct tg3 *tp)
a71116d1 12106{
b16250e3 12107 int i, is_5705, is_5750;
a71116d1
MC
12108 u32 offset, read_mask, write_mask, val, save_val, read_val;
12109 static struct {
12110 u16 offset;
12111 u16 flags;
12112#define TG3_FL_5705 0x1
12113#define TG3_FL_NOT_5705 0x2
12114#define TG3_FL_NOT_5788 0x4
b16250e3 12115#define TG3_FL_NOT_5750 0x8
a71116d1
MC
12116 u32 read_mask;
12117 u32 write_mask;
12118 } reg_tbl[] = {
12119 /* MAC Control Registers */
12120 { MAC_MODE, TG3_FL_NOT_5705,
12121 0x00000000, 0x00ef6f8c },
12122 { MAC_MODE, TG3_FL_5705,
12123 0x00000000, 0x01ef6b8c },
12124 { MAC_STATUS, TG3_FL_NOT_5705,
12125 0x03800107, 0x00000000 },
12126 { MAC_STATUS, TG3_FL_5705,
12127 0x03800100, 0x00000000 },
12128 { MAC_ADDR_0_HIGH, 0x0000,
12129 0x00000000, 0x0000ffff },
12130 { MAC_ADDR_0_LOW, 0x0000,
c6cdf436 12131 0x00000000, 0xffffffff },
a71116d1
MC
12132 { MAC_RX_MTU_SIZE, 0x0000,
12133 0x00000000, 0x0000ffff },
12134 { MAC_TX_MODE, 0x0000,
12135 0x00000000, 0x00000070 },
12136 { MAC_TX_LENGTHS, 0x0000,
12137 0x00000000, 0x00003fff },
12138 { MAC_RX_MODE, TG3_FL_NOT_5705,
12139 0x00000000, 0x000007fc },
12140 { MAC_RX_MODE, TG3_FL_5705,
12141 0x00000000, 0x000007dc },
12142 { MAC_HASH_REG_0, 0x0000,
12143 0x00000000, 0xffffffff },
12144 { MAC_HASH_REG_1, 0x0000,
12145 0x00000000, 0xffffffff },
12146 { MAC_HASH_REG_2, 0x0000,
12147 0x00000000, 0xffffffff },
12148 { MAC_HASH_REG_3, 0x0000,
12149 0x00000000, 0xffffffff },
12150
12151 /* Receive Data and Receive BD Initiator Control Registers. */
12152 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12153 0x00000000, 0xffffffff },
12154 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12155 0x00000000, 0xffffffff },
12156 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12157 0x00000000, 0x00000003 },
12158 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12159 0x00000000, 0xffffffff },
12160 { RCVDBDI_STD_BD+0, 0x0000,
12161 0x00000000, 0xffffffff },
12162 { RCVDBDI_STD_BD+4, 0x0000,
12163 0x00000000, 0xffffffff },
12164 { RCVDBDI_STD_BD+8, 0x0000,
12165 0x00000000, 0xffff0002 },
12166 { RCVDBDI_STD_BD+0xc, 0x0000,
12167 0x00000000, 0xffffffff },
6aa20a22 12168
a71116d1
MC
12169 /* Receive BD Initiator Control Registers. */
12170 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12171 0x00000000, 0xffffffff },
12172 { RCVBDI_STD_THRESH, TG3_FL_5705,
12173 0x00000000, 0x000003ff },
12174 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12175 0x00000000, 0xffffffff },
6aa20a22 12176
a71116d1
MC
12177 /* Host Coalescing Control Registers. */
12178 { HOSTCC_MODE, TG3_FL_NOT_5705,
12179 0x00000000, 0x00000004 },
12180 { HOSTCC_MODE, TG3_FL_5705,
12181 0x00000000, 0x000000f6 },
12182 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12183 0x00000000, 0xffffffff },
12184 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12185 0x00000000, 0x000003ff },
12186 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12187 0x00000000, 0xffffffff },
12188 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12189 0x00000000, 0x000003ff },
12190 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12191 0x00000000, 0xffffffff },
12192 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12193 0x00000000, 0x000000ff },
12194 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12195 0x00000000, 0xffffffff },
12196 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12197 0x00000000, 0x000000ff },
12198 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12199 0x00000000, 0xffffffff },
12200 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12201 0x00000000, 0xffffffff },
12202 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12203 0x00000000, 0xffffffff },
12204 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12205 0x00000000, 0x000000ff },
12206 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12207 0x00000000, 0xffffffff },
12208 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12209 0x00000000, 0x000000ff },
12210 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12211 0x00000000, 0xffffffff },
12212 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12213 0x00000000, 0xffffffff },
12214 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12215 0x00000000, 0xffffffff },
12216 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12217 0x00000000, 0xffffffff },
12218 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12219 0x00000000, 0xffffffff },
12220 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12221 0xffffffff, 0x00000000 },
12222 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12223 0xffffffff, 0x00000000 },
12224
12225 /* Buffer Manager Control Registers. */
b16250e3 12226 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
a71116d1 12227 0x00000000, 0x007fff80 },
b16250e3 12228 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
a71116d1
MC
12229 0x00000000, 0x007fffff },
12230 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12231 0x00000000, 0x0000003f },
12232 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12233 0x00000000, 0x000001ff },
12234 { BUFMGR_MB_HIGH_WATER, 0x0000,
12235 0x00000000, 0x000001ff },
12236 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12237 0xffffffff, 0x00000000 },
12238 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12239 0xffffffff, 0x00000000 },
6aa20a22 12240
a71116d1
MC
12241 /* Mailbox Registers */
12242 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12243 0x00000000, 0x000001ff },
12244 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12245 0x00000000, 0x000001ff },
12246 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12247 0x00000000, 0x000007ff },
12248 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12249 0x00000000, 0x000001ff },
12250
12251 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12252 };
12253
b16250e3 12254 is_5705 = is_5750 = 0;
63c3a66f 12255 if (tg3_flag(tp, 5705_PLUS)) {
a71116d1 12256 is_5705 = 1;
63c3a66f 12257 if (tg3_flag(tp, 5750_PLUS))
b16250e3
MC
12258 is_5750 = 1;
12259 }
a71116d1
MC
12260
12261 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12262 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12263 continue;
12264
12265 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12266 continue;
12267
63c3a66f 12268 if (tg3_flag(tp, IS_5788) &&
a71116d1
MC
12269 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12270 continue;
12271
b16250e3
MC
12272 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12273 continue;
12274
a71116d1
MC
12275 offset = (u32) reg_tbl[i].offset;
12276 read_mask = reg_tbl[i].read_mask;
12277 write_mask = reg_tbl[i].write_mask;
12278
12279 /* Save the original register content */
12280 save_val = tr32(offset);
12281
12282 /* Determine the read-only value. */
12283 read_val = save_val & read_mask;
12284
12285 /* Write zero to the register, then make sure the read-only bits
12286 * are not changed and the read/write bits are all zeros.
12287 */
12288 tw32(offset, 0);
12289
12290 val = tr32(offset);
12291
12292 /* Test the read-only and read/write bits. */
12293 if (((val & read_mask) != read_val) || (val & write_mask))
12294 goto out;
12295
12296 /* Write ones to all the bits defined by RdMask and WrMask, then
12297 * make sure the read-only bits are not changed and the
12298 * read/write bits are all ones.
12299 */
12300 tw32(offset, read_mask | write_mask);
12301
12302 val = tr32(offset);
12303
12304 /* Test the read-only bits. */
12305 if ((val & read_mask) != read_val)
12306 goto out;
12307
12308 /* Test the read/write bits. */
12309 if ((val & write_mask) != write_mask)
12310 goto out;
12311
12312 tw32(offset, save_val);
12313 }
12314
12315 return 0;
12316
12317out:
9f88f29f 12318 if (netif_msg_hw(tp))
2445e461
MC
12319 netdev_err(tp->dev,
12320 "Register test failed at offset %x\n", offset);
a71116d1
MC
12321 tw32(offset, save_val);
12322 return -EIO;
12323}
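/* Worked example of the mask scheme above, using the 5705-class
 * MAC_MODE entry (read_mask = 0x00000000, write_mask = 0x01ef6b8c):
 * no bits are expected to be read-only, so after writing 0 every
 * write_mask bit must read back 0, and after writing
 * read_mask | write_mask (= 0x01ef6b8c) they must all read back 1.
 */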
12324
7942e1db
MC
12325static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12326{
f71e1309 12327 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7942e1db
MC
12328 int i;
12329 u32 j;
12330
e9edda69 12331 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
7942e1db
MC
12332 for (j = 0; j < len; j += 4) {
12333 u32 val;
12334
12335 tg3_write_mem(tp, offset + j, test_pattern[i]);
12336 tg3_read_mem(tp, offset + j, &val);
12337 if (val != test_pattern[i])
12338 return -EIO;
12339 }
12340 }
12341 return 0;
12342}
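/* Typical invocation from the table walk in tg3_test_memory() below:
 * the first 5705 entry { 0x00000100, 0x0000c } makes the loops above
 * write and verify each of the three patterns at offsets 0x100, 0x104
 * and 0x108.
 */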
12343
12344static int tg3_test_memory(struct tg3 *tp)
12345{
12346 static struct mem_entry {
12347 u32 offset;
12348 u32 len;
12349 } mem_tbl_570x[] = {
38690194 12350 { 0x00000000, 0x00b50},
7942e1db
MC
12351 { 0x00002000, 0x1c000},
12352 { 0xffffffff, 0x00000}
12353 }, mem_tbl_5705[] = {
12354 { 0x00000100, 0x0000c},
12355 { 0x00000200, 0x00008},
7942e1db
MC
12356 { 0x00004000, 0x00800},
12357 { 0x00006000, 0x01000},
12358 { 0x00008000, 0x02000},
12359 { 0x00010000, 0x0e000},
12360 { 0xffffffff, 0x00000}
79f4d13a
MC
12361 }, mem_tbl_5755[] = {
12362 { 0x00000200, 0x00008},
12363 { 0x00004000, 0x00800},
12364 { 0x00006000, 0x00800},
12365 { 0x00008000, 0x02000},
12366 { 0x00010000, 0x0c000},
12367 { 0xffffffff, 0x00000}
b16250e3
MC
12368 }, mem_tbl_5906[] = {
12369 { 0x00000200, 0x00008},
12370 { 0x00004000, 0x00400},
12371 { 0x00006000, 0x00400},
12372 { 0x00008000, 0x01000},
12373 { 0x00010000, 0x01000},
12374 { 0xffffffff, 0x00000}
8b5a6c42
MC
12375 }, mem_tbl_5717[] = {
12376 { 0x00000200, 0x00008},
12377 { 0x00010000, 0x0a000},
12378 { 0x00020000, 0x13c00},
12379 { 0xffffffff, 0x00000}
12380 }, mem_tbl_57765[] = {
12381 { 0x00000200, 0x00008},
12382 { 0x00004000, 0x00800},
12383 { 0x00006000, 0x09800},
12384 { 0x00010000, 0x0a000},
12385 { 0xffffffff, 0x00000}
7942e1db
MC
12386 };
12387 struct mem_entry *mem_tbl;
12388 int err = 0;
12389 int i;
12390
63c3a66f 12391 if (tg3_flag(tp, 5717_PLUS))
8b5a6c42 12392 mem_tbl = mem_tbl_5717;
12393 else if (tg3_flag(tp, 57765_CLASS) ||
12394 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
8b5a6c42 12395 mem_tbl = mem_tbl_57765;
63c3a66f 12396 else if (tg3_flag(tp, 5755_PLUS))
12397 mem_tbl = mem_tbl_5755;
12398 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12399 mem_tbl = mem_tbl_5906;
63c3a66f 12400 else if (tg3_flag(tp, 5705_PLUS))
12401 mem_tbl = mem_tbl_5705;
12402 else
12403 mem_tbl = mem_tbl_570x;
12404
12405 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12406 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12407 if (err)
12408 break;
12409 }
6aa20a22 12410
12411 return err;
12412}
12413
12414#define TG3_TSO_MSS 500
12415
12416#define TG3_TSO_IP_HDR_LEN 20
12417#define TG3_TSO_TCP_HDR_LEN 20
12418#define TG3_TSO_TCP_OPT_LEN 12
12419
12420static const u8 tg3_tso_header[] = {
12421 0x08, 0x00,
12422 0x45, 0x00, 0x00, 0x00,
12423 0x00, 0x00, 0x40, 0x00,
12424 0x40, 0x06, 0x00, 0x00,
12425 0x0a, 0x00, 0x00, 0x01,
12426 0x0a, 0x00, 0x00, 0x02,
12427 0x0d, 0x00, 0xe0, 0x00,
12428 0x00, 0x00, 0x01, 0x00,
12429 0x00, 0x00, 0x02, 0x00,
12430 0x80, 0x10, 0x10, 0x00,
12431 0x14, 0x09, 0x00, 0x00,
12432 0x01, 0x01, 0x08, 0x0a,
12433 0x11, 0x11, 0x11, 0x11,
12434 0x11, 0x11, 0x11, 0x11,
12435};
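/* Editor's decode of the template above (a best-effort reading, not from
 * the original sources): bytes 0-1 are the EtherType (0x0800, IPv4); the
 * next 20 bytes are an IPv4 header (IHL 5, DF set, TTL 0x40, protocol
 * 6/TCP, 10.0.0.1 to 10.0.0.2) whose total-length field is patched at run
 * time; the final 32 bytes are a TCP header with data offset 8, i.e. 20
 * fixed bytes plus the 12-byte NOP/NOP/timestamp option block, matching
 * the TG3_TSO_*_LEN constants above.
 */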
9f40dead 12436
28a45957 12437static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
c76949a6 12438{
5e5a7f37 12439 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
bb158d69 12440 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
84b67b27 12441 u32 budget;
12442 struct sk_buff *skb;
12443 u8 *tx_data, *rx_data;
12444 dma_addr_t map;
12445 int num_pkts, tx_len, rx_len, i, err;
12446 struct tg3_rx_buffer_desc *desc;
898a56f8 12447 struct tg3_napi *tnapi, *rnapi;
8fea32b9 12448 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
c76949a6 12449
12450 tnapi = &tp->napi[0];
12451 rnapi = &tp->napi[0];
0c1d0e2b 12452 if (tp->irq_cnt > 1) {
63c3a66f 12453 if (tg3_flag(tp, ENABLE_RSS))
1da85aa3 12454 rnapi = &tp->napi[1];
63c3a66f 12455 if (tg3_flag(tp, ENABLE_TSS))
c8873405 12456 tnapi = &tp->napi[1];
0c1d0e2b 12457 }
fd2ce37f 12458 coal_now = tnapi->coal_now | rnapi->coal_now;
898a56f8 12459
12460 err = -EIO;
12461
4852a861 12462 tx_len = pktsz;
a20e9c62 12463 skb = netdev_alloc_skb(tp->dev, tx_len);
12464 if (!skb)
12465 return -ENOMEM;
12466
12467 tx_data = skb_put(skb, tx_len);
12468 memcpy(tx_data, tp->dev->dev_addr, 6);
12469 memset(tx_data + 6, 0x0, 8);
12470
4852a861 12471 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
c76949a6 12472
28a45957 12473 if (tso_loopback) {
12474 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12475
12476 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12477 TG3_TSO_TCP_OPT_LEN;
12478
12479 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12480 sizeof(tg3_tso_header));
12481 mss = TG3_TSO_MSS;
12482
12483 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12484 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12485
12486 /* Set the total length field in the IP header */
12487 iph->tot_len = htons((u16)(mss + hdr_len));
12488
12489 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12490 TXD_FLAG_CPU_POST_DMA);
12491
12492 if (tg3_flag(tp, HW_TSO_1) ||
12493 tg3_flag(tp, HW_TSO_2) ||
12494 tg3_flag(tp, HW_TSO_3)) {
12495 struct tcphdr *th;
12496 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12497 th = (struct tcphdr *)&tx_data[val];
12498 th->check = 0;
12499 } else
12500 base_flags |= TXD_FLAG_TCPUDP_CSUM;
12501
63c3a66f 12502 if (tg3_flag(tp, HW_TSO_3)) {
12503 mss |= (hdr_len & 0xc) << 12;
12504 if (hdr_len & 0x10)
12505 base_flags |= 0x00000010;
12506 base_flags |= (hdr_len & 0x3e0) << 5;
63c3a66f 12507 } else if (tg3_flag(tp, HW_TSO_2))
bb158d69 12508 mss |= hdr_len << 9;
63c3a66f 12509 else if (tg3_flag(tp, HW_TSO_1) ||
12510 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12511 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12512 } else {
12513 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12514 }
12515
12516 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12517 } else {
12518 num_pkts = 1;
12519 data_off = ETH_HLEN;
12520
12521 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12522 tx_len > VLAN_ETH_FRAME_LEN)
12523 base_flags |= TXD_FLAG_JMB_PKT;
12524 }
12525
12526 for (i = data_off; i < tx_len; i++)
12527 tx_data[i] = (u8) (i & 0xff);
12528
12529 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12530 if (pci_dma_mapping_error(tp->pdev, map)) {
12531 dev_kfree_skb(skb);
12532 return -EIO;
12533 }
c76949a6 12534
12535 val = tnapi->tx_prod;
12536 tnapi->tx_buffers[val].skb = skb;
12537 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12538
c76949a6 12539 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
fd2ce37f 12540 rnapi->coal_now);
12541
12542 udelay(10);
12543
898a56f8 12544 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
c76949a6 12545
12546 budget = tg3_tx_avail(tnapi);
12547 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12548 base_flags | TXD_FLAG_END, mss, 0)) {
12549 tnapi->tx_buffers[val].skb = NULL;
12550 dev_kfree_skb(skb);
12551 return -EIO;
12552 }
c76949a6 12553
f3f3f27e 12554 tnapi->tx_prod++;
c76949a6 12555
12556 /* Sync BD data before updating mailbox */
12557 wmb();
12558
12559 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12560 tr32_mailbox(tnapi->prodmbox);
12561
12562 udelay(10);
12563
12564 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
12565 for (i = 0; i < 35; i++) {
c76949a6 12566 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
fd2ce37f 12567 coal_now);
12568
12569 udelay(10);
12570
12571 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12572 rx_idx = rnapi->hw_status->idx[0].rx_producer;
f3f3f27e 12573 if ((tx_idx == tnapi->tx_prod) &&
12574 (rx_idx == (rx_start_idx + num_pkts)))
12575 break;
12576 }
12577
ba1142e4 12578 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12579 dev_kfree_skb(skb);
12580
f3f3f27e 12581 if (tx_idx != tnapi->tx_prod)
12582 goto out;
12583
12584 if (rx_idx != rx_start_idx + num_pkts)
12585 goto out;
12586
12587 val = data_off;
12588 while (rx_idx != rx_start_idx) {
12589 desc = &rnapi->rx_rcb[rx_start_idx++];
12590 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12591 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
c76949a6 12592
12593 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12594 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12595 goto out;
c76949a6 12596
12597 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12598 - ETH_FCS_LEN;
c76949a6 12599
28a45957 12600 if (!tso_loopback) {
12601 if (rx_len != tx_len)
12602 goto out;
4852a861 12603
12604 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12605 if (opaque_key != RXD_OPAQUE_RING_STD)
12606 goto out;
12607 } else {
12608 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12609 goto out;
12610 }
12611 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12612 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
54e0a67f 12613 >> RXD_TCPCSUM_SHIFT != 0xffff) {
4852a861 12614 goto out;
bb158d69 12615 }
4852a861 12616
bb158d69 12617 if (opaque_key == RXD_OPAQUE_RING_STD) {
9205fd9c 12618 rx_data = tpr->rx_std_buffers[desc_idx].data;
12619 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12620 mapping);
12621 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
9205fd9c 12622 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12623 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12624 mapping);
12625 } else
12626 goto out;
c76949a6 12627
12628 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12629 PCI_DMA_FROMDEVICE);
c76949a6 12630
9205fd9c 12631 rx_data += TG3_RX_OFFSET(tp);
bb158d69 12632 for (i = data_off; i < rx_len; i++, val++) {
9205fd9c 12633 if (*(rx_data + i) != (u8) (val & 0xff))
12634 goto out;
12635 }
c76949a6 12636 }
bb158d69 12637
c76949a6 12638 err = 0;
6aa20a22 12639
9205fd9c 12640 /* tg3_free_rings will unmap and free the rx_data */
12641out:
12642 return err;
12643}
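/* Editor's summary of the path above: build a frame whose payload is a
 * running byte counter, DMA-map it, post a single TX descriptor and kick
 * the producer mailbox, then poll the status block until the TX consumer
 * and RX producer indices advance.  The received buffer is located via
 * the opaque ring/index cookie and compared byte-for-byte; a mismatch, a
 * wrong ring, or a flagged receive error fails the test.
 */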
12644
12645#define TG3_STD_LOOPBACK_FAILED 1
12646#define TG3_JMB_LOOPBACK_FAILED 2
bb158d69 12647#define TG3_TSO_LOOPBACK_FAILED 4
12648#define TG3_LOOPBACK_FAILED \
12649 (TG3_STD_LOOPBACK_FAILED | \
12650 TG3_JMB_LOOPBACK_FAILED | \
12651 TG3_TSO_LOOPBACK_FAILED)
00c266b7 12652
941ec90f 12653static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
9f40dead 12654{
28a45957 12655 int err = -EIO;
2215e24c 12656 u32 eee_cap;
12657 u32 jmb_pkt_sz = 9000;
12658
12659 if (tp->dma_limit)
12660 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
9f40dead 12661
12662 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12663 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12664
28a45957 12665 if (!netif_running(tp->dev)) {
12666 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12667 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
941ec90f 12668 if (do_extlpbk)
93df8b8f 12669 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12670 goto done;
12671 }
12672
b9ec6c1b 12673 err = tg3_reset_hw(tp, 1);
ab789046 12674 if (err) {
12675 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12676 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
941ec90f 12677 if (do_extlpbk)
93df8b8f 12678 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12679 goto done;
12680 }
9f40dead 12681
63c3a66f 12682 if (tg3_flag(tp, ENABLE_RSS)) {
12683 int i;
12684
12685 /* Reroute all rx packets to the 1st queue */
12686 for (i = MAC_RSS_INDIR_TBL_0;
12687 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12688 tw32(i, 0x0);
12689 }
12690
12691 /* HW errata - mac loopback fails in some cases on 5780.
12692 * Normal traffic and PHY loopback are not affected by
12693 * the errata. Also, the MAC loopback test is deprecated for
12694 * all newer ASIC revisions.
12695 */
12696 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12697 !tg3_flag(tp, CPMU_PRESENT)) {
12698 tg3_mac_loopback(tp, true);
9936bcf6 12699
28a45957 12700 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
93df8b8f 12701 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12702
12703 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
c441b456 12704 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
93df8b8f 12705 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12706
12707 tg3_mac_loopback(tp, false);
12708 }
4852a861 12709
f07e9af3 12710 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
63c3a66f 12711 !tg3_flag(tp, USE_PHYLIB)) {
12712 int i;
12713
941ec90f 12714 tg3_phy_lpbk_set(tp, 0, false);
12715
12716 /* Wait for link */
12717 for (i = 0; i < 100; i++) {
12718 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12719 break;
12720 mdelay(1);
12721 }
12722
28a45957 12723 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
93df8b8f 12724 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
63c3a66f 12725 if (tg3_flag(tp, TSO_CAPABLE) &&
28a45957 12726 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
93df8b8f 12727 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
63c3a66f 12728 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
c441b456 12729 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
93df8b8f 12730 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
9f40dead 12731
12732 if (do_extlpbk) {
12733 tg3_phy_lpbk_set(tp, 0, true);
12734
12735 /* All link indications report up, but the hardware
12736 * isn't really ready for about 20 msec. Double it
12737 * to be sure.
12738 */
12739 mdelay(40);
12740
12741 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12742 data[TG3_EXT_LOOPB_TEST] |=
12743 TG3_STD_LOOPBACK_FAILED;
12744 if (tg3_flag(tp, TSO_CAPABLE) &&
12745 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12746 data[TG3_EXT_LOOPB_TEST] |=
12747 TG3_TSO_LOOPBACK_FAILED;
941ec90f 12748 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
c441b456 12749 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12750 data[TG3_EXT_LOOPB_TEST] |=
12751 TG3_JMB_LOOPBACK_FAILED;
12752 }
12753
12754 /* Re-enable gphy autopowerdown. */
12755 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12756 tg3_phy_toggle_apd(tp, true);
12757 }
6833c043 12758
12759 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
12760 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
28a45957 12761
12762done:
12763 tp->phy_flags |= eee_cap;
12764
12765 return err;
12766}
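/* Editor's note: the routine above runs up to a 3x3 matrix of cases
 * (MAC, internal-PHY and optional external loopback, each with a
 * standard frame, a TSO frame where supported, and a jumbo frame where
 * the jumbo ring is enabled) and folds the per-case failure bits into
 * the three data[] words checked at the end.
 */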
12767
12768static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12769 u64 *data)
12770{
566f86ad 12771 struct tg3 *tp = netdev_priv(dev);
941ec90f 12772 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
566f86ad 12773
12774 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12775 tg3_power_up(tp)) {
12776 etest->flags |= ETH_TEST_FL_FAILED;
12777 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12778 return;
12779 }
bc1c7567 12780
12781 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12782
12783 if (tg3_test_nvram(tp) != 0) {
12784 etest->flags |= ETH_TEST_FL_FAILED;
93df8b8f 12785 data[TG3_NVRAM_TEST] = 1;
566f86ad 12786 }
941ec90f 12787 if (!doextlpbk && tg3_test_link(tp)) {
ca43007a 12788 etest->flags |= ETH_TEST_FL_FAILED;
93df8b8f 12789 data[TG3_LINK_TEST] = 1;
ca43007a 12790 }
a71116d1 12791 if (etest->flags & ETH_TEST_FL_OFFLINE) {
b02fd9e3 12792 int err, err2 = 0, irq_sync = 0;
12793
12794 if (netif_running(dev)) {
b02fd9e3 12795 tg3_phy_stop(tp);
a71116d1 12796 tg3_netif_stop(tp);
12797 irq_sync = 1;
12798 }
a71116d1 12799
bbe832c0 12800 tg3_full_lock(tp, irq_sync);
a71116d1 12801 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
ec41c7df 12802 err = tg3_nvram_lock(tp);
a71116d1 12803 tg3_halt_cpu(tp, RX_CPU_BASE);
63c3a66f 12804 if (!tg3_flag(tp, 5705_PLUS))
a71116d1 12805 tg3_halt_cpu(tp, TX_CPU_BASE);
12806 if (!err)
12807 tg3_nvram_unlock(tp);
a71116d1 12808
f07e9af3 12809 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12810 tg3_phy_reset(tp);
12811
12812 if (tg3_test_registers(tp) != 0) {
12813 etest->flags |= ETH_TEST_FL_FAILED;
93df8b8f 12814 data[TG3_REGISTER_TEST] = 1;
a71116d1 12815 }
28a45957 12816
12817 if (tg3_test_memory(tp) != 0) {
12818 etest->flags |= ETH_TEST_FL_FAILED;
93df8b8f 12819 data[TG3_MEMORY_TEST] = 1;
7942e1db 12820 }
28a45957 12821
12822 if (doextlpbk)
12823 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12824
93df8b8f 12825 if (tg3_test_loopback(tp, data, doextlpbk))
c76949a6 12826 etest->flags |= ETH_TEST_FL_FAILED;
a71116d1 12827
12828 tg3_full_unlock(tp);
12829
12830 if (tg3_test_interrupt(tp) != 0) {
12831 etest->flags |= ETH_TEST_FL_FAILED;
93df8b8f 12832 data[TG3_INTERRUPT_TEST] = 1;
d4bc3927 12833 }
12834
12835 tg3_full_lock(tp, 0);
d4bc3927 12836
12837 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12838 if (netif_running(dev)) {
63c3a66f 12839 tg3_flag_set(tp, INIT_COMPLETE);
12840 err2 = tg3_restart_hw(tp, 1);
12841 if (!err2)
b9ec6c1b 12842 tg3_netif_start(tp);
a71116d1 12843 }
12844
12845 tg3_full_unlock(tp);
12846
12847 if (irq_sync && !err2)
12848 tg3_phy_start(tp);
a71116d1 12849 }
80096068 12850 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
c866b7ea 12851 tg3_power_down(tp);
bc1c7567 12852
12853}
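/* Editor's note on ordering above: NVRAM and link are checked first; the
 * offline phase then halts the chip under the full lock and runs the
 * register, memory and loopback tests; the interrupt test runs with the
 * lock dropped; and the hardware is restarted (and the PHY re-attached)
 * only if the interface was running when the test began.
 */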
12854
12855static int tg3_hwtstamp_ioctl(struct net_device *dev,
12856 struct ifreq *ifr, int cmd)
12857{
12858 struct tg3 *tp = netdev_priv(dev);
12859 struct hwtstamp_config stmpconf;
12860
12861 if (!tg3_flag(tp, PTP_CAPABLE))
12862 return -EINVAL;
12863
12864 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
12865 return -EFAULT;
12866
12867 if (stmpconf.flags)
12868 return -EINVAL;
12869
12870 switch (stmpconf.tx_type) {
12871 case HWTSTAMP_TX_ON:
12872 tg3_flag_set(tp, TX_TSTAMP_EN);
12873 break;
12874 case HWTSTAMP_TX_OFF:
12875 tg3_flag_clear(tp, TX_TSTAMP_EN);
12876 break;
12877 default:
12878 return -ERANGE;
12879 }
12880
12881 switch (stmpconf.rx_filter) {
12882 case HWTSTAMP_FILTER_NONE:
12883 tp->rxptpctl = 0;
12884 break;
12885 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
12886 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12887 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
12888 break;
12889 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
12890 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12891 TG3_RX_PTP_CTL_SYNC_EVNT;
12892 break;
12893 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
12894 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12895 TG3_RX_PTP_CTL_DELAY_REQ;
12896 break;
12897 case HWTSTAMP_FILTER_PTP_V2_EVENT:
12898 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12899 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12900 break;
12901 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
12902 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12903 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12904 break;
12905 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
12906 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12907 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12908 break;
12909 case HWTSTAMP_FILTER_PTP_V2_SYNC:
12910 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12911 TG3_RX_PTP_CTL_SYNC_EVNT;
12912 break;
12913 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
12914 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12915 TG3_RX_PTP_CTL_SYNC_EVNT;
12916 break;
12917 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
12918 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12919 TG3_RX_PTP_CTL_SYNC_EVNT;
12920 break;
12921 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
12922 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12923 TG3_RX_PTP_CTL_DELAY_REQ;
12924 break;
12925 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
12926 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12927 TG3_RX_PTP_CTL_DELAY_REQ;
12928 break;
12929 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
12930 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12931 TG3_RX_PTP_CTL_DELAY_REQ;
12932 break;
12933 default:
12934 return -ERANGE;
12935 }
12936
12937 if (netif_running(dev) && tp->rxptpctl)
12938 tw32(TG3_RX_PTP_CTL,
12939 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
12940
12941 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
12942 -EFAULT : 0;
12943}
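/* Editor's sketch: the handler above is reached through the standard
 * SIOCSHWTSTAMP interface.  A minimal userspace caller, kept under #if 0
 * because it is not driver code ("eth0" is a placeholder name):
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int enable_ptp_timestamps(int fd)	/* fd: any AF_INET socket */
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;			/* stamp transmits */
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;	/* PTPv2 events */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* On return the kernel may have updated cfg to what it granted. */
	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}
#endif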
12944
12945static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12946{
12947 struct mii_ioctl_data *data = if_mii(ifr);
12948 struct tg3 *tp = netdev_priv(dev);
12949 int err;
12950
63c3a66f 12951 if (tg3_flag(tp, USE_PHYLIB)) {
3f0e3ad7 12952 struct phy_device *phydev;
f07e9af3 12953 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
b02fd9e3 12954 return -EAGAIN;
3f0e3ad7 12955 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
28b04113 12956 return phy_mii_ioctl(phydev, ifr, cmd);
12957 }
12958
33f401ae 12959 switch (cmd) {
1da177e4 12960 case SIOCGMIIPHY:
882e9793 12961 data->phy_id = tp->phy_addr;
12962
12963 /* fallthru */
12964 case SIOCGMIIREG: {
12965 u32 mii_regval;
12966
f07e9af3 12967 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12968 break; /* We have no PHY */
12969
34eea5ac 12970 if (!netif_running(dev))
12971 return -EAGAIN;
12972
f47c11ee 12973 spin_lock_bh(&tp->lock);
1da177e4 12974 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
f47c11ee 12975 spin_unlock_bh(&tp->lock);
12976
12977 data->val_out = mii_regval;
12978
12979 return err;
12980 }
12981
12982 case SIOCSMIIREG:
f07e9af3 12983 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12984 break; /* We have no PHY */
12985
34eea5ac 12986 if (!netif_running(dev))
12987 return -EAGAIN;
12988
f47c11ee 12989 spin_lock_bh(&tp->lock);
1da177e4 12990 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
f47c11ee 12991 spin_unlock_bh(&tp->lock);
12992
12993 return err;
12994
12995 case SIOCSHWTSTAMP:
12996 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
12997
12998 default:
12999 /* do nothing */
13000 break;
13001 }
13002 return -EOPNOTSUPP;
13003}
13004
13005static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13006{
13007 struct tg3 *tp = netdev_priv(dev);
13008
13009 memcpy(ec, &tp->coal, sizeof(*ec));
13010 return 0;
13011}
13012
13013static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13014{
13015 struct tg3 *tp = netdev_priv(dev);
13016 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13017 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13018
63c3a66f 13019 if (!tg3_flag(tp, 5705_PLUS)) {
13020 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13021 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13022 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13023 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13024 }
13025
13026 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13027 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13028 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13029 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13030 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13031 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13032 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13033 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13034 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13035 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13036 return -EINVAL;
13037
13038 /* No rx interrupts will be generated if both are zero */
13039 if ((ec->rx_coalesce_usecs == 0) &&
13040 (ec->rx_max_coalesced_frames == 0))
13041 return -EINVAL;
13042
13043 /* No tx interrupts will be generated if both are zero */
13044 if ((ec->tx_coalesce_usecs == 0) &&
13045 (ec->tx_max_coalesced_frames == 0))
13046 return -EINVAL;
13047
13048 /* Only copy relevant parameters, ignore all others. */
13049 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13050 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13051 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13052 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13053 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13054 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13055 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13056 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13057 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13058
13059 if (netif_running(dev)) {
13060 tg3_full_lock(tp, 0);
13061 __tg3_set_coalesce(tp, &tp->coal);
13062 tg3_full_unlock(tp);
13063 }
13064 return 0;
13065}
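/* Editor's note: the bounds above mirror what the chip can encode.  From
 * userspace the same fields map onto ethtool -C, e.g. (illustrative
 * values only):
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5 tx-usecs 72 tx-frames 53
 *
 * Per direction, at least one of the usecs/frames pair must stay nonzero,
 * otherwise no interrupt would ever fire and the request is rejected
 * with -EINVAL.
 */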
13066
7282d491 13067static const struct ethtool_ops tg3_ethtool_ops = {
13068 .get_settings = tg3_get_settings,
13069 .set_settings = tg3_set_settings,
13070 .get_drvinfo = tg3_get_drvinfo,
13071 .get_regs_len = tg3_get_regs_len,
13072 .get_regs = tg3_get_regs,
13073 .get_wol = tg3_get_wol,
13074 .set_wol = tg3_set_wol,
13075 .get_msglevel = tg3_get_msglevel,
13076 .set_msglevel = tg3_set_msglevel,
13077 .nway_reset = tg3_nway_reset,
13078 .get_link = ethtool_op_get_link,
13079 .get_eeprom_len = tg3_get_eeprom_len,
13080 .get_eeprom = tg3_get_eeprom,
13081 .set_eeprom = tg3_set_eeprom,
13082 .get_ringparam = tg3_get_ringparam,
13083 .set_ringparam = tg3_set_ringparam,
13084 .get_pauseparam = tg3_get_pauseparam,
13085 .set_pauseparam = tg3_set_pauseparam,
4cafd3f5 13086 .self_test = tg3_self_test,
1da177e4 13087 .get_strings = tg3_get_strings,
81b8709c 13088 .set_phys_id = tg3_set_phys_id,
1da177e4 13089 .get_ethtool_stats = tg3_get_ethtool_stats,
15f9850d 13090 .get_coalesce = tg3_get_coalesce,
d244c892 13091 .set_coalesce = tg3_set_coalesce,
b9f2c044 13092 .get_sset_count = tg3_get_sset_count,
13093 .get_rxnfc = tg3_get_rxnfc,
13094 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13095 .get_rxfh_indir = tg3_get_rxfh_indir,
13096 .set_rxfh_indir = tg3_set_rxfh_indir,
13097 .get_channels = tg3_get_channels,
13098 .set_channels = tg3_set_channels,
7d41e49a 13099 .get_ts_info = tg3_get_ts_info,
13100};
13101
13102static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13103 struct rtnl_link_stats64 *stats)
13104{
13105 struct tg3 *tp = netdev_priv(dev);
13106
13107 spin_lock_bh(&tp->lock);
13108 if (!tp->hw_stats) {
13109 spin_unlock_bh(&tp->lock);
b4017c53 13110 return &tp->net_stats_prev;
0f566b20 13111 }
b4017c53 13112
13113 tg3_get_nstats(tp, stats);
13114 spin_unlock_bh(&tp->lock);
13115
13116 return stats;
13117}
13118
13119static void tg3_set_rx_mode(struct net_device *dev)
13120{
13121 struct tg3 *tp = netdev_priv(dev);
13122
13123 if (!netif_running(dev))
13124 return;
13125
13126 tg3_full_lock(tp, 0);
13127 __tg3_set_rx_mode(dev);
13128 tg3_full_unlock(tp);
13129}
13130
13131static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13132 int new_mtu)
13133{
13134 dev->mtu = new_mtu;
13135
13136 if (new_mtu > ETH_DATA_LEN) {
13137 if (tg3_flag(tp, 5780_CLASS)) {
13138 netdev_update_features(dev);
13139 tg3_flag_clear(tp, TSO_CAPABLE);
13140 } else {
13141 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13142 }
13143 } else {
13144 if (tg3_flag(tp, 5780_CLASS)) {
13145 tg3_flag_set(tp, TSO_CAPABLE);
13146 netdev_update_features(dev);
13147 }
13148 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13149 }
13150}
13151
13152static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13153{
13154 struct tg3 *tp = netdev_priv(dev);
2fae5e36 13155 int err, reset_phy = 0;
13156
13157 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13158 return -EINVAL;
13159
13160 if (!netif_running(dev)) {
13161 /* We'll just catch it later when the
13162 * device is brought up.
13163 */
13164 tg3_set_mtu(dev, tp, new_mtu);
13165 return 0;
13166 }
13167
13168 tg3_phy_stop(tp);
13169
13170 tg3_netif_stop(tp);
13171
13172 tg3_full_lock(tp, 1);
13173
13174 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13175
13176 tg3_set_mtu(dev, tp, new_mtu);
13177
13178 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13179 * breaks all requests to 256 bytes.
13180 */
13181 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
13182 reset_phy = 1;
13183
13184 err = tg3_restart_hw(tp, reset_phy);
13185
13186 if (!err)
13187 tg3_netif_start(tp);
13188
13189 tg3_full_unlock(tp);
13190
13191 if (!err)
13192 tg3_phy_start(tp);
13193
13194 return err;
13195}
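/* Editor's note: on a running interface an MTU change is a full
 * stop-the-world cycle above (halt the chip, flip the jumbo/TSO flags for
 * the new size, restart), so e.g. "ip link set eth0 mtu 9000" briefly
 * interrupts traffic.  On a downed interface only the flags and dev->mtu
 * are updated and the hardware is reprogrammed at open time.
 */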
13196
13197static const struct net_device_ops tg3_netdev_ops = {
13198 .ndo_open = tg3_open,
13199 .ndo_stop = tg3_close,
13200 .ndo_start_xmit = tg3_start_xmit,
13201 .ndo_get_stats64 = tg3_get_stats64,
13202 .ndo_validate_addr = eth_validate_addr,
13203 .ndo_set_rx_mode = tg3_set_rx_mode,
13204 .ndo_set_mac_address = tg3_set_mac_addr,
13205 .ndo_do_ioctl = tg3_ioctl,
13206 .ndo_tx_timeout = tg3_tx_timeout,
13207 .ndo_change_mtu = tg3_change_mtu,
13208 .ndo_fix_features = tg3_fix_features,
13209 .ndo_set_features = tg3_set_features,
13210#ifdef CONFIG_NET_POLL_CONTROLLER
13211 .ndo_poll_controller = tg3_poll_controller,
13212#endif
13213};
13214
229b1ad1 13215static void tg3_get_eeprom_size(struct tg3 *tp)
1da177e4 13216{
1b27777a 13217 u32 cursize, val, magic;
13218
13219 tp->nvram_size = EEPROM_CHIP_SIZE;
13220
e4f34110 13221 if (tg3_nvram_read(tp, 0, &magic) != 0)
13222 return;
13223
13224 if ((magic != TG3_EEPROM_MAGIC) &&
13225 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13226 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13227 return;
13228
13229 /*
13230 * Size the chip by reading offsets at increasing powers of two.
13231 * When we encounter our validation signature, we know the addressing
13232 * has wrapped around, and thus have our chip size.
13233 */
1b27777a 13234 cursize = 0x10;
13235
13236 while (cursize < tp->nvram_size) {
e4f34110 13237 if (tg3_nvram_read(tp, cursize, &val) != 0)
13238 return;
13239
1820180b 13240 if (val == magic)
13241 break;
13242
13243 cursize <<= 1;
13244 }
13245
13246 tp->nvram_size = cursize;
13247}
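/* Editor's sketch of the wrap-around sizing used above, assuming a
 * hypothetical nvram_read32() helper and kept under #if 0: once the probe
 * offset reaches the part's real size the address wraps, the signature
 * from offset 0 reads back, and that offset is the device size.
 */
#if 0
static u32 probe_size_by_wraparound(u32 magic, u32 limit)
{
	u32 off;

	for (off = 0x10; off < limit; off <<= 1)
		if (nvram_read32(off) == magic)
			break;	/* address wrapped: off == device size */
	return off;
}
#endif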
6aa20a22 13248
229b1ad1 13249static void tg3_get_nvram_size(struct tg3 *tp)
13250{
13251 u32 val;
13252
63c3a66f 13253 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13254 return;
13255
13256 /* Selfboot format */
1820180b 13257 if (val != TG3_EEPROM_MAGIC) {
13258 tg3_get_eeprom_size(tp);
13259 return;
13260 }
13261
6d348f2c 13262 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
1da177e4 13263 if (val != 0) {
13264 /* This is confusing. We want to operate on the
13265 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13266 * call will read from NVRAM and byteswap the data
13267 * according to the byteswapping settings for all
13268 * other register accesses. This ensures the data we
13269 * want will always reside in the lower 16-bits.
13270 * However, the data in NVRAM is in LE format, which
13271 * means the data from the NVRAM read will always be
13272 * opposite the endianness of the CPU. The 16-bit
13273 * byteswap then brings the data to CPU endianness.
13274 */
13275 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13276 return;
13277 }
13278 }
fd1122a2 13279 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13280}
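/* Editor's worked example for the swab16() above (illustrative sizes
 * only): a 512 KB part would store 512 = 0x0200 little-endian at offset
 * 0xf2; because the register path delivers the bytes opposite to CPU
 * order, the low 16 bits of the read come back as 0x0002, and swab16()
 * restores 512, which the * 1024 then scales to bytes.
 */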
13281
229b1ad1 13282static void tg3_get_nvram_info(struct tg3 *tp)
13283{
13284 u32 nvcfg1;
13285
13286 nvcfg1 = tr32(NVRAM_CFG1);
13287 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
63c3a66f 13288 tg3_flag_set(tp, FLASH);
8590a603 13289 } else {
13290 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13291 tw32(NVRAM_CFG1, nvcfg1);
13292 }
13293
6ff6f81d 13294 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
63c3a66f 13295 tg3_flag(tp, 5780_CLASS)) {
1da177e4 13296 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13297 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13298 tp->nvram_jedecnum = JEDEC_ATMEL;
13299 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
63c3a66f 13300 tg3_flag_set(tp, NVRAM_BUFFERED);
13301 break;
13302 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13303 tp->nvram_jedecnum = JEDEC_ATMEL;
13304 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13305 break;
13306 case FLASH_VENDOR_ATMEL_EEPROM:
13307 tp->nvram_jedecnum = JEDEC_ATMEL;
13308 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
63c3a66f 13309 tg3_flag_set(tp, NVRAM_BUFFERED);
13310 break;
13311 case FLASH_VENDOR_ST:
13312 tp->nvram_jedecnum = JEDEC_ST;
13313 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
63c3a66f 13314 tg3_flag_set(tp, NVRAM_BUFFERED);
13315 break;
13316 case FLASH_VENDOR_SAIFUN:
13317 tp->nvram_jedecnum = JEDEC_SAIFUN;
13318 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13319 break;
13320 case FLASH_VENDOR_SST_SMALL:
13321 case FLASH_VENDOR_SST_LARGE:
13322 tp->nvram_jedecnum = JEDEC_SST;
13323 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13324 break;
1da177e4 13325 }
8590a603 13326 } else {
13327 tp->nvram_jedecnum = JEDEC_ATMEL;
13328 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
63c3a66f 13329 tg3_flag_set(tp, NVRAM_BUFFERED);
13330 }
13331}
13332
229b1ad1 13333static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13334{
13335 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13336 case FLASH_5752PAGE_SIZE_256:
13337 tp->nvram_pagesize = 256;
13338 break;
13339 case FLASH_5752PAGE_SIZE_512:
13340 tp->nvram_pagesize = 512;
13341 break;
13342 case FLASH_5752PAGE_SIZE_1K:
13343 tp->nvram_pagesize = 1024;
13344 break;
13345 case FLASH_5752PAGE_SIZE_2K:
13346 tp->nvram_pagesize = 2048;
13347 break;
13348 case FLASH_5752PAGE_SIZE_4K:
13349 tp->nvram_pagesize = 4096;
13350 break;
13351 case FLASH_5752PAGE_SIZE_264:
13352 tp->nvram_pagesize = 264;
13353 break;
13354 case FLASH_5752PAGE_SIZE_528:
13355 tp->nvram_pagesize = 528;
13356 break;
13357 }
13358}
13359
229b1ad1 13360static void tg3_get_5752_nvram_info(struct tg3 *tp)
13361{
13362 u32 nvcfg1;
13363
13364 nvcfg1 = tr32(NVRAM_CFG1);
13365
13366 /* NVRAM protection for TPM */
13367 if (nvcfg1 & (1 << 27))
63c3a66f 13368 tg3_flag_set(tp, PROTECTED_NVRAM);
e6af301b 13369
361b4ac2 13370 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13371 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13372 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13373 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f 13374 tg3_flag_set(tp, NVRAM_BUFFERED);
13375 break;
13376 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13377 tp->nvram_jedecnum = JEDEC_ATMEL;
13378 tg3_flag_set(tp, NVRAM_BUFFERED);
13379 tg3_flag_set(tp, FLASH);
13380 break;
13381 case FLASH_5752VENDOR_ST_M45PE10:
13382 case FLASH_5752VENDOR_ST_M45PE20:
13383 case FLASH_5752VENDOR_ST_M45PE40:
13384 tp->nvram_jedecnum = JEDEC_ST;
13385 tg3_flag_set(tp, NVRAM_BUFFERED);
13386 tg3_flag_set(tp, FLASH);
8590a603 13387 break;
13388 }
13389
63c3a66f 13390 if (tg3_flag(tp, FLASH)) {
a1b950d5 13391 tg3_nvram_get_pagesize(tp, nvcfg1);
8590a603 13392 } else {
13393 /* For eeprom, set pagesize to maximum eeprom size */
13394 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13395
13396 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13397 tw32(NVRAM_CFG1, nvcfg1);
13398 }
13399}
13400
229b1ad1 13401static void tg3_get_5755_nvram_info(struct tg3 *tp)
d3c7b886 13402{
989a9d23 13403 u32 nvcfg1, protect = 0;
13404
13405 nvcfg1 = tr32(NVRAM_CFG1);
13406
13407 /* NVRAM protection for TPM */
989a9d23 13408 if (nvcfg1 & (1 << 27)) {
63c3a66f 13409 tg3_flag_set(tp, PROTECTED_NVRAM);
13410 protect = 1;
13411 }
d3c7b886 13412
13413 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13414 switch (nvcfg1) {
13415 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13416 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13417 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13418 case FLASH_5755VENDOR_ATMEL_FLASH_5:
13419 tp->nvram_jedecnum = JEDEC_ATMEL;
13420 tg3_flag_set(tp, NVRAM_BUFFERED);
13421 tg3_flag_set(tp, FLASH);
13422 tp->nvram_pagesize = 264;
13423 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13424 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13425 tp->nvram_size = (protect ? 0x3e200 :
13426 TG3_NVRAM_SIZE_512KB);
13427 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13428 tp->nvram_size = (protect ? 0x1f200 :
13429 TG3_NVRAM_SIZE_256KB);
13430 else
13431 tp->nvram_size = (protect ? 0x1f200 :
13432 TG3_NVRAM_SIZE_128KB);
13433 break;
13434 case FLASH_5752VENDOR_ST_M45PE10:
13435 case FLASH_5752VENDOR_ST_M45PE20:
13436 case FLASH_5752VENDOR_ST_M45PE40:
13437 tp->nvram_jedecnum = JEDEC_ST;
13438 tg3_flag_set(tp, NVRAM_BUFFERED);
13439 tg3_flag_set(tp, FLASH);
13440 tp->nvram_pagesize = 256;
13441 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13442 tp->nvram_size = (protect ?
13443 TG3_NVRAM_SIZE_64KB :
13444 TG3_NVRAM_SIZE_128KB);
13445 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13446 tp->nvram_size = (protect ?
13447 TG3_NVRAM_SIZE_64KB :
13448 TG3_NVRAM_SIZE_256KB);
13449 else
13450 tp->nvram_size = (protect ?
13451 TG3_NVRAM_SIZE_128KB :
13452 TG3_NVRAM_SIZE_512KB);
13453 break;
13454 }
13455}
13456
229b1ad1 13457static void tg3_get_5787_nvram_info(struct tg3 *tp)
13458{
13459 u32 nvcfg1;
13460
13461 nvcfg1 = tr32(NVRAM_CFG1);
13462
13463 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13464 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13465 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13466 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13467 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13468 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f 13469 tg3_flag_set(tp, NVRAM_BUFFERED);
8590a603 13470 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
1b27777a 13471
13472 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13473 tw32(NVRAM_CFG1, nvcfg1);
13474 break;
13475 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13476 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13477 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13478 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13479 tp->nvram_jedecnum = JEDEC_ATMEL;
13480 tg3_flag_set(tp, NVRAM_BUFFERED);
13481 tg3_flag_set(tp, FLASH);
13482 tp->nvram_pagesize = 264;
13483 break;
13484 case FLASH_5752VENDOR_ST_M45PE10:
13485 case FLASH_5752VENDOR_ST_M45PE20:
13486 case FLASH_5752VENDOR_ST_M45PE40:
13487 tp->nvram_jedecnum = JEDEC_ST;
13488 tg3_flag_set(tp, NVRAM_BUFFERED);
13489 tg3_flag_set(tp, FLASH);
13490 tp->nvram_pagesize = 256;
13491 break;
13492 }
13493}
13494
229b1ad1 13495static void tg3_get_5761_nvram_info(struct tg3 *tp)
13496{
13497 u32 nvcfg1, protect = 0;
13498
13499 nvcfg1 = tr32(NVRAM_CFG1);
13500
13501 /* NVRAM protection for TPM */
13502 if (nvcfg1 & (1 << 27)) {
63c3a66f 13503 tg3_flag_set(tp, PROTECTED_NVRAM);
13504 protect = 1;
13505 }
13506
13507 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13508 switch (nvcfg1) {
13509 case FLASH_5761VENDOR_ATMEL_ADB021D:
13510 case FLASH_5761VENDOR_ATMEL_ADB041D:
13511 case FLASH_5761VENDOR_ATMEL_ADB081D:
13512 case FLASH_5761VENDOR_ATMEL_ADB161D:
13513 case FLASH_5761VENDOR_ATMEL_MDB021D:
13514 case FLASH_5761VENDOR_ATMEL_MDB041D:
13515 case FLASH_5761VENDOR_ATMEL_MDB081D:
13516 case FLASH_5761VENDOR_ATMEL_MDB161D:
13517 tp->nvram_jedecnum = JEDEC_ATMEL;
13518 tg3_flag_set(tp, NVRAM_BUFFERED);
13519 tg3_flag_set(tp, FLASH);
13520 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13521 tp->nvram_pagesize = 256;
13522 break;
13523 case FLASH_5761VENDOR_ST_A_M45PE20:
13524 case FLASH_5761VENDOR_ST_A_M45PE40:
13525 case FLASH_5761VENDOR_ST_A_M45PE80:
13526 case FLASH_5761VENDOR_ST_A_M45PE16:
13527 case FLASH_5761VENDOR_ST_M_M45PE20:
13528 case FLASH_5761VENDOR_ST_M_M45PE40:
13529 case FLASH_5761VENDOR_ST_M_M45PE80:
13530 case FLASH_5761VENDOR_ST_M_M45PE16:
13531 tp->nvram_jedecnum = JEDEC_ST;
13532 tg3_flag_set(tp, NVRAM_BUFFERED);
13533 tg3_flag_set(tp, FLASH);
13534 tp->nvram_pagesize = 256;
13535 break;
13536 }
13537
13538 if (protect) {
13539 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13540 } else {
13541 switch (nvcfg1) {
13542 case FLASH_5761VENDOR_ATMEL_ADB161D:
13543 case FLASH_5761VENDOR_ATMEL_MDB161D:
13544 case FLASH_5761VENDOR_ST_A_M45PE16:
13545 case FLASH_5761VENDOR_ST_M_M45PE16:
13546 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13547 break;
13548 case FLASH_5761VENDOR_ATMEL_ADB081D:
13549 case FLASH_5761VENDOR_ATMEL_MDB081D:
13550 case FLASH_5761VENDOR_ST_A_M45PE80:
13551 case FLASH_5761VENDOR_ST_M_M45PE80:
13552 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13553 break;
13554 case FLASH_5761VENDOR_ATMEL_ADB041D:
13555 case FLASH_5761VENDOR_ATMEL_MDB041D:
13556 case FLASH_5761VENDOR_ST_A_M45PE40:
13557 case FLASH_5761VENDOR_ST_M_M45PE40:
13558 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13559 break;
13560 case FLASH_5761VENDOR_ATMEL_ADB021D:
13561 case FLASH_5761VENDOR_ATMEL_MDB021D:
13562 case FLASH_5761VENDOR_ST_A_M45PE20:
13563 case FLASH_5761VENDOR_ST_M_M45PE20:
13564 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13565 break;
13566 }
13567 }
13568}
13569
229b1ad1 13570static void tg3_get_5906_nvram_info(struct tg3 *tp)
13571{
13572 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f 13573 tg3_flag_set(tp, NVRAM_BUFFERED);
13574 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13575}
13576
229b1ad1 13577static void tg3_get_57780_nvram_info(struct tg3 *tp)
13578{
13579 u32 nvcfg1;
13580
13581 nvcfg1 = tr32(NVRAM_CFG1);
13582
13583 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13584 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13585 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13586 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f 13587 tg3_flag_set(tp, NVRAM_BUFFERED);
13588 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13589
13590 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13591 tw32(NVRAM_CFG1, nvcfg1);
13592 return;
13593 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13594 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13595 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13596 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13597 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13598 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13599 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13600 tp->nvram_jedecnum = JEDEC_ATMEL;
13601 tg3_flag_set(tp, NVRAM_BUFFERED);
13602 tg3_flag_set(tp, FLASH);
13603
13604 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13605 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13606 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13607 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13608 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13609 break;
13610 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13611 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13612 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13613 break;
13614 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13615 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13616 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13617 break;
13618 }
13619 break;
13620 case FLASH_5752VENDOR_ST_M45PE10:
13621 case FLASH_5752VENDOR_ST_M45PE20:
13622 case FLASH_5752VENDOR_ST_M45PE40:
13623 tp->nvram_jedecnum = JEDEC_ST;
13624 tg3_flag_set(tp, NVRAM_BUFFERED);
13625 tg3_flag_set(tp, FLASH);
13626
13627 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13628 case FLASH_5752VENDOR_ST_M45PE10:
13629 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13630 break;
13631 case FLASH_5752VENDOR_ST_M45PE20:
13632 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13633 break;
13634 case FLASH_5752VENDOR_ST_M45PE40:
13635 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13636 break;
13637 }
13638 break;
13639 default:
63c3a66f 13640 tg3_flag_set(tp, NO_NVRAM);
13641 return;
13642 }
13643
13644 tg3_nvram_get_pagesize(tp, nvcfg1);
13645 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
63c3a66f 13646 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13647}
13648
13649
229b1ad1 13650static void tg3_get_5717_nvram_info(struct tg3 *tp)
13651{
13652 u32 nvcfg1;
13653
13654 nvcfg1 = tr32(NVRAM_CFG1);
13655
13656 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13657 case FLASH_5717VENDOR_ATMEL_EEPROM:
13658 case FLASH_5717VENDOR_MICRO_EEPROM:
13659 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f 13660 tg3_flag_set(tp, NVRAM_BUFFERED);
13661 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13662
13663 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13664 tw32(NVRAM_CFG1, nvcfg1);
13665 return;
13666 case FLASH_5717VENDOR_ATMEL_MDB011D:
13667 case FLASH_5717VENDOR_ATMEL_ADB011B:
13668 case FLASH_5717VENDOR_ATMEL_ADB011D:
13669 case FLASH_5717VENDOR_ATMEL_MDB021D:
13670 case FLASH_5717VENDOR_ATMEL_ADB021B:
13671 case FLASH_5717VENDOR_ATMEL_ADB021D:
13672 case FLASH_5717VENDOR_ATMEL_45USPT:
13673 tp->nvram_jedecnum = JEDEC_ATMEL;
13674 tg3_flag_set(tp, NVRAM_BUFFERED);
13675 tg3_flag_set(tp, FLASH);
13676
13677 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13678 case FLASH_5717VENDOR_ATMEL_MDB021D:
13679 /* Detect size with tg3_nvram_get_size() */
13680 break;
13681 case FLASH_5717VENDOR_ATMEL_ADB021B:
13682 case FLASH_5717VENDOR_ATMEL_ADB021D:
13683 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13684 break;
13685 default:
13686 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13687 break;
13688 }
321d32a0 13689 break;
13690 case FLASH_5717VENDOR_ST_M_M25PE10:
13691 case FLASH_5717VENDOR_ST_A_M25PE10:
13692 case FLASH_5717VENDOR_ST_M_M45PE10:
13693 case FLASH_5717VENDOR_ST_A_M45PE10:
13694 case FLASH_5717VENDOR_ST_M_M25PE20:
13695 case FLASH_5717VENDOR_ST_A_M25PE20:
13696 case FLASH_5717VENDOR_ST_M_M45PE20:
13697 case FLASH_5717VENDOR_ST_A_M45PE20:
13698 case FLASH_5717VENDOR_ST_25USPT:
13699 case FLASH_5717VENDOR_ST_45USPT:
13700 tp->nvram_jedecnum = JEDEC_ST;
13701 tg3_flag_set(tp, NVRAM_BUFFERED);
13702 tg3_flag_set(tp, FLASH);
13703
13704 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13705 case FLASH_5717VENDOR_ST_M_M25PE20:
a1b950d5 13706 case FLASH_5717VENDOR_ST_M_M45PE20:
13707 /* Detect size with tg3_nvram_get_size() */
13708 break;
13709 case FLASH_5717VENDOR_ST_A_M25PE20:
13710 case FLASH_5717VENDOR_ST_A_M45PE20:
13711 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13712 break;
13713 default:
13714 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13715 break;
13716 }
321d32a0 13717 break;
a1b950d5 13718 default:
63c3a66f 13719 tg3_flag_set(tp, NO_NVRAM);
a1b950d5 13720 return;
321d32a0 13721 }
13722
13723 tg3_nvram_get_pagesize(tp, nvcfg1);
13724 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
63c3a66f 13725 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13726}
13727
229b1ad1 13728static void tg3_get_5720_nvram_info(struct tg3 *tp)
13729{
13730 u32 nvcfg1, nvmpinstrp;
13731
13732 nvcfg1 = tr32(NVRAM_CFG1);
13733 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13734
13735 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
13736 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
13737 tg3_flag_set(tp, NO_NVRAM);
13738 return;
13739 }
13740
13741 switch (nvmpinstrp) {
13742 case FLASH_5762_EEPROM_HD:
13743 nvmpinstrp = FLASH_5720_EEPROM_HD;
17e1a42f 13744 break;
13745 case FLASH_5762_EEPROM_LD:
13746 nvmpinstrp = FLASH_5720_EEPROM_LD;
17e1a42f 13747 break;
13748 }
13749 }
13750
13751 switch (nvmpinstrp) {
13752 case FLASH_5720_EEPROM_HD:
13753 case FLASH_5720_EEPROM_LD:
13754 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f 13755 tg3_flag_set(tp, NVRAM_BUFFERED);
13756
13757 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13758 tw32(NVRAM_CFG1, nvcfg1);
13759 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13760 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13761 else
13762 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13763 return;
13764 case FLASH_5720VENDOR_M_ATMEL_DB011D:
13765 case FLASH_5720VENDOR_A_ATMEL_DB011B:
13766 case FLASH_5720VENDOR_A_ATMEL_DB011D:
13767 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13768 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13769 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13770 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13771 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13772 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13773 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13774 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13775 case FLASH_5720VENDOR_ATMEL_45USPT:
13776 tp->nvram_jedecnum = JEDEC_ATMEL;
13777 tg3_flag_set(tp, NVRAM_BUFFERED);
13778 tg3_flag_set(tp, FLASH);
13779
13780 switch (nvmpinstrp) {
13781 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13782 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13783 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13784 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13785 break;
13786 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13787 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13788 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13789 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13790 break;
13791 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13792 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13793 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13794 break;
13795 default:
13796 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13797 break;
13798 }
13799 break;
13800 case FLASH_5720VENDOR_M_ST_M25PE10:
13801 case FLASH_5720VENDOR_M_ST_M45PE10:
13802 case FLASH_5720VENDOR_A_ST_M25PE10:
13803 case FLASH_5720VENDOR_A_ST_M45PE10:
13804 case FLASH_5720VENDOR_M_ST_M25PE20:
13805 case FLASH_5720VENDOR_M_ST_M45PE20:
13806 case FLASH_5720VENDOR_A_ST_M25PE20:
13807 case FLASH_5720VENDOR_A_ST_M45PE20:
13808 case FLASH_5720VENDOR_M_ST_M25PE40:
13809 case FLASH_5720VENDOR_M_ST_M45PE40:
13810 case FLASH_5720VENDOR_A_ST_M25PE40:
13811 case FLASH_5720VENDOR_A_ST_M45PE40:
13812 case FLASH_5720VENDOR_M_ST_M25PE80:
13813 case FLASH_5720VENDOR_M_ST_M45PE80:
13814 case FLASH_5720VENDOR_A_ST_M25PE80:
13815 case FLASH_5720VENDOR_A_ST_M45PE80:
13816 case FLASH_5720VENDOR_ST_25USPT:
13817 case FLASH_5720VENDOR_ST_45USPT:
13818 tp->nvram_jedecnum = JEDEC_ST;
13819 tg3_flag_set(tp, NVRAM_BUFFERED);
13820 tg3_flag_set(tp, FLASH);
13821
13822 switch (nvmpinstrp) {
13823 case FLASH_5720VENDOR_M_ST_M25PE20:
13824 case FLASH_5720VENDOR_M_ST_M45PE20:
13825 case FLASH_5720VENDOR_A_ST_M25PE20:
13826 case FLASH_5720VENDOR_A_ST_M45PE20:
13827 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13828 break;
13829 case FLASH_5720VENDOR_M_ST_M25PE40:
13830 case FLASH_5720VENDOR_M_ST_M45PE40:
13831 case FLASH_5720VENDOR_A_ST_M25PE40:
13832 case FLASH_5720VENDOR_A_ST_M45PE40:
13833 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13834 break;
13835 case FLASH_5720VENDOR_M_ST_M25PE80:
13836 case FLASH_5720VENDOR_M_ST_M45PE80:
13837 case FLASH_5720VENDOR_A_ST_M25PE80:
13838 case FLASH_5720VENDOR_A_ST_M45PE80:
13839 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13840 break;
13841 default:
13842 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13843 break;
13844 }
13845 break;
13846 default:
63c3a66f 13847 tg3_flag_set(tp, NO_NVRAM);
13848 return;
13849 }
13850
13851 tg3_nvram_get_pagesize(tp, nvcfg1);
13852 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
63c3a66f 13853 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13854
13855 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
13856 u32 val;
13857
13858 if (tg3_nvram_read(tp, 0, &val))
13859 return;
13860
13861 if (val != TG3_EEPROM_MAGIC &&
13862 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
13863 tg3_flag_set(tp, NO_NVRAM);
13864 }
13865}
13866
1da177e4 13867/* Chips other than 5700/5701 use the NVRAM for fetching info. */
229b1ad1 13868static void tg3_nvram_init(struct tg3 *tp)
1da177e4 13869{
13870 tw32_f(GRC_EEPROM_ADDR,
13871 (EEPROM_ADDR_FSM_RESET |
13872 (EEPROM_DEFAULT_CLOCK_PERIOD <<
13873 EEPROM_ADDR_CLKPERD_SHIFT)));
13874
9d57f01c 13875 msleep(1);
13876
13877 /* Enable seeprom accesses. */
13878 tw32_f(GRC_LOCAL_CTRL,
13879 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13880 udelay(100);
13881
13882 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13883 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
63c3a66f 13884 tg3_flag_set(tp, NVRAM);
1da177e4 13885
ec41c7df 13886 if (tg3_nvram_lock(tp)) {
13887 netdev_warn(tp->dev,
13888 "Cannot get nvram lock, %s failed\n",
05dbe005 13889 __func__);
13890 return;
13891 }
e6af301b 13892 tg3_enable_nvram_access(tp);
1da177e4 13893
13894 tp->nvram_size = 0;
13895
13896 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13897 tg3_get_5752_nvram_info(tp);
13898 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13899 tg3_get_5755_nvram_info(tp);
d30cdd28 13900 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13901 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13902 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1b27777a 13903 tg3_get_5787_nvram_info(tp);
13904 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13905 tg3_get_5761_nvram_info(tp);
13906 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13907 tg3_get_5906_nvram_info(tp);
b703df6f 13908 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
55086ad9 13909 tg3_flag(tp, 57765_CLASS))
321d32a0 13910 tg3_get_57780_nvram_info(tp);
13911 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13912 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
a1b950d5 13913 tg3_get_5717_nvram_info(tp);
13914 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13915 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9b91b5f1 13916 tg3_get_5720_nvram_info(tp);
13917 else
13918 tg3_get_nvram_info(tp);
13919
13920 if (tp->nvram_size == 0)
13921 tg3_get_nvram_size(tp);
1da177e4 13922
e6af301b 13923 tg3_disable_nvram_access(tp);
381291b7 13924 tg3_nvram_unlock(tp);
13925
13926 } else {
13927 tg3_flag_clear(tp, NVRAM);
13928 tg3_flag_clear(tp, NVRAM_BUFFERED);
13929
13930 tg3_get_eeprom_size(tp);
13931 }
13932}
13933
13934struct subsys_tbl_ent {
13935 u16 subsys_vendor, subsys_devid;
13936 u32 phy_id;
13937};
13938
229b1ad1 13939static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
1da177e4 13940 /* Broadcom boards. */
24daf2b0 13941 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13942 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
24daf2b0 13943 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13944 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
24daf2b0 13945 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13946 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13947 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13948 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13949 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13950 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
24daf2b0 13951 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13952 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13953 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13954 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13955 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13956 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
24daf2b0 13957 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13958 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
24daf2b0 13959 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13960 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
24daf2b0 13961 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13962 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13963
13964 /* 3com boards. */
24daf2b0 13965 { TG3PCI_SUBVENDOR_ID_3COM,
79eb6904 13966 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
24daf2b0 13967 { TG3PCI_SUBVENDOR_ID_3COM,
79eb6904 13968 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13969 { TG3PCI_SUBVENDOR_ID_3COM,
13970 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13971 { TG3PCI_SUBVENDOR_ID_3COM,
79eb6904 13972 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
24daf2b0 13973 { TG3PCI_SUBVENDOR_ID_3COM,
79eb6904 13974 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13975
13976 /* DELL boards. */
24daf2b0 13977 { TG3PCI_SUBVENDOR_ID_DELL,
79eb6904 13978 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
24daf2b0 13979 { TG3PCI_SUBVENDOR_ID_DELL,
79eb6904 13980 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
24daf2b0 13981 { TG3PCI_SUBVENDOR_ID_DELL,
79eb6904 13982 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
24daf2b0 13983 { TG3PCI_SUBVENDOR_ID_DELL,
79eb6904 13984 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13985
13986 /* Compaq boards. */
24daf2b0 13987 { TG3PCI_SUBVENDOR_ID_COMPAQ,
79eb6904 13988 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
24daf2b0 13989 { TG3PCI_SUBVENDOR_ID_COMPAQ,
79eb6904 13990 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13991 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13992 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13993 { TG3PCI_SUBVENDOR_ID_COMPAQ,
79eb6904 13994 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
24daf2b0 13995 { TG3PCI_SUBVENDOR_ID_COMPAQ,
79eb6904 13996 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13997
13998 /* IBM boards. */
13999 { TG3PCI_SUBVENDOR_ID_IBM,
14000 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14001};
14002
229b1ad1 14003static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14004{
14005 int i;
14006
14007 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14008 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14009 tp->pdev->subsystem_vendor) &&
14010 (subsys_id_to_phy_id[i].subsys_devid ==
14011 tp->pdev->subsystem_device))
14012 return &subsys_id_to_phy_id[i];
14013 }
14014 return NULL;
14015}
14016
229b1ad1 14017static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
1da177e4 14018{
1da177e4 14019 u32 val;
f49639e6 14020
79eb6904 14021 tp->phy_id = TG3_PHY_ID_INVALID;
14022 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14023
a85feb8c 14024 /* Assume an onboard device and WOL capable by default. */
14025 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14026 tg3_flag_set(tp, WOL_CAP);
72b845e0 14027
b5d3772c 14028 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9d26e213 14029 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
63c3a66f
JP
14030 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14031 tg3_flag_set(tp, IS_NIC);
9d26e213 14032 }
0527ba35
MC
14033 val = tr32(VCPU_CFGSHDW);
14034 if (val & VCPU_CFGSHDW_ASPM_DBNC)
63c3a66f 14035 tg3_flag_set(tp, ASPM_WORKAROUND);
0527ba35 14036 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
6fdbab9d 14037 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
63c3a66f 14038 tg3_flag_set(tp, WOL_ENABLE);
6fdbab9d
RW
14039 device_set_wakeup_enable(&tp->pdev->dev, true);
14040 }
05ac4cb7 14041 goto done;
b5d3772c
MC
14042 }
14043
1da177e4
LT
14044 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14045 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14046 u32 nic_cfg, led_cfg;
a9daf367 14047 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
7d0c41ef 14048 int eeprom_phy_serdes = 0;
1da177e4
LT
14049
14050 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14051 tp->nic_sram_data_cfg = nic_cfg;
14052
14053 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14054 ver >>= NIC_SRAM_DATA_VER_SHIFT;
6ff6f81d
MC
14055 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14056 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14057 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
1da177e4
LT
14058 (ver > 0) && (ver < 0x100))
14059 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14060
a9daf367
MC
14061 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
14062 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14063
1da177e4
LT
14064 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14065 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14066 eeprom_phy_serdes = 1;
14067
14068 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14069 if (nic_phy_id != 0) {
14070 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14071 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14072
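		/* The SRAM word holds the two MII PHY ID registers
		 * (PHYSID1 apparently in the upper half, PHYSID2 in the
		 * lower); repack them into the driver's internal phy_id
		 * layout, the same merge tg3_phy_probe() performs on
		 * MII_PHYSID1/MII_PHYSID2.
		 */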
14073 eeprom_phy_id = (id1 >> 16) << 10;
14074 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14075 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14076 } else
14077 eeprom_phy_id = 0;
14078
7d0c41ef 14079 tp->phy_id = eeprom_phy_id;
747e8f8b 14080 if (eeprom_phy_serdes) {
63c3a66f 14081 if (!tg3_flag(tp, 5705_PLUS))
f07e9af3 14082 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
a50d0796 14083 else
f07e9af3 14084 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
747e8f8b 14085 }
7d0c41ef 14086
63c3a66f 14087 if (tg3_flag(tp, 5750_PLUS))
1da177e4
LT
14088 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14089 SHASTA_EXT_LED_MODE_MASK);
cbf46853 14090 else
1da177e4
LT
14091 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14092
14093 switch (led_cfg) {
14094 default:
14095 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14096 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14097 break;
14098
14099 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14100 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14101 break;
14102
14103 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14104 tp->led_ctrl = LED_CTRL_MODE_MAC;
9ba27794
MC
14105
 14106	/* Default to PHY_1_MODE if 0 (MAC_MODE) is
 14107	 * read, as happens with some older 5700/5701 bootcode.
14108 */
14109 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14110 ASIC_REV_5700 ||
14111 GET_ASIC_REV(tp->pci_chip_rev_id) ==
14112 ASIC_REV_5701)
14113 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14114
1da177e4
LT
14115 break;
14116
14117 case SHASTA_EXT_LED_SHARED:
14118 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14119 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
14120 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
14121 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14122 LED_CTRL_MODE_PHY_2);
14123 break;
14124
14125 case SHASTA_EXT_LED_MAC:
14126 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14127 break;
14128
14129 case SHASTA_EXT_LED_COMBO:
14130 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14131 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
14132 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14133 LED_CTRL_MODE_PHY_2);
14134 break;
14135
855e1111 14136 }
1da177e4
LT
14137
14138 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14139 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
14140 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14141 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14142
b2a5c19c
MC
14143 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
14144 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
5f60891b 14145
9d26e213 14146 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
63c3a66f 14147 tg3_flag_set(tp, EEPROM_WRITE_PROT);
9d26e213
MC
14148 if ((tp->pdev->subsystem_vendor ==
14149 PCI_VENDOR_ID_ARIMA) &&
14150 (tp->pdev->subsystem_device == 0x205a ||
14151 tp->pdev->subsystem_device == 0x2063))
63c3a66f 14152 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
9d26e213 14153 } else {
63c3a66f
JP
14154 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14155 tg3_flag_set(tp, IS_NIC);
9d26e213 14156 }
1da177e4
LT
14157
14158 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
63c3a66f
JP
14159 tg3_flag_set(tp, ENABLE_ASF);
14160 if (tg3_flag(tp, 5750_PLUS))
14161 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
1da177e4 14162 }
b2b98d4a
MC
14163
14164 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
63c3a66f
JP
14165 tg3_flag(tp, 5750_PLUS))
14166 tg3_flag_set(tp, ENABLE_APE);
b2b98d4a 14167
f07e9af3 14168 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
a85feb8c 14169 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
63c3a66f 14170 tg3_flag_clear(tp, WOL_CAP);
1da177e4 14171
63c3a66f 14172 if (tg3_flag(tp, WOL_CAP) &&
6fdbab9d 14173 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
63c3a66f 14174 tg3_flag_set(tp, WOL_ENABLE);
6fdbab9d
RW
14175 device_set_wakeup_enable(&tp->pdev->dev, true);
14176 }
0527ba35 14177
1da177e4 14178 if (cfg2 & (1 << 17))
f07e9af3 14179 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
1da177e4
LT
14180
 14181	/* SerDes signal pre-emphasis in register 0x590 is set by */
 14182	/* the bootcode if bit 18 is set. */
14183 if (cfg2 & (1 << 18))
f07e9af3 14184 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
8ed5d97e 14185
63c3a66f
JP
14186 if ((tg3_flag(tp, 57765_PLUS) ||
14187 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14188 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
6833c043 14189 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
f07e9af3 14190 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
6833c043 14191
63c3a66f 14192 if (tg3_flag(tp, PCI_EXPRESS) &&
8c69b1e7 14193 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
63c3a66f 14194 !tg3_flag(tp, 57765_PLUS)) {
8ed5d97e
MC
14195 u32 cfg3;
14196
14197 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14198 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
63c3a66f 14199 tg3_flag_set(tp, ASPM_WORKAROUND);
8ed5d97e 14200 }
a9daf367 14201
14417063 14202 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
63c3a66f 14203 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
a9daf367 14204 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
63c3a66f 14205 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
a9daf367 14206 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
63c3a66f 14207 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
1da177e4 14208 }
05ac4cb7 14209done:
63c3a66f 14210 if (tg3_flag(tp, WOL_CAP))
43067ed8 14211 device_set_wakeup_enable(&tp->pdev->dev,
63c3a66f 14212 tg3_flag(tp, WOL_ENABLE));
43067ed8
RW
14213 else
14214 device_set_wakeup_capable(&tp->pdev->dev, false);
7d0c41ef
MC
14215}
14216
c86a8560
MC
14217static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14218{
14219 int i, err;
14220 u32 val2, off = offset * 8;
14221
14222 err = tg3_nvram_lock(tp);
14223 if (err)
14224 return err;
14225
14226 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14227 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14228 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14229 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14230 udelay(10);
14231
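	/* Poll up to ~1 ms (100 x 10 usec) for the OTP read to finish. */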
14232 for (i = 0; i < 100; i++) {
14233 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14234 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14235 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14236 break;
14237 }
14238 udelay(10);
14239 }
14240
14241 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14242
14243 tg3_nvram_unlock(tp);
14244 if (val2 & APE_OTP_STATUS_CMD_DONE)
14245 return 0;
14246
14247 return -EBUSY;
14248}
14249
229b1ad1 14250static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
b2a5c19c
MC
14251{
14252 int i;
14253 u32 val;
14254
14255 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14256 tw32(OTP_CTRL, cmd);
14257
 14258	/* Wait up to 1 ms (100 polls x 10 usec) for the command to execute. */
14259 for (i = 0; i < 100; i++) {
14260 val = tr32(OTP_STATUS);
14261 if (val & OTP_STATUS_CMD_DONE)
14262 break;
14263 udelay(10);
14264 }
14265
14266 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14267}
14268
14269/* Read the gphy configuration from the OTP region of the chip. The gphy
14270 * configuration is a 32-bit value that straddles the alignment boundary.
14271 * We do two 32-bit reads and then shift and merge the results.
14272 */
229b1ad1 14273static u32 tg3_read_otp_phycfg(struct tg3 *tp)
b2a5c19c
MC
14274{
14275 u32 bhalf_otp, thalf_otp;
14276
14277 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14278
14279 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14280 return 0;
14281
14282 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14283
14284 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14285 return 0;
14286
14287 thalf_otp = tr32(OTP_READ_DATA);
14288
14289 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14290
14291 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14292 return 0;
14293
14294 bhalf_otp = tr32(OTP_READ_DATA);
14295
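	/* Illustration with made-up values: thalf_otp = 0x1111abcd and
	 * bhalf_otp = 0x5678ffff merge to 0xabcd5678; the low half of the
	 * first read supplies the high 16 bits, the high half of the
	 * second read the low 16 bits.
	 */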
14296 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14297}
14298
229b1ad1 14299static void tg3_phy_init_link_config(struct tg3 *tp)
e256f8a3 14300{
202ff1c2 14301 u32 adv = ADVERTISED_Autoneg;
e256f8a3
MC
14302
14303 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14304 adv |= ADVERTISED_1000baseT_Half |
14305 ADVERTISED_1000baseT_Full;
14306
14307 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14308 adv |= ADVERTISED_100baseT_Half |
14309 ADVERTISED_100baseT_Full |
14310 ADVERTISED_10baseT_Half |
14311 ADVERTISED_10baseT_Full |
14312 ADVERTISED_TP;
14313 else
14314 adv |= ADVERTISED_FIBRE;
14315
14316 tp->link_config.advertising = adv;
e740522e
MC
14317 tp->link_config.speed = SPEED_UNKNOWN;
14318 tp->link_config.duplex = DUPLEX_UNKNOWN;
e256f8a3 14319 tp->link_config.autoneg = AUTONEG_ENABLE;
e740522e
MC
14320 tp->link_config.active_speed = SPEED_UNKNOWN;
14321 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
34655ad6
MC
14322
14323 tp->old_link = -1;
e256f8a3
MC
14324}
14325
229b1ad1 14326static int tg3_phy_probe(struct tg3 *tp)
7d0c41ef
MC
14327{
14328 u32 hw_phy_id_1, hw_phy_id_2;
14329 u32 hw_phy_id, hw_phy_id_masked;
14330 int err;
1da177e4 14331
e256f8a3 14332 /* flow control autonegotiation is default behavior */
63c3a66f 14333 tg3_flag_set(tp, PAUSE_AUTONEG);
e256f8a3
MC
14334 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14335
8151ad57
MC
14336 if (tg3_flag(tp, ENABLE_APE)) {
14337 switch (tp->pci_fn) {
14338 case 0:
14339 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14340 break;
14341 case 1:
14342 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14343 break;
14344 case 2:
14345 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14346 break;
14347 case 3:
14348 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14349 break;
14350 }
14351 }
14352
63c3a66f 14353 if (tg3_flag(tp, USE_PHYLIB))
b02fd9e3
MC
14354 return tg3_phy_init(tp);
14355
1da177e4 14356 /* Reading the PHY ID register can conflict with ASF
877d0310 14357 * firmware access to the PHY hardware.
1da177e4
LT
14358 */
14359 err = 0;
63c3a66f 14360 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
79eb6904 14361 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
1da177e4
LT
14362 } else {
14363 /* Now read the physical PHY_ID from the chip and verify
14364 * that it is sane. If it doesn't look good, we fall back
 14365	 * to the hard-coded table-based PHY_ID or, failing
 14366	 * that, the value found in the eeprom area.
14367 */
14368 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14369 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14370
14371 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
14372 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14373 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
14374
79eb6904 14375 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
1da177e4
LT
14376 }
14377
79eb6904 14378 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
1da177e4 14379 tp->phy_id = hw_phy_id;
79eb6904 14380 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
f07e9af3 14381 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
da6b2d01 14382 else
f07e9af3 14383 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
1da177e4 14384 } else {
79eb6904 14385 if (tp->phy_id != TG3_PHY_ID_INVALID) {
7d0c41ef
MC
 14386	/* Do nothing; the PHY ID was already set up in
14387 * tg3_get_eeprom_hw_cfg().
14388 */
1da177e4
LT
14389 } else {
14390 struct subsys_tbl_ent *p;
14391
14392 /* No eeprom signature? Try the hardcoded
14393 * subsys device table.
14394 */
24daf2b0 14395 p = tg3_lookup_by_subsys(tp);
1da177e4
LT
14396 if (!p)
14397 return -ENODEV;
14398
14399 tp->phy_id = p->phy_id;
14400 if (!tp->phy_id ||
79eb6904 14401 tp->phy_id == TG3_PHY_ID_BCM8002)
f07e9af3 14402 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
1da177e4
LT
14403 }
14404 }
14405
a6b68dab 14406 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
5baa5e9a
MC
14407 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14408 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
c65a17f4 14409 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762 ||
5baa5e9a 14410 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
a6b68dab
MC
14411 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
14412 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
14413 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
52b02d04
MC
14414 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14415
e256f8a3
MC
14416 tg3_phy_init_link_config(tp);
14417
f07e9af3 14418 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
63c3a66f
JP
14419 !tg3_flag(tp, ENABLE_APE) &&
14420 !tg3_flag(tp, ENABLE_ASF)) {
e2bf73e7 14421 u32 bmsr, dummy;
1da177e4
LT
14422
14423 tg3_readphy(tp, MII_BMSR, &bmsr);
14424 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14425 (bmsr & BMSR_LSTATUS))
14426 goto skip_phy_reset;
6aa20a22 14427
1da177e4
LT
14428 err = tg3_phy_reset(tp);
14429 if (err)
14430 return err;
14431
42b64a45 14432 tg3_phy_set_wirespeed(tp);
1da177e4 14433
e2bf73e7 14434 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
42b64a45
MC
14435 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14436 tp->link_config.flowctrl);
1da177e4
LT
14437
14438 tg3_writephy(tp, MII_BMCR,
14439 BMCR_ANENABLE | BMCR_ANRESTART);
14440 }
1da177e4
LT
14441 }
14442
14443skip_phy_reset:
79eb6904 14444 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1da177e4
LT
14445 err = tg3_init_5401phy_dsp(tp);
14446 if (err)
14447 return err;
1da177e4 14448
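		/* Note: the DSP init is deliberately issued twice;
		 * presumably the 5401 PHY only settles on the second
		 * pass (an assumption, the source gives no reason).
		 */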
1da177e4
LT
14449 err = tg3_init_5401phy_dsp(tp);
14450 }
14451
1da177e4
LT
14452 return err;
14453}
14454
229b1ad1 14455static void tg3_read_vpd(struct tg3 *tp)
1da177e4 14456{
a4a8bb15 14457 u8 *vpd_data;
4181b2c8 14458 unsigned int block_end, rosize, len;
535a490e 14459 u32 vpdlen;
184b8904 14460 int j, i = 0;
a4a8bb15 14461
535a490e 14462 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
a4a8bb15
MC
14463 if (!vpd_data)
14464 goto out_no_vpd;
1da177e4 14465
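	/* The read-only VPD section is a sequence of keyword fields;
	 * below we look for the manufacturer ID (matched against "1028",
	 * Dell's vendor ID), a vendor version string, and the board part
	 * number.
	 */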
535a490e 14466 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
4181b2c8
MC
14467 if (i < 0)
14468 goto out_not_found;
1da177e4 14469
4181b2c8
MC
14470 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14471 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14472 i += PCI_VPD_LRDT_TAG_SIZE;
1da177e4 14473
535a490e 14474 if (block_end > vpdlen)
4181b2c8 14475 goto out_not_found;
af2c6a4a 14476
184b8904
MC
14477 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14478 PCI_VPD_RO_KEYWORD_MFR_ID);
14479 if (j > 0) {
14480 len = pci_vpd_info_field_size(&vpd_data[j]);
14481
14482 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14483 if (j + len > block_end || len != 4 ||
14484 memcmp(&vpd_data[j], "1028", 4))
14485 goto partno;
14486
14487 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14488 PCI_VPD_RO_KEYWORD_VENDOR0);
14489 if (j < 0)
14490 goto partno;
14491
14492 len = pci_vpd_info_field_size(&vpd_data[j]);
14493
14494 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14495 if (j + len > block_end)
14496 goto partno;
14497
14498 memcpy(tp->fw_ver, &vpd_data[j], len);
535a490e 14499 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
184b8904
MC
14500 }
14501
14502partno:
4181b2c8
MC
14503 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14504 PCI_VPD_RO_KEYWORD_PARTNO);
14505 if (i < 0)
14506 goto out_not_found;
af2c6a4a 14507
4181b2c8 14508 len = pci_vpd_info_field_size(&vpd_data[i]);
1da177e4 14509
4181b2c8
MC
14510 i += PCI_VPD_INFO_FLD_HDR_SIZE;
14511 if (len > TG3_BPN_SIZE ||
535a490e 14512 (len + i) > vpdlen)
4181b2c8 14513 goto out_not_found;
1da177e4 14514
4181b2c8 14515 memcpy(tp->board_part_number, &vpd_data[i], len);
1da177e4 14516
1da177e4 14517out_not_found:
a4a8bb15 14518 kfree(vpd_data);
37a949c5 14519 if (tp->board_part_number[0])
a4a8bb15
MC
14520 return;
14521
14522out_no_vpd:
37a949c5 14523 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
79d49695
MC
14524 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14525 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
37a949c5
MC
14526 strcpy(tp->board_part_number, "BCM5717");
14527 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14528 strcpy(tp->board_part_number, "BCM5718");
14529 else
14530 goto nomatch;
14531 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14532 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14533 strcpy(tp->board_part_number, "BCM57780");
14534 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14535 strcpy(tp->board_part_number, "BCM57760");
14536 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14537 strcpy(tp->board_part_number, "BCM57790");
14538 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14539 strcpy(tp->board_part_number, "BCM57788");
14540 else
14541 goto nomatch;
14542 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14543 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14544 strcpy(tp->board_part_number, "BCM57761");
14545 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14546 strcpy(tp->board_part_number, "BCM57765");
14547 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14548 strcpy(tp->board_part_number, "BCM57781");
14549 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14550 strcpy(tp->board_part_number, "BCM57785");
14551 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14552 strcpy(tp->board_part_number, "BCM57791");
14553 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14554 strcpy(tp->board_part_number, "BCM57795");
14555 else
14556 goto nomatch;
55086ad9
MC
14557 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
14558 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14559 strcpy(tp->board_part_number, "BCM57762");
14560 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14561 strcpy(tp->board_part_number, "BCM57766");
14562 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14563 strcpy(tp->board_part_number, "BCM57782");
14564 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14565 strcpy(tp->board_part_number, "BCM57786");
14566 else
14567 goto nomatch;
37a949c5 14568 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
b5d3772c 14569 strcpy(tp->board_part_number, "BCM95906");
37a949c5
MC
14570 } else {
14571nomatch:
b5d3772c 14572 strcpy(tp->board_part_number, "none");
37a949c5 14573 }
1da177e4
LT
14574}
14575
229b1ad1 14576static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
9c8a620e
MC
14577{
14578 u32 val;
14579
e4f34110 14580 if (tg3_nvram_read(tp, offset, &val) ||
9c8a620e 14581 (val & 0xfc000000) != 0x0c000000 ||
e4f34110 14582 tg3_nvram_read(tp, offset + 4, &val) ||
9c8a620e
MC
14583 val != 0)
14584 return 0;
14585
14586 return 1;
14587}
14588
229b1ad1 14589static void tg3_read_bc_ver(struct tg3 *tp)
acd9c119 14590{
ff3a7cb2 14591 u32 val, offset, start, ver_offset;
75f9936e 14592 int i, dst_off;
ff3a7cb2 14593 bool newver = false;
acd9c119
MC
14594
14595 if (tg3_nvram_read(tp, 0xc, &offset) ||
14596 tg3_nvram_read(tp, 0x4, &start))
14597 return;
14598
14599 offset = tg3_nvram_logical_addr(tp, offset);
14600
ff3a7cb2 14601 if (tg3_nvram_read(tp, offset, &val))
acd9c119
MC
14602 return;
14603
ff3a7cb2
MC
14604 if ((val & 0xfc000000) == 0x0c000000) {
14605 if (tg3_nvram_read(tp, offset + 4, &val))
acd9c119
MC
14606 return;
14607
ff3a7cb2
MC
14608 if (val == 0)
14609 newver = true;
14610 }
14611
75f9936e
MC
14612 dst_off = strlen(tp->fw_ver);
14613
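	/* Two bootcode version encodings exist: newer images store a
	 * 16-byte version string at a pointer read from offset + 8,
	 * older ones encode major/minor in the TG3_NVM_PTREV_BCVER word.
	 */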
ff3a7cb2 14614 if (newver) {
75f9936e
MC
14615 if (TG3_VER_SIZE - dst_off < 16 ||
14616 tg3_nvram_read(tp, offset + 8, &ver_offset))
ff3a7cb2
MC
14617 return;
14618
14619 offset = offset + ver_offset - start;
14620 for (i = 0; i < 16; i += 4) {
14621 __be32 v;
14622 if (tg3_nvram_read_be32(tp, offset + i, &v))
14623 return;
14624
75f9936e 14625 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
ff3a7cb2
MC
14626 }
14627 } else {
14628 u32 major, minor;
14629
14630 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14631 return;
14632
14633 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14634 TG3_NVM_BCVER_MAJSFT;
14635 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
75f9936e
MC
14636 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14637 "v%d.%02d", major, minor);
acd9c119
MC
14638 }
14639}
14640
229b1ad1 14641static void tg3_read_hwsb_ver(struct tg3 *tp)
a6f6cb1c
MC
14642{
14643 u32 val, major, minor;
14644
14645 /* Use native endian representation */
14646 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14647 return;
14648
14649 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14650 TG3_NVM_HWSB_CFG1_MAJSFT;
14651 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14652 TG3_NVM_HWSB_CFG1_MINSFT;
14653
14654 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
14655}
14656
229b1ad1 14657static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
dfe00d7d
MC
14658{
14659 u32 offset, major, minor, build;
14660
75f9936e 14661 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
dfe00d7d
MC
14662
14663 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14664 return;
14665
14666 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14667 case TG3_EEPROM_SB_REVISION_0:
14668 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14669 break;
14670 case TG3_EEPROM_SB_REVISION_2:
14671 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14672 break;
14673 case TG3_EEPROM_SB_REVISION_3:
14674 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14675 break;
a4153d40
MC
14676 case TG3_EEPROM_SB_REVISION_4:
14677 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14678 break;
14679 case TG3_EEPROM_SB_REVISION_5:
14680 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14681 break;
bba226ac
MC
14682 case TG3_EEPROM_SB_REVISION_6:
14683 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14684 break;
dfe00d7d
MC
14685 default:
14686 return;
14687 }
14688
e4f34110 14689 if (tg3_nvram_read(tp, offset, &val))
dfe00d7d
MC
14690 return;
14691
14692 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14693 TG3_EEPROM_SB_EDH_BLD_SHFT;
14694 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14695 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14696 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
14697
14698 if (minor > 99 || build > 26)
14699 return;
14700
75f9936e
MC
14701 offset = strlen(tp->fw_ver);
14702 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14703 " v%d.%02d", major, minor);
dfe00d7d
MC
14704
14705 if (build > 0) {
75f9936e
MC
14706 offset = strlen(tp->fw_ver);
14707 if (offset < TG3_VER_SIZE - 1)
14708 tp->fw_ver[offset] = 'a' + build - 1;
dfe00d7d
MC
14709 }
14710}
14711
229b1ad1 14712static void tg3_read_mgmtfw_ver(struct tg3 *tp)
c4e6575c
MC
14713{
14714 u32 val, offset, start;
acd9c119 14715 int i, vlen;
9c8a620e
MC
14716
14717 for (offset = TG3_NVM_DIR_START;
14718 offset < TG3_NVM_DIR_END;
14719 offset += TG3_NVM_DIRENT_SIZE) {
e4f34110 14720 if (tg3_nvram_read(tp, offset, &val))
c4e6575c
MC
14721 return;
14722
9c8a620e
MC
14723 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14724 break;
14725 }
14726
14727 if (offset == TG3_NVM_DIR_END)
14728 return;
14729
63c3a66f 14730 if (!tg3_flag(tp, 5705_PLUS))
9c8a620e 14731 start = 0x08000000;
e4f34110 14732 else if (tg3_nvram_read(tp, offset - 4, &start))
9c8a620e
MC
14733 return;
14734
e4f34110 14735 if (tg3_nvram_read(tp, offset + 4, &offset) ||
9c8a620e 14736 !tg3_fw_img_is_valid(tp, offset) ||
e4f34110 14737 tg3_nvram_read(tp, offset + 8, &val))
9c8a620e
MC
14738 return;
14739
14740 offset += val - start;
14741
acd9c119 14742 vlen = strlen(tp->fw_ver);
9c8a620e 14743
acd9c119
MC
14744 tp->fw_ver[vlen++] = ',';
14745 tp->fw_ver[vlen++] = ' ';
9c8a620e
MC
14746
14747 for (i = 0; i < 4; i++) {
a9dc529d
MC
14748 __be32 v;
14749 if (tg3_nvram_read_be32(tp, offset, &v))
c4e6575c
MC
14750 return;
14751
b9fc7dc5 14752 offset += sizeof(v);
c4e6575c 14753
acd9c119
MC
14754 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14755 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
9c8a620e 14756 break;
c4e6575c 14757 }
9c8a620e 14758
acd9c119
MC
14759 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14760 vlen += sizeof(v);
c4e6575c 14761 }
acd9c119
MC
14762}
14763
229b1ad1 14764static void tg3_probe_ncsi(struct tg3 *tp)
7fd76445 14765{
7fd76445 14766 u32 apedata;
7fd76445
MC
14767
14768 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14769 if (apedata != APE_SEG_SIG_MAGIC)
14770 return;
14771
14772 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14773 if (!(apedata & APE_FW_STATUS_READY))
14774 return;
14775
165f4d1c
MC
14776 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14777 tg3_flag_set(tp, APE_HAS_NCSI);
14778}
14779
229b1ad1 14780static void tg3_read_dash_ver(struct tg3 *tp)
165f4d1c
MC
14781{
14782 int vlen;
14783 u32 apedata;
14784 char *fwtype;
14785
7fd76445
MC
14786 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14787
165f4d1c 14788 if (tg3_flag(tp, APE_HAS_NCSI))
ecc79648 14789 fwtype = "NCSI";
c86a8560
MC
14790 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
14791 fwtype = "SMASH";
165f4d1c 14792 else
ecc79648
MC
14793 fwtype = "DASH";
14794
7fd76445
MC
14795 vlen = strlen(tp->fw_ver);
14796
ecc79648
MC
14797 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14798 fwtype,
7fd76445
MC
14799 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14800 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14801 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14802 (apedata & APE_FW_VERSION_BLDMSK));
14803}
14804
c86a8560
MC
14805static void tg3_read_otp_ver(struct tg3 *tp)
14806{
14807 u32 val, val2;
14808
14809 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5762)
14810 return;
14811
14812 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
14813 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
14814 TG3_OTP_MAGIC0_VALID(val)) {
14815 u64 val64 = (u64) val << 32 | val2;
14816 u32 ver = 0;
14817 int i, vlen;
14818
14819 for (i = 0; i < 7; i++) {
14820 if ((val64 & 0xff) == 0)
14821 break;
14822 ver = val64 & 0xff;
14823 val64 >>= 8;
14824 }
14825 vlen = strlen(tp->fw_ver);
14826 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
14827 }
14828}
14829
229b1ad1 14830static void tg3_read_fw_ver(struct tg3 *tp)
acd9c119
MC
14831{
14832 u32 val;
75f9936e 14833 bool vpd_vers = false;
acd9c119 14834
75f9936e
MC
14835 if (tp->fw_ver[0] != 0)
14836 vpd_vers = true;
df259d8c 14837
63c3a66f 14838 if (tg3_flag(tp, NO_NVRAM)) {
75f9936e 14839 strcat(tp->fw_ver, "sb");
c86a8560 14840 tg3_read_otp_ver(tp);
df259d8c
MC
14841 return;
14842 }
14843
acd9c119
MC
14844 if (tg3_nvram_read(tp, 0, &val))
14845 return;
14846
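	/* Word 0 identifies the NVRAM image format: TG3_EEPROM_MAGIC is
	 * a full bootcode image, while the _FW and _HW magics mark
	 * selfboot layouts; dispatch to the matching version parser.
	 */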
14847 if (val == TG3_EEPROM_MAGIC)
14848 tg3_read_bc_ver(tp);
14849 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14850 tg3_read_sb_ver(tp, val);
a6f6cb1c
MC
14851 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14852 tg3_read_hwsb_ver(tp);
acd9c119 14853
165f4d1c
MC
14854 if (tg3_flag(tp, ENABLE_ASF)) {
14855 if (tg3_flag(tp, ENABLE_APE)) {
14856 tg3_probe_ncsi(tp);
14857 if (!vpd_vers)
14858 tg3_read_dash_ver(tp);
14859 } else if (!vpd_vers) {
14860 tg3_read_mgmtfw_ver(tp);
14861 }
c9cab24e 14862 }
9c8a620e
MC
14863
14864 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
c4e6575c
MC
14865}
14866
7cb32cf2
MC
14867static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14868{
63c3a66f 14869 if (tg3_flag(tp, LRG_PROD_RING_CAP))
de9f5230 14870 return TG3_RX_RET_MAX_SIZE_5717;
63c3a66f 14871 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
de9f5230 14872 return TG3_RX_RET_MAX_SIZE_5700;
7cb32cf2 14873 else
de9f5230 14874 return TG3_RX_RET_MAX_SIZE_5705;
7cb32cf2
MC
14875}
14876
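/* Host bridges known to reorder posted writes. If one is present and the
 * device is not PCIe, tg3_get_invariants() below sets MBOX_WRITE_REORDER
 * so that every mailbox write is flushed with a read back.
 */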
4143470c 14877static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
895950c2
JP
14878 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14879 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14880 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14881 { },
14882};
14883
229b1ad1 14884static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16c7fa7d
MC
14885{
14886 struct pci_dev *peer;
14887 unsigned int func, devnr = tp->pdev->devfn & ~7;
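	/* PCI devfn encodes the device (slot) in bits 7:3 and the
	 * function in bits 2:0, so devfn & ~7 addresses function 0 of
	 * this device; the loop below scans all eight functions for a
	 * sibling port.
	 */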
14888
14889 for (func = 0; func < 8; func++) {
14890 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14891 if (peer && peer != tp->pdev)
14892 break;
14893 pci_dev_put(peer);
14894 }
 14895	/* The 5704 can be configured in single-port mode; in that
 14896	 * case set peer to tp->pdev itself.
14897 */
14898 if (!peer) {
14899 peer = tp->pdev;
14900 return peer;
14901 }
14902
14903 /*
14904 * We don't need to keep the refcount elevated; there's no way
14905 * to remove one half of this device without removing the other
14906 */
14907 pci_dev_put(peer);
14908
14909 return peer;
14910}
14911
229b1ad1 14912static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
42b123b1
MC
14913{
14914 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
14915 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
14916 u32 reg;
14917
14918 /* All devices that use the alternate
14919 * ASIC REV location have a CPMU.
14920 */
14921 tg3_flag_set(tp, CPMU_PRESENT);
14922
14923 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
79d49695 14924 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
42b123b1
MC
14925 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14926 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
c65a17f4
MC
14927 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
14928 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
14929 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
14930 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
42b123b1
MC
14931 reg = TG3PCI_GEN2_PRODID_ASICREV;
14932 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
14933 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14934 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14935 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14936 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14937 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14938 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14939 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14940 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14941 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14942 reg = TG3PCI_GEN15_PRODID_ASICREV;
14943 else
14944 reg = TG3PCI_PRODID_ASICREV;
14945
14946 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14947 }
14948
14949 /* Wrong chip ID in 5752 A0. This code can be removed later
14950 * as A0 is not in production.
14951 */
14952 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14953 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14954
79d49695
MC
14955 if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
14956 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
14957
42b123b1
MC
14958 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14959 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14960 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14961 tg3_flag_set(tp, 5717_PLUS);
14962
14963 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14964 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14965 tg3_flag_set(tp, 57765_CLASS);
14966
c65a17f4
MC
14967 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
14968 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
42b123b1
MC
14969 tg3_flag_set(tp, 57765_PLUS);
14970
14971 /* Intentionally exclude ASIC_REV_5906 */
14972 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14973 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14974 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14975 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14976 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14977 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14978 tg3_flag(tp, 57765_PLUS))
14979 tg3_flag_set(tp, 5755_PLUS);
14980
14981 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14982 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14983 tg3_flag_set(tp, 5780_CLASS);
14984
14985 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14986 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14987 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14988 tg3_flag(tp, 5755_PLUS) ||
14989 tg3_flag(tp, 5780_CLASS))
14990 tg3_flag_set(tp, 5750_PLUS);
14991
14992 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14993 tg3_flag(tp, 5750_PLUS))
14994 tg3_flag_set(tp, 5705_PLUS);
14995}
14996
3d567e0e
NNS
14997static bool tg3_10_100_only_device(struct tg3 *tp,
14998 const struct pci_device_id *ent)
14999{
15000 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15001
15002 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15003 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15004 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15005 return true;
15006
15007 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15008 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
15009 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15010 return true;
15011 } else {
15012 return true;
15013 }
15014 }
15015
15016 return false;
15017}
15018
1dd06ae8 15019static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
1da177e4 15020{
1da177e4 15021 u32 misc_ctrl_reg;
1da177e4
LT
15022 u32 pci_state_reg, grc_misc_cfg;
15023 u32 val;
15024 u16 pci_cmd;
5e7dfd0f 15025 int err;
1da177e4 15026
1da177e4
LT
15027 /* Force memory write invalidate off. If we leave it on,
15028 * then on 5700_BX chips we have to enable a workaround.
15029 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
 15030	 * to match the cacheline size. The Broadcom driver has this
 15031	 * workaround but always turns MWI off, so the workaround is
 15032	 * never used. This seems to suggest that it is insufficient.
15033 */
15034 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15035 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15036 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15037
16821285
MC
15038 /* Important! -- Make sure register accesses are byteswapped
15039 * correctly. Also, for those chips that require it, make
15040 * sure that indirect register accesses are enabled before
15041 * the first operation.
1da177e4
LT
15042 */
15043 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15044 &misc_ctrl_reg);
16821285
MC
15045 tp->misc_host_ctrl |= (misc_ctrl_reg &
15046 MISC_HOST_CTRL_CHIPREV);
15047 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15048 tp->misc_host_ctrl);
1da177e4 15049
42b123b1 15050 tg3_detect_asic_rev(tp, misc_ctrl_reg);
ff645bec 15051
6892914f
MC
15052 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15053 * we need to disable memory and use config. cycles
15054 * only to access all registers. The 5702/03 chips
15055 * can mistakenly decode the special cycles from the
15056 * ICH chipsets as memory write cycles, causing corruption
15057 * of register and memory space. Only certain ICH bridges
15058 * will drive special cycles with non-zero data during the
15059 * address phase which can fall within the 5703's address
15060 * range. This is not an ICH bug as the PCI spec allows
15061 * non-zero address during special cycles. However, only
15062 * these ICH bridges are known to drive non-zero addresses
15063 * during special cycles.
15064 *
15065 * Since special cycles do not cross PCI bridges, we only
15066 * enable this workaround if the 5703 is on the secondary
15067 * bus of these ICH bridges.
15068 */
15069 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
15070 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
15071 static struct tg3_dev_id {
15072 u32 vendor;
15073 u32 device;
15074 u32 rev;
15075 } ich_chipsets[] = {
15076 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15077 PCI_ANY_ID },
15078 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15079 PCI_ANY_ID },
15080 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15081 0xa },
15082 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15083 PCI_ANY_ID },
15084 { },
15085 };
15086 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15087 struct pci_dev *bridge = NULL;
15088
15089 while (pci_id->vendor != 0) {
15090 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15091 bridge);
15092 if (!bridge) {
15093 pci_id++;
15094 continue;
15095 }
15096 if (pci_id->rev != PCI_ANY_ID) {
44c10138 15097 if (bridge->revision > pci_id->rev)
6892914f
MC
15098 continue;
15099 }
15100 if (bridge->subordinate &&
15101 (bridge->subordinate->number ==
15102 tp->pdev->bus->number)) {
63c3a66f 15103 tg3_flag_set(tp, ICH_WORKAROUND);
6892914f
MC
15104 pci_dev_put(bridge);
15105 break;
15106 }
15107 }
15108 }
15109
6ff6f81d 15110 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
41588ba1
MC
15111 static struct tg3_dev_id {
15112 u32 vendor;
15113 u32 device;
15114 } bridge_chipsets[] = {
15115 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15116 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15117 { },
15118 };
15119 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15120 struct pci_dev *bridge = NULL;
15121
15122 while (pci_id->vendor != 0) {
15123 bridge = pci_get_device(pci_id->vendor,
15124 pci_id->device,
15125 bridge);
15126 if (!bridge) {
15127 pci_id++;
15128 continue;
15129 }
15130 if (bridge->subordinate &&
15131 (bridge->subordinate->number <=
15132 tp->pdev->bus->number) &&
b918c62e 15133 (bridge->subordinate->busn_res.end >=
41588ba1 15134 tp->pdev->bus->number)) {
63c3a66f 15135 tg3_flag_set(tp, 5701_DMA_BUG);
41588ba1
MC
15136 pci_dev_put(bridge);
15137 break;
15138 }
15139 }
15140 }
15141
4a29cc2e
MC
15142 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15143 * DMA addresses > 40-bit. This bridge may have other additional
15144 * 57xx devices behind it in some 4-port NIC designs for example.
15145 * Any tg3 device found behind the bridge will also need the 40-bit
15146 * DMA workaround.
15147 */
42b123b1 15148 if (tg3_flag(tp, 5780_CLASS)) {
63c3a66f 15149 tg3_flag_set(tp, 40BIT_DMA_BUG);
4cf78e4f 15150 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
859a5887 15151 } else {
4a29cc2e
MC
15152 struct pci_dev *bridge = NULL;
15153
15154 do {
15155 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15156 PCI_DEVICE_ID_SERVERWORKS_EPB,
15157 bridge);
15158 if (bridge && bridge->subordinate &&
15159 (bridge->subordinate->number <=
15160 tp->pdev->bus->number) &&
b918c62e 15161 (bridge->subordinate->busn_res.end >=
4a29cc2e 15162 tp->pdev->bus->number)) {
63c3a66f 15163 tg3_flag_set(tp, 40BIT_DMA_BUG);
4a29cc2e
MC
15164 pci_dev_put(bridge);
15165 break;
15166 }
15167 } while (bridge);
15168 }
4cf78e4f 15169
f6eb9b1f 15170 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3a1e19d3 15171 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
7544b097
MC
15172 tp->pdev_peer = tg3_find_peer(tp);
15173
507399f1 15174 /* Determine TSO capabilities */
a0512944 15175 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
4d163b75 15176 ; /* Do nothing. HW bug. */
63c3a66f
JP
15177 else if (tg3_flag(tp, 57765_PLUS))
15178 tg3_flag_set(tp, HW_TSO_3);
15179 else if (tg3_flag(tp, 5755_PLUS) ||
e849cdc3 15180 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
63c3a66f
JP
15181 tg3_flag_set(tp, HW_TSO_2);
15182 else if (tg3_flag(tp, 5750_PLUS)) {
15183 tg3_flag_set(tp, HW_TSO_1);
15184 tg3_flag_set(tp, TSO_BUG);
507399f1
MC
15185 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
15186 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
63c3a66f 15187 tg3_flag_clear(tp, TSO_BUG);
507399f1
MC
15188 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15189 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15190 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
63c3a66f 15191 tg3_flag_set(tp, TSO_BUG);
507399f1
MC
15192 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
15193 tp->fw_needed = FIRMWARE_TG3TSO5;
15194 else
15195 tp->fw_needed = FIRMWARE_TG3TSO;
15196 }
15197
dabc5c67 15198 /* Selectively allow TSO based on operating conditions */
6ff6f81d
MC
15199 if (tg3_flag(tp, HW_TSO_1) ||
15200 tg3_flag(tp, HW_TSO_2) ||
15201 tg3_flag(tp, HW_TSO_3) ||
cf9ecf4b
MC
15202 tp->fw_needed) {
15203 /* For firmware TSO, assume ASF is disabled.
15204 * We'll disable TSO later if we discover ASF
15205 * is enabled in tg3_get_eeprom_hw_cfg().
15206 */
dabc5c67 15207 tg3_flag_set(tp, TSO_CAPABLE);
cf9ecf4b 15208 } else {
dabc5c67
MC
15209 tg3_flag_clear(tp, TSO_CAPABLE);
15210 tg3_flag_clear(tp, TSO_BUG);
15211 tp->fw_needed = NULL;
15212 }
15213
15214 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
15215 tp->fw_needed = FIRMWARE_TG3;
15216
507399f1
MC
15217 tp->irq_max = 1;
15218
63c3a66f
JP
15219 if (tg3_flag(tp, 5750_PLUS)) {
15220 tg3_flag_set(tp, SUPPORT_MSI);
7544b097
MC
15221 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
15222 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
15223 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
15224 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
15225 tp->pdev_peer == tp->pdev))
63c3a66f 15226 tg3_flag_clear(tp, SUPPORT_MSI);
7544b097 15227
63c3a66f 15228 if (tg3_flag(tp, 5755_PLUS) ||
b5d3772c 15229 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
63c3a66f 15230 tg3_flag_set(tp, 1SHOT_MSI);
52c0fd83 15231 }
4f125f42 15232
63c3a66f
JP
15233 if (tg3_flag(tp, 57765_PLUS)) {
15234 tg3_flag_set(tp, SUPPORT_MSIX);
507399f1
MC
15235 tp->irq_max = TG3_IRQ_MAX_VECS;
15236 }
f6eb9b1f 15237 }
0e1406dd 15238
9102426a
MC
15239 tp->txq_max = 1;
15240 tp->rxq_max = 1;
15241 if (tp->irq_max > 1) {
15242 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15243 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15244
15245 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15246 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15247 tp->txq_max = tp->irq_max - 1;
15248 }
15249
b7abee6e
MC
15250 if (tg3_flag(tp, 5755_PLUS) ||
15251 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
63c3a66f 15252 tg3_flag_set(tp, SHORT_DMA_BUG);
f6eb9b1f 15253
e31aa987 15254 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
a4cb428d 15255 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
e31aa987 15256
fa6b2aae
MC
15257 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15258 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
c65a17f4
MC
15259 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
15260 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
63c3a66f 15261 tg3_flag_set(tp, LRG_PROD_RING_CAP);
de9f5230 15262
63c3a66f 15263 if (tg3_flag(tp, 57765_PLUS) &&
a0512944 15264 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
63c3a66f 15265 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
b703df6f 15266
63c3a66f
JP
15267 if (!tg3_flag(tp, 5705_PLUS) ||
15268 tg3_flag(tp, 5780_CLASS) ||
15269 tg3_flag(tp, USE_JUMBO_BDFLAG))
15270 tg3_flag_set(tp, JUMBO_CAPABLE);
0f893dc6 15271
52f4490c
MC
15272 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15273 &pci_state_reg);
15274
708ebb3a 15275 if (pci_is_pcie(tp->pdev)) {
5e7dfd0f
MC
15276 u16 lnkctl;
15277
63c3a66f 15278 tg3_flag_set(tp, PCI_EXPRESS);
5f5c51e3 15279
0f49bfbd 15280 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
5e7dfd0f 15281 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
7196cd6c
MC
15282 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
15283 ASIC_REV_5906) {
63c3a66f 15284 tg3_flag_clear(tp, HW_TSO_2);
dabc5c67 15285 tg3_flag_clear(tp, TSO_CAPABLE);
7196cd6c 15286 }
5e7dfd0f 15287 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
321d32a0 15288 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9cf74ebb
MC
15289 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
15290 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
63c3a66f 15291 tg3_flag_set(tp, CLKREQ_BUG);
614b0590 15292 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
63c3a66f 15293 tg3_flag_set(tp, L1PLLPD_EN);
c7835a77 15294 }
52f4490c 15295 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
708ebb3a
JM
15296 /* BCM5785 devices are effectively PCIe devices, and should
15297 * follow PCIe codepaths, but do not have a PCIe capabilities
15298 * section.
93a700a9 15299 */
63c3a66f
JP
15300 tg3_flag_set(tp, PCI_EXPRESS);
15301 } else if (!tg3_flag(tp, 5705_PLUS) ||
15302 tg3_flag(tp, 5780_CLASS)) {
52f4490c
MC
15303 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15304 if (!tp->pcix_cap) {
2445e461
MC
15305 dev_err(&tp->pdev->dev,
15306 "Cannot find PCI-X capability, aborting\n");
52f4490c
MC
15307 return -EIO;
15308 }
15309
15310 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
63c3a66f 15311 tg3_flag_set(tp, PCIX_MODE);
52f4490c 15312 }
1da177e4 15313
399de50b
MC
 15314	/* If we have an AMD 762 or VIA K8T800 chipset, reordering of
 15315	 * mailbox register writes by the host controller can cause
 15316	 * major trouble. We read back from
15317 * every mailbox register write to force the writes to be
15318 * posted to the chip in order.
15319 */
4143470c 15320 if (pci_dev_present(tg3_write_reorder_chipsets) &&
63c3a66f
JP
15321 !tg3_flag(tp, PCI_EXPRESS))
15322 tg3_flag_set(tp, MBOX_WRITE_REORDER);
399de50b 15323
69fc4053
MC
15324 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15325 &tp->pci_cacheline_sz);
15326 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15327 &tp->pci_lat_timer);
1da177e4
LT
15328 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15329 tp->pci_lat_timer < 64) {
15330 tp->pci_lat_timer = 64;
69fc4053
MC
15331 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15332 tp->pci_lat_timer);
1da177e4
LT
15333 }
15334
16821285
MC
15335 /* Important! -- It is critical that the PCI-X hw workaround
15336 * situation is decided before the first MMIO register access.
15337 */
52f4490c
MC
15338 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
15339 /* 5700 BX chips need to have their TX producer index
15340 * mailboxes written twice to workaround a bug.
15341 */
63c3a66f 15342 tg3_flag_set(tp, TXD_MBOX_HWBUG);
1da177e4 15343
52f4490c 15344 /* If we are in PCI-X mode, enable register write workaround.
1da177e4
LT
15345 *
15346 * The workaround is to use indirect register accesses
 15347	 * for all chip writes other than those to mailbox registers.
15348 */
63c3a66f 15349 if (tg3_flag(tp, PCIX_MODE)) {
1da177e4 15350 u32 pm_reg;
1da177e4 15351
63c3a66f 15352 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
1da177e4
LT
15353
 15354	/* The chip can have its power management PCI config
15355 * space registers clobbered due to this bug.
15356 * So explicitly force the chip into D0 here.
15357 */
9974a356
MC
15358 pci_read_config_dword(tp->pdev,
15359 tp->pm_cap + PCI_PM_CTRL,
1da177e4
LT
15360 &pm_reg);
15361 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15362 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9974a356
MC
15363 pci_write_config_dword(tp->pdev,
15364 tp->pm_cap + PCI_PM_CTRL,
1da177e4
LT
15365 pm_reg);
15366
15367 /* Also, force SERR#/PERR# in PCI command. */
15368 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15369 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15370 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15371 }
15372 }
15373
1da177e4 15374 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
63c3a66f 15375 tg3_flag_set(tp, PCI_HIGH_SPEED);
1da177e4 15376 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
63c3a66f 15377 tg3_flag_set(tp, PCI_32BIT);
1da177e4
LT
15378
15379 /* Chip-specific fixup from Broadcom driver */
15380 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
15381 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15382 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15383 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15384 }
15385
1ee582d8 15386 /* Default fast path register access methods */
20094930 15387 tp->read32 = tg3_read32;
1ee582d8 15388 tp->write32 = tg3_write32;
09ee929c 15389 tp->read32_mbox = tg3_read32;
20094930 15390 tp->write32_mbox = tg3_write32;
1ee582d8
MC
15391 tp->write32_tx_mbox = tg3_write32;
15392 tp->write32_rx_mbox = tg3_write32;
15393
15394 /* Various workaround register access methods */
63c3a66f 15395 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
1ee582d8 15396 tp->write32 = tg3_write_indirect_reg32;
98efd8a6 15397 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
63c3a66f 15398 (tg3_flag(tp, PCI_EXPRESS) &&
98efd8a6
MC
15399 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
15400 /*
15401 * Back to back register writes can cause problems on these
15402 * chips, the workaround is to read back all reg writes
15403 * except those to mailbox regs.
15404 *
15405 * See tg3_write_indirect_reg32().
15406 */
1ee582d8 15407 tp->write32 = tg3_write_flush_reg32;
98efd8a6
MC
15408 }
15409
63c3a66f 15410 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
1ee582d8 15411 tp->write32_tx_mbox = tg3_write32_tx_mbox;
63c3a66f 15412 if (tg3_flag(tp, MBOX_WRITE_REORDER))
1ee582d8
MC
15413 tp->write32_rx_mbox = tg3_write_flush_reg32;
15414 }
20094930 15415
63c3a66f 15416 if (tg3_flag(tp, ICH_WORKAROUND)) {
6892914f
MC
15417 tp->read32 = tg3_read_indirect_reg32;
15418 tp->write32 = tg3_write_indirect_reg32;
15419 tp->read32_mbox = tg3_read_indirect_mbox;
15420 tp->write32_mbox = tg3_write_indirect_mbox;
15421 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15422 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15423
15424 iounmap(tp->regs);
22abe310 15425 tp->regs = NULL;
6892914f
MC
15426
15427 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15428 pci_cmd &= ~PCI_COMMAND_MEMORY;
15429 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15430 }
b5d3772c
MC
15431 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15432 tp->read32_mbox = tg3_read32_mbox_5906;
15433 tp->write32_mbox = tg3_write32_mbox_5906;
15434 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15435 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15436 }
6892914f 15437
bbadf503 15438 if (tp->write32 == tg3_write_indirect_reg32 ||
63c3a66f 15439 (tg3_flag(tp, PCIX_MODE) &&
bbadf503 15440 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
f49639e6 15441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
63c3a66f 15442 tg3_flag_set(tp, SRAM_USE_CONFIG);
bbadf503 15443
16821285
MC
15444 /* The memory arbiter has to be enabled in order for SRAM accesses
15445 * to succeed. Normally on powerup the tg3 chip firmware will make
15446 * sure it is enabled, but other entities such as system netboot
15447 * code might disable it.
15448 */
15449 val = tr32(MEMARB_MODE);
15450 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15451
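	/* Start with the function number from config space; on some
	 * multi-port chips the effective function is instead taken from
	 * PCI-X status or the CPMU status register, handled below.
	 */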
9dc5e342
MC
15452 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15453 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15454 tg3_flag(tp, 5780_CLASS)) {
15455 if (tg3_flag(tp, PCIX_MODE)) {
15456 pci_read_config_dword(tp->pdev,
15457 tp->pcix_cap + PCI_X_STATUS,
15458 &val);
15459 tp->pci_fn = val & 0x7;
15460 }
857001f0
MC
15461 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15462 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9dc5e342
MC
15463 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
15464 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
857001f0
MC
15465 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15466 val = tr32(TG3_CPMU_STATUS);
15467
15468 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
15469 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15470 else
9dc5e342
MC
15471 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15472 TG3_CPMU_STATUS_FSHFT_5719;
69f11c99
MC
15473 }
15474
7d0c41ef 15475 /* Get eeprom hw config before calling tg3_set_power_state().
63c3a66f 15476 * In particular, the TG3_FLAG_IS_NIC flag must be
7d0c41ef
MC
15477 * determined before calling tg3_set_power_state() so that
15478 * we know whether or not to switch out of Vaux power.
15479 * When the flag is set, it means that GPIO1 is used for eeprom
15480 * write protect and also implies that it is a LOM where GPIOs
15481 * are not used to switch power.
6aa20a22 15482 */
7d0c41ef
MC
15483 tg3_get_eeprom_hw_cfg(tp);
15484
cf9ecf4b
MC
15485 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
15486 tg3_flag_clear(tp, TSO_CAPABLE);
15487 tg3_flag_clear(tp, TSO_BUG);
15488 tp->fw_needed = NULL;
15489 }
15490
63c3a66f 15491 if (tg3_flag(tp, ENABLE_APE)) {
0d3031d9
MC
15492 /* Allow reads and writes to the
15493 * APE register and memory space.
15494 */
15495 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
f92d9dc1
MC
15496 PCISTATE_ALLOW_APE_SHMEM_WR |
15497 PCISTATE_ALLOW_APE_PSPACE_WR;
0d3031d9
MC
15498 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15499 pci_state_reg);
c9cab24e
MC
15500
15501 tg3_ape_lock_init(tp);
0d3031d9
MC
15502 }
15503
16821285
MC
15504 /* Set up tp->grc_local_ctrl before calling
15505 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
15506 * will bring 5700's external PHY out of reset.
314fba34
MC
15507 * It is also used as eeprom write protect on LOMs.
15508 */
15509 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
6ff6f81d 15510 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
63c3a66f 15511 tg3_flag(tp, EEPROM_WRITE_PROT))
314fba34
MC
15512 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15513 GRC_LCLCTRL_GPIO_OUTPUT1);
3e7d83bc
MC
15514 /* Unused GPIO3 must be driven as output on 5752 because there
15515 * are no pull-up resistors on unused GPIO pins.
15516 */
15517 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
15518 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
314fba34 15519
321d32a0 15520 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
cb4ed1fd 15521 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
55086ad9 15522 tg3_flag(tp, 57765_CLASS))
af36e6b6
MC
15523 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15524
8d519ab2
MC
15525 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15526 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
5f0c4a3c
MC
15527 /* Turn off the debug UART. */
15528 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
63c3a66f 15529 if (tg3_flag(tp, IS_NIC))
5f0c4a3c
MC
15530 /* Keep VMain power. */
15531 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15532 GRC_LCLCTRL_GPIO_OUTPUT0;
15533 }
15534
c86a8560
MC
15535 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15536 tp->grc_local_ctrl |=
15537 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15538
16821285
MC
15539 /* Switch out of Vaux if it is a NIC */
15540 tg3_pwrsrc_switch_to_vmain(tp);
1da177e4 15541
1da177e4
LT
15542 /* Derive initial jumbo mode from MTU assigned in
15543 * ether_setup() via the alloc_etherdev() call
15544 */
63c3a66f
JP
15545 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15546 tg3_flag_set(tp, JUMBO_RING_ENABLE);
1da177e4
LT
15547
15548 /* Determine WakeOnLan speed to use. */
15549 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15550 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
15551 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
15552 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
63c3a66f 15553 tg3_flag_clear(tp, WOL_SPEED_100MB);
1da177e4 15554 } else {
63c3a66f 15555 tg3_flag_set(tp, WOL_SPEED_100MB);
1da177e4
LT
15556 }
15557
7f97a4bd 15558 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
f07e9af3 15559 tp->phy_flags |= TG3_PHYFLG_IS_FET;
7f97a4bd 15560
1da177e4 15561 /* A few boards don't want Ethernet@WireSpeed phy feature */
6ff6f81d
MC
15562 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15563 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
1da177e4 15564 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
747e8f8b 15565 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
f07e9af3
MC
15566 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15567 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15568 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
1da177e4
LT
15569
15570 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
15571 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
f07e9af3 15572 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
1da177e4 15573 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
f07e9af3 15574 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
1da177e4 15575
63c3a66f 15576 if (tg3_flag(tp, 5705_PLUS) &&
f07e9af3 15577 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
321d32a0 15578 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
f6eb9b1f 15579 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
63c3a66f 15580 !tg3_flag(tp, 57765_PLUS)) {
c424cb24 15581 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28 15582 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
15583 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15584 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
15585 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15586 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
f07e9af3 15587 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
c1d2a196 15588 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
f07e9af3 15589 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
321d32a0 15590 } else
f07e9af3 15591 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
c424cb24 15592 }
1da177e4 15593
15594 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15595 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
15596 tp->phy_otp = tg3_read_otp_phycfg(tp);
15597 if (tp->phy_otp == 0)
15598 tp->phy_otp = TG3_OTP_DEFAULT;
15599 }
15600
63c3a66f 15601 if (tg3_flag(tp, CPMU_PRESENT))
15602 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15603 else
15604 tp->mi_mode = MAC_MI_MODE_BASE;
15605
1da177e4 15606 tp->coalesce_mode = 0;
15607 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
15608 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
15609 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15610
15611 /* Set these bits to enable statistics workaround. */
15612 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15613 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
15614 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
15615 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15616 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15617 }
15618
15619 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15620 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
63c3a66f 15621 tg3_flag_set(tp, USE_PHYLIB);
57e6983c 15622
15623 err = tg3_mdio_init(tp);
15624 if (err)
15625 return err;
15626
15627 /* Initialize data/descriptor byte/word swapping. */
15628 val = tr32(GRC_MODE);
15629 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
15630 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15631 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15632 GRC_MODE_WORD_SWAP_B2HRX_DATA |
15633 GRC_MODE_B2HRX_ENABLE |
15634 GRC_MODE_HTX2B_ENABLE |
15635 GRC_MODE_HOST_STACKUP);
15636 else
15637 val &= GRC_MODE_HOST_STACKUP;
15638
15639 tw32(GRC_MODE, val | tp->grc_mode);
15640
15641 tg3_switch_clocks(tp);
15642
15643 /* Clear this out for sanity. */
15644 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15645
15646 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15647 &pci_state_reg);
15648 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
63c3a66f 15649 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15650 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
15651
15652 if (chiprevid == CHIPREV_ID_5701_A0 ||
15653 chiprevid == CHIPREV_ID_5701_B0 ||
15654 chiprevid == CHIPREV_ID_5701_B2 ||
15655 chiprevid == CHIPREV_ID_5701_B5) {
15656 void __iomem *sram_base;
15657
15658 /* Write some dummy words into the SRAM status block
15659 * area, see if it reads back correctly. If the return
15660 * value is bad, force enable the PCIX workaround.
15661 */
15662 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15663
15664 writel(0x00000000, sram_base);
15665 writel(0x00000000, sram_base + 4);
15666 writel(0xffffffff, sram_base + 4);
15667 if (readl(sram_base) != 0x00000000)
63c3a66f 15668 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15669 }
15670 }
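 /* A plausible reading of the probe above (inferred from the code,
 * not from Broadcom documentation): both dummy words land in
 * adjacent SRAM slots, and on a target with the PCI-X hardware bug
 * the 0xffffffff written to sram_base + 4 corrupts the word at
 * sram_base, so a non-zero readback forces the PCIX_TARGET_HWBUG
 * workaround on.
 */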
15671
15672 udelay(50);
15673 tg3_nvram_init(tp);
15674
15675 grc_misc_cfg = tr32(GRC_MISC_CFG);
15676 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15677
15678 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15679 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15680 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
63c3a66f 15681 tg3_flag_set(tp, IS_5788);
1da177e4 15682
63c3a66f 15683 if (!tg3_flag(tp, IS_5788) &&
6ff6f81d 15684 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
15685 tg3_flag_set(tp, TAGGED_STATUS);
15686 if (tg3_flag(tp, TAGGED_STATUS)) {
15687 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15688 HOSTCC_MODE_CLRTICK_TXBD);
15689
15690 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15691 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15692 tp->misc_host_ctrl);
15693 }
15694
3bda1258 15695 /* Preserve the APE MAC_MODE bits */
63c3a66f 15696 if (tg3_flag(tp, ENABLE_APE))
d2394e6b 15697 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
3bda1258 15698 else
6e01b20b 15699 tp->mac_mode = 0;
3bda1258 15700
3d567e0e 15701 if (tg3_10_100_only_device(tp, ent))
f07e9af3 15702 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15703
15704 err = tg3_phy_probe(tp);
15705 if (err) {
2445e461 15706 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
1da177e4 15707 /* ... but do not return immediately ... */
b02fd9e3 15708 tg3_mdio_fini(tp);
15709 }
15710
184b8904 15711 tg3_read_vpd(tp);
c4e6575c 15712 tg3_read_fw_ver(tp);
1da177e4 15713
15714 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15715 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15716 } else {
15717 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
f07e9af3 15718 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
1da177e4 15719 else
f07e9af3 15720 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15721 }
15722
15723 /* 5700 {AX,BX} chips have a broken status block link
15724 * change bit implementation, so we must use the
15725 * status register in those cases.
15726 */
15727 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
63c3a66f 15728 tg3_flag_set(tp, USE_LINKCHG_REG);
1da177e4 15729 else
63c3a66f 15730 tg3_flag_clear(tp, USE_LINKCHG_REG);
15731
15732 /* The led_ctrl is set during tg3_phy_probe, here we might
15733 * have to force the link status polling mechanism based
15734 * upon subsystem IDs.
15735 */
15736 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
007a880d 15737 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15738 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15739 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
63c3a66f 15740 tg3_flag_set(tp, USE_LINKCHG_REG);
15741 }
15742
15743 /* For all SERDES we poll the MAC status register. */
f07e9af3 15744 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
63c3a66f 15745 tg3_flag_set(tp, POLL_SERDES);
1da177e4 15746 else
63c3a66f 15747 tg3_flag_clear(tp, POLL_SERDES);
1da177e4 15748
9205fd9c 15749 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
d2757fc4 15750 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
1da177e4 15751 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
63c3a66f 15752 tg3_flag(tp, PCIX_MODE)) {
9205fd9c 15753 tp->rx_offset = NET_SKB_PAD;
d2757fc4 15754#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
9dc7a113 15755 tp->rx_copy_thresh = ~(u16)0;
15756#endif
15757 }
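 /* In the default case rx_offset keeps NET_IP_ALIGN (2) bytes of
 * slack so that, after the 14-byte Ethernet header, the IP header
 * sits on a 4-byte boundary. The 5701-in-PCI-X case above drops
 * that pad, and setting rx_copy_thresh to ~(u16)0 then routes every
 * frame through the copy path on architectures without efficient
 * unaligned access.
 */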
1da177e4 15758
15759 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15760 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15761 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15762
2c49a44d 15763 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15764
15765 /* Increment the rx prod index on the rx std ring by at most
15766 * 8 for these chips to workaround hw errata.
15767 */
15768 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15769 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15770 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15771 tp->rx_std_max_post = 8;
15772
63c3a66f 15773 if (tg3_flag(tp, ASPM_WORKAROUND))
15774 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15775 PCIE_PWR_MGMT_L1_THRESH_MSK;
15776
15777 return err;
15778}
15779
49b6e95f 15780#ifdef CONFIG_SPARC
229b1ad1 15781static int tg3_get_macaddr_sparc(struct tg3 *tp)
15782{
15783 struct net_device *dev = tp->dev;
15784 struct pci_dev *pdev = tp->pdev;
49b6e95f 15785 struct device_node *dp = pci_device_to_OF_node(pdev);
374d4cac 15786 const unsigned char *addr;
15787 int len;
15788
15789 addr = of_get_property(dp, "local-mac-address", &len);
15790 if (addr && len == 6) {
15791 memcpy(dev->dev_addr, addr, 6);
49b6e95f 15792 return 0;
15793 }
15794 return -ENODEV;
15795}
15796
229b1ad1 15797static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
15798{
15799 struct net_device *dev = tp->dev;
15800
15801 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15802 return 0;
15803}
15804#endif
15805
229b1ad1 15806static int tg3_get_device_address(struct tg3 *tp)
15807{
15808 struct net_device *dev = tp->dev;
15809 u32 hi, lo, mac_offset;
008652b3 15810 int addr_ok = 0;
1da177e4 15811
49b6e95f 15812#ifdef CONFIG_SPARC
15813 if (!tg3_get_macaddr_sparc(tp))
15814 return 0;
15815#endif
15816
15817 mac_offset = 0x7c;
6ff6f81d 15818 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
63c3a66f 15819 tg3_flag(tp, 5780_CLASS)) {
15820 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15821 mac_offset = 0xcc;
15822 if (tg3_nvram_lock(tp))
15823 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15824 else
15825 tg3_nvram_unlock(tp);
63c3a66f 15826 } else if (tg3_flag(tp, 5717_PLUS)) {
69f11c99 15827 if (tp->pci_fn & 1)
a1b950d5 15828 mac_offset = 0xcc;
69f11c99 15829 if (tp->pci_fn > 1)
a50d0796 15830 mac_offset += 0x18c;
a1b950d5 15831 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
b5d3772c 15832 mac_offset = 0x10;
15833
15834 /* First try to get it from MAC address mailbox. */
15835 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
15836 if ((hi >> 16) == 0x484b) {
15837 dev->dev_addr[0] = (hi >> 8) & 0xff;
15838 dev->dev_addr[1] = (hi >> 0) & 0xff;
15839
15840 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15841 dev->dev_addr[2] = (lo >> 24) & 0xff;
15842 dev->dev_addr[3] = (lo >> 16) & 0xff;
15843 dev->dev_addr[4] = (lo >> 8) & 0xff;
15844 dev->dev_addr[5] = (lo >> 0) & 0xff;
1da177e4 15845
15846 /* Some old bootcode may report a 0 MAC address in SRAM */
15847 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15848 }
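 /* Worked example with hypothetical mailbox contents: the top 16
 * bits of hi must read 0x484b (ASCII "HK") for the address to be
 * trusted. Given hi = 0x484b0010 and lo = 0x18c0ffee, the decode
 * above yields 00:10:18:c0:ff:ee - addr[0..1] from the low half of
 * hi, addr[2..5] from lo, most significant byte first.
 */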
15849 if (!addr_ok) {
15850 /* Next, try NVRAM. */
63c3a66f 15851 if (!tg3_flag(tp, NO_NVRAM) &&
df259d8c 15852 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
6d348f2c 15853 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
15854 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15855 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15856 }
15857 /* Finally just fetch it out of the MAC control regs. */
15858 else {
15859 hi = tr32(MAC_ADDR_0_HIGH);
15860 lo = tr32(MAC_ADDR_0_LOW);
15861
15862 dev->dev_addr[5] = lo & 0xff;
15863 dev->dev_addr[4] = (lo >> 8) & 0xff;
15864 dev->dev_addr[3] = (lo >> 16) & 0xff;
15865 dev->dev_addr[2] = (lo >> 24) & 0xff;
15866 dev->dev_addr[1] = hi & 0xff;
15867 dev->dev_addr[0] = (hi >> 8) & 0xff;
15868 }
15869 }
15870
15871 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
7582a335 15872#ifdef CONFIG_SPARC
15873 if (!tg3_get_default_macaddr_sparc(tp))
15874 return 0;
15875#endif
15876 return -EINVAL;
15877 }
15878 return 0;
15879}
15880
15881#define BOUNDARY_SINGLE_CACHELINE 1
15882#define BOUNDARY_MULTI_CACHELINE 2
15883
229b1ad1 15884static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15885{
15886 int cacheline_size;
15887 u8 byte;
15888 int goal;
15889
15890 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
15891 if (byte == 0)
15892 cacheline_size = 1024;
15893 else
15894 cacheline_size = (int) byte * 4;
15895
15896 /* On 5703 and later chips, the boundary bits have no
15897 * effect.
15898 */
15899 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15900 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
63c3a66f 15901 !tg3_flag(tp, PCI_EXPRESS))
15902 goto out;
15903
15904#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
15905 goal = BOUNDARY_MULTI_CACHELINE;
15906#else
15907#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
15908 goal = BOUNDARY_SINGLE_CACHELINE;
15909#else
15910 goal = 0;
15911#endif
15912#endif
15913
63c3a66f 15914 if (tg3_flag(tp, 57765_PLUS)) {
15915 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
15916 goto out;
15917 }
15918
15919 if (!goal)
15920 goto out;
15921
15922 /* PCI controllers on most RISC systems tend to disconnect
15923 * when a device tries to burst across a cache-line boundary.
15924 * Therefore, letting tg3 do so just wastes PCI bandwidth.
15925 *
15926 * Unfortunately, for PCI-E there are only limited
15927 * write-side controls for this, and thus for reads
15928 * we will still get the disconnects. We'll also waste
15929 * these PCI cycles for both read and write for chips
15930 * other than 5700 and 5701 which do not implement the
15931 * boundary bits.
15932 */
63c3a66f 15933 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
59e6b434
DM
15934 switch (cacheline_size) {
15935 case 16:
15936 case 32:
15937 case 64:
15938 case 128:
15939 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15940 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
15941 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
15942 } else {
15943 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15944 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15945 }
15946 break;
15947
15948 case 256:
15949 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
15950 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
15951 break;
15952
15953 default:
15954 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15955 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15956 break;
855e1111 15957 }
63c3a66f 15958 } else if (tg3_flag(tp, PCI_EXPRESS)) {
15959 switch (cacheline_size) {
15960 case 16:
15961 case 32:
15962 case 64:
15963 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15964 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15965 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15966 break;
15967 }
15968 /* fallthrough */
15969 case 128:
15970 default:
15971 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15972 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15973 break;
855e1111 15974 }
15975 } else {
15976 switch (cacheline_size) {
15977 case 16:
15978 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15979 val |= (DMA_RWCTRL_READ_BNDRY_16 |
15980 DMA_RWCTRL_WRITE_BNDRY_16);
15981 break;
15982 }
15983 /* fallthrough */
15984 case 32:
15985 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15986 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15987 DMA_RWCTRL_WRITE_BNDRY_32);
15988 break;
15989 }
15990 /* fallthrough */
15991 case 64:
15992 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15993 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15994 DMA_RWCTRL_WRITE_BNDRY_64);
15995 break;
15996 }
15997 /* fallthrough */
15998 case 128:
15999 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16000 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16001 DMA_RWCTRL_WRITE_BNDRY_128);
16002 break;
16003 }
16004 /* fallthrough */
16005 case 256:
16006 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16007 DMA_RWCTRL_WRITE_BNDRY_256);
16008 break;
16009 case 512:
16010 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16011 DMA_RWCTRL_WRITE_BNDRY_512);
16012 break;
16013 case 1024:
16014 default:
16015 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16016 DMA_RWCTRL_WRITE_BNDRY_1024);
16017 break;
855e1111 16018 }
16019 }
16020
16021out:
16022 return val;
16023}
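/* Unit check for the math above: PCI_CACHE_LINE_SIZE is programmed
 * in 32-bit dwords, hence the "* 4"; a register value of 0x10 means
 * 16 dwords = 64 bytes, and 0 (unprogrammed) is pessimistically
 * treated as a 1024-byte line.
 */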
16024
16025static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16026 int size, int to_device)
16027{
16028 struct tg3_internal_buffer_desc test_desc;
16029 u32 sram_dma_descs;
16030 int i, ret;
16031
16032 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16033
16034 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16035 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16036 tw32(RDMAC_STATUS, 0);
16037 tw32(WDMAC_STATUS, 0);
16038
16039 tw32(BUFMGR_MODE, 0);
16040 tw32(FTQ_RESET, 0);
16041
16042 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16043 test_desc.addr_lo = buf_dma & 0xffffffff;
16044 test_desc.nic_mbuf = 0x00002100;
16045 test_desc.len = size;
16046
16047 /*
 16048 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
16049 * the *second* time the tg3 driver was getting loaded after an
16050 * initial scan.
16051 *
16052 * Broadcom tells me:
16053 * ...the DMA engine is connected to the GRC block and a DMA
16054 * reset may affect the GRC block in some unpredictable way...
16055 * The behavior of resets to individual blocks has not been tested.
16056 *
16057 * Broadcom noted the GRC reset will also reset all sub-components.
16058 */
16059 if (to_device) {
16060 test_desc.cqid_sqid = (13 << 8) | 2;
16061
16062 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16063 udelay(40);
16064 } else {
16065 test_desc.cqid_sqid = (16 << 8) | 7;
16066
16067 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16068 udelay(40);
16069 }
16070 test_desc.flags = 0x00000005;
16071
16072 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16073 u32 val;
16074
16075 val = *(((u32 *)&test_desc) + i);
16076 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16077 sram_dma_descs + (i * sizeof(u32)));
16078 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16079 }
16080 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16081
859a5887 16082 if (to_device)
1da177e4 16083 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
859a5887 16084 else
1da177e4 16085 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16086
16087 ret = -ENODEV;
16088 for (i = 0; i < 40; i++) {
16089 u32 val;
16090
16091 if (to_device)
16092 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16093 else
16094 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16095 if ((val & 0xffff) == sram_dma_descs) {
16096 ret = 0;
16097 break;
16098 }
16099
16100 udelay(100);
16101 }
16102
16103 return ret;
16104}
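/* Shape of one tg3_do_test_dma() pass, as read from the code above
 * rather than a hardware manual: a single internal buffer descriptor
 * is written into NIC SRAM through the PCI memory window, the read-
 * or write-side DMA queue is kicked with that descriptor's SRAM
 * address, and the matching completion FIFO is polled for up to
 * 40 * 100us (4 ms); -ENODEV means the address never came back.
 */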
16105
ded7340d 16106#define TEST_BUFFER_SIZE 0x2000
1da177e4 16107
4143470c 16108static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16109 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16110 { },
16111};
16112
229b1ad1 16113static int tg3_test_dma(struct tg3 *tp)
16114{
16115 dma_addr_t buf_dma;
59e6b434 16116 u32 *buf, saved_dma_rwctrl;
cbf9ca6c 16117 int ret = 0;
1da177e4 16118
16119 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16120 &buf_dma, GFP_KERNEL);
16121 if (!buf) {
16122 ret = -ENOMEM;
16123 goto out_nofree;
16124 }
16125
16126 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16127 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16128
59e6b434 16129 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
1da177e4 16130
63c3a66f 16131 if (tg3_flag(tp, 57765_PLUS))
16132 goto out;
16133
63c3a66f 16134 if (tg3_flag(tp, PCI_EXPRESS)) {
16135 /* DMA read watermark not used on PCIE */
16136 tp->dma_rwctrl |= 0x00180000;
63c3a66f 16137 } else if (!tg3_flag(tp, PCIX_MODE)) {
16138 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
16139 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
16140 tp->dma_rwctrl |= 0x003f0000;
16141 else
16142 tp->dma_rwctrl |= 0x003f000f;
16143 } else {
16144 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
16145 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
16146 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
49afdeb6 16147 u32 read_water = 0x7;
1da177e4 16148
16149 /* If the 5704 is behind the EPB bridge, we can
16150 * do the less restrictive ONE_DMA workaround for
16151 * better performance.
16152 */
63c3a66f 16153 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16154 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
16155 tp->dma_rwctrl |= 0x8000;
16156 else if (ccval == 0x6 || ccval == 0x7)
16157 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16158
16159 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
16160 read_water = 4;
59e6b434 16161 /* Set bit 23 to enable PCIX hw bug fix */
16162 tp->dma_rwctrl |=
16163 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16164 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16165 (1 << 23);
16166 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
16167 /* 5780 always in PCIX mode */
16168 tp->dma_rwctrl |= 0x00144000;
16169 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
16170 /* 5714 always in PCIX mode */
16171 tp->dma_rwctrl |= 0x00148000;
16172 } else {
16173 tp->dma_rwctrl |= 0x001b000f;
16174 }
16175 }
16176
16177 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
16178 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
16179 tp->dma_rwctrl &= 0xfffffff0;
16180
16181 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
16182 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
16183 /* Remove this if it causes problems for some boards. */
16184 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16185
16186 /* On 5700/5701 chips, we need to set this bit.
16187 * Otherwise the chip will issue cacheline transactions
16188 * to streamable DMA memory with not all the byte
16189 * enables turned on. This is an error on several
16190 * RISC PCI controllers, in particular sparc64.
16191 *
16192 * On 5703/5704 chips, this bit has been reassigned
16193 * a different meaning. In particular, it is used
16194 * on those chips to enable a PCI-X workaround.
16195 */
16196 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16197 }
16198
16199 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16200
16201#if 0
16202 /* Unneeded, already done by tg3_get_invariants. */
16203 tg3_switch_clocks(tp);
16204#endif
16205
16206 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
16207 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
16208 goto out;
16209
16210 /* It is best to perform DMA test with maximum write burst size
16211 * to expose the 5700/5701 write DMA bug.
16212 */
16213 saved_dma_rwctrl = tp->dma_rwctrl;
16214 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16215 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16216
16217 while (1) {
16218 u32 *p = buf, i;
16219
16220 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16221 p[i] = i;
16222
16223 /* Send the buffer to the chip. */
16224 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16225 if (ret) {
16226 dev_err(&tp->pdev->dev,
16227 "%s: Buffer write failed. err = %d\n",
16228 __func__, ret);
16229 break;
16230 }
16231
16232#if 0
16233 /* validate data reached card RAM correctly. */
16234 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16235 u32 val;
16236 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16237 if (le32_to_cpu(val) != p[i]) {
16238 dev_err(&tp->pdev->dev,
16239 "%s: Buffer corrupted on device! "
16240 "(%d != %d)\n", __func__, val, i);
16241 /* ret = -ENODEV here? */
16242 }
16243 p[i] = 0;
16244 }
16245#endif
16246 /* Now read it back. */
16247 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16248 if (ret) {
16249 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16250 "err = %d\n", __func__, ret);
16251 break;
16252 }
16253
16254 /* Verify it. */
16255 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16256 if (p[i] == i)
16257 continue;
16258
16259 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16260 DMA_RWCTRL_WRITE_BNDRY_16) {
16261 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16262 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16263 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16264 break;
16265 } else {
16266 dev_err(&tp->pdev->dev,
16267 "%s: Buffer corrupted on read back! "
16268 "(%d != %d)\n", __func__, p[i], i);
16269 ret = -ENODEV;
16270 goto out;
16271 }
16272 }
16273
16274 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16275 /* Success. */
16276 ret = 0;
16277 break;
16278 }
16279 }
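 /* The loop above amounts to a two-strike retry: the first pass runs
 * with write boundaries disabled to provoke the 5700/5701 write-DMA
 * bug, a corrupted read-back clamps the write boundary to 16 bytes
 * and retries once, and a mismatch with the 16-byte boundary
 * already in force is reported as a real failure.
 */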
16280 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16281 DMA_RWCTRL_WRITE_BNDRY_16) {
16282 /* DMA test passed without adjusting DMA boundary,
16283 * now look for chipsets that are known to expose the
16284 * DMA bug without failing the test.
59e6b434 16285 */
4143470c 16286 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16287 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16288 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
859a5887 16289 } else {
16290 /* Safe to use the calculated DMA boundary. */
16291 tp->dma_rwctrl = saved_dma_rwctrl;
859a5887 16292 }
6d1cfbab 16293
16294 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16295 }
16296
16297out:
4bae65c8 16298 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16299out_nofree:
16300 return ret;
16301}
16302
229b1ad1 16303static void tg3_init_bufmgr_config(struct tg3 *tp)
1da177e4 16304{
63c3a66f 16305 if (tg3_flag(tp, 57765_PLUS)) {
16306 tp->bufmgr_config.mbuf_read_dma_low_water =
16307 DEFAULT_MB_RDMA_LOW_WATER_5705;
16308 tp->bufmgr_config.mbuf_mac_rx_low_water =
16309 DEFAULT_MB_MACRX_LOW_WATER_57765;
16310 tp->bufmgr_config.mbuf_high_water =
16311 DEFAULT_MB_HIGH_WATER_57765;
16312
16313 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16314 DEFAULT_MB_RDMA_LOW_WATER_5705;
16315 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16316 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16317 tp->bufmgr_config.mbuf_high_water_jumbo =
16318 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
63c3a66f 16319 } else if (tg3_flag(tp, 5705_PLUS)) {
16320 tp->bufmgr_config.mbuf_read_dma_low_water =
16321 DEFAULT_MB_RDMA_LOW_WATER_5705;
16322 tp->bufmgr_config.mbuf_mac_rx_low_water =
16323 DEFAULT_MB_MACRX_LOW_WATER_5705;
16324 tp->bufmgr_config.mbuf_high_water =
16325 DEFAULT_MB_HIGH_WATER_5705;
16326 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
16327 tp->bufmgr_config.mbuf_mac_rx_low_water =
16328 DEFAULT_MB_MACRX_LOW_WATER_5906;
16329 tp->bufmgr_config.mbuf_high_water =
16330 DEFAULT_MB_HIGH_WATER_5906;
16331 }
16332
16333 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16334 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16335 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16336 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16337 tp->bufmgr_config.mbuf_high_water_jumbo =
16338 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16339 } else {
16340 tp->bufmgr_config.mbuf_read_dma_low_water =
16341 DEFAULT_MB_RDMA_LOW_WATER;
16342 tp->bufmgr_config.mbuf_mac_rx_low_water =
16343 DEFAULT_MB_MACRX_LOW_WATER;
16344 tp->bufmgr_config.mbuf_high_water =
16345 DEFAULT_MB_HIGH_WATER;
16346
16347 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16348 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16349 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16350 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16351 tp->bufmgr_config.mbuf_high_water_jumbo =
16352 DEFAULT_MB_HIGH_WATER_JUMBO;
16353 }
16354
16355 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16356 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16357}
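/* Net effect of the tiers above: 57765-class and newer parts use
 * the 57765 watermark set, other 5705-and-later parts use the 5705
 * set (with lower MAC RX values carved out for the 5906), and older
 * chips keep the original 570x defaults; each tier has a matching
 * jumbo-frame variant.
 */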
16358
229b1ad1 16359static char *tg3_phy_string(struct tg3 *tp)
1da177e4 16360{
16361 switch (tp->phy_id & TG3_PHY_ID_MASK) {
16362 case TG3_PHY_ID_BCM5400: return "5400";
16363 case TG3_PHY_ID_BCM5401: return "5401";
16364 case TG3_PHY_ID_BCM5411: return "5411";
16365 case TG3_PHY_ID_BCM5701: return "5701";
16366 case TG3_PHY_ID_BCM5703: return "5703";
16367 case TG3_PHY_ID_BCM5704: return "5704";
16368 case TG3_PHY_ID_BCM5705: return "5705";
16369 case TG3_PHY_ID_BCM5750: return "5750";
16370 case TG3_PHY_ID_BCM5752: return "5752";
16371 case TG3_PHY_ID_BCM5714: return "5714";
16372 case TG3_PHY_ID_BCM5780: return "5780";
16373 case TG3_PHY_ID_BCM5755: return "5755";
16374 case TG3_PHY_ID_BCM5787: return "5787";
16375 case TG3_PHY_ID_BCM5784: return "5784";
16376 case TG3_PHY_ID_BCM5756: return "5722/5756";
16377 case TG3_PHY_ID_BCM5906: return "5906";
16378 case TG3_PHY_ID_BCM5761: return "5761";
16379 case TG3_PHY_ID_BCM5718C: return "5718C";
16380 case TG3_PHY_ID_BCM5718S: return "5718S";
16381 case TG3_PHY_ID_BCM57765: return "57765";
302b500b 16382 case TG3_PHY_ID_BCM5719C: return "5719C";
6418f2c1 16383 case TG3_PHY_ID_BCM5720C: return "5720C";
c65a17f4 16384 case TG3_PHY_ID_BCM5762: return "5762C";
79eb6904 16385 case TG3_PHY_ID_BCM8002: return "8002/serdes";
16386 case 0: return "serdes";
16387 default: return "unknown";
855e1111 16388 }
16389}
16390
229b1ad1 16391static char *tg3_bus_string(struct tg3 *tp, char *str)
f9804ddb 16392{
63c3a66f 16393 if (tg3_flag(tp, PCI_EXPRESS)) {
16394 strcpy(str, "PCI Express");
16395 return str;
63c3a66f 16396 } else if (tg3_flag(tp, PCIX_MODE)) {
16397 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16398
16399 strcpy(str, "PCIX:");
16400
16401 if ((clock_ctrl == 7) ||
16402 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16403 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16404 strcat(str, "133MHz");
16405 else if (clock_ctrl == 0)
16406 strcat(str, "33MHz");
16407 else if (clock_ctrl == 2)
16408 strcat(str, "50MHz");
16409 else if (clock_ctrl == 4)
16410 strcat(str, "66MHz");
16411 else if (clock_ctrl == 6)
16412 strcat(str, "100MHz");
16413 } else {
16414 strcpy(str, "PCI:");
63c3a66f 16415 if (tg3_flag(tp, PCI_HIGH_SPEED))
16416 strcat(str, "66MHz");
16417 else
16418 strcat(str, "33MHz");
16419 }
63c3a66f 16420 if (tg3_flag(tp, PCI_32BIT))
16421 strcat(str, ":32-bit");
16422 else
16423 strcat(str, ":64-bit");
16424 return str;
16425}
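/* Usage sketch: callers hand in a scratch buffer of at least 40
 * bytes (see "char str[40]" in tg3_init_one()) and get back strings
 * such as "PCI Express", "PCIX:133MHz:64-bit" or "PCI:66MHz:32-bit"
 * assembled from the flag and clock tests above.
 */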
16426
229b1ad1 16427static void tg3_init_coal(struct tg3 *tp)
16428{
16429 struct ethtool_coalesce *ec = &tp->coal;
16430
16431 memset(ec, 0, sizeof(*ec));
16432 ec->cmd = ETHTOOL_GCOALESCE;
16433 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16434 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16435 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16436 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16437 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16438 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16439 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16440 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16441 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16442
16443 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16444 HOSTCC_MODE_CLRTICK_TXBD)) {
16445 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16446 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16447 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16448 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16449 }
d244c892 16450
63c3a66f 16451 if (tg3_flag(tp, 5705_PLUS)) {
16452 ec->rx_coalesce_usecs_irq = 0;
16453 ec->tx_coalesce_usecs_irq = 0;
16454 ec->stats_block_coalesce_usecs = 0;
16455 }
16456}
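/* These defaults are what "ethtool -c <dev>" reports before any
 * user tuning. The 5705_PLUS branch zeroing the IRQ and stats
 * values suggests those coalescing timers are absent on that
 * generation, though that is an inference from the code, not a
 * datasheet claim.
 */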
16457
229b1ad1 16458static int tg3_init_one(struct pci_dev *pdev,
16459 const struct pci_device_id *ent)
16460{
16461 struct net_device *dev;
16462 struct tg3 *tp;
16463 int i, err, pm_cap;
16464 u32 sndmbx, rcvmbx, intmbx;
f9804ddb 16465 char str[40];
72f2afb8 16466 u64 dma_mask, persist_dma_mask;
c8f44aff 16467 netdev_features_t features = 0;
1da177e4 16468
05dbe005 16469 printk_once(KERN_INFO "%s\n", version);
16470
16471 err = pci_enable_device(pdev);
16472 if (err) {
2445e461 16473 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16474 return err;
16475 }
16476
16477 err = pci_request_regions(pdev, DRV_MODULE_NAME);
16478 if (err) {
2445e461 16479 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16480 goto err_out_disable_pdev;
16481 }
16482
16483 pci_set_master(pdev);
16484
16485 /* Find power-management capability. */
16486 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16487 if (pm_cap == 0) {
16488 dev_err(&pdev->dev,
16489 "Cannot find Power Management capability, aborting\n");
16490 err = -EIO;
16491 goto err_out_free_res;
16492 }
16493
16494 err = pci_set_power_state(pdev, PCI_D0);
16495 if (err) {
16496 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16497 goto err_out_free_res;
16498 }
16499
fe5f5787 16500 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
1da177e4 16501 if (!dev) {
1da177e4 16502 err = -ENOMEM;
16821285 16503 goto err_out_power_down;
16504 }
16505
16506 SET_NETDEV_DEV(dev, &pdev->dev);
16507
16508 tp = netdev_priv(dev);
16509 tp->pdev = pdev;
16510 tp->dev = dev;
16511 tp->pm_cap = pm_cap;
16512 tp->rx_mode = TG3_DEF_RX_MODE;
16513 tp->tx_mode = TG3_DEF_TX_MODE;
9c13cb8b 16514 tp->irq_sync = 1;
8ef21428 16515
16516 if (tg3_debug > 0)
16517 tp->msg_enable = tg3_debug;
16518 else
16519 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16520
16521 /* The word/byte swap controls here control register access byte
16522 * swapping. DMA data byte swapping is controlled in the GRC_MODE
16523 * setting below.
16524 */
16525 tp->misc_host_ctrl =
16526 MISC_HOST_CTRL_MASK_PCI_INT |
16527 MISC_HOST_CTRL_WORD_SWAP |
16528 MISC_HOST_CTRL_INDIR_ACCESS |
16529 MISC_HOST_CTRL_PCISTATE_RW;
16530
16531 /* The NONFRM (non-frame) byte/word swap controls take effect
16532 * on descriptor entries, anything which isn't packet data.
16533 *
16534 * The StrongARM chips on the board (one for tx, one for rx)
16535 * are running in big-endian mode.
16536 */
16537 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16538 GRC_MODE_WSWAP_NONFRM_DATA);
16539#ifdef __BIG_ENDIAN
16540 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16541#endif
16542 spin_lock_init(&tp->lock);
1da177e4 16543 spin_lock_init(&tp->indirect_lock);
c4028958 16544 INIT_WORK(&tp->reset_task, tg3_reset_task);
1da177e4 16545
d5fe488a 16546 tp->regs = pci_ioremap_bar(pdev, BAR_0);
ab0049b4 16547 if (!tp->regs) {
ab96b241 16548 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16549 err = -ENOMEM;
16550 goto err_out_free_dev;
16551 }
16552
16553 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16554 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16555 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16556 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16557 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
79d49695 16558 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16559 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16560 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16561 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16562 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16563 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16564 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
16565 tg3_flag_set(tp, ENABLE_APE);
16566 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16567 if (!tp->aperegs) {
16568 dev_err(&pdev->dev,
16569 "Cannot map APE registers, aborting\n");
16570 err = -ENOMEM;
16571 goto err_out_iounmap;
16572 }
16573 }
16574
16575 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16576 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
1da177e4 16577
1da177e4 16578 dev->ethtool_ops = &tg3_ethtool_ops;
1da177e4 16579 dev->watchdog_timeo = TG3_TX_TIMEOUT;
2ffcc981 16580 dev->netdev_ops = &tg3_netdev_ops;
1da177e4 16581 dev->irq = pdev->irq;
1da177e4 16582
3d567e0e 16583 err = tg3_get_invariants(tp, ent);
1da177e4 16584 if (err) {
16585 dev_err(&pdev->dev,
16586 "Problem fetching invariants of chip, aborting\n");
c9cab24e 16587 goto err_out_apeunmap;
16588 }
16589
16590 /* The EPB bridge inside 5714, 5715, and 5780 and any
16591 * device behind the EPB cannot support DMA addresses > 40-bit.
16592 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16593 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16594 * do DMA address check in tg3_start_xmit().
16595 */
63c3a66f 16596 if (tg3_flag(tp, IS_5788))
284901a9 16597 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
63c3a66f 16598 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
50cf156a 16599 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
72f2afb8 16600#ifdef CONFIG_HIGHMEM
6a35528a 16601 dma_mask = DMA_BIT_MASK(64);
72f2afb8 16602#endif
4a29cc2e 16603 } else
6a35528a 16604 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16605
16606 /* Configure DMA attributes. */
284901a9 16607 if (dma_mask > DMA_BIT_MASK(32)) {
16608 err = pci_set_dma_mask(pdev, dma_mask);
16609 if (!err) {
0da0606f 16610 features |= NETIF_F_HIGHDMA;
16611 err = pci_set_consistent_dma_mask(pdev,
16612 persist_dma_mask);
16613 if (err < 0) {
16614 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16615 "DMA for consistent allocations\n");
c9cab24e 16616 goto err_out_apeunmap;
16617 }
16618 }
16619 }
16620 if (err || dma_mask == DMA_BIT_MASK(32)) {
16621 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
72f2afb8 16622 if (err) {
16623 dev_err(&pdev->dev,
16624 "No usable DMA configuration, aborting\n");
c9cab24e 16625 goto err_out_apeunmap;
16626 }
16627 }
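 /* For reference, DMA_BIT_MASK(n) expands to ((1ULL << n) - 1) for
 * n < 64, so the 40-bit EPB case above sets a streaming mask of
 * 0x000000ffffffffff; on CONFIG_HIGHMEM the streaming mask may be
 * raised to 64 bits while persist_dma_mask stays at 40.
 */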
16628
fdfec172 16629 tg3_init_bufmgr_config(tp);
1da177e4 16630
16631 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16632
16633 /* 5700 B0 chips do not support checksumming correctly due
16634 * to hardware bugs.
16635 */
16636 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
16637 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16638
16639 if (tg3_flag(tp, 5755_PLUS))
16640 features |= NETIF_F_IPV6_CSUM;
16641 }
16642
16643 /* TSO is on by default on chips that support hardware TSO.
16644 * Firmware TSO on older chips gives lower performance, so it
16645 * is off by default, but can be enabled using ethtool.
16646 */
16647 if ((tg3_flag(tp, HW_TSO_1) ||
16648 tg3_flag(tp, HW_TSO_2) ||
16649 tg3_flag(tp, HW_TSO_3)) &&
16650 (features & NETIF_F_IP_CSUM))
16651 features |= NETIF_F_TSO;
63c3a66f 16652 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16653 if (features & NETIF_F_IPV6_CSUM)
16654 features |= NETIF_F_TSO6;
63c3a66f 16655 if (tg3_flag(tp, HW_TSO_3) ||
e849cdc3 16656 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
16657 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
16658 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
63c3a66f 16659 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
dc668910 16660 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
0da0606f 16661 features |= NETIF_F_TSO_ECN;
b0026624 16662 }
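/* Net effect of the wiring above: any HW_TSO_* flag plus working IP
 * checksumming yields NETIF_F_TSO, HW_TSO_2/3 add TSO6 when IPv6
 * checksumming is on, and TSO_ECN is limited to HW_TSO_3 parts plus
 * the listed 5761/5784(non-AX)/5785/57780 exceptions.
 */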
1da177e4 16663
16664 dev->features |= features;
16665 dev->vlan_features |= features;
16666
16667 /*
16668 * Add loopback capability only for a subset of devices that support
 16669 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
16670 * loopback for the remaining devices.
16671 */
16672 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
16673 !tg3_flag(tp, CPMU_PRESENT))
16674 /* Add the loopback capability */
16675 features |= NETIF_F_LOOPBACK;
16676
0da0606f 16677 dev->hw_features |= features;
06c03c02 16678
1da177e4 16679 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
63c3a66f 16680 !tg3_flag(tp, TSO_CAPABLE) &&
1da177e4 16681 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
63c3a66f 16682 tg3_flag_set(tp, MAX_RXPEND_64);
16683 tp->rx_pending = 63;
16684 }
16685
16686 err = tg3_get_device_address(tp);
16687 if (err) {
16688 dev_err(&pdev->dev,
16689 "Could not obtain valid ethernet address, aborting\n");
c9cab24e 16690 goto err_out_apeunmap;
16691 }
16692
16693 /*
16694 * Reset chip in case UNDI or EFI driver did not shutdown
16695 * DMA self test will enable WDMAC and we'll see (spurious)
16696 * pending DMA on the PCI bus at that point.
16697 */
16698 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16699 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
1da177e4 16700 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
944d980e 16701 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16702 }
16703
16704 err = tg3_test_dma(tp);
16705 if (err) {
ab96b241 16706 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
c88864df 16707 goto err_out_apeunmap;
16708 }
16709
16710 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
16711 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
16712 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
6fd45cb8 16713 for (i = 0; i < tp->irq_max; i++) {
16714 struct tg3_napi *tnapi = &tp->napi[i];
16715
16716 tnapi->tp = tp;
16717 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
16718
16719 tnapi->int_mbox = intmbx;
93a700a9 16720 if (i <= 4)
16721 intmbx += 0x8;
16722 else
16723 intmbx += 0x4;
16724
16725 tnapi->consmbox = rcvmbx;
16726 tnapi->prodmbox = sndmbx;
16727
66cfd1bd 16728 if (i)
78f90dcf 16729 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
66cfd1bd 16730 else
78f90dcf 16731 tnapi->coal_now = HOSTCC_MODE_NOW;
78f90dcf 16732
63c3a66f 16733 if (!tg3_flag(tp, SUPPORT_MSIX))
16734 break;
16735
16736 /*
16737 * If we support MSIX, we'll be using RSS. If we're using
16738 * RSS, the first vector only handles link interrupts and the
16739 * remaining vectors handle rx and tx interrupts. Reuse the
16740 * mailbox values for the next iteration. The values we setup
16741 * above are still useful for the single vectored mode.
16742 */
16743 if (!i)
16744 continue;
16745
16746 rcvmbx += 0x8;
16747
16748 if (sndmbx & 0x4)
16749 sndmbx -= 0x4;
16750 else
16751 sndmbx += 0xc;
16752 }
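 /* With a hypothetical interrupt-mailbox base I, the loop above puts
 * vectors 0..4 at I, I+0x8, ..., I+0x20 and packs later vectors 4
 * bytes apart; vector 1 deliberately reuses vector 0's rcv/snd
 * mailbox values, per the RSS comment in the loop.
 */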
16753
16754 tg3_init_coal(tp);
16755
16756 pci_set_drvdata(pdev, dev);
16757
fb4ce8ad 16758 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
16759 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
16760 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
16761 tg3_flag_set(tp, PTP_CAPABLE);
16762
16763 if (tg3_flag(tp, 5717_PLUS)) {
16764 /* Resume a low-power mode */
16765 tg3_frob_aux_power(tp, false);
16766 }
16767
16768 tg3_timer_init(tp);
16769
16770 err = register_netdev(dev);
16771 if (err) {
ab96b241 16772 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
0d3031d9 16773 goto err_out_apeunmap;
16774 }
16775
16776 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16777 tp->board_part_number,
16778 tp->pci_chip_rev_id,
16779 tg3_bus_string(tp, str),
16780 dev->dev_addr);
1da177e4 16781
f07e9af3 16782 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16783 struct phy_device *phydev;
16784 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16785 netdev_info(dev,
16786 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
05dbe005 16787 phydev->drv->name, dev_name(&phydev->dev));
16788 } else {
16789 char *ethtype;
16790
16791 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16792 ethtype = "10/100Base-TX";
16793 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16794 ethtype = "1000Base-SX";
16795 else
16796 ethtype = "10/100/1000Base-T";
16797
5129c3a3 16798 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16799 "(WireSpeed[%d], EEE[%d])\n",
16800 tg3_phy_string(tp), ethtype,
16801 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16802 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
f07e9af3 16803 }
16804
16805 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
dc668910 16806 (dev->features & NETIF_F_RXCSUM) != 0,
63c3a66f 16807 tg3_flag(tp, USE_LINKCHG_REG) != 0,
f07e9af3 16808 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16809 tg3_flag(tp, ENABLE_ASF) != 0,
16810 tg3_flag(tp, TSO_CAPABLE) != 0);
16811 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16812 tp->dma_rwctrl,
16813 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16814 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
1da177e4 16815
16816 pci_save_state(pdev);
16817
16818 return 0;
16819
16820err_out_apeunmap:
16821 if (tp->aperegs) {
16822 iounmap(tp->aperegs);
16823 tp->aperegs = NULL;
16824 }
16825
1da177e4 16826err_out_iounmap:
16827 if (tp->regs) {
16828 iounmap(tp->regs);
22abe310 16829 tp->regs = NULL;
6892914f 16830 }
16831
16832err_out_free_dev:
16833 free_netdev(dev);
16834
16835err_out_power_down:
16836 pci_set_power_state(pdev, PCI_D3hot);
16837
16838err_out_free_res:
16839 pci_release_regions(pdev);
16840
16841err_out_disable_pdev:
16842 pci_disable_device(pdev);
16843 pci_set_drvdata(pdev, NULL);
16844 return err;
16845}
16846
229b1ad1 16847static void tg3_remove_one(struct pci_dev *pdev)
16848{
16849 struct net_device *dev = pci_get_drvdata(pdev);
16850
16851 if (dev) {
16852 struct tg3 *tp = netdev_priv(dev);
16853
e3c5530b 16854 release_firmware(tp->fw);
077f849d 16855
db219973 16856 tg3_reset_task_cancel(tp);
158d7abd 16857
e730c823 16858 if (tg3_flag(tp, USE_PHYLIB)) {
b02fd9e3 16859 tg3_phy_fini(tp);
158d7abd 16860 tg3_mdio_fini(tp);
b02fd9e3 16861 }
158d7abd 16862
1da177e4 16863 unregister_netdev(dev);
16864 if (tp->aperegs) {
16865 iounmap(tp->aperegs);
16866 tp->aperegs = NULL;
16867 }
16868 if (tp->regs) {
16869 iounmap(tp->regs);
22abe310 16870 tp->regs = NULL;
6892914f 16871 }
16872 free_netdev(dev);
16873 pci_release_regions(pdev);
16874 pci_disable_device(pdev);
16875 pci_set_drvdata(pdev, NULL);
16876 }
16877}
16878
aa6027ca 16879#ifdef CONFIG_PM_SLEEP
c866b7ea 16880static int tg3_suspend(struct device *device)
1da177e4 16881{
c866b7ea 16882 struct pci_dev *pdev = to_pci_dev(device);
16883 struct net_device *dev = pci_get_drvdata(pdev);
16884 struct tg3 *tp = netdev_priv(dev);
16885 int err;
16886
16887 if (!netif_running(dev))
16888 return 0;
16889
db219973 16890 tg3_reset_task_cancel(tp);
b02fd9e3 16891 tg3_phy_stop(tp);
16892 tg3_netif_stop(tp);
16893
21f7638e 16894 tg3_timer_stop(tp);
1da177e4 16895
f47c11ee 16896 tg3_full_lock(tp, 1);
1da177e4 16897 tg3_disable_ints(tp);
f47c11ee 16898 tg3_full_unlock(tp);
16899
16900 netif_device_detach(dev);
16901
f47c11ee 16902 tg3_full_lock(tp, 0);
944d980e 16903 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
63c3a66f 16904 tg3_flag_clear(tp, INIT_COMPLETE);
f47c11ee 16905 tg3_full_unlock(tp);
1da177e4 16906
c866b7ea 16907 err = tg3_power_down_prepare(tp);
1da177e4 16908 if (err) {
16909 int err2;
16910
f47c11ee 16911 tg3_full_lock(tp, 0);
1da177e4 16912
63c3a66f 16913 tg3_flag_set(tp, INIT_COMPLETE);
16914 err2 = tg3_restart_hw(tp, 1);
16915 if (err2)
b9ec6c1b 16916 goto out;
1da177e4 16917
21f7638e 16918 tg3_timer_start(tp);
16919
16920 netif_device_attach(dev);
16921 tg3_netif_start(tp);
16922
b9ec6c1b 16923out:
f47c11ee 16924 tg3_full_unlock(tp);
16925
16926 if (!err2)
16927 tg3_phy_start(tp);
16928 }
16929
16930 return err;
16931}
16932
c866b7ea 16933static int tg3_resume(struct device *device)
1da177e4 16934{
c866b7ea 16935 struct pci_dev *pdev = to_pci_dev(device);
16936 struct net_device *dev = pci_get_drvdata(pdev);
16937 struct tg3 *tp = netdev_priv(dev);
16938 int err;
16939
16940 if (!netif_running(dev))
16941 return 0;
16942
16943 netif_device_attach(dev);
16944
f47c11ee 16945 tg3_full_lock(tp, 0);
1da177e4 16946
63c3a66f 16947 tg3_flag_set(tp, INIT_COMPLETE);
16948 err = tg3_restart_hw(tp, 1);
16949 if (err)
16950 goto out;
1da177e4 16951
21f7638e 16952 tg3_timer_start(tp);
1da177e4 16953
16954 tg3_netif_start(tp);
16955
b9ec6c1b 16956out:
f47c11ee 16957 tg3_full_unlock(tp);
1da177e4 16958
16959 if (!err)
16960 tg3_phy_start(tp);
16961
b9ec6c1b 16962 return err;
16963}
16964
c866b7ea 16965static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
16966#define TG3_PM_OPS (&tg3_pm_ops)
16967
16968#else
16969
16970#define TG3_PM_OPS NULL
16971
16972#endif /* CONFIG_PM_SLEEP */
c866b7ea 16973
16974/**
16975 * tg3_io_error_detected - called when PCI error is detected
16976 * @pdev: Pointer to PCI device
16977 * @state: The current pci connection state
16978 *
16979 * This function is called after a PCI bus error affecting
16980 * this device has been detected.
16981 */
16982static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
16983 pci_channel_state_t state)
16984{
16985 struct net_device *netdev = pci_get_drvdata(pdev);
16986 struct tg3 *tp = netdev_priv(netdev);
16987 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
16988
16989 netdev_info(netdev, "PCI I/O error detected\n");
16990
16991 rtnl_lock();
16992
16993 if (!netif_running(netdev))
16994 goto done;
16995
16996 tg3_phy_stop(tp);
16997
16998 tg3_netif_stop(tp);
16999
21f7638e 17000 tg3_timer_stop(tp);
17001
17002 /* Want to make sure that the reset task doesn't run */
db219973 17003 tg3_reset_task_cancel(tp);
17004
17005 netif_device_detach(netdev);
17006
17007 /* Clean up software state, even if MMIO is blocked */
17008 tg3_full_lock(tp, 0);
17009 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17010 tg3_full_unlock(tp);
17011
17012done:
17013 if (state == pci_channel_io_perm_failure)
17014 err = PCI_ERS_RESULT_DISCONNECT;
17015 else
17016 pci_disable_device(pdev);
17017
17018 rtnl_unlock();
17019
17020 return err;
17021}
17022
17023/**
17024 * tg3_io_slot_reset - called after the pci bus has been reset.
17025 * @pdev: Pointer to PCI device
17026 *
17027 * Restart the card from scratch, as if from a cold-boot.
 17028 * At this point, the card has experienced a hard reset,
17029 * followed by fixups by BIOS, and has its config space
17030 * set up identically to what it was at cold boot.
17031 */
17032static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17033{
17034 struct net_device *netdev = pci_get_drvdata(pdev);
17035 struct tg3 *tp = netdev_priv(netdev);
17036 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17037 int err;
17038
17039 rtnl_lock();
17040
17041 if (pci_enable_device(pdev)) {
17042 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17043 goto done;
17044 }
17045
17046 pci_set_master(pdev);
17047 pci_restore_state(pdev);
17048 pci_save_state(pdev);
17049
17050 if (!netif_running(netdev)) {
17051 rc = PCI_ERS_RESULT_RECOVERED;
17052 goto done;
17053 }
17054
17055 err = tg3_power_up(tp);
bed9829f 17056 if (err)
b45aa2f6 17057 goto done;
17058
17059 rc = PCI_ERS_RESULT_RECOVERED;
17060
17061done:
17062 rtnl_unlock();
17063
17064 return rc;
17065}
17066
17067/**
17068 * tg3_io_resume - called when traffic can start flowing again.
17069 * @pdev: Pointer to PCI device
17070 *
17071 * This callback is called when the error recovery driver tells
 17072 * us that it is OK to resume normal operation.
17073 */
17074static void tg3_io_resume(struct pci_dev *pdev)
17075{
17076 struct net_device *netdev = pci_get_drvdata(pdev);
17077 struct tg3 *tp = netdev_priv(netdev);
17078 int err;
17079
17080 rtnl_lock();
17081
17082 if (!netif_running(netdev))
17083 goto done;
17084
17085 tg3_full_lock(tp, 0);
63c3a66f 17086 tg3_flag_set(tp, INIT_COMPLETE);
b45aa2f6 17087 err = tg3_restart_hw(tp, 1);
b45aa2f6 17088 if (err) {
35763066 17089 tg3_full_unlock(tp);
17090 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17091 goto done;
17092 }
17093
17094 netif_device_attach(netdev);
17095
21f7638e 17096 tg3_timer_start(tp);
17097
17098 tg3_netif_start(tp);
17099
17100 tg3_full_unlock(tp);
17101
17102 tg3_phy_start(tp);
17103
17104done:
17105 rtnl_unlock();
17106}
17107
3646f0e5 17108static const struct pci_error_handlers tg3_err_handler = {
17109 .error_detected = tg3_io_error_detected,
17110 .slot_reset = tg3_io_slot_reset,
17111 .resume = tg3_io_resume
17112};
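/* PCI AER recovery invokes these hooks in order: error_detected()
 * quiesces the driver and votes on recoverability, slot_reset()
 * re-enables the device and restores config space after the link
 * reset, and resume() restarts traffic once the core declares the
 * slot usable again.
 */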
17113
17114static struct pci_driver tg3_driver = {
17115 .name = DRV_MODULE_NAME,
17116 .id_table = tg3_pci_tbl,
17117 .probe = tg3_init_one,
229b1ad1 17118 .remove = tg3_remove_one,
b45aa2f6 17119 .err_handler = &tg3_err_handler,
aa6027ca 17120 .driver.pm = TG3_PM_OPS,
17121};
17122
17123static int __init tg3_init(void)
17124{
29917620 17125 return pci_register_driver(&tg3_driver);
17126}
17127
17128static void __exit tg3_cleanup(void)
17129{
17130 pci_unregister_driver(&tg3_driver);
17131}
17132
17133module_init(tg3_init);
17134module_exit(tg3_cleanup);