/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Derived from proprietary unpublished source code,
 * Copyright (C) 2000-2003 Broadcom Corporation.
 *
 * Permission is hereby granted for the distribution of this firmware
 * data in hexadecimal or equivalent format, provided this copyright
 * notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#include <asm/idprom.h>
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
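
/* Illustrative sketch (not part of the original driver): how the tg3_flag()
 * accessors above are used throughout this file.  The flag names referenced
 * here (ENABLE_APE, TAGGED_STATUS) are real TG3_FLAG_* values from tg3.h;
 * the helper itself is hypothetical and only demonstrates the pattern.
 */
static inline void tg3_flag_usage_sketch(struct tg3 *tp)
{
	/* Test a feature flag: expands to _tg3_flag(TG3_FLAG_ENABLE_APE, ...) */
	if (tg3_flag(tp, ENABLE_APE))
		return;

	/* Set and clear flags atomically via set_bit()/clear_bit() */
	tg3_flag_set(tp, TAGGED_STATUS);
	tg3_flag_clear(tp, TAGGED_STATUS);
}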
#define DRV_MODULE_NAME		"tg3"
#define TG3_MIN_NUM		132
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 21, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
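
/* Illustrative sketch (not part of the original driver): because
 * TG3_TX_RING_SIZE is a power of two, the NEXT_TX() macro above can advance
 * a producer/consumer index with a mask instead of a '%' operation, which is
 * what the comment about modulo via shifts and masks refers to.
 */
static inline u32 tg3_next_tx_sketch(u32 idx)
{
	/* Equivalent to (idx + 1) % TG3_TX_RING_SIZE, but without a divide. */
	return NEXT_TX(idx);	/* e.g. NEXT_TX(511) wraps back to 0 */
}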
static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
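
/* Illustrative sketch (not part of the original driver): the .driver_data
 * values in tg3_pci_tbl are typically read back in the PCI probe routine via
 * the matched pci_device_id entry.  The helper below is hypothetical; it only
 * shows how the TG3_DRV_DATA_FLAG_* bits defined above would be consulted.
 */
static inline bool tg3_ent_is_10_100_only(const struct pci_device_id *ent)
{
	return (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) != 0;
}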
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
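
/* Illustrative sketch (not part of the original driver): ethtool_stats_keys
 * and ethtool_test_keys above are consumed by the driver's ethtool
 * get_strings callback.  This hypothetical helper shows the usual pattern of
 * copying the fixed-size string tables into the buffer ethtool provides.
 */
static inline void tg3_copy_ethtool_strings_sketch(u32 stringset, u8 *buf)
{
	if (stringset == ETH_SS_STATS)
		memcpy(buf, ethtool_stats_keys, sizeof(ethtool_stats_keys));
	else if (stringset == ETH_SS_TEST)
		memcpy(buf, ethtool_test_keys, sizeof(ethtool_test_keys));
}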
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;

	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
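
/* Illustrative sketch (not part of the original driver): the difference
 * between the register-write macros defined above.  tw32() is a plain posted
 * write through the per-chip method, tw32_f() flushes the write by reading
 * the register back, and tw32_wait_f() additionally delays for registers such
 * as GRC_LOCAL_CTRL that must not be read back too soon (see the usec_wait
 * comment above _tw32_flush()).  The helper itself is hypothetical.
 */
static inline void tg3_regwrite_sketch(struct tg3 *tp)
{
	tw32(MAC_MODE, tp->mac_mode);		/* posted write */
	tw32_f(MAC_MODE, tp->mac_mode);		/* write + read-back flush */
	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);	/* write + flush + delay */
}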
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
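
/* Illustrative sketch (not part of the original driver): tg3_write_mem() and
 * tg3_read_mem() above give serialized access to NIC SRAM through the
 * TG3PCI_MEM_WIN_BASE_ADDR/TG3PCI_MEM_WIN_DATA window.  The hypothetical
 * helper below shows a typical round trip to a firmware mailbox location.
 */
static inline u32 tg3_sram_roundtrip_sketch(struct tg3 *tp, u32 cmd)
{
	u32 val;

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, cmd);
	tg3_read_mem(tp, NIC_SRAM_FW_CMD_MBOX, &val);
	return val;
}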
670 static void tg3_ape_lock_init(struct tg3
*tp
)
675 if (tg3_asic_rev(tp
) == ASIC_REV_5761
)
676 regbase
= TG3_APE_LOCK_GRANT
;
678 regbase
= TG3_APE_PER_LOCK_GRANT
;
680 /* Make sure the driver hasn't any stale locks. */
681 for (i
= TG3_APE_LOCK_PHY0
; i
<= TG3_APE_LOCK_GPIO
; i
++) {
683 case TG3_APE_LOCK_PHY0
:
684 case TG3_APE_LOCK_PHY1
:
685 case TG3_APE_LOCK_PHY2
:
686 case TG3_APE_LOCK_PHY3
:
687 bit
= APE_LOCK_GRANT_DRIVER
;
691 bit
= APE_LOCK_GRANT_DRIVER
;
693 bit
= 1 << tp
->pci_fn
;
695 tg3_ape_write32(tp
, regbase
+ 4 * i
, bit
);
700 static int tg3_ape_lock(struct tg3
*tp
, int locknum
)
704 u32 status
, req
, gnt
, bit
;
706 if (!tg3_flag(tp
, ENABLE_APE
))
710 case TG3_APE_LOCK_GPIO
:
711 if (tg3_asic_rev(tp
) == ASIC_REV_5761
)
713 case TG3_APE_LOCK_GRC
:
714 case TG3_APE_LOCK_MEM
:
716 bit
= APE_LOCK_REQ_DRIVER
;
718 bit
= 1 << tp
->pci_fn
;
720 case TG3_APE_LOCK_PHY0
:
721 case TG3_APE_LOCK_PHY1
:
722 case TG3_APE_LOCK_PHY2
:
723 case TG3_APE_LOCK_PHY3
:
724 bit
= APE_LOCK_REQ_DRIVER
;
730 if (tg3_asic_rev(tp
) == ASIC_REV_5761
) {
731 req
= TG3_APE_LOCK_REQ
;
732 gnt
= TG3_APE_LOCK_GRANT
;
734 req
= TG3_APE_PER_LOCK_REQ
;
735 gnt
= TG3_APE_PER_LOCK_GRANT
;
740 tg3_ape_write32(tp
, req
+ off
, bit
);
742 /* Wait for up to 1 millisecond to acquire lock. */
743 for (i
= 0; i
< 100; i
++) {
744 status
= tg3_ape_read32(tp
, gnt
+ off
);
747 if (pci_channel_offline(tp
->pdev
))
754 /* Revoke the lock request. */
755 tg3_ape_write32(tp
, gnt
+ off
, bit
);
762 static void tg3_ape_unlock(struct tg3
*tp
, int locknum
)
766 if (!tg3_flag(tp
, ENABLE_APE
))
770 case TG3_APE_LOCK_GPIO
:
771 if (tg3_asic_rev(tp
) == ASIC_REV_5761
)
773 case TG3_APE_LOCK_GRC
:
774 case TG3_APE_LOCK_MEM
:
776 bit
= APE_LOCK_GRANT_DRIVER
;
778 bit
= 1 << tp
->pci_fn
;
780 case TG3_APE_LOCK_PHY0
:
781 case TG3_APE_LOCK_PHY1
:
782 case TG3_APE_LOCK_PHY2
:
783 case TG3_APE_LOCK_PHY3
:
784 bit
= APE_LOCK_GRANT_DRIVER
;
790 if (tg3_asic_rev(tp
) == ASIC_REV_5761
)
791 gnt
= TG3_APE_LOCK_GRANT
;
793 gnt
= TG3_APE_PER_LOCK_GRANT
;
795 tg3_ape_write32(tp
, gnt
+ 4 * locknum
, bit
);
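
/* Illustrative sketch (not part of the original driver): tg3_ape_lock() and
 * tg3_ape_unlock() above bracket accesses to resources shared with the APE
 * management firmware.  The hypothetical helper below shows the usual
 * acquire/use/release pattern around the TG3_APE_LOCK_MEM lock.
 */
static inline int tg3_ape_locked_access_sketch(struct tg3 *tp)
{
	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
		return -EBUSY;		/* lock not granted within ~1 ms */

	/* ... touch APE/shared registers here ... */

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	return 0;
}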
798 static int tg3_ape_event_lock(struct tg3
*tp
, u32 timeout_us
)
803 if (tg3_ape_lock(tp
, TG3_APE_LOCK_MEM
))
806 apedata
= tg3_ape_read32(tp
, TG3_APE_EVENT_STATUS
);
807 if (!(apedata
& APE_EVENT_STATUS_EVENT_PENDING
))
810 tg3_ape_unlock(tp
, TG3_APE_LOCK_MEM
);
813 timeout_us
-= (timeout_us
> 10) ? 10 : timeout_us
;
816 return timeout_us
? 0 : -EBUSY
;
819 static int tg3_ape_wait_for_event(struct tg3
*tp
, u32 timeout_us
)
823 for (i
= 0; i
< timeout_us
/ 10; i
++) {
824 apedata
= tg3_ape_read32(tp
, TG3_APE_EVENT_STATUS
);
826 if (!(apedata
& APE_EVENT_STATUS_EVENT_PENDING
))
832 return i
== timeout_us
/ 10;
835 static int tg3_ape_scratchpad_read(struct tg3
*tp
, u32
*data
, u32 base_off
,
839 u32 i
, bufoff
, msgoff
, maxlen
, apedata
;
841 if (!tg3_flag(tp
, APE_HAS_NCSI
))
844 apedata
= tg3_ape_read32(tp
, TG3_APE_SEG_SIG
);
845 if (apedata
!= APE_SEG_SIG_MAGIC
)
848 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_STATUS
);
849 if (!(apedata
& APE_FW_STATUS_READY
))
852 bufoff
= tg3_ape_read32(tp
, TG3_APE_SEG_MSG_BUF_OFF
) +
854 msgoff
= bufoff
+ 2 * sizeof(u32
);
855 maxlen
= tg3_ape_read32(tp
, TG3_APE_SEG_MSG_BUF_LEN
);
860 /* Cap xfer sizes to scratchpad limits. */
861 length
= (len
> maxlen
) ? maxlen
: len
;
864 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_STATUS
);
865 if (!(apedata
& APE_FW_STATUS_READY
))
868 /* Wait for up to 1 msec for APE to service previous event. */
869 err
= tg3_ape_event_lock(tp
, 1000);
873 apedata
= APE_EVENT_STATUS_DRIVER_EVNT
|
874 APE_EVENT_STATUS_SCRTCHPD_READ
|
875 APE_EVENT_STATUS_EVENT_PENDING
;
876 tg3_ape_write32(tp
, TG3_APE_EVENT_STATUS
, apedata
);
878 tg3_ape_write32(tp
, bufoff
, base_off
);
879 tg3_ape_write32(tp
, bufoff
+ sizeof(u32
), length
);
881 tg3_ape_unlock(tp
, TG3_APE_LOCK_MEM
);
882 tg3_ape_write32(tp
, TG3_APE_EVENT
, APE_EVENT_1
);
886 if (tg3_ape_wait_for_event(tp
, 30000))
889 for (i
= 0; length
; i
+= 4, length
-= 4) {
890 u32 val
= tg3_ape_read32(tp
, msgoff
+ i
);
891 memcpy(data
, &val
, sizeof(u32
));
899 static int tg3_ape_send_event(struct tg3
*tp
, u32 event
)
904 apedata
= tg3_ape_read32(tp
, TG3_APE_SEG_SIG
);
905 if (apedata
!= APE_SEG_SIG_MAGIC
)
908 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_STATUS
);
909 if (!(apedata
& APE_FW_STATUS_READY
))
912 /* Wait for up to 1 millisecond for APE to service previous event. */
913 err
= tg3_ape_event_lock(tp
, 1000);
917 tg3_ape_write32(tp
, TG3_APE_EVENT_STATUS
,
918 event
| APE_EVENT_STATUS_EVENT_PENDING
);
920 tg3_ape_unlock(tp
, TG3_APE_LOCK_MEM
);
921 tg3_ape_write32(tp
, TG3_APE_EVENT
, APE_EVENT_1
);
926 static void tg3_ape_driver_state_change(struct tg3
*tp
, int kind
)
931 if (!tg3_flag(tp
, ENABLE_APE
))
935 case RESET_KIND_INIT
:
936 tg3_ape_write32(tp
, TG3_APE_HOST_SEG_SIG
,
937 APE_HOST_SEG_SIG_MAGIC
);
938 tg3_ape_write32(tp
, TG3_APE_HOST_SEG_LEN
,
939 APE_HOST_SEG_LEN_MAGIC
);
940 apedata
= tg3_ape_read32(tp
, TG3_APE_HOST_INIT_COUNT
);
941 tg3_ape_write32(tp
, TG3_APE_HOST_INIT_COUNT
, ++apedata
);
942 tg3_ape_write32(tp
, TG3_APE_HOST_DRIVER_ID
,
943 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM
, TG3_MIN_NUM
));
944 tg3_ape_write32(tp
, TG3_APE_HOST_BEHAVIOR
,
945 APE_HOST_BEHAV_NO_PHYLOCK
);
946 tg3_ape_write32(tp
, TG3_APE_HOST_DRVR_STATE
,
947 TG3_APE_HOST_DRVR_STATE_START
);
949 event
= APE_EVENT_STATUS_STATE_START
;
951 case RESET_KIND_SHUTDOWN
:
952 /* With the interface we are currently using,
953 * APE does not track driver state. Wiping
954 * out the HOST SEGMENT SIGNATURE forces
955 * the APE to assume OS absent status.
957 tg3_ape_write32(tp
, TG3_APE_HOST_SEG_SIG
, 0x0);
959 if (device_may_wakeup(&tp
->pdev
->dev
) &&
960 tg3_flag(tp
, WOL_ENABLE
)) {
961 tg3_ape_write32(tp
, TG3_APE_HOST_WOL_SPEED
,
962 TG3_APE_HOST_WOL_SPEED_AUTO
);
963 apedata
= TG3_APE_HOST_DRVR_STATE_WOL
;
965 apedata
= TG3_APE_HOST_DRVR_STATE_UNLOAD
;
967 tg3_ape_write32(tp
, TG3_APE_HOST_DRVR_STATE
, apedata
);
969 event
= APE_EVENT_STATUS_STATE_UNLOAD
;
975 event
|= APE_EVENT_STATUS_DRIVER_EVNT
| APE_EVENT_STATUS_STATE_CHNGE
;
977 tg3_ape_send_event(tp
, event
);
980 static void tg3_disable_ints(struct tg3
*tp
)
984 tw32(TG3PCI_MISC_HOST_CTRL
,
985 (tp
->misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
));
986 for (i
= 0; i
< tp
->irq_max
; i
++)
987 tw32_mailbox_f(tp
->napi
[i
].int_mbox
, 0x00000001);
990 static void tg3_enable_ints(struct tg3
*tp
)
997 tw32(TG3PCI_MISC_HOST_CTRL
,
998 (tp
->misc_host_ctrl
& ~MISC_HOST_CTRL_MASK_PCI_INT
));
1000 tp
->coal_now
= tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
;
1001 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
1002 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
1004 tw32_mailbox_f(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
1005 if (tg3_flag(tp
, 1SHOT_MSI
))
1006 tw32_mailbox_f(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
1008 tp
->coal_now
|= tnapi
->coal_now
;
1011 /* Force an initial interrupt */
1012 if (!tg3_flag(tp
, TAGGED_STATUS
) &&
1013 (tp
->napi
[0].hw_status
->status
& SD_STATUS_UPDATED
))
1014 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
| GRC_LCLCTRL_SETINT
);
1016 tw32(HOSTCC_MODE
, tp
->coal_now
);
1018 tp
->coal_now
&= ~(tp
->napi
[0].coal_now
| tp
->napi
[1].coal_now
);
1021 static inline unsigned int tg3_has_work(struct tg3_napi
*tnapi
)
1023 struct tg3
*tp
= tnapi
->tp
;
1024 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
1025 unsigned int work_exists
= 0;
1027 /* check for phy events */
1028 if (!(tg3_flag(tp
, USE_LINKCHG_REG
) || tg3_flag(tp
, POLL_SERDES
))) {
1029 if (sblk
->status
& SD_STATUS_LINK_CHG
)
1033 /* check for TX work to do */
1034 if (sblk
->idx
[0].tx_consumer
!= tnapi
->tx_cons
)
1037 /* check for RX work to do */
1038 if (tnapi
->rx_rcb_prod_idx
&&
1039 *(tnapi
->rx_rcb_prod_idx
) != tnapi
->rx_rcb_ptr
)
1046 * similar to tg3_enable_ints, but it accurately determines whether there
1047 * is new work pending and can return without flushing the PIO write
1048 * which reenables interrupts
1050 static void tg3_int_reenable(struct tg3_napi
*tnapi
)
1052 struct tg3
*tp
= tnapi
->tp
;
1054 tw32_mailbox(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
1057 /* When doing tagged status, this work check is unnecessary.
1058 * The last_tag we write above tells the chip which piece of
1059 * work we've completed.
1061 if (!tg3_flag(tp
, TAGGED_STATUS
) && tg3_has_work(tnapi
))
1062 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
1063 HOSTCC_MODE_ENABLE
| tnapi
->coal_now
);
1066 static void tg3_switch_clocks(struct tg3
*tp
)
1069 u32 orig_clock_ctrl
;
1071 if (tg3_flag(tp
, CPMU_PRESENT
) || tg3_flag(tp
, 5780_CLASS
))
1074 clock_ctrl
= tr32(TG3PCI_CLOCK_CTRL
);
1076 orig_clock_ctrl
= clock_ctrl
;
1077 clock_ctrl
&= (CLOCK_CTRL_FORCE_CLKRUN
|
1078 CLOCK_CTRL_CLKRUN_OENABLE
|
1080 tp
->pci_clock_ctrl
= clock_ctrl
;
1082 if (tg3_flag(tp
, 5705_PLUS
)) {
1083 if (orig_clock_ctrl
& CLOCK_CTRL_625_CORE
) {
1084 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
1085 clock_ctrl
| CLOCK_CTRL_625_CORE
, 40);
1087 } else if ((orig_clock_ctrl
& CLOCK_CTRL_44MHZ_CORE
) != 0) {
1088 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
1090 (CLOCK_CTRL_44MHZ_CORE
| CLOCK_CTRL_ALTCLK
),
1092 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
1093 clock_ctrl
| (CLOCK_CTRL_ALTCLK
),
1096 tw32_wait_f(TG3PCI_CLOCK_CTRL
, clock_ctrl
, 40);
1099 #define PHY_BUSY_LOOPS 5000
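
/* Illustrative sketch (not part of the original driver): PHY_BUSY_LOOPS above
 * bounds the busy-wait loops in the PHY access routines below.  The
 * hypothetical helper shows the pattern: poll MAC_MI_COM until MI_COM_BUSY
 * clears or the loop count runs out, with a small udelay() per iteration.
 */
static inline int tg3_mi_com_wait_sketch(struct tg3 *tp)
{
	unsigned int loops = PHY_BUSY_LOOPS;

	while (loops--) {
		udelay(10);
		if (!(tr32(MAC_MI_COM) & MI_COM_BUSY))
			return 0;	/* frame completed */
	}
	return -EBUSY;			/* still busy after the bounded wait */
}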
1101 static int __tg3_readphy(struct tg3
*tp
, unsigned int phy_addr
, int reg
,
1108 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
1110 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
1114 tg3_ape_lock(tp
, tp
->phy_ape_lock
);
1118 frame_val
= ((phy_addr
<< MI_COM_PHY_ADDR_SHIFT
) &
1119 MI_COM_PHY_ADDR_MASK
);
1120 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
1121 MI_COM_REG_ADDR_MASK
);
1122 frame_val
|= (MI_COM_CMD_READ
| MI_COM_START
);
1124 tw32_f(MAC_MI_COM
, frame_val
);
1126 loops
= PHY_BUSY_LOOPS
;
1127 while (loops
!= 0) {
1129 frame_val
= tr32(MAC_MI_COM
);
1131 if ((frame_val
& MI_COM_BUSY
) == 0) {
1133 frame_val
= tr32(MAC_MI_COM
);
1141 *val
= frame_val
& MI_COM_DATA_MASK
;
1145 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
1146 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1150 tg3_ape_unlock(tp
, tp
->phy_ape_lock
);
1155 static int tg3_readphy(struct tg3
*tp
, int reg
, u32
*val
)
1157 return __tg3_readphy(tp
, tp
->phy_addr
, reg
, val
);
1160 static int __tg3_writephy(struct tg3
*tp
, unsigned int phy_addr
, int reg
,
1167 if ((tp
->phy_flags
& TG3_PHYFLG_IS_FET
) &&
1168 (reg
== MII_CTRL1000
|| reg
== MII_TG3_AUX_CTRL
))
1171 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
1173 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
1177 tg3_ape_lock(tp
, tp
->phy_ape_lock
);
1179 frame_val
= ((phy_addr
<< MI_COM_PHY_ADDR_SHIFT
) &
1180 MI_COM_PHY_ADDR_MASK
);
1181 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
1182 MI_COM_REG_ADDR_MASK
);
1183 frame_val
|= (val
& MI_COM_DATA_MASK
);
1184 frame_val
|= (MI_COM_CMD_WRITE
| MI_COM_START
);
1186 tw32_f(MAC_MI_COM
, frame_val
);
1188 loops
= PHY_BUSY_LOOPS
;
1189 while (loops
!= 0) {
1191 frame_val
= tr32(MAC_MI_COM
);
1192 if ((frame_val
& MI_COM_BUSY
) == 0) {
1194 frame_val
= tr32(MAC_MI_COM
);
1204 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
1205 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1209 tg3_ape_unlock(tp
, tp
->phy_ape_lock
);
1214 static int tg3_writephy(struct tg3
*tp
, int reg
, u32 val
)
1216 return __tg3_writephy(tp
, tp
->phy_addr
, reg
, val
);
1219 static int tg3_phy_cl45_write(struct tg3
*tp
, u32 devad
, u32 addr
, u32 val
)
1223 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
, devad
);
1227 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, addr
);
1231 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
,
1232 MII_TG3_MMD_CTRL_DATA_NOINC
| devad
);
1236 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, val
);
1242 static int tg3_phy_cl45_read(struct tg3
*tp
, u32 devad
, u32 addr
, u32
*val
)
1246 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
, devad
);
1250 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, addr
);
1254 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
,
1255 MII_TG3_MMD_CTRL_DATA_NOINC
| devad
);
1259 err
= tg3_readphy(tp
, MII_TG3_MMD_ADDRESS
, val
);
1265 static int tg3_phydsp_read(struct tg3
*tp
, u32 reg
, u32
*val
)
1269 err
= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, reg
);
1271 err
= tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, val
);
1276 static int tg3_phydsp_write(struct tg3
*tp
, u32 reg
, u32 val
)
1280 err
= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, reg
);
1282 err
= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, val
);
1287 static int tg3_phy_auxctl_read(struct tg3
*tp
, int reg
, u32
*val
)
1291 err
= tg3_writephy(tp
, MII_TG3_AUX_CTRL
,
1292 (reg
<< MII_TG3_AUXCTL_MISC_RDSEL_SHIFT
) |
1293 MII_TG3_AUXCTL_SHDWSEL_MISC
);
1295 err
= tg3_readphy(tp
, MII_TG3_AUX_CTRL
, val
);
1300 static int tg3_phy_auxctl_write(struct tg3
*tp
, int reg
, u32 set
)
1302 if (reg
== MII_TG3_AUXCTL_SHDWSEL_MISC
)
1303 set
|= MII_TG3_AUXCTL_MISC_WREN
;
1305 return tg3_writephy(tp
, MII_TG3_AUX_CTRL
, set
| reg
);
1308 static int tg3_phy_toggle_auxctl_smdsp(struct tg3
*tp
, bool enable
)
1313 err
= tg3_phy_auxctl_read(tp
, MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, &val
);
1319 val
|= MII_TG3_AUXCTL_ACTL_SMDSP_ENA
;
1321 val
&= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA
;
1323 err
= tg3_phy_auxctl_write((tp
), MII_TG3_AUXCTL_SHDWSEL_AUXCTL
,
1324 val
| MII_TG3_AUXCTL_ACTL_TX_6DB
);
1329 static int tg3_bmcr_reset(struct tg3
*tp
)
1334 /* OK, reset it, and poll the BMCR_RESET bit until it
1335 * clears or we time out.
1337 phy_control
= BMCR_RESET
;
1338 err
= tg3_writephy(tp
, MII_BMCR
, phy_control
);
1344 err
= tg3_readphy(tp
, MII_BMCR
, &phy_control
);
1348 if ((phy_control
& BMCR_RESET
) == 0) {
1360 static int tg3_mdio_read(struct mii_bus
*bp
, int mii_id
, int reg
)
1362 struct tg3
*tp
= bp
->priv
;
1365 spin_lock_bh(&tp
->lock
);
1367 if (tg3_readphy(tp
, reg
, &val
))
1370 spin_unlock_bh(&tp
->lock
);
1375 static int tg3_mdio_write(struct mii_bus
*bp
, int mii_id
, int reg
, u16 val
)
1377 struct tg3
*tp
= bp
->priv
;
1380 spin_lock_bh(&tp
->lock
);
1382 if (tg3_writephy(tp
, reg
, val
))
1385 spin_unlock_bh(&tp
->lock
);
1390 static int tg3_mdio_reset(struct mii_bus
*bp
)
1395 static void tg3_mdio_config_5785(struct tg3
*tp
)
1398 struct phy_device
*phydev
;
1400 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1401 switch (phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
) {
1402 case PHY_ID_BCM50610
:
1403 case PHY_ID_BCM50610M
:
1404 val
= MAC_PHYCFG2_50610_LED_MODES
;
1406 case PHY_ID_BCMAC131
:
1407 val
= MAC_PHYCFG2_AC131_LED_MODES
;
1409 case PHY_ID_RTL8211C
:
1410 val
= MAC_PHYCFG2_RTL8211C_LED_MODES
;
1412 case PHY_ID_RTL8201E
:
1413 val
= MAC_PHYCFG2_RTL8201E_LED_MODES
;
1419 if (phydev
->interface
!= PHY_INTERFACE_MODE_RGMII
) {
1420 tw32(MAC_PHYCFG2
, val
);
1422 val
= tr32(MAC_PHYCFG1
);
1423 val
&= ~(MAC_PHYCFG1_RGMII_INT
|
1424 MAC_PHYCFG1_RXCLK_TO_MASK
| MAC_PHYCFG1_TXCLK_TO_MASK
);
1425 val
|= MAC_PHYCFG1_RXCLK_TIMEOUT
| MAC_PHYCFG1_TXCLK_TIMEOUT
;
1426 tw32(MAC_PHYCFG1
, val
);
1431 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
))
1432 val
|= MAC_PHYCFG2_EMODE_MASK_MASK
|
1433 MAC_PHYCFG2_FMODE_MASK_MASK
|
1434 MAC_PHYCFG2_GMODE_MASK_MASK
|
1435 MAC_PHYCFG2_ACT_MASK_MASK
|
1436 MAC_PHYCFG2_QUAL_MASK_MASK
|
1437 MAC_PHYCFG2_INBAND_ENABLE
;
1439 tw32(MAC_PHYCFG2
, val
);
1441 val
= tr32(MAC_PHYCFG1
);
1442 val
&= ~(MAC_PHYCFG1_RXCLK_TO_MASK
| MAC_PHYCFG1_TXCLK_TO_MASK
|
1443 MAC_PHYCFG1_RGMII_EXT_RX_DEC
| MAC_PHYCFG1_RGMII_SND_STAT_EN
);
1444 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
)) {
1445 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1446 val
|= MAC_PHYCFG1_RGMII_EXT_RX_DEC
;
1447 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1448 val
|= MAC_PHYCFG1_RGMII_SND_STAT_EN
;
1450 val
|= MAC_PHYCFG1_RXCLK_TIMEOUT
| MAC_PHYCFG1_TXCLK_TIMEOUT
|
1451 MAC_PHYCFG1_RGMII_INT
| MAC_PHYCFG1_TXC_DRV
;
1452 tw32(MAC_PHYCFG1
, val
);
1454 val
= tr32(MAC_EXT_RGMII_MODE
);
1455 val
&= ~(MAC_RGMII_MODE_RX_INT_B
|
1456 MAC_RGMII_MODE_RX_QUALITY
|
1457 MAC_RGMII_MODE_RX_ACTIVITY
|
1458 MAC_RGMII_MODE_RX_ENG_DET
|
1459 MAC_RGMII_MODE_TX_ENABLE
|
1460 MAC_RGMII_MODE_TX_LOWPWR
|
1461 MAC_RGMII_MODE_TX_RESET
);
1462 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
)) {
1463 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1464 val
|= MAC_RGMII_MODE_RX_INT_B
|
1465 MAC_RGMII_MODE_RX_QUALITY
|
1466 MAC_RGMII_MODE_RX_ACTIVITY
|
1467 MAC_RGMII_MODE_RX_ENG_DET
;
1468 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1469 val
|= MAC_RGMII_MODE_TX_ENABLE
|
1470 MAC_RGMII_MODE_TX_LOWPWR
|
1471 MAC_RGMII_MODE_TX_RESET
;
1473 tw32(MAC_EXT_RGMII_MODE
, val
);
1476 static void tg3_mdio_start(struct tg3
*tp
)
1478 tp
->mi_mode
&= ~MAC_MI_MODE_AUTO_POLL
;
1479 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1482 if (tg3_flag(tp
, MDIOBUS_INITED
) &&
1483 tg3_asic_rev(tp
) == ASIC_REV_5785
)
1484 tg3_mdio_config_5785(tp
);
1487 static int tg3_mdio_init(struct tg3
*tp
)
1491 struct phy_device
*phydev
;
1493 if (tg3_flag(tp
, 5717_PLUS
)) {
1496 tp
->phy_addr
= tp
->pci_fn
+ 1;
1498 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5717_A0
)
1499 is_serdes
= tr32(SG_DIG_STATUS
) & SG_DIG_IS_SERDES
;
1501 is_serdes
= tr32(TG3_CPMU_PHY_STRAP
) &
1502 TG3_CPMU_PHY_STRAP_IS_SERDES
;
1506 tp
->phy_addr
= TG3_PHY_MII_ADDR
;
1510 if (!tg3_flag(tp
, USE_PHYLIB
) || tg3_flag(tp
, MDIOBUS_INITED
))
1513 tp
->mdio_bus
= mdiobus_alloc();
1514 if (tp
->mdio_bus
== NULL
)
1517 tp
->mdio_bus
->name
= "tg3 mdio bus";
1518 snprintf(tp
->mdio_bus
->id
, MII_BUS_ID_SIZE
, "%x",
1519 (tp
->pdev
->bus
->number
<< 8) | tp
->pdev
->devfn
);
1520 tp
->mdio_bus
->priv
= tp
;
1521 tp
->mdio_bus
->parent
= &tp
->pdev
->dev
;
1522 tp
->mdio_bus
->read
= &tg3_mdio_read
;
1523 tp
->mdio_bus
->write
= &tg3_mdio_write
;
1524 tp
->mdio_bus
->reset
= &tg3_mdio_reset
;
1525 tp
->mdio_bus
->phy_mask
= ~(1 << TG3_PHY_MII_ADDR
);
1526 tp
->mdio_bus
->irq
= &tp
->mdio_irq
[0];
1528 for (i
= 0; i
< PHY_MAX_ADDR
; i
++)
1529 tp
->mdio_bus
->irq
[i
] = PHY_POLL
;
1531 /* The bus registration will look for all the PHYs on the mdio bus.
1532 * Unfortunately, it does not ensure the PHY is powered up before
1533 * accessing the PHY ID registers. A chip reset is the
1534 * quickest way to bring the device back to an operational state..
1536 if (tg3_readphy(tp
, MII_BMCR
, ®
) || (reg
& BMCR_PDOWN
))
1539 i
= mdiobus_register(tp
->mdio_bus
);
1541 dev_warn(&tp
->pdev
->dev
, "mdiobus_reg failed (0x%x)\n", i
);
1542 mdiobus_free(tp
->mdio_bus
);
1546 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1548 if (!phydev
|| !phydev
->drv
) {
1549 dev_warn(&tp
->pdev
->dev
, "No PHY devices\n");
1550 mdiobus_unregister(tp
->mdio_bus
);
1551 mdiobus_free(tp
->mdio_bus
);
1555 switch (phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
) {
1556 case PHY_ID_BCM57780
:
1557 phydev
->interface
= PHY_INTERFACE_MODE_GMII
;
1558 phydev
->dev_flags
|= PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1560 case PHY_ID_BCM50610
:
1561 case PHY_ID_BCM50610M
:
1562 phydev
->dev_flags
|= PHY_BRCM_CLEAR_RGMII_MODE
|
1563 PHY_BRCM_RX_REFCLK_UNUSED
|
1564 PHY_BRCM_DIS_TXCRXC_NOENRGY
|
1565 PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1566 if (tg3_flag(tp
, RGMII_INBAND_DISABLE
))
1567 phydev
->dev_flags
|= PHY_BRCM_STD_IBND_DISABLE
;
1568 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1569 phydev
->dev_flags
|= PHY_BRCM_EXT_IBND_RX_ENABLE
;
1570 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1571 phydev
->dev_flags
|= PHY_BRCM_EXT_IBND_TX_ENABLE
;
1573 case PHY_ID_RTL8211C
:
1574 phydev
->interface
= PHY_INTERFACE_MODE_RGMII
;
1576 case PHY_ID_RTL8201E
:
1577 case PHY_ID_BCMAC131
:
1578 phydev
->interface
= PHY_INTERFACE_MODE_MII
;
1579 phydev
->dev_flags
|= PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1580 tp
->phy_flags
|= TG3_PHYFLG_IS_FET
;
1584 tg3_flag_set(tp
, MDIOBUS_INITED
);
1586 if (tg3_asic_rev(tp
) == ASIC_REV_5785
)
1587 tg3_mdio_config_5785(tp
);
1592 static void tg3_mdio_fini(struct tg3
*tp
)
1594 if (tg3_flag(tp
, MDIOBUS_INITED
)) {
1595 tg3_flag_clear(tp
, MDIOBUS_INITED
);
1596 mdiobus_unregister(tp
->mdio_bus
);
1597 mdiobus_free(tp
->mdio_bus
);
1601 /* tp->lock is held. */
1602 static inline void tg3_generate_fw_event(struct tg3
*tp
)
1606 val
= tr32(GRC_RX_CPU_EVENT
);
1607 val
|= GRC_RX_CPU_DRIVER_EVENT
;
1608 tw32_f(GRC_RX_CPU_EVENT
, val
);
1610 tp
->last_event_jiffies
= jiffies
;
1613 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
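
/* Illustrative sketch (not part of the original driver): the driver-to-
 * firmware mailbox handshake used around this point.  Callers first wait for
 * the previous event to be acknowledged (tg3_wait_for_event_ack() below,
 * bounded by TG3_FW_EVENT_TIMEOUT_USEC above), write the command and length
 * into NIC SRAM, and then raise GRC_RX_CPU_DRIVER_EVENT via
 * tg3_generate_fw_event() above.  This helper is hypothetical and assumes
 * tp->lock is held, as for the surrounding functions.
 */
static inline void tg3_fw_post_command_sketch(struct tg3 *tp, u32 cmd, u32 len)
{
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, cmd);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, len);
	tg3_generate_fw_event(tp);
}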
1615 /* tp->lock is held. */
1616 static void tg3_wait_for_event_ack(struct tg3
*tp
)
1619 unsigned int delay_cnt
;
1622 /* If enough time has passed, no wait is necessary. */
1623 time_remain
= (long)(tp
->last_event_jiffies
+ 1 +
1624 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC
)) -
1626 if (time_remain
< 0)
1629 /* Check if we can shorten the wait time. */
1630 delay_cnt
= jiffies_to_usecs(time_remain
);
1631 if (delay_cnt
> TG3_FW_EVENT_TIMEOUT_USEC
)
1632 delay_cnt
= TG3_FW_EVENT_TIMEOUT_USEC
;
1633 delay_cnt
= (delay_cnt
>> 3) + 1;
1635 for (i
= 0; i
< delay_cnt
; i
++) {
1636 if (!(tr32(GRC_RX_CPU_EVENT
) & GRC_RX_CPU_DRIVER_EVENT
))
1638 if (pci_channel_offline(tp
->pdev
))
1645 /* tp->lock is held. */
1646 static void tg3_phy_gather_ump_data(struct tg3
*tp
, u32
*data
)
1651 if (!tg3_readphy(tp
, MII_BMCR
, ®
))
1653 if (!tg3_readphy(tp
, MII_BMSR
, ®
))
1654 val
|= (reg
& 0xffff);
1658 if (!tg3_readphy(tp
, MII_ADVERTISE
, ®
))
1660 if (!tg3_readphy(tp
, MII_LPA
, ®
))
1661 val
|= (reg
& 0xffff);
1665 if (!(tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)) {
1666 if (!tg3_readphy(tp
, MII_CTRL1000
, ®
))
1668 if (!tg3_readphy(tp
, MII_STAT1000
, ®
))
1669 val
|= (reg
& 0xffff);
1673 if (!tg3_readphy(tp
, MII_PHYADDR
, ®
))
1680 /* tp->lock is held. */
1681 static void tg3_ump_link_report(struct tg3
*tp
)
1685 if (!tg3_flag(tp
, 5780_CLASS
) || !tg3_flag(tp
, ENABLE_ASF
))
1688 tg3_phy_gather_ump_data(tp
, data
);
1690 tg3_wait_for_event_ack(tp
);
1692 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_LINK_UPDATE
);
1693 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_LEN_MBOX
, 14);
1694 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x0, data
[0]);
1695 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x4, data
[1]);
1696 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x8, data
[2]);
1697 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0xc, data
[3]);
1699 tg3_generate_fw_event(tp
);
1702 /* tp->lock is held. */
1703 static void tg3_stop_fw(struct tg3
*tp
)
1705 if (tg3_flag(tp
, ENABLE_ASF
) && !tg3_flag(tp
, ENABLE_APE
)) {
1706 /* Wait for RX cpu to ACK the previous event. */
1707 tg3_wait_for_event_ack(tp
);
1709 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_PAUSE_FW
);
1711 tg3_generate_fw_event(tp
);
1713 /* Wait for RX cpu to ACK this event. */
1714 tg3_wait_for_event_ack(tp
);
1718 /* tp->lock is held. */
1719 static void tg3_write_sig_pre_reset(struct tg3
*tp
, int kind
)
1721 tg3_write_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
,
1722 NIC_SRAM_FIRMWARE_MBOX_MAGIC1
);
1724 if (tg3_flag(tp
, ASF_NEW_HANDSHAKE
)) {
1726 case RESET_KIND_INIT
:
1727 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1731 case RESET_KIND_SHUTDOWN
:
1732 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1736 case RESET_KIND_SUSPEND
:
1737 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1747 /* tp->lock is held. */
1748 static void tg3_write_sig_post_reset(struct tg3
*tp
, int kind
)
1750 if (tg3_flag(tp
, ASF_NEW_HANDSHAKE
)) {
1752 case RESET_KIND_INIT
:
1753 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1754 DRV_STATE_START_DONE
);
1757 case RESET_KIND_SHUTDOWN
:
1758 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1759 DRV_STATE_UNLOAD_DONE
);
1768 /* tp->lock is held. */
1769 static void tg3_write_sig_legacy(struct tg3
*tp
, int kind
)
1771 if (tg3_flag(tp
, ENABLE_ASF
)) {
1773 case RESET_KIND_INIT
:
1774 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1778 case RESET_KIND_SHUTDOWN
:
1779 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1783 case RESET_KIND_SUSPEND
:
1784 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1794 static int tg3_poll_fw(struct tg3
*tp
)
1799 if (tg3_flag(tp
, NO_FWARE_REPORTED
))
1802 if (tg3_flag(tp
, IS_SSB_CORE
)) {
1803 /* We don't use firmware. */
1807 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
1808 /* Wait up to 20ms for init done. */
1809 for (i
= 0; i
< 200; i
++) {
1810 if (tr32(VCPU_STATUS
) & VCPU_STATUS_INIT_DONE
)
1812 if (pci_channel_offline(tp
->pdev
))
1820 /* Wait for firmware initialization to complete. */
1821 for (i
= 0; i
< 100000; i
++) {
1822 tg3_read_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
, &val
);
1823 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
1825 if (pci_channel_offline(tp
->pdev
)) {
1826 if (!tg3_flag(tp
, NO_FWARE_REPORTED
)) {
1827 tg3_flag_set(tp
, NO_FWARE_REPORTED
);
1828 netdev_info(tp
->dev
, "No firmware running\n");
1837 /* Chip might not be fitted with firmware. Some Sun onboard
1838 * parts are configured like that. So don't signal the timeout
1839 * of the above loop as an error, but do report the lack of
1840 * running firmware once.
1842 if (i
>= 100000 && !tg3_flag(tp
, NO_FWARE_REPORTED
)) {
1843 tg3_flag_set(tp
, NO_FWARE_REPORTED
);
1845 netdev_info(tp
->dev
, "No firmware running\n");
1848 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_57765_A0
) {
1849 /* The 57765 A0 needs a little more
1850 * time to do some important work.
1858 static void tg3_link_report(struct tg3
*tp
)
1860 if (!netif_carrier_ok(tp
->dev
)) {
1861 netif_info(tp
, link
, tp
->dev
, "Link is down\n");
1862 tg3_ump_link_report(tp
);
1863 } else if (netif_msg_link(tp
)) {
1864 netdev_info(tp
->dev
, "Link is up at %d Mbps, %s duplex\n",
1865 (tp
->link_config
.active_speed
== SPEED_1000
?
1867 (tp
->link_config
.active_speed
== SPEED_100
?
1869 (tp
->link_config
.active_duplex
== DUPLEX_FULL
?
1872 netdev_info(tp
->dev
, "Flow control is %s for TX and %s for RX\n",
1873 (tp
->link_config
.active_flowctrl
& FLOW_CTRL_TX
) ?
1875 (tp
->link_config
.active_flowctrl
& FLOW_CTRL_RX
) ?
1878 if (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
)
1879 netdev_info(tp
->dev
, "EEE is %s\n",
1880 tp
->setlpicnt
? "enabled" : "disabled");
1882 tg3_ump_link_report(tp
);
1885 tp
->link_up
= netif_carrier_ok(tp
->dev
);
1888 static u32
tg3_decode_flowctrl_1000T(u32 adv
)
1892 if (adv
& ADVERTISE_PAUSE_CAP
) {
1893 flowctrl
|= FLOW_CTRL_RX
;
1894 if (!(adv
& ADVERTISE_PAUSE_ASYM
))
1895 flowctrl
|= FLOW_CTRL_TX
;
1896 } else if (adv
& ADVERTISE_PAUSE_ASYM
)
1897 flowctrl
|= FLOW_CTRL_TX
;
1902 static u16
tg3_advert_flowctrl_1000X(u8 flow_ctrl
)
1906 if ((flow_ctrl
& FLOW_CTRL_TX
) && (flow_ctrl
& FLOW_CTRL_RX
))
1907 miireg
= ADVERTISE_1000XPAUSE
;
1908 else if (flow_ctrl
& FLOW_CTRL_TX
)
1909 miireg
= ADVERTISE_1000XPSE_ASYM
;
1910 else if (flow_ctrl
& FLOW_CTRL_RX
)
1911 miireg
= ADVERTISE_1000XPAUSE
| ADVERTISE_1000XPSE_ASYM
;
1918 static u32
tg3_decode_flowctrl_1000X(u32 adv
)
1922 if (adv
& ADVERTISE_1000XPAUSE
) {
1923 flowctrl
|= FLOW_CTRL_RX
;
1924 if (!(adv
& ADVERTISE_1000XPSE_ASYM
))
1925 flowctrl
|= FLOW_CTRL_TX
;
1926 } else if (adv
& ADVERTISE_1000XPSE_ASYM
)
1927 flowctrl
|= FLOW_CTRL_TX
;
1932 static u8
tg3_resolve_flowctrl_1000X(u16 lcladv
, u16 rmtadv
)
1936 if (lcladv
& rmtadv
& ADVERTISE_1000XPAUSE
) {
1937 cap
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
1938 } else if (lcladv
& rmtadv
& ADVERTISE_1000XPSE_ASYM
) {
1939 if (lcladv
& ADVERTISE_1000XPAUSE
)
1941 if (rmtadv
& ADVERTISE_1000XPAUSE
)
1948 static void tg3_setup_flow_control(struct tg3
*tp
, u32 lcladv
, u32 rmtadv
)
1952 u32 old_rx_mode
= tp
->rx_mode
;
1953 u32 old_tx_mode
= tp
->tx_mode
;
1955 if (tg3_flag(tp
, USE_PHYLIB
))
1956 autoneg
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]->autoneg
;
1958 autoneg
= tp
->link_config
.autoneg
;
1960 if (autoneg
== AUTONEG_ENABLE
&& tg3_flag(tp
, PAUSE_AUTONEG
)) {
1961 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
1962 flowctrl
= tg3_resolve_flowctrl_1000X(lcladv
, rmtadv
);
1964 flowctrl
= mii_resolve_flowctrl_fdx(lcladv
, rmtadv
);
1966 flowctrl
= tp
->link_config
.flowctrl
;
1968 tp
->link_config
.active_flowctrl
= flowctrl
;
1970 if (flowctrl
& FLOW_CTRL_RX
)
1971 tp
->rx_mode
|= RX_MODE_FLOW_CTRL_ENABLE
;
1973 tp
->rx_mode
&= ~RX_MODE_FLOW_CTRL_ENABLE
;
1975 if (old_rx_mode
!= tp
->rx_mode
)
1976 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
1978 if (flowctrl
& FLOW_CTRL_TX
)
1979 tp
->tx_mode
|= TX_MODE_FLOW_CTRL_ENABLE
;
1981 tp
->tx_mode
&= ~TX_MODE_FLOW_CTRL_ENABLE
;
1983 if (old_tx_mode
!= tp
->tx_mode
)
1984 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
1987 static void tg3_adjust_link(struct net_device
*dev
)
1989 u8 oldflowctrl
, linkmesg
= 0;
1990 u32 mac_mode
, lcl_adv
, rmt_adv
;
1991 struct tg3
*tp
= netdev_priv(dev
);
1992 struct phy_device
*phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1994 spin_lock_bh(&tp
->lock
);
1996 mac_mode
= tp
->mac_mode
& ~(MAC_MODE_PORT_MODE_MASK
|
1997 MAC_MODE_HALF_DUPLEX
);
1999 oldflowctrl
= tp
->link_config
.active_flowctrl
;
2005 if (phydev
->speed
== SPEED_100
|| phydev
->speed
== SPEED_10
)
2006 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
2007 else if (phydev
->speed
== SPEED_1000
||
2008 tg3_asic_rev(tp
) != ASIC_REV_5785
)
2009 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
2011 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
2013 if (phydev
->duplex
== DUPLEX_HALF
)
2014 mac_mode
|= MAC_MODE_HALF_DUPLEX
;
2016 lcl_adv
= mii_advertise_flowctrl(
2017 tp
->link_config
.flowctrl
);
2020 rmt_adv
= LPA_PAUSE_CAP
;
2021 if (phydev
->asym_pause
)
2022 rmt_adv
|= LPA_PAUSE_ASYM
;
2025 tg3_setup_flow_control(tp
, lcl_adv
, rmt_adv
);
2027 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
2029 if (mac_mode
!= tp
->mac_mode
) {
2030 tp
->mac_mode
= mac_mode
;
2031 tw32_f(MAC_MODE
, tp
->mac_mode
);
2035 if (tg3_asic_rev(tp
) == ASIC_REV_5785
) {
2036 if (phydev
->speed
== SPEED_10
)
2038 MAC_MI_STAT_10MBPS_MODE
|
2039 MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
2041 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
2044 if (phydev
->speed
== SPEED_1000
&& phydev
->duplex
== DUPLEX_HALF
)
2045 tw32(MAC_TX_LENGTHS
,
2046 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
2047 (6 << TX_LENGTHS_IPG_SHIFT
) |
2048 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT
)));
2050 tw32(MAC_TX_LENGTHS
,
2051 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
2052 (6 << TX_LENGTHS_IPG_SHIFT
) |
2053 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
)));
2055 if (phydev
->link
!= tp
->old_link
||
2056 phydev
->speed
!= tp
->link_config
.active_speed
||
2057 phydev
->duplex
!= tp
->link_config
.active_duplex
||
2058 oldflowctrl
!= tp
->link_config
.active_flowctrl
)
2061 tp
->old_link
= phydev
->link
;
2062 tp
->link_config
.active_speed
= phydev
->speed
;
2063 tp
->link_config
.active_duplex
= phydev
->duplex
;
2065 spin_unlock_bh(&tp
->lock
);
2068 tg3_link_report(tp
);
2071 static int tg3_phy_init(struct tg3
*tp
)
2073 struct phy_device
*phydev
;
2075 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
)
2078 /* Bring the PHY back to a known state. */
2081 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
2083 /* Attach the MAC to the PHY. */
2084 phydev
= phy_connect(tp
->dev
, dev_name(&phydev
->dev
),
2085 tg3_adjust_link
, phydev
->interface
);
2086 if (IS_ERR(phydev
)) {
2087 dev_err(&tp
->pdev
->dev
, "Could not attach to PHY\n");
2088 return PTR_ERR(phydev
);
2091 /* Mask with MAC supported features. */
2092 switch (phydev
->interface
) {
2093 case PHY_INTERFACE_MODE_GMII
:
2094 case PHY_INTERFACE_MODE_RGMII
:
2095 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
2096 phydev
->supported
&= (PHY_GBIT_FEATURES
|
2098 SUPPORTED_Asym_Pause
);
2102 case PHY_INTERFACE_MODE_MII
:
2103 phydev
->supported
&= (PHY_BASIC_FEATURES
|
2105 SUPPORTED_Asym_Pause
);
2108 phy_disconnect(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
2112 tp
->phy_flags
|= TG3_PHYFLG_IS_CONNECTED
;
2114 phydev
->advertising
= phydev
->supported
;
2119 static void tg3_phy_start(struct tg3
*tp
)
2121 struct phy_device
*phydev
;
2123 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
2126 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
2128 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) {
2129 tp
->phy_flags
&= ~TG3_PHYFLG_IS_LOW_POWER
;
2130 phydev
->speed
= tp
->link_config
.speed
;
2131 phydev
->duplex
= tp
->link_config
.duplex
;
2132 phydev
->autoneg
= tp
->link_config
.autoneg
;
2133 phydev
->advertising
= tp
->link_config
.advertising
;
2138 phy_start_aneg(phydev
);
2141 static void tg3_phy_stop(struct tg3
*tp
)
2143 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
2146 phy_stop(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
2149 static void tg3_phy_fini(struct tg3
*tp
)
2151 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
2152 phy_disconnect(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
2153 tp
->phy_flags
&= ~TG3_PHYFLG_IS_CONNECTED
;
2157 static int tg3_phy_set_extloopbk(struct tg3
*tp
)
2162 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
)
2165 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
2166 /* Cannot do read-modify-write on 5401 */
2167 err
= tg3_phy_auxctl_write(tp
,
2168 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
,
2169 MII_TG3_AUXCTL_ACTL_EXTLOOPBK
|
2174 err
= tg3_phy_auxctl_read(tp
,
2175 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, &val
);
2179 val
|= MII_TG3_AUXCTL_ACTL_EXTLOOPBK
;
2180 err
= tg3_phy_auxctl_write(tp
,
2181 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, val
);
2187 static void tg3_phy_fet_toggle_apd(struct tg3
*tp
, bool enable
)
2191 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &phytest
)) {
2194 tg3_writephy(tp
, MII_TG3_FET_TEST
,
2195 phytest
| MII_TG3_FET_SHADOW_EN
);
2196 if (!tg3_readphy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, &phy
)) {
2198 phy
|= MII_TG3_FET_SHDW_AUXSTAT2_APD
;
2200 phy
&= ~MII_TG3_FET_SHDW_AUXSTAT2_APD
;
2201 tg3_writephy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, phy
);
2203 tg3_writephy(tp
, MII_TG3_FET_TEST
, phytest
);
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
{
	u32 val;
	struct ethtool_eee *dest = &tp->eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	if (eee)
		dest = eee;

	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
		return;

	/* Pull eee_active */
	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
		dest->eee_active = 1;
	} else
		dest->eee_active = 0;

	/* Pull lp advertised settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
		return;
	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull advertised and eee_enabled settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
		return;
	dest->eee_enabled = !!val;
	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull tx_lpi_enabled */
	val = tr32(TG3_CPMU_EEE_MODE);
	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

	/* Pull lpi timer value */
	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}
static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_eee_pull_config(tp, NULL);
		if (tp->eee.eee_active)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up &&
		    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	tg3_phy_toggle_auxctl_smdsp(tp, false);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}

static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF))
		netdev_warn(tp->dev,
			    "Management side-band traffic will be interrupted during phy settings change\n");
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, true);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
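
/* Illustrative note, not part of the original driver: the shared status
 * word above packs one TG3_GPIO_MSG_* field per PCI function, 4 bits
 * apart, starting at TG3_APE_GPIO_MSG_SHIFT.  As a worked example,
 * assuming tp->pci_fn == 2, shift is TG3_APE_GPIO_MSG_SHIFT + 8, so
 * tg3_set_function_status(tp, TG3_GPIO_MSG_NEED_VAUX) clears that
 * function's two message bits and sets TG3_GPIO_MSG_NEED_VAUX within
 * them.  The shifted-down return value is what callers test against the
 * TG3_GPIO_MSG_ALL_*_MASK macros defined above.
 */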
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}

static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}

static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

static bool tg3_phy_power_bug(struct tg3 *tp)
{
	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5700:
	case ASIC_REV_5704:
		return true;
	case ASIC_REV_5780:
		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			return true;
		return false;
	case ASIC_REV_5717:
		if (!tp->pci_fn)
			return true;
		return false;
	case ASIC_REV_5719:
	case ASIC_REV_5720:
		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !tp->pci_fn)
			return true;
		return false;
	}

	return false;
}
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
		return;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_phy_power_bug(tp))
		return;

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}

/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}

/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}
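
/* Illustrative note, not part of the original driver: worked example of
 * the translation above, assuming the 264-byte page geometry of the
 * buffered Atmel AT45DB0X1B parts and ATMEL_AT45DB0X1B_PAGE_POS == 9.
 * A linear offset of 1000 maps to page 1000 / 264 = 3 and in-page byte
 * 1000 % 264 = 208, giving a physical address of (3 << 9) + 208 = 0x6d0.
 * tg3_nvram_logical_addr() below performs the inverse mapping.
 */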
static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}

/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);
		buf += size;

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000

/* tp->lock is held. */
static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
	int i;
	const int iters = 10000;

	for (i = 0; i < iters; i++) {
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
			break;
		if (pci_channel_offline(tp->pdev))
			return -EBUSY;
	}

	return (i == iters) ? -EBUSY : 0;
}
/* tp->lock is held. */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
	udelay(10);

	return rc;
}

/* tp->lock is held. */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}

/* tp->lock is held. */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
}

/* tp->lock is held. */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	tg3_resume_cpu(tp, RX_CPU_BASE);
}

/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
static int tg3_fw_data_len(struct tg3 *tp,
			   const struct tg3_firmware_hdr *fw_hdr)
{
	int fw_len;

	/* Non fragmented firmware have one firmware header followed by a
	 * contiguous chunk of data to be written. The length field in that
	 * header is not the length of data to be written but the complete
	 * length of the bss. The data length is determined based on
	 * tp->fw->size minus headers.
	 *
	 * Fragmented firmware have a main header followed by multiple
	 * fragments. Each fragment is identical to non fragmented firmware
	 * with a firmware header followed by a contiguous chunk of data. In
	 * the main header, the length field is unused and set to 0xffffffff.
	 * In each fragment header the length is the entire size of that
	 * fragment i.e. fragment data + header length. Data length is
	 * therefore length field in the header minus TG3_FW_HDR_LEN.
	 */
	if (tp->fw_len == 0xffffffff)
		fw_len = be32_to_cpu(fw_hdr->len);
	else
		fw_len = tp->fw->size;

	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
}
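
/* Illustrative note, not part of the original driver: two hypothetical
 * examples of the length math described above.  For a non-fragmented
 * image whose tp->fw->size is 2048 bytes, the data length is
 * (2048 - TG3_FW_HDR_LEN) / sizeof(u32) words, independent of the bss
 * length carried in the header.  For a fragment whose header reports
 * len == 0x140, the data length is (0x140 - TG3_FW_HDR_LEN) / sizeof(u32)
 * words and the next fragment header begins 0x140 bytes after the current
 * one (see the fragment loop in tg3_load_firmware_cpu() below).
 */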
/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that bootcode is still loading at this point.
		 * Get the nvram lock first before halting the cpu.
		 */
		int lock_err = tg3_nvram_lock(tp);
		err = tg3_halt_cpu(tp, cpu_base);
		if (!lock_err)
			tg3_nvram_unlock(tp);
		if (err)
			goto out;

		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
			write_op(tp, cpu_scratch_base + i, 0);
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,
		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	} else {
		/* Subtract additional main header for fragmented firmware and
		 * advance to the first fragment
		 */
		total_len -= TG3_FW_HDR_LEN;
		fw_hdr++;
	}

	do {
		u32 *fw_data = (u32 *)(fw_hdr + 1);
		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}
/* tp->lock is held. */
static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;
	const int iters = 5;

	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, pc);

	for (i = 0; i < iters; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}

	return (i == iters) ? -EBUSY : 0;
}
/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_rxcpu_resume(tp);

	return 0;
}
static int tg3_validate_rxcpu_state(struct tg3 *tp)
{
	const int iters = 1000;
	int i;
	u32 val;

	/* Wait for boot code to complete initialization and enter service
	 * loop. It is then safe to download service patches
	 */
	for (i = 0; i < iters; i++) {
		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
			break;

		udelay(10);
	}

	if (i == iters) {
		netdev_err(tp->dev, "Boot code not ready for service patches\n");
		return -EBUSY;
	}

	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
	if (val & 0xff) {
		netdev_warn(tp->dev,
			    "Other patches exist. Not downloading EEE patch\n");
		return -EEXIST;
	}

	return 0;
}

/* tp->lock is held. */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	if (!tg3_flag(tp, NO_NVRAM))
		return;

	if (tg3_validate_rxcpu_state(tp))
		return;

	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below. The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware. i.e. they have the
	 * firmware header and followed by data for that fragment. The version
	 * field of the individual fragment header is unused.
	 */

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	tg3_rxcpu_resume(tp);
}
/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	cpu_scratch_size = tp->fw_len;

	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_resume_cpu(tp, cpu_base);
	return 0;
}
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}

static int tg3_power_up(struct tg3 *tp)
{
	int err;

	tg3_enable_register_access(tp);

	err = pci_set_power_state(tp->pdev, PCI_D0);
	if (!err) {
		/* Switch out of Vaux if it is a NIC */
		tg3_pwrsrc_switch_to_vmain(tp);
	} else {
		netdev_err(tp->dev, "Transition to D0 failed\n");
	}

	return err;
}

static int tg3_setup_phy(struct tg3 *, bool);
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG))
		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
					 PCI_EXP_LNKCTL_CLKREQ_EN);

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.speed = phydev->speed;
			tp->link_config.duplex = phydev->duplex;
			tp->link_config.autoneg = phydev->autoneg;
			tp->link_config.advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			tg3_setup_phy(tp, false);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else if (tp->phy_flags &
				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
				if (tp->link_config.active_speed == SPEED_1000)
					mac_mode = MAC_MODE_PORT_MODE_GMII;
				else
					mac_mode = MAC_MODE_PORT_MODE_MII;
			} else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
		    tg3_asic_rev(tp) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_UNKNOWN;
		*duplex = DUPLEX_UNKNOWN;
		break;
	}
}
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;

		if (!tp->eee.eee_enabled) {
			val = 0;
			tp->eee.advertised = 0;
		} else {
			tp->eee.advertised = advertise &
					     (ADVERTISED_100baseT_Full |
					      ADVERTISED_1000baseT_Full);
		}

		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (tg3_asic_rev(tp)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;
			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
				adv |= ADVERTISED_1000baseT_Half |
				       ADVERTISED_1000baseT_Full;

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Normally during power down we want to autonegotiate
			 * the lowest possible speed for WOL. However, to avoid
			 * link flap, we leave it untouched.
			 */
			return;
		}

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
static int tg3_phy_pull_config(struct tg3 *tp)
{
	int err;
	u32 val;

	err = tg3_readphy(tp, MII_BMCR, &val);
	if (err)
		goto done;

	if (!(val & BMCR_ANENABLE)) {
		tp->link_config.autoneg = AUTONEG_DISABLE;
		tp->link_config.advertising = 0;
		tg3_flag_clear(tp, PAUSE_AUTONEG);

		err = -EIO;

		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
		case 0:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_10;
			break;
		case BMCR_SPEED100:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_100;
			break;
		case BMCR_SPEED1000:
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
				tp->link_config.speed = SPEED_1000;
				break;
			}
			/* Fall through */
		default:
			goto done;
		}

		if (val & BMCR_FULLDPLX)
			tp->link_config.duplex = DUPLEX_FULL;
		else
			tp->link_config.duplex = DUPLEX_HALF;

		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

		err = 0;
		goto done;
	}

	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.advertising = ADVERTISED_Autoneg;
	tg3_flag_set(tp, PAUSE_AUTONEG);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		u32 adv;

		err = tg3_readphy(tp, MII_ADVERTISE, &val);
		if (err)
			goto done;

		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
		tp->link_config.advertising |= adv | ADVERTISED_TP;

		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
	} else {
		tp->link_config.advertising |= ADVERTISED_FIBRE;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 adv;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			err = tg3_readphy(tp, MII_CTRL1000, &val);
			if (err)
				goto done;

			adv = mii_ctrl1000_to_ethtool_adv_t(val);
		} else {
			err = tg3_readphy(tp, MII_ADVERTISE, &val);
			if (err)
				goto done;

			adv = tg3_decode_flowctrl_1000X(val);
			tp->link_config.flowctrl = adv;

			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
			adv = mii_adv_to_ethtool_adv_x(val);
		}

		tp->link_config.advertising |= adv;
	}

done:
	return err;
}
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}

static bool tg3_phy_eee_config_ok(struct tg3 *tp)
{
	struct ethtool_eee eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return true;

	tg3_eee_pull_config(tp, &eee);

	if (tp->eee.eee_enabled) {
		if (tp->eee.advertised != eee.advertised ||
		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
			return false;
	} else {
		/* EEE is disabled but we're advertising */
		if (eee.advertised)
			return false;
	}

	return true;
}
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		if (tgtadv &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}

static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
	u32 lpeth = 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 val;

		if (tg3_readphy(tp, MII_STAT1000, &val))
			return false;

		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
	}

	if (tg3_readphy(tp, MII_LPA, rmtadv))
		return false;

	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
	tp->link_config.rmt_adv = lpeth;

	return true;
}
4665 static bool tg3_test_and_report_link_chg(struct tg3
*tp
, bool curr_link_up
)
4667 if (curr_link_up
!= tp
->link_up
) {
4669 netif_carrier_on(tp
->dev
);
4671 netif_carrier_off(tp
->dev
);
4672 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
4673 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
4676 tg3_link_report(tp
);
static void tg3_clear_mac_status(struct tg3 *tp)

	       MAC_STATUS_SYNC_CHANGED |
	       MAC_STATUS_CFG_CHANGED |
	       MAC_STATUS_MI_COMPLETION |
	       MAC_STATUS_LNKSTATE_CHANGED);
static void tg3_setup_eee(struct tg3 *tp)

	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;

	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);

	tw32_f(TG3_CPMU_EEE_CTRL,
	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
	      TG3_CPMU_EEEMD_LPI_IN_RX |
	      TG3_CPMU_EEEMD_EEE_ENABLE;

	if (tg3_asic_rev(tp) != ASIC_REV_5717)
		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

	if (tg3_flag(tp, ENABLE_APE))
		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);

	tw32_f(TG3_CPMU_EEE_DBTMR1,
	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
	       (tp->eee.tx_lpi_timer & 0xffff));

	tw32_f(TG3_CPMU_EEE_DBTMR2,
	       TG3_CPMU_DBTMR2_APE_TX_2047US |
	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
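	/* The DBTMR1 write above packs two fields into one register: the
	 * fixed TG3_CPMU_DBTMR1_PCIEXIT_2047US debounce constant OR'd with
	 * the driver's LPI timer, of which only the low 16 bits are used.
	 * For example, tp->eee.tx_lpi_timer = 0x3e8 results in
	 * TG3_CPMU_DBTMR1_PCIEXIT_2047US | 0x3e8 being written.
	 */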
4731 static int tg3_setup_copper_phy(struct tg3
*tp
, bool force_reset
)
4733 bool current_link_up
;
4735 u32 lcl_adv
, rmt_adv
;
4740 tg3_clear_mac_status(tp
);
4742 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
4744 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
4748 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_PWRCTL
, 0);
4750 /* Some third-party PHYs need to be reset on link going
4753 if ((tg3_asic_rev(tp
) == ASIC_REV_5703
||
4754 tg3_asic_rev(tp
) == ASIC_REV_5704
||
4755 tg3_asic_rev(tp
) == ASIC_REV_5705
) &&
4757 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4758 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
4759 !(bmsr
& BMSR_LSTATUS
))
4765 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
4766 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4767 if (tg3_readphy(tp
, MII_BMSR
, &bmsr
) ||
4768 !tg3_flag(tp
, INIT_COMPLETE
))
4771 if (!(bmsr
& BMSR_LSTATUS
)) {
4772 err
= tg3_init_5401phy_dsp(tp
);
4776 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4777 for (i
= 0; i
< 1000; i
++) {
4779 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
4780 (bmsr
& BMSR_LSTATUS
)) {
4786 if ((tp
->phy_id
& TG3_PHY_ID_REV_MASK
) ==
4787 TG3_PHY_REV_BCM5401_B0
&&
4788 !(bmsr
& BMSR_LSTATUS
) &&
4789 tp
->link_config
.active_speed
== SPEED_1000
) {
4790 err
= tg3_phy_reset(tp
);
4792 err
= tg3_init_5401phy_dsp(tp
);
4797 } else if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
||
4798 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B0
) {
4799 /* 5701 {A0,B0} CRC bug workaround */
4800 tg3_writephy(tp
, 0x15, 0x0a75);
4801 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8c68);
4802 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8d68);
4803 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8c68);
4806 /* Clear pending interrupts... */
4807 tg3_readphy(tp
, MII_TG3_ISTAT
, &val
);
4808 tg3_readphy(tp
, MII_TG3_ISTAT
, &val
);
4810 if (tp
->phy_flags
& TG3_PHYFLG_USE_MI_INTERRUPT
)
4811 tg3_writephy(tp
, MII_TG3_IMASK
, ~MII_TG3_INT_LINKCHG
);
4812 else if (!(tp
->phy_flags
& TG3_PHYFLG_IS_FET
))
4813 tg3_writephy(tp
, MII_TG3_IMASK
, ~0);
4815 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
4816 tg3_asic_rev(tp
) == ASIC_REV_5701
) {
4817 if (tp
->led_ctrl
== LED_CTRL_MODE_PHY_1
)
4818 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
4819 MII_TG3_EXT_CTRL_LNK3_LED_MODE
);
4821 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, 0);
4824 current_link_up
= false;
4825 current_speed
= SPEED_UNKNOWN
;
4826 current_duplex
= DUPLEX_UNKNOWN
;
4827 tp
->phy_flags
&= ~TG3_PHYFLG_MDIX_STATE
;
4828 tp
->link_config
.rmt_adv
= 0;
4830 if (tp
->phy_flags
& TG3_PHYFLG_CAPACITIVE_COUPLING
) {
4831 err
= tg3_phy_auxctl_read(tp
,
4832 MII_TG3_AUXCTL_SHDWSEL_MISCTEST
,
4834 if (!err
&& !(val
& (1 << 10))) {
4835 tg3_phy_auxctl_write(tp
,
4836 MII_TG3_AUXCTL_SHDWSEL_MISCTEST
,
4843 for (i
= 0; i
< 100; i
++) {
4844 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4845 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
4846 (bmsr
& BMSR_LSTATUS
))
4851 if (bmsr
& BMSR_LSTATUS
) {
4854 tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
);
4855 for (i
= 0; i
< 2000; i
++) {
4857 if (!tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
) &&
4862 tg3_aux_stat_to_speed_duplex(tp
, aux_stat
,
4867 for (i
= 0; i
< 200; i
++) {
4868 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
4869 if (tg3_readphy(tp
, MII_BMCR
, &bmcr
))
4871 if (bmcr
&& bmcr
!= 0x7fff)
4879 tp
->link_config
.active_speed
= current_speed
;
4880 tp
->link_config
.active_duplex
= current_duplex
;
4882 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
4883 bool eee_config_ok
= tg3_phy_eee_config_ok(tp
);
4885 if ((bmcr
& BMCR_ANENABLE
) &&
4887 tg3_phy_copper_an_config_ok(tp
, &lcl_adv
) &&
4888 tg3_phy_copper_fetch_rmtadv(tp
, &rmt_adv
))
4889 current_link_up
= true;
4891 /* EEE settings changes take effect only after a phy
4892 * reset. If we have skipped a reset due to Link Flap
4893 * Avoidance being enabled, do it now.
4895 if (!eee_config_ok
&&
4896 (tp
->phy_flags
& TG3_PHYFLG_KEEP_LINK_ON_PWRDN
) &&
4902 if (!(bmcr
& BMCR_ANENABLE
) &&
4903 tp
->link_config
.speed
== current_speed
&&
4904 tp
->link_config
.duplex
== current_duplex
) {
4905 current_link_up
= true;
4909 if (current_link_up
&&
4910 tp
->link_config
.active_duplex
== DUPLEX_FULL
) {
4913 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
4914 reg
= MII_TG3_FET_GEN_STAT
;
4915 bit
= MII_TG3_FET_GEN_STAT_MDIXSTAT
;
4917 reg
= MII_TG3_EXT_STAT
;
4918 bit
= MII_TG3_EXT_STAT_MDIX
;
4921 if (!tg3_readphy(tp
, reg
, &val
) && (val
& bit
))
4922 tp
->phy_flags
|= TG3_PHYFLG_MDIX_STATE
;
4924 tg3_setup_flow_control(tp
, lcl_adv
, rmt_adv
);
4929 if (!current_link_up
|| (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
4930 tg3_phy_copper_begin(tp
);
4932 if (tg3_flag(tp
, ROBOSWITCH
)) {
4933 current_link_up
= true;
4934 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4935 current_speed
= SPEED_1000
;
4936 current_duplex
= DUPLEX_FULL
;
4937 tp
->link_config
.active_speed
= current_speed
;
4938 tp
->link_config
.active_duplex
= current_duplex
;
4941 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4942 if ((!tg3_readphy(tp
, MII_BMSR
, &bmsr
) && (bmsr
& BMSR_LSTATUS
)) ||
4943 (tp
->mac_mode
& MAC_MODE_PORT_INT_LPBACK
))
4944 current_link_up
= true;
4947 tp
->mac_mode
&= ~MAC_MODE_PORT_MODE_MASK
;
4948 if (current_link_up
) {
4949 if (tp
->link_config
.active_speed
== SPEED_100
||
4950 tp
->link_config
.active_speed
== SPEED_10
)
4951 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
4953 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
4954 } else if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
)
4955 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
4957 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
4959 /* In order for the 5750 core in BCM4785 chip to work properly
4960 * in RGMII mode, the Led Control Register must be set up.
4962 if (tg3_flag(tp
, RGMII_MODE
)) {
4963 u32 led_ctrl
= tr32(MAC_LED_CTRL
);
4964 led_ctrl
&= ~(LED_CTRL_1000MBPS_ON
| LED_CTRL_100MBPS_ON
);
4966 if (tp
->link_config
.active_speed
== SPEED_10
)
4967 led_ctrl
|= LED_CTRL_LNKLED_OVERRIDE
;
4968 else if (tp
->link_config
.active_speed
== SPEED_100
)
4969 led_ctrl
|= (LED_CTRL_LNKLED_OVERRIDE
|
4970 LED_CTRL_100MBPS_ON
);
4971 else if (tp
->link_config
.active_speed
== SPEED_1000
)
4972 led_ctrl
|= (LED_CTRL_LNKLED_OVERRIDE
|
4973 LED_CTRL_1000MBPS_ON
);
4975 tw32(MAC_LED_CTRL
, led_ctrl
);
4979 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
4980 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
4981 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
4983 if (tg3_asic_rev(tp
) == ASIC_REV_5700
) {
4984 if (current_link_up
&&
4985 tg3_5700_link_polarity(tp
, tp
->link_config
.active_speed
))
4986 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
4988 tp
->mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
4991 /* ??? Without this setting Netgear GA302T PHY does not
4992 * ??? send/receive packets...
4994 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5411
&&
4995 tg3_chip_rev_id(tp
) == CHIPREV_ID_5700_ALTIMA
) {
4996 tp
->mi_mode
|= MAC_MI_MODE_AUTO_POLL
;
4997 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
5001 tw32_f(MAC_MODE
, tp
->mac_mode
);
5004 tg3_phy_eee_adjust(tp
, current_link_up
);
5006 if (tg3_flag(tp
, USE_LINKCHG_REG
)) {
5007 /* Polled via timer. */
5008 tw32_f(MAC_EVENT
, 0);
5010 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
5014 if (tg3_asic_rev(tp
) == ASIC_REV_5700
&&
5016 tp
->link_config
.active_speed
== SPEED_1000
&&
5017 (tg3_flag(tp
, PCIX_MODE
) || tg3_flag(tp
, PCI_HIGH_SPEED
))) {
5020 (MAC_STATUS_SYNC_CHANGED
|
5021 MAC_STATUS_CFG_CHANGED
));
5024 NIC_SRAM_FIRMWARE_MBOX
,
5025 NIC_SRAM_FIRMWARE_MBOX_MAGIC2
);
5028 /* Prevent send BD corruption. */
5029 if (tg3_flag(tp
, CLKREQ_BUG
)) {
5030 if (tp
->link_config
.active_speed
== SPEED_100
||
5031 tp
->link_config
.active_speed
== SPEED_10
)
5032 pcie_capability_clear_word(tp
->pdev
, PCI_EXP_LNKCTL
,
5033 PCI_EXP_LNKCTL_CLKREQ_EN
);
5035 pcie_capability_set_word(tp
->pdev
, PCI_EXP_LNKCTL
,
5036 PCI_EXP_LNKCTL_CLKREQ_EN
);
5039 tg3_test_and_report_link_chg(tp
, current_link_up
);
struct tg3_fiber_aneginfo {

#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

#define ANEG_TIMER_ENAB		2
#define ANEG_FAILED		-1

#define ANEG_STATE_SETTLE_TIME	10000
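/* In the normal autoneg case the state machine below proceeds
 * AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 * ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 * COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 * IDLE_DETECT -> LINK_OK, returning ANEG_TIMER_ENAB from the states
 * that must wait ANEG_STATE_SETTLE_TIME before being re-entered.
 */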
5108 static int tg3_fiber_aneg_smachine(struct tg3
*tp
,
5109 struct tg3_fiber_aneginfo
*ap
)
5112 unsigned long delta
;
5116 if (ap
->state
== ANEG_STATE_UNKNOWN
) {
5120 ap
->ability_match_cfg
= 0;
5121 ap
->ability_match_count
= 0;
5122 ap
->ability_match
= 0;
5128 if (tr32(MAC_STATUS
) & MAC_STATUS_RCVD_CFG
) {
5129 rx_cfg_reg
= tr32(MAC_RX_AUTO_NEG
);
5131 if (rx_cfg_reg
!= ap
->ability_match_cfg
) {
5132 ap
->ability_match_cfg
= rx_cfg_reg
;
5133 ap
->ability_match
= 0;
5134 ap
->ability_match_count
= 0;
5136 if (++ap
->ability_match_count
> 1) {
5137 ap
->ability_match
= 1;
5138 ap
->ability_match_cfg
= rx_cfg_reg
;
5141 if (rx_cfg_reg
& ANEG_CFG_ACK
)
5149 ap
->ability_match_cfg
= 0;
5150 ap
->ability_match_count
= 0;
5151 ap
->ability_match
= 0;
5157 ap
->rxconfig
= rx_cfg_reg
;
5160 switch (ap
->state
) {
5161 case ANEG_STATE_UNKNOWN
:
5162 if (ap
->flags
& (MR_AN_ENABLE
| MR_RESTART_AN
))
5163 ap
->state
= ANEG_STATE_AN_ENABLE
;
5166 case ANEG_STATE_AN_ENABLE
:
5167 ap
->flags
&= ~(MR_AN_COMPLETE
| MR_PAGE_RX
);
5168 if (ap
->flags
& MR_AN_ENABLE
) {
5171 ap
->ability_match_cfg
= 0;
5172 ap
->ability_match_count
= 0;
5173 ap
->ability_match
= 0;
5177 ap
->state
= ANEG_STATE_RESTART_INIT
;
5179 ap
->state
= ANEG_STATE_DISABLE_LINK_OK
;
5183 case ANEG_STATE_RESTART_INIT
:
5184 ap
->link_time
= ap
->cur_time
;
5185 ap
->flags
&= ~(MR_NP_LOADED
);
5187 tw32(MAC_TX_AUTO_NEG
, 0);
5188 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
5189 tw32_f(MAC_MODE
, tp
->mac_mode
);
5192 ret
= ANEG_TIMER_ENAB
;
5193 ap
->state
= ANEG_STATE_RESTART
;
5196 case ANEG_STATE_RESTART
:
5197 delta
= ap
->cur_time
- ap
->link_time
;
5198 if (delta
> ANEG_STATE_SETTLE_TIME
)
5199 ap
->state
= ANEG_STATE_ABILITY_DETECT_INIT
;
5201 ret
= ANEG_TIMER_ENAB
;
5204 case ANEG_STATE_DISABLE_LINK_OK
:
5208 case ANEG_STATE_ABILITY_DETECT_INIT
:
5209 ap
->flags
&= ~(MR_TOGGLE_TX
);
5210 ap
->txconfig
= ANEG_CFG_FD
;
5211 flowctrl
= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
5212 if (flowctrl
& ADVERTISE_1000XPAUSE
)
5213 ap
->txconfig
|= ANEG_CFG_PS1
;
5214 if (flowctrl
& ADVERTISE_1000XPSE_ASYM
)
5215 ap
->txconfig
|= ANEG_CFG_PS2
;
5216 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
5217 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
5218 tw32_f(MAC_MODE
, tp
->mac_mode
);
5221 ap
->state
= ANEG_STATE_ABILITY_DETECT
;
5224 case ANEG_STATE_ABILITY_DETECT
:
5225 if (ap
->ability_match
!= 0 && ap
->rxconfig
!= 0)
5226 ap
->state
= ANEG_STATE_ACK_DETECT_INIT
;
5229 case ANEG_STATE_ACK_DETECT_INIT
:
5230 ap
->txconfig
|= ANEG_CFG_ACK
;
5231 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
5232 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
5233 tw32_f(MAC_MODE
, tp
->mac_mode
);
5236 ap
->state
= ANEG_STATE_ACK_DETECT
;
5239 case ANEG_STATE_ACK_DETECT
:
5240 if (ap
->ack_match
!= 0) {
5241 if ((ap
->rxconfig
& ~ANEG_CFG_ACK
) ==
5242 (ap
->ability_match_cfg
& ~ANEG_CFG_ACK
)) {
5243 ap
->state
= ANEG_STATE_COMPLETE_ACK_INIT
;
5245 ap
->state
= ANEG_STATE_AN_ENABLE
;
5247 } else if (ap
->ability_match
!= 0 &&
5248 ap
->rxconfig
== 0) {
5249 ap
->state
= ANEG_STATE_AN_ENABLE
;
5253 case ANEG_STATE_COMPLETE_ACK_INIT
:
5254 if (ap
->rxconfig
& ANEG_CFG_INVAL
) {
5258 ap
->flags
&= ~(MR_LP_ADV_FULL_DUPLEX
|
5259 MR_LP_ADV_HALF_DUPLEX
|
5260 MR_LP_ADV_SYM_PAUSE
|
5261 MR_LP_ADV_ASYM_PAUSE
|
5262 MR_LP_ADV_REMOTE_FAULT1
|
5263 MR_LP_ADV_REMOTE_FAULT2
|
5264 MR_LP_ADV_NEXT_PAGE
|
5267 if (ap
->rxconfig
& ANEG_CFG_FD
)
5268 ap
->flags
|= MR_LP_ADV_FULL_DUPLEX
;
5269 if (ap
->rxconfig
& ANEG_CFG_HD
)
5270 ap
->flags
|= MR_LP_ADV_HALF_DUPLEX
;
5271 if (ap
->rxconfig
& ANEG_CFG_PS1
)
5272 ap
->flags
|= MR_LP_ADV_SYM_PAUSE
;
5273 if (ap
->rxconfig
& ANEG_CFG_PS2
)
5274 ap
->flags
|= MR_LP_ADV_ASYM_PAUSE
;
5275 if (ap
->rxconfig
& ANEG_CFG_RF1
)
5276 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT1
;
5277 if (ap
->rxconfig
& ANEG_CFG_RF2
)
5278 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT2
;
5279 if (ap
->rxconfig
& ANEG_CFG_NP
)
5280 ap
->flags
|= MR_LP_ADV_NEXT_PAGE
;
5282 ap
->link_time
= ap
->cur_time
;
5284 ap
->flags
^= (MR_TOGGLE_TX
);
5285 if (ap
->rxconfig
& 0x0008)
5286 ap
->flags
|= MR_TOGGLE_RX
;
5287 if (ap
->rxconfig
& ANEG_CFG_NP
)
5288 ap
->flags
|= MR_NP_RX
;
5289 ap
->flags
|= MR_PAGE_RX
;
5291 ap
->state
= ANEG_STATE_COMPLETE_ACK
;
5292 ret
= ANEG_TIMER_ENAB
;
5295 case ANEG_STATE_COMPLETE_ACK
:
5296 if (ap
->ability_match
!= 0 &&
5297 ap
->rxconfig
== 0) {
5298 ap
->state
= ANEG_STATE_AN_ENABLE
;
5301 delta
= ap
->cur_time
- ap
->link_time
;
5302 if (delta
> ANEG_STATE_SETTLE_TIME
) {
5303 if (!(ap
->flags
& (MR_LP_ADV_NEXT_PAGE
))) {
5304 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
5306 if ((ap
->txconfig
& ANEG_CFG_NP
) == 0 &&
5307 !(ap
->flags
& MR_NP_RX
)) {
5308 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
5316 case ANEG_STATE_IDLE_DETECT_INIT
:
5317 ap
->link_time
= ap
->cur_time
;
5318 tp
->mac_mode
&= ~MAC_MODE_SEND_CONFIGS
;
5319 tw32_f(MAC_MODE
, tp
->mac_mode
);
5322 ap
->state
= ANEG_STATE_IDLE_DETECT
;
5323 ret
= ANEG_TIMER_ENAB
;
5326 case ANEG_STATE_IDLE_DETECT
:
5327 if (ap
->ability_match
!= 0 &&
5328 ap
->rxconfig
== 0) {
5329 ap
->state
= ANEG_STATE_AN_ENABLE
;
5332 delta
= ap
->cur_time
- ap
->link_time
;
5333 if (delta
> ANEG_STATE_SETTLE_TIME
) {
5334 /* XXX another gem from the Broadcom driver :( */
5335 ap
->state
= ANEG_STATE_LINK_OK
;
5339 case ANEG_STATE_LINK_OK
:
5340 ap
->flags
|= (MR_AN_COMPLETE
| MR_LINK_OK
);
5344 case ANEG_STATE_NEXT_PAGE_WAIT_INIT
:
5345 /* ??? unimplemented */
5348 case ANEG_STATE_NEXT_PAGE_WAIT
:
5349 /* ??? unimplemented */
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)

	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;

	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
static void tg3_init_bcm8002(struct tg3 *tp)

	u32 mac_status = tr32(MAC_STATUS);

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);

	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);

	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)

	/* Deselect the channel register so we can read the PHYID */
	tg3_writephy(tp, 0x10, 0x8011);
5455 static bool tg3_setup_fiber_hw_autoneg(struct tg3
*tp
, u32 mac_status
)
5458 bool current_link_up
;
5459 u32 sg_dig_ctrl
, sg_dig_status
;
5460 u32 serdes_cfg
, expected_sg_dig_ctrl
;
5461 int workaround
, port_a
;
5464 expected_sg_dig_ctrl
= 0;
5467 current_link_up
= false;
5469 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5704_A0
&&
5470 tg3_chip_rev_id(tp
) != CHIPREV_ID_5704_A1
) {
5472 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
5475 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5476 /* preserve bits 20-23 for voltage regulator */
5477 serdes_cfg
= tr32(MAC_SERDES_CFG
) & 0x00f06fff;
5480 sg_dig_ctrl
= tr32(SG_DIG_CTRL
);
5482 if (tp
->link_config
.autoneg
!= AUTONEG_ENABLE
) {
5483 if (sg_dig_ctrl
& SG_DIG_USING_HW_AUTONEG
) {
5485 u32 val
= serdes_cfg
;
5491 tw32_f(MAC_SERDES_CFG
, val
);
5494 tw32_f(SG_DIG_CTRL
, SG_DIG_COMMON_SETUP
);
5496 if (mac_status
& MAC_STATUS_PCS_SYNCED
) {
5497 tg3_setup_flow_control(tp
, 0, 0);
5498 current_link_up
= true;
5503 /* Want auto-negotiation. */
5504 expected_sg_dig_ctrl
= SG_DIG_USING_HW_AUTONEG
| SG_DIG_COMMON_SETUP
;
5506 flowctrl
= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
5507 if (flowctrl
& ADVERTISE_1000XPAUSE
)
5508 expected_sg_dig_ctrl
|= SG_DIG_PAUSE_CAP
;
5509 if (flowctrl
& ADVERTISE_1000XPSE_ASYM
)
5510 expected_sg_dig_ctrl
|= SG_DIG_ASYM_PAUSE
;
5512 if (sg_dig_ctrl
!= expected_sg_dig_ctrl
) {
5513 if ((tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
) &&
5514 tp
->serdes_counter
&&
5515 ((mac_status
& (MAC_STATUS_PCS_SYNCED
|
5516 MAC_STATUS_RCVD_CFG
)) ==
5517 MAC_STATUS_PCS_SYNCED
)) {
5518 tp
->serdes_counter
--;
5519 current_link_up
= true;
5524 tw32_f(MAC_SERDES_CFG
, serdes_cfg
| 0xc011000);
5525 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
| SG_DIG_SOFT_RESET
);
5527 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
);
5529 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
5530 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5531 } else if (mac_status
& (MAC_STATUS_PCS_SYNCED
|
5532 MAC_STATUS_SIGNAL_DET
)) {
5533 sg_dig_status
= tr32(SG_DIG_STATUS
);
5534 mac_status
= tr32(MAC_STATUS
);
5536 if ((sg_dig_status
& SG_DIG_AUTONEG_COMPLETE
) &&
5537 (mac_status
& MAC_STATUS_PCS_SYNCED
)) {
5538 u32 local_adv
= 0, remote_adv
= 0;
5540 if (sg_dig_ctrl
& SG_DIG_PAUSE_CAP
)
5541 local_adv
|= ADVERTISE_1000XPAUSE
;
5542 if (sg_dig_ctrl
& SG_DIG_ASYM_PAUSE
)
5543 local_adv
|= ADVERTISE_1000XPSE_ASYM
;
5545 if (sg_dig_status
& SG_DIG_PARTNER_PAUSE_CAPABLE
)
5546 remote_adv
|= LPA_1000XPAUSE
;
5547 if (sg_dig_status
& SG_DIG_PARTNER_ASYM_PAUSE
)
5548 remote_adv
|= LPA_1000XPAUSE_ASYM
;
5550 tp
->link_config
.rmt_adv
=
5551 mii_adv_to_ethtool_adv_x(remote_adv
);
5553 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
5554 current_link_up
= true;
5555 tp
->serdes_counter
= 0;
5556 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5557 } else if (!(sg_dig_status
& SG_DIG_AUTONEG_COMPLETE
)) {
5558 if (tp
->serdes_counter
)
5559 tp
->serdes_counter
--;
5562 u32 val
= serdes_cfg
;
5569 tw32_f(MAC_SERDES_CFG
, val
);
5572 tw32_f(SG_DIG_CTRL
, SG_DIG_COMMON_SETUP
);
5575 /* Link parallel detection - link is up */
5576 /* only if we have PCS_SYNC and not */
5577 /* receiving config code words */
5578 mac_status
= tr32(MAC_STATUS
);
5579 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) &&
5580 !(mac_status
& MAC_STATUS_RCVD_CFG
)) {
5581 tg3_setup_flow_control(tp
, 0, 0);
5582 current_link_up
= true;
5584 TG3_PHYFLG_PARALLEL_DETECT
;
5585 tp
->serdes_counter
=
5586 SERDES_PARALLEL_DET_TIMEOUT
;
5588 goto restart_autoneg
;
5592 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
5593 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5597 return current_link_up
;
static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)

	bool current_link_up = false;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
				mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = true;

		for (i = 0; i < 30; i++) {

			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));

			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)

		mac_status = tr32(MAC_STATUS);
		if (!current_link_up &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = true;

		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = true;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));

		tw32_f(MAC_MODE, tp->mac_mode);

	return current_link_up;
5665 static int tg3_setup_fiber_phy(struct tg3
*tp
, bool force_reset
)
5668 u16 orig_active_speed
;
5669 u8 orig_active_duplex
;
5671 bool current_link_up
;
5674 orig_pause_cfg
= tp
->link_config
.active_flowctrl
;
5675 orig_active_speed
= tp
->link_config
.active_speed
;
5676 orig_active_duplex
= tp
->link_config
.active_duplex
;
5678 if (!tg3_flag(tp
, HW_AUTONEG
) &&
5680 tg3_flag(tp
, INIT_COMPLETE
)) {
5681 mac_status
= tr32(MAC_STATUS
);
5682 mac_status
&= (MAC_STATUS_PCS_SYNCED
|
5683 MAC_STATUS_SIGNAL_DET
|
5684 MAC_STATUS_CFG_CHANGED
|
5685 MAC_STATUS_RCVD_CFG
);
5686 if (mac_status
== (MAC_STATUS_PCS_SYNCED
|
5687 MAC_STATUS_SIGNAL_DET
)) {
5688 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
5689 MAC_STATUS_CFG_CHANGED
));
5694 tw32_f(MAC_TX_AUTO_NEG
, 0);
5696 tp
->mac_mode
&= ~(MAC_MODE_PORT_MODE_MASK
| MAC_MODE_HALF_DUPLEX
);
5697 tp
->mac_mode
|= MAC_MODE_PORT_MODE_TBI
;
5698 tw32_f(MAC_MODE
, tp
->mac_mode
);
5701 if (tp
->phy_id
== TG3_PHY_ID_BCM8002
)
5702 tg3_init_bcm8002(tp
);
5704 /* Enable link change event even when serdes polling. */
5705 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
5708 current_link_up
= false;
5709 tp
->link_config
.rmt_adv
= 0;
5710 mac_status
= tr32(MAC_STATUS
);
5712 if (tg3_flag(tp
, HW_AUTONEG
))
5713 current_link_up
= tg3_setup_fiber_hw_autoneg(tp
, mac_status
);
5715 current_link_up
= tg3_setup_fiber_by_hand(tp
, mac_status
);
5717 tp
->napi
[0].hw_status
->status
=
5718 (SD_STATUS_UPDATED
|
5719 (tp
->napi
[0].hw_status
->status
& ~SD_STATUS_LINK_CHG
));
5721 for (i
= 0; i
< 100; i
++) {
5722 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
5723 MAC_STATUS_CFG_CHANGED
));
5725 if ((tr32(MAC_STATUS
) & (MAC_STATUS_SYNC_CHANGED
|
5726 MAC_STATUS_CFG_CHANGED
|
5727 MAC_STATUS_LNKSTATE_CHANGED
)) == 0)
5731 mac_status
= tr32(MAC_STATUS
);
5732 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) == 0) {
5733 current_link_up
= false;
5734 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
&&
5735 tp
->serdes_counter
== 0) {
5736 tw32_f(MAC_MODE
, (tp
->mac_mode
|
5737 MAC_MODE_SEND_CONFIGS
));
5739 tw32_f(MAC_MODE
, tp
->mac_mode
);
5743 if (current_link_up
) {
5744 tp
->link_config
.active_speed
= SPEED_1000
;
5745 tp
->link_config
.active_duplex
= DUPLEX_FULL
;
5746 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
5747 LED_CTRL_LNKLED_OVERRIDE
|
5748 LED_CTRL_1000MBPS_ON
));
5750 tp
->link_config
.active_speed
= SPEED_UNKNOWN
;
5751 tp
->link_config
.active_duplex
= DUPLEX_UNKNOWN
;
5752 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
5753 LED_CTRL_LNKLED_OVERRIDE
|
5754 LED_CTRL_TRAFFIC_OVERRIDE
));
5757 if (!tg3_test_and_report_link_chg(tp
, current_link_up
)) {
5758 u32 now_pause_cfg
= tp
->link_config
.active_flowctrl
;
5759 if (orig_pause_cfg
!= now_pause_cfg
||
5760 orig_active_speed
!= tp
->link_config
.active_speed
||
5761 orig_active_duplex
!= tp
->link_config
.active_duplex
)
5762 tg3_link_report(tp
);
5768 static int tg3_setup_fiber_mii_phy(struct tg3
*tp
, bool force_reset
)
5772 u16 current_speed
= SPEED_UNKNOWN
;
5773 u8 current_duplex
= DUPLEX_UNKNOWN
;
5774 bool current_link_up
= false;
5775 u32 local_adv
, remote_adv
, sgsr
;
5777 if ((tg3_asic_rev(tp
) == ASIC_REV_5719
||
5778 tg3_asic_rev(tp
) == ASIC_REV_5720
) &&
5779 !tg3_readphy(tp
, SERDES_TG3_1000X_STATUS
, &sgsr
) &&
5780 (sgsr
& SERDES_TG3_SGMII_MODE
)) {
5785 tp
->mac_mode
&= ~MAC_MODE_PORT_MODE_MASK
;
5787 if (!(sgsr
& SERDES_TG3_LINK_UP
)) {
5788 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
5790 current_link_up
= true;
5791 if (sgsr
& SERDES_TG3_SPEED_1000
) {
5792 current_speed
= SPEED_1000
;
5793 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
5794 } else if (sgsr
& SERDES_TG3_SPEED_100
) {
5795 current_speed
= SPEED_100
;
5796 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
5798 current_speed
= SPEED_10
;
5799 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
5802 if (sgsr
& SERDES_TG3_FULL_DUPLEX
)
5803 current_duplex
= DUPLEX_FULL
;
5805 current_duplex
= DUPLEX_HALF
;
5808 tw32_f(MAC_MODE
, tp
->mac_mode
);
5811 tg3_clear_mac_status(tp
);
5813 goto fiber_setup_done
;
5816 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
5817 tw32_f(MAC_MODE
, tp
->mac_mode
);
5820 tg3_clear_mac_status(tp
);
5825 tp
->link_config
.rmt_adv
= 0;
5827 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5828 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5829 if (tg3_asic_rev(tp
) == ASIC_REV_5714
) {
5830 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
5831 bmsr
|= BMSR_LSTATUS
;
5833 bmsr
&= ~BMSR_LSTATUS
;
5836 err
|= tg3_readphy(tp
, MII_BMCR
, &bmcr
);
5838 if ((tp
->link_config
.autoneg
== AUTONEG_ENABLE
) && !force_reset
&&
5839 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
)) {
5840 /* do nothing, just check for link up at the end */
5841 } else if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
5844 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
5845 newadv
= adv
& ~(ADVERTISE_1000XFULL
| ADVERTISE_1000XHALF
|
5846 ADVERTISE_1000XPAUSE
|
5847 ADVERTISE_1000XPSE_ASYM
|
5850 newadv
|= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
5851 newadv
|= ethtool_adv_to_mii_adv_x(tp
->link_config
.advertising
);
5853 if ((newadv
!= adv
) || !(bmcr
& BMCR_ANENABLE
)) {
5854 tg3_writephy(tp
, MII_ADVERTISE
, newadv
);
5855 bmcr
|= BMCR_ANENABLE
| BMCR_ANRESTART
;
5856 tg3_writephy(tp
, MII_BMCR
, bmcr
);
5858 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
5859 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5714S
;
5860 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5867 bmcr
&= ~BMCR_SPEED1000
;
5868 new_bmcr
= bmcr
& ~(BMCR_ANENABLE
| BMCR_FULLDPLX
);
5870 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
5871 new_bmcr
|= BMCR_FULLDPLX
;
5873 if (new_bmcr
!= bmcr
) {
5874 /* BMCR_SPEED1000 is a reserved bit that needs
5875 * to be set on write.
5877 new_bmcr
|= BMCR_SPEED1000
;
5879 /* Force a linkdown */
5883 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
5884 adv
&= ~(ADVERTISE_1000XFULL
|
5885 ADVERTISE_1000XHALF
|
5887 tg3_writephy(tp
, MII_ADVERTISE
, adv
);
5888 tg3_writephy(tp
, MII_BMCR
, bmcr
|
5892 tg3_carrier_off(tp
);
5894 tg3_writephy(tp
, MII_BMCR
, new_bmcr
);
5896 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5897 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5898 if (tg3_asic_rev(tp
) == ASIC_REV_5714
) {
5899 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
5900 bmsr
|= BMSR_LSTATUS
;
5902 bmsr
&= ~BMSR_LSTATUS
;
5904 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5908 if (bmsr
& BMSR_LSTATUS
) {
5909 current_speed
= SPEED_1000
;
5910 current_link_up
= true;
5911 if (bmcr
& BMCR_FULLDPLX
)
5912 current_duplex
= DUPLEX_FULL
;
5914 current_duplex
= DUPLEX_HALF
;
5919 if (bmcr
& BMCR_ANENABLE
) {
5922 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &local_adv
);
5923 err
|= tg3_readphy(tp
, MII_LPA
, &remote_adv
);
5924 common
= local_adv
& remote_adv
;
5925 if (common
& (ADVERTISE_1000XHALF
|
5926 ADVERTISE_1000XFULL
)) {
5927 if (common
& ADVERTISE_1000XFULL
)
5928 current_duplex
= DUPLEX_FULL
;
5930 current_duplex
= DUPLEX_HALF
;
5932 tp
->link_config
.rmt_adv
=
5933 mii_adv_to_ethtool_adv_x(remote_adv
);
5934 } else if (!tg3_flag(tp
, 5780_CLASS
)) {
5935 /* Link is up via parallel detect */
5937 current_link_up
= false;
5943 if (current_link_up
&& current_duplex
== DUPLEX_FULL
)
5944 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
5946 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
5947 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
5948 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
5950 tw32_f(MAC_MODE
, tp
->mac_mode
);
5953 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
5955 tp
->link_config
.active_speed
= current_speed
;
5956 tp
->link_config
.active_duplex
= current_duplex
;
5958 tg3_test_and_report_link_chg(tp
, current_link_up
);
static void tg3_serdes_parallel_detect(struct tg3 *tp)

	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;

	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;

	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

		/* Config code words received, turn on autoneg. */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
static int tg3_setup_phy(struct tg3 *tp, bool force_reset)

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);

		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)

		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));

		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {

		tw32(HOSTCC_STAT_COAL_TICKS,
		     tp->coal.stats_block_coalesce_usecs);

		tw32(HOSTCC_STAT_COAL_TICKS, 0);

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);

		val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |

		val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
/* tp->lock must be held */
static u64 tg3_refclk_read(struct tg3 *tp)

	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;

/* tp->lock must be held */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)

	tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
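/* The EAV reference clock is a 64-bit nanosecond counter exposed through a
 * pair of 32-bit registers.  The writer stops the counter, loads the low and
 * high words, then resumes it so both halves take effect together.  For
 * example, newval = 0x100000001ULL stores 0x00000001 in the LSB register and
 * 0x00000001 in the MSB register.
 */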
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static inline void tg3_full_unlock(struct tg3 *tp);

static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)

	struct tg3 *tp = netdev_priv(dev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;

	if (tg3_flag(tp, PTP_CAPABLE)) {
		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
					 SOF_TIMESTAMPING_RX_HARDWARE |
					 SOF_TIMESTAMPING_RAW_HARDWARE;

		info->phc_index = ptp_clock_index(tp->ptp_clock);

		info->phc_index = -1;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)

	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	bool neg_adj = false;

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 *
	 * So conversion from ppb to correction value is
	 *		ppb * (1 << 24) / 1000000000
	 */
	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
		     TG3_EAV_REF_CLK_CORRECT_MASK;

	tg3_full_lock(tp, 0);

		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
		     TG3_EAV_REF_CLK_CORRECT_EN |
		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);

		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);
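/* Example: a tg3_ptp_adjfreq() request of ppb = 1000 (+1 ppm) yields
 * correction = (1000 * (1 << 24)) / 1000000000 = 16 after the integer
 * division; that value is then OR'd into TG3_EAV_REF_CLK_CORRECT_CTL
 * together with TG3_EAV_REF_CLK_CORRECT_EN.
 */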
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)

	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);
static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)

	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	ns = tg3_refclk_read(tp);
	ns += tp->ptp_adjust;
	tg3_full_unlock(tp);

	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
	ts->tv_nsec = remainder;
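/* div_u64_rem() splits the 64-bit nanosecond count into whole seconds plus
 * the leftover nanoseconds; e.g. ns = 2500000123 gives tv_sec = 2 and
 * tv_nsec = 500000123.
 */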
static int tg3_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec *ts)

	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	ns = timespec_to_ns(ts);

	tg3_full_lock(tp, 0);
	tg3_refclk_write(tp, ns);

	tg3_full_unlock(tp);

static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,

	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime	= tg3_ptp_gettime,
	.settime	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
				     struct skb_shared_hwtstamps *timestamp)

	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
	timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
/* tp->lock must be held */
static void tg3_ptp_init(struct tg3 *tp)

	if (!tg3_flag(tp, PTP_CAPABLE))

	/* Initialize the hardware clock to the system time. */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));

	tp->ptp_info = tg3_ptp_caps;

/* tp->lock must be held */
static void tg3_ptp_resume(struct tg3 *tp)

	if (!tg3_flag(tp, PTP_CAPABLE))

	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);

static void tg3_ptp_fini(struct tg3 *tp)

	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)

	ptp_clock_unregister(tp->ptp_clock);
	tp->ptp_clock = NULL;
static inline int tg3_irq_sync(struct tg3 *tp)

	return tp->irq_sync;

static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
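/* tg3_rd32_loop() copies a block of registers into the dump buffer at the
 * matching byte offset: dst is first advanced by "off" bytes, then "len"
 * bytes of registers starting at "off" are read.  For example, the
 * tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0) call below stores the 0x4f0-byte
 * MAC register block at regs + MAC_MODE / sizeof(u32), preserving the
 * hardware register layout inside the dump.
 */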
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)

	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
static void tg3_dump_state(struct tg3 *tp)

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);

		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */

			   "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",

			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

			   "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",

			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,

			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 */
static void tg3_tx_recover(struct tg3 *tp)

	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	tg3_flag_set(tp, TX_RECOVERY_PENDING);

static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)

	/* Tell compiler to fetch tx indices from memory. */

	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
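/* tg3_tx_avail() reports how many TX descriptors are still free: the masked
 * subtraction counts the in-flight descriptors even across index wrap-around.
 * For example, with a ring of TG3_TX_RING_SIZE = 512 entries, tx_prod = 3 and
 * tx_cons = 508 gives (3 - 508) & 511 = 7 descriptors in flight, so a
 * tx_pending of 511 leaves 504 available.
 */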
/* Tigon3 never reports partial packet sends. So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)

	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	if (tg3_flag(tp, ENABLE_TSS))

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;

		if (unlikely(skb == NULL)) {

		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),

		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];

			sw_idx = NEXT_TX(sw_idx);

		bytes_compl += skb->len;

		if (unlikely(tx_bug)) {

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
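/* NEXT_TX() used above advances a TX ring index with wrap-around (it is
 * effectively (idx + 1) & (TG3_TX_RING_SIZE - 1)), so the completion loop
 * can keep stepping sw_idx until it catches up with the hardware's hw_idx.
 */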
static void tg3_frag_free(bool is_frag, void *data)

		put_page(virt_to_head_page(data));

static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)

	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses. For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address). Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)

	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;

	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;

		data = kmalloc(skb_size, GFP_ATOMIC);

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),

				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);

	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);
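/* The buffer sizing above mirrors tg3_rx_data_free(): the DMA map size plus
 * the TG3_RX_OFFSET() headroom and room for struct skb_shared_info, each
 * rounded up with SKB_DATA_ALIGN().  When the total still fits in one page
 * the cheaper netdev_alloc_frag() path is taken, otherwise the buffer falls
 * back to kmalloc(); the standard ring sizes from tp->rx_pkt_map_sz while
 * the jumbo ring always uses TG3_RX_JMB_MAP_SZ.
 */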
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant. See notes above
 * tg3_alloc_rx_data for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)

	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	src_map->data = NULL;
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host. The chip does not write into the original descriptor the
 * RX buffer was obtained from. The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
 * it is first placed into the on-chip ram. When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective. If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
6702 struct tg3
*tp
= tnapi
->tp
;
6703 u32 work_mask
, rx_std_posted
= 0;
6704 u32 std_prod_idx
, jmb_prod_idx
;
6705 u32 sw_idx
= tnapi
->rx_rcb_ptr
;
6708 struct tg3_rx_prodring_set
*tpr
= &tnapi
->prodring
;
6710 hw_idx
= *(tnapi
->rx_rcb_prod_idx
);
6712 * We need to order the read of hw_idx and the read of
6713 * the opaque cookie.
6718 std_prod_idx
= tpr
->rx_std_prod_idx
;
6719 jmb_prod_idx
= tpr
->rx_jmb_prod_idx
;
6720 while (sw_idx
!= hw_idx
&& budget
> 0) {
6721 struct ring_info
*ri
;
6722 struct tg3_rx_buffer_desc
*desc
= &tnapi
->rx_rcb
[sw_idx
];
6724 struct sk_buff
*skb
;
6725 dma_addr_t dma_addr
;
6726 u32 opaque_key
, desc_idx
, *post_ptr
;
6730 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
6731 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
6732 if (opaque_key
== RXD_OPAQUE_RING_STD
) {
6733 ri
= &tp
->napi
[0].prodring
.rx_std_buffers
[desc_idx
];
6734 dma_addr
= dma_unmap_addr(ri
, mapping
);
6736 post_ptr
= &std_prod_idx
;
6738 } else if (opaque_key
== RXD_OPAQUE_RING_JUMBO
) {
6739 ri
= &tp
->napi
[0].prodring
.rx_jmb_buffers
[desc_idx
];
6740 dma_addr
= dma_unmap_addr(ri
, mapping
);
6742 post_ptr
= &jmb_prod_idx
;
6744 goto next_pkt_nopost
;
6746 work_mask
|= opaque_key
;
6748 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
6749 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
)) {
6751 tg3_recycle_rx(tnapi
, tpr
, opaque_key
,
6752 desc_idx
, *post_ptr
);
6754 /* Other statistics kept track of by card. */
6759 prefetch(data
+ TG3_RX_OFFSET(tp
));
6760 len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
) -
6763 if ((desc
->type_flags
& RXD_FLAG_PTPSTAT_MASK
) ==
6764 RXD_FLAG_PTPSTAT_PTPV1
||
6765 (desc
->type_flags
& RXD_FLAG_PTPSTAT_MASK
) ==
6766 RXD_FLAG_PTPSTAT_PTPV2
) {
6767 tstamp
= tr32(TG3_RX_TSTAMP_LSB
);
6768 tstamp
|= (u64
)tr32(TG3_RX_TSTAMP_MSB
) << 32;
6771 if (len
> TG3_RX_COPY_THRESH(tp
)) {
6773 unsigned int frag_size
;
6775 skb_size
= tg3_alloc_rx_data(tp
, tpr
, opaque_key
,
6776 *post_ptr
, &frag_size
);
6780 pci_unmap_single(tp
->pdev
, dma_addr
, skb_size
,
6781 PCI_DMA_FROMDEVICE
);
6783 skb
= build_skb(data
, frag_size
);
6785 tg3_frag_free(frag_size
!= 0, data
);
6786 goto drop_it_no_recycle
;
6788 skb_reserve(skb
, TG3_RX_OFFSET(tp
));
6789 /* Ensure that the update to the data happens
6790 * after the usage of the old DMA mapping.
6797 tg3_recycle_rx(tnapi
, tpr
, opaque_key
,
6798 desc_idx
, *post_ptr
);
6800 skb
= netdev_alloc_skb(tp
->dev
,
6801 len
+ TG3_RAW_IP_ALIGN
);
6803 goto drop_it_no_recycle
;
6805 skb_reserve(skb
, TG3_RAW_IP_ALIGN
);
6806 pci_dma_sync_single_for_cpu(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
6808 data
+ TG3_RX_OFFSET(tp
),
6810 pci_dma_sync_single_for_device(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
6815 tg3_hwclock_to_timestamp(tp
, tstamp
,
6816 skb_hwtstamps(skb
));
6818 if ((tp
->dev
->features
& NETIF_F_RXCSUM
) &&
6819 (desc
->type_flags
& RXD_FLAG_TCPUDP_CSUM
) &&
6820 (((desc
->ip_tcp_csum
& RXD_TCPCSUM_MASK
)
6821 >> RXD_TCPCSUM_SHIFT
) == 0xffff))
6822 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
6824 skb_checksum_none_assert(skb
);
6826 skb
->protocol
= eth_type_trans(skb
, tp
->dev
);
6828 if (len
> (tp
->dev
->mtu
+ ETH_HLEN
) &&
6829 skb
->protocol
!= htons(ETH_P_8021Q
)) {
6831 goto drop_it_no_recycle
;
6834 if (desc
->type_flags
& RXD_FLAG_VLAN
&&
6835 !(tp
->rx_mode
& RX_MODE_KEEP_VLAN_TAG
))
6836 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
),
6837 desc
->err_vlan
& RXD_VLAN_MASK
);
6839 napi_gro_receive(&tnapi
->napi
, skb
);
6847 if (unlikely(rx_std_posted
>= tp
->rx_std_max_post
)) {
6848 tpr
->rx_std_prod_idx
= std_prod_idx
&
6849 tp
->rx_std_ring_mask
;
6850 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
,
6851 tpr
->rx_std_prod_idx
);
6852 work_mask
&= ~RXD_OPAQUE_RING_STD
;
6857 sw_idx
&= tp
->rx_ret_ring_mask
;
6859 /* Refresh hw_idx to see if there is new work */
6860 if (sw_idx
== hw_idx
) {
6861 hw_idx
= *(tnapi
->rx_rcb_prod_idx
);
6866 /* ACK the status ring. */
6867 tnapi
->rx_rcb_ptr
= sw_idx
;
6868 tw32_rx_mbox(tnapi
->consmbox
, sw_idx
);
6870 /* Refill RX ring(s). */
6871 if (!tg3_flag(tp
, ENABLE_RSS
)) {
6872 /* Sync BD data before updating mailbox */
6875 if (work_mask
& RXD_OPAQUE_RING_STD
) {
6876 tpr
->rx_std_prod_idx
= std_prod_idx
&
6877 tp
->rx_std_ring_mask
;
6878 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
,
6879 tpr
->rx_std_prod_idx
);
6881 if (work_mask
& RXD_OPAQUE_RING_JUMBO
) {
6882 tpr
->rx_jmb_prod_idx
= jmb_prod_idx
&
6883 tp
->rx_jmb_ring_mask
;
6884 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
,
6885 tpr
->rx_jmb_prod_idx
);
6888 } else if (work_mask
) {
6889 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6890 * updated before the producer indices can be updated.
6894 tpr
->rx_std_prod_idx
= std_prod_idx
& tp
->rx_std_ring_mask
;
6895 tpr
->rx_jmb_prod_idx
= jmb_prod_idx
& tp
->rx_jmb_ring_mask
;
6897 if (tnapi
!= &tp
->napi
[1]) {
6898 tp
->rx_refill
= true;
6899 napi_schedule(&tp
->napi
[1].napi
);
static void tg3_poll_link(struct tg3 *tp)

	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {

				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));

				tg3_setup_phy(tp, false);
			spin_unlock(&tp->lock);
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
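/* The transfer above exists because, with RSS, only tp->napi[0].prodring is
 * a producer ring the hardware actually consumes; the per-vector rings are
 * refilled locally and then drained into it from tg3_poll_work().  The
 * smp_rmb() calls assume the recycle path publishes the buffer pointers
 * before it advances the shadowed producer indices, as the comments above
 * describe.
 */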
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
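/* Note: tg3_reset_task_schedule() uses test_and_set_bit() so the reset work
 * is queued at most once, and tg3_reset_task_cancel() waits for any queued
 * work to finish before clearing the pending flags, so callers can assume
 * no reset work is in flight once it returns.
 */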
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}

/* tp->lock must be held */
static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
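/* Typical usage: tg3_full_lock(tp, 0) for paths that only need to exclude
 * other BH/process context, and tg3_full_lock(tp, 1) before shutting the
 * chip down so that in-flight interrupt handlers are also drained via
 * tg3_irq_quiesce().  Every such path unlocks with tg3_full_unlock().
 */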
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
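/* This handler is only installed temporarily for the interrupt self-test:
 * it claims the vector (and masks further interrupts) when the status block
 * shows an update or INTA is still asserted, which lets the test code
 * confirm that the requested interrupt was actually delivered.
 */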
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif

static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return (base > 0xffffdcc0) && (base + len + 8 < base);
}
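/* Illustrative example: a buffer mapped at 0xffffff00 with len = 0x200
 * gives base + len + 8 = 0x1_0000_0108, which truncates to 0x108 in u32
 * arithmetic, so "base + len + 8 < base" holds and the buffer straddles a
 * 4GB boundary, forcing the DMA-bug workaround.  The base > 0xffffdcc0
 * pre-check simply skips the math for mappings that start too far below a
 * 4GB boundary for a single frame to reach it.
 */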
/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
 * of any 4GB boundaries: 4G, 8G, etc
 */
static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					   u32 len, u32 mss)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
		u32 base = (u32) mapping & 0xffffffff;

		return ((base + len + (mss & 0x3fff)) < base);
	}
	return 0;
}

/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
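/* Each transmit BD holds the 64-bit DMA address split across addr_hi and
 * addr_lo, the fragment length and TXD_FLAG_* bits packed into len_flags,
 * and the MSS plus VLAN tag packed into vlan_tag.  tg3_tx_frag_set() below
 * builds descriptors through this helper and handles splitting fragments
 * that trip the DMA limits.
 */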
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	if (!skb)
		return;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(*pskb);
	*pskb = new_skb;
	return ret;
}

static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * checking tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
7782 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7783 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7785 static netdev_tx_t
tg3_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
7787 struct tg3
*tp
= netdev_priv(dev
);
7788 u32 len
, entry
, base_flags
, mss
, vlan
= 0;
7790 int i
= -1, would_hit_hwbug
;
7792 struct tg3_napi
*tnapi
;
7793 struct netdev_queue
*txq
;
7796 txq
= netdev_get_tx_queue(dev
, skb_get_queue_mapping(skb
));
7797 tnapi
= &tp
->napi
[skb_get_queue_mapping(skb
)];
7798 if (tg3_flag(tp
, ENABLE_TSS
))
7801 budget
= tg3_tx_avail(tnapi
);
7803 /* We are running in BH disabled context with netif_tx_lock
7804 * and TX reclaim runs via tp->napi.poll inside of a software
7805 * interrupt. Furthermore, IRQ processing runs lockless so we have
7806 * no IRQ context deadlocks to worry about either. Rejoice!
7808 if (unlikely(budget
<= (skb_shinfo(skb
)->nr_frags
+ 1))) {
7809 if (!netif_tx_queue_stopped(txq
)) {
7810 netif_tx_stop_queue(txq
);
7812 /* This is a hard error, log it. */
7814 "BUG! Tx Ring full when queue awake!\n");
7816 return NETDEV_TX_BUSY
;
7819 entry
= tnapi
->tx_prod
;
7821 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
7822 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
7824 mss
= skb_shinfo(skb
)->gso_size
;
7827 u32 tcp_opt_len
, hdr_len
;
7829 if (skb_header_cloned(skb
) &&
7830 pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
))
7834 tcp_opt_len
= tcp_optlen(skb
);
7836 hdr_len
= skb_transport_offset(skb
) + tcp_hdrlen(skb
) - ETH_HLEN
;
7838 if (!skb_is_gso_v6(skb
)) {
7840 iph
->tot_len
= htons(mss
+ hdr_len
);
7843 if (unlikely((ETH_HLEN
+ hdr_len
) > 80) &&
7844 tg3_flag(tp
, TSO_BUG
))
7845 return tg3_tso_bug(tp
, skb
);
7847 base_flags
|= (TXD_FLAG_CPU_PRE_DMA
|
7848 TXD_FLAG_CPU_POST_DMA
);
7850 if (tg3_flag(tp
, HW_TSO_1
) ||
7851 tg3_flag(tp
, HW_TSO_2
) ||
7852 tg3_flag(tp
, HW_TSO_3
)) {
7853 tcp_hdr(skb
)->check
= 0;
7854 base_flags
&= ~TXD_FLAG_TCPUDP_CSUM
;
7856 tcp_hdr(skb
)->check
= ~csum_tcpudp_magic(iph
->saddr
,
7861 if (tg3_flag(tp
, HW_TSO_3
)) {
7862 mss
|= (hdr_len
& 0xc) << 12;
7864 base_flags
|= 0x00000010;
7865 base_flags
|= (hdr_len
& 0x3e0) << 5;
7866 } else if (tg3_flag(tp
, HW_TSO_2
))
7867 mss
|= hdr_len
<< 9;
7868 else if (tg3_flag(tp
, HW_TSO_1
) ||
7869 tg3_asic_rev(tp
) == ASIC_REV_5705
) {
7870 if (tcp_opt_len
|| iph
->ihl
> 5) {
7873 tsflags
= (iph
->ihl
- 5) + (tcp_opt_len
>> 2);
7874 mss
|= (tsflags
<< 11);
7877 if (tcp_opt_len
|| iph
->ihl
> 5) {
7880 tsflags
= (iph
->ihl
- 5) + (tcp_opt_len
>> 2);
7881 base_flags
|= tsflags
<< 12;
7886 if (tg3_flag(tp
, USE_JUMBO_BDFLAG
) &&
7887 !mss
&& skb
->len
> VLAN_ETH_FRAME_LEN
)
7888 base_flags
|= TXD_FLAG_JMB_PKT
;
7890 if (vlan_tx_tag_present(skb
)) {
7891 base_flags
|= TXD_FLAG_VLAN
;
7892 vlan
= vlan_tx_tag_get(skb
);
7895 if ((unlikely(skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
)) &&
7896 tg3_flag(tp
, TX_TSTAMP_EN
)) {
7897 skb_shinfo(skb
)->tx_flags
|= SKBTX_IN_PROGRESS
;
7898 base_flags
|= TXD_FLAG_HWTSTAMP
;
7901 len
= skb_headlen(skb
);
7903 mapping
= pci_map_single(tp
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
7904 if (pci_dma_mapping_error(tp
->pdev
, mapping
))
7908 tnapi
->tx_buffers
[entry
].skb
= skb
;
7909 dma_unmap_addr_set(&tnapi
->tx_buffers
[entry
], mapping
, mapping
);
7911 would_hit_hwbug
= 0;
7913 if (tg3_flag(tp
, 5701_DMA_BUG
))
7914 would_hit_hwbug
= 1;
7916 if (tg3_tx_frag_set(tnapi
, &entry
, &budget
, mapping
, len
, base_flags
|
7917 ((skb_shinfo(skb
)->nr_frags
== 0) ? TXD_FLAG_END
: 0),
7919 would_hit_hwbug
= 1;
7920 } else if (skb_shinfo(skb
)->nr_frags
> 0) {
7923 if (!tg3_flag(tp
, HW_TSO_1
) &&
7924 !tg3_flag(tp
, HW_TSO_2
) &&
7925 !tg3_flag(tp
, HW_TSO_3
))
7928 /* Now loop through additional data
7929 * fragments, and queue them.
7931 last
= skb_shinfo(skb
)->nr_frags
- 1;
7932 for (i
= 0; i
<= last
; i
++) {
7933 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
7935 len
= skb_frag_size(frag
);
7936 mapping
= skb_frag_dma_map(&tp
->pdev
->dev
, frag
, 0,
7937 len
, DMA_TO_DEVICE
);
7939 tnapi
->tx_buffers
[entry
].skb
= NULL
;
7940 dma_unmap_addr_set(&tnapi
->tx_buffers
[entry
], mapping
,
7942 if (dma_mapping_error(&tp
->pdev
->dev
, mapping
))
7946 tg3_tx_frag_set(tnapi
, &entry
, &budget
, mapping
,
7948 ((i
== last
) ? TXD_FLAG_END
: 0),
7950 would_hit_hwbug
= 1;
7956 if (would_hit_hwbug
) {
7957 tg3_tx_skb_unmap(tnapi
, tnapi
->tx_prod
, i
);
7959 /* If the workaround fails due to memory/mapping
7960 * failure, silently drop this packet.
7962 entry
= tnapi
->tx_prod
;
7963 budget
= tg3_tx_avail(tnapi
);
7964 if (tigon3_dma_hwbug_workaround(tnapi
, &skb
, &entry
, &budget
,
7965 base_flags
, mss
, vlan
))
7969 skb_tx_timestamp(skb
);
7970 netdev_tx_sent_queue(txq
, skb
->len
);
7972 /* Sync BD data before updating mailbox */
7975 /* Packets are ready, update Tx producer idx local and on card. */
7976 tw32_tx_mbox(tnapi
->prodmbox
, entry
);
7978 tnapi
->tx_prod
= entry
;
7979 if (unlikely(tg3_tx_avail(tnapi
) <= (MAX_SKB_FRAGS
+ 1))) {
7980 netif_tx_stop_queue(txq
);
7982 /* netif_tx_stop_queue() must be done before checking
7983 * checking tx index in tg3_tx_avail() below, because in
7984 * tg3_tx(), we update tx index before checking for
7985 * netif_tx_queue_stopped().
7988 if (tg3_tx_avail(tnapi
) > TG3_TX_WAKEUP_THRESH(tnapi
))
7989 netif_tx_wake_queue(txq
);
7993 return NETDEV_TX_OK
;
7996 tg3_tx_skb_unmap(tnapi
, tnapi
->tx_prod
, --i
);
7997 tnapi
->tx_buffers
[tnapi
->tx_prod
].skb
= NULL
;
8002 return NETDEV_TX_OK
;
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}
8033 static int tg3_phy_lpbk_set(struct tg3
*tp
, u32 speed
, bool extlpbk
)
8035 u32 val
, bmcr
, mac_mode
, ptest
= 0;
8037 tg3_phy_toggle_apd(tp
, false);
8038 tg3_phy_toggle_automdix(tp
, false);
8040 if (extlpbk
&& tg3_phy_set_extloopbk(tp
))
8043 bmcr
= BMCR_FULLDPLX
;
8048 bmcr
|= BMCR_SPEED100
;
8052 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
8054 bmcr
|= BMCR_SPEED100
;
8057 bmcr
|= BMCR_SPEED1000
;
8062 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_FET
)) {
8063 tg3_readphy(tp
, MII_CTRL1000
, &val
);
8064 val
|= CTL1000_AS_MASTER
|
8065 CTL1000_ENABLE_MASTER
;
8066 tg3_writephy(tp
, MII_CTRL1000
, val
);
8068 ptest
= MII_TG3_FET_PTEST_TRIM_SEL
|
8069 MII_TG3_FET_PTEST_TRIM_2
;
8070 tg3_writephy(tp
, MII_TG3_FET_PTEST
, ptest
);
8073 bmcr
|= BMCR_LOOPBACK
;
8075 tg3_writephy(tp
, MII_BMCR
, bmcr
);
8077 /* The write needs to be flushed for the FETs */
8078 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
)
8079 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
8083 if ((tp
->phy_flags
& TG3_PHYFLG_IS_FET
) &&
8084 tg3_asic_rev(tp
) == ASIC_REV_5785
) {
8085 tg3_writephy(tp
, MII_TG3_FET_PTEST
, ptest
|
8086 MII_TG3_FET_PTEST_FRC_TX_LINK
|
8087 MII_TG3_FET_PTEST_FRC_TX_LOCK
);
8089 /* The write needs to be flushed for the AC131 */
8090 tg3_readphy(tp
, MII_TG3_FET_PTEST
, &val
);
8093 /* Reset to prevent losing 1st rx packet intermittently */
8094 if ((tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) &&
8095 tg3_flag(tp
, 5780_CLASS
)) {
8096 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
8098 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
8101 mac_mode
= tp
->mac_mode
&
8102 ~(MAC_MODE_PORT_MODE_MASK
| MAC_MODE_HALF_DUPLEX
);
8103 if (speed
== SPEED_1000
)
8104 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
8106 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
8108 if (tg3_asic_rev(tp
) == ASIC_REV_5700
) {
8109 u32 masked_phy_id
= tp
->phy_id
& TG3_PHY_ID_MASK
;
8111 if (masked_phy_id
== TG3_PHY_ID_BCM5401
)
8112 mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
8113 else if (masked_phy_id
== TG3_PHY_ID_BCM5411
)
8114 mac_mode
|= MAC_MODE_LINK_POLARITY
;
8116 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
8117 MII_TG3_EXT_CTRL_LNK3_LED_MODE
);
8120 tw32(MAC_MODE
, mac_mode
);
static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, true);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}

static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}
8207 /* Initialize rx rings for packet processing.
8209 * The chip has been shut down and the driver detached from
8210 * the networking, so no interrupts or new tx packets will
8211 * end up in the driver. tp->{tx,}lock are held and thus
8214 static int tg3_rx_prodring_alloc(struct tg3
*tp
,
8215 struct tg3_rx_prodring_set
*tpr
)
8217 u32 i
, rx_pkt_dma_sz
;
8219 tpr
->rx_std_cons_idx
= 0;
8220 tpr
->rx_std_prod_idx
= 0;
8221 tpr
->rx_jmb_cons_idx
= 0;
8222 tpr
->rx_jmb_prod_idx
= 0;
8224 if (tpr
!= &tp
->napi
[0].prodring
) {
8225 memset(&tpr
->rx_std_buffers
[0], 0,
8226 TG3_RX_STD_BUFF_RING_SIZE(tp
));
8227 if (tpr
->rx_jmb_buffers
)
8228 memset(&tpr
->rx_jmb_buffers
[0], 0,
8229 TG3_RX_JMB_BUFF_RING_SIZE(tp
));
8233 /* Zero out all descriptors. */
8234 memset(tpr
->rx_std
, 0, TG3_RX_STD_RING_BYTES(tp
));
8236 rx_pkt_dma_sz
= TG3_RX_STD_DMA_SZ
;
8237 if (tg3_flag(tp
, 5780_CLASS
) &&
8238 tp
->dev
->mtu
> ETH_DATA_LEN
)
8239 rx_pkt_dma_sz
= TG3_RX_JMB_DMA_SZ
;
8240 tp
->rx_pkt_map_sz
= TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz
);
8242 /* Initialize invariants of the rings, we only set this
8243 * stuff once. This works because the card does not
8244 * write into the rx buffer posting rings.
8246 for (i
= 0; i
<= tp
->rx_std_ring_mask
; i
++) {
8247 struct tg3_rx_buffer_desc
*rxd
;
8249 rxd
= &tpr
->rx_std
[i
];
8250 rxd
->idx_len
= rx_pkt_dma_sz
<< RXD_LEN_SHIFT
;
8251 rxd
->type_flags
= (RXD_FLAG_END
<< RXD_FLAGS_SHIFT
);
8252 rxd
->opaque
= (RXD_OPAQUE_RING_STD
|
8253 (i
<< RXD_OPAQUE_INDEX_SHIFT
));
8256 /* Now allocate fresh SKBs for each rx ring. */
8257 for (i
= 0; i
< tp
->rx_pending
; i
++) {
8258 unsigned int frag_size
;
8260 if (tg3_alloc_rx_data(tp
, tpr
, RXD_OPAQUE_RING_STD
, i
,
8262 netdev_warn(tp
->dev
,
8263 "Using a smaller RX standard ring. Only "
8264 "%d out of %d buffers were allocated "
8265 "successfully\n", i
, tp
->rx_pending
);
8273 if (!tg3_flag(tp
, JUMBO_CAPABLE
) || tg3_flag(tp
, 5780_CLASS
))
8276 memset(tpr
->rx_jmb
, 0, TG3_RX_JMB_RING_BYTES(tp
));
8278 if (!tg3_flag(tp
, JUMBO_RING_ENABLE
))
8281 for (i
= 0; i
<= tp
->rx_jmb_ring_mask
; i
++) {
8282 struct tg3_rx_buffer_desc
*rxd
;
8284 rxd
= &tpr
->rx_jmb
[i
].std
;
8285 rxd
->idx_len
= TG3_RX_JMB_DMA_SZ
<< RXD_LEN_SHIFT
;
8286 rxd
->type_flags
= (RXD_FLAG_END
<< RXD_FLAGS_SHIFT
) |
8288 rxd
->opaque
= (RXD_OPAQUE_RING_JUMBO
|
8289 (i
<< RXD_OPAQUE_INDEX_SHIFT
));
8292 for (i
= 0; i
< tp
->rx_jumbo_pending
; i
++) {
8293 unsigned int frag_size
;
8295 if (tg3_alloc_rx_data(tp
, tpr
, RXD_OPAQUE_RING_JUMBO
, i
,
8297 netdev_warn(tp
->dev
,
8298 "Using a smaller RX jumbo ring. Only %d "
8299 "out of %d buffers were allocated "
8300 "successfully\n", i
, tp
->rx_jumbo_pending
);
8303 tp
->rx_jumbo_pending
= i
;
8312 tg3_rx_prodring_free(tp
, tpr
);
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
8371 /* Free up pending packets in all rx/tx rings.
8373 * The chip has been shut down and the driver detached from
8374 * the networking, so no interrupts or new tx packets will
8375 * end up in the driver. tp->{tx,}lock is not held and we are not
8376 * in an interrupt context and thus may sleep.
8378 static void tg3_free_rings(struct tg3
*tp
)
8382 for (j
= 0; j
< tp
->irq_cnt
; j
++) {
8383 struct tg3_napi
*tnapi
= &tp
->napi
[j
];
8385 tg3_rx_prodring_free(tp
, &tnapi
->prodring
);
8387 if (!tnapi
->tx_buffers
)
8390 for (i
= 0; i
< TG3_TX_RING_SIZE
; i
++) {
8391 struct sk_buff
*skb
= tnapi
->tx_buffers
[i
].skb
;
8396 tg3_tx_skb_unmap(tnapi
, i
,
8397 skb_shinfo(skb
)->nr_frags
- 1);
8399 dev_kfree_skb_any(skb
);
8401 netdev_tx_reset_queue(netdev_get_tx_queue(tp
->dev
, j
));
8405 /* Initialize tx/rx rings for packet processing.
8407 * The chip has been shut down and the driver detached from
8408 * the networking, so no interrupts or new tx packets will
8409 * end up in the driver. tp->{tx,}lock are held and thus
8412 static int tg3_init_rings(struct tg3
*tp
)
8416 /* Free up all the SKBs. */
8419 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
8420 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
8422 tnapi
->last_tag
= 0;
8423 tnapi
->last_irq_tag
= 0;
8424 tnapi
->hw_status
->status
= 0;
8425 tnapi
->hw_status
->status_tag
= 0;
8426 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
8431 memset(tnapi
->tx_ring
, 0, TG3_TX_RING_BYTES
);
8433 tnapi
->rx_rcb_ptr
= 0;
8435 memset(tnapi
->rx_rcb
, 0, TG3_RX_RCB_RING_BYTES(tp
));
8437 if (tg3_rx_prodring_alloc(tp
, &tnapi
->prodring
)) {
static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring,
					  tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}
static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts.  Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
					    TG3_TX_RING_SIZE, GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}
static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}
8516 static int tg3_mem_rx_acquire(struct tg3
*tp
)
8518 unsigned int i
, limit
;
8520 limit
= tp
->rxq_cnt
;
8522 /* If RSS is enabled, we need a (dummy) producer ring
8523 * set on vector zero. This is the true hw prodring.
8525 if (tg3_flag(tp
, ENABLE_RSS
))
8528 for (i
= 0; i
< limit
; i
++) {
8529 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
8531 if (tg3_rx_prodring_init(tp
, &tnapi
->prodring
))
8534 /* If multivector RSS is enabled, vector 0
8535 * does not handle rx or tx interrupts.
8536 * Don't allocate any resources for it.
8538 if (!i
&& tg3_flag(tp
, ENABLE_RSS
))
8541 tnapi
->rx_rcb
= dma_alloc_coherent(&tp
->pdev
->dev
,
8542 TG3_RX_RCB_RING_BYTES(tp
),
8543 &tnapi
->rx_rcb_mapping
,
8544 GFP_KERNEL
| __GFP_ZERO
);
8552 tg3_mem_rx_release(tp
);
8557 * Must not be invoked with interrupt sources disabled and
8558 * the hardware shutdown down.
8560 static void tg3_free_consistent(struct tg3
*tp
)
8564 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
8565 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
8567 if (tnapi
->hw_status
) {
8568 dma_free_coherent(&tp
->pdev
->dev
, TG3_HW_STATUS_SIZE
,
8570 tnapi
->status_mapping
);
8571 tnapi
->hw_status
= NULL
;
8575 tg3_mem_rx_release(tp
);
8576 tg3_mem_tx_release(tp
);
8579 dma_free_coherent(&tp
->pdev
->dev
, sizeof(struct tg3_hw_stats
),
8580 tp
->hw_stats
, tp
->stats_mapping
);
8581 tp
->hw_stats
= NULL
;
8586 * Must not be invoked with interrupt sources disabled and
8587 * the hardware shutdown down. Can sleep.
8589 static int tg3_alloc_consistent(struct tg3
*tp
)
8593 tp
->hw_stats
= dma_alloc_coherent(&tp
->pdev
->dev
,
8594 sizeof(struct tg3_hw_stats
),
8596 GFP_KERNEL
| __GFP_ZERO
);
8600 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
8601 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
8602 struct tg3_hw_status
*sblk
;
8604 tnapi
->hw_status
= dma_alloc_coherent(&tp
->pdev
->dev
,
8606 &tnapi
->status_mapping
,
8607 GFP_KERNEL
| __GFP_ZERO
);
8608 if (!tnapi
->hw_status
)
8611 sblk
= tnapi
->hw_status
;
8613 if (tg3_flag(tp
, ENABLE_RSS
)) {
8614 u16
*prodptr
= NULL
;
8617 * When RSS is enabled, the status block format changes
8618 * slightly. The "rx_jumbo_consumer", "reserved",
8619 * and "rx_mini_consumer" members get mapped to the
8620 * other three rx return ring producer indexes.
8624 prodptr
= &sblk
->idx
[0].rx_producer
;
8627 prodptr
= &sblk
->rx_jumbo_consumer
;
8630 prodptr
= &sblk
->reserved
;
8633 prodptr
= &sblk
->rx_mini_consumer
;
8636 tnapi
->rx_rcb_prod_idx
= prodptr
;
8638 tnapi
->rx_rcb_prod_idx
= &sblk
->idx
[0].rx_producer
;
8642 if (tg3_mem_tx_acquire(tp
) || tg3_mem_rx_acquire(tp
))
8648 tg3_free_consistent(tp
);
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
8657 static int tg3_stop_block(struct tg3
*tp
, unsigned long ofs
, u32 enable_bit
, bool silent
)
8662 if (tg3_flag(tp
, 5705_PLUS
)) {
8669 /* We can't enable/disable these bits of the
8670 * 5705/5750, just say success.
8683 for (i
= 0; i
< MAX_WAIT_CNT
; i
++) {
8684 if (pci_channel_offline(tp
->pdev
)) {
8685 dev_err(&tp
->pdev
->dev
,
8686 "tg3_stop_block device offline, "
8687 "ofs=%lx enable_bit=%x\n",
8694 if ((val
& enable_bit
) == 0)
8698 if (i
== MAX_WAIT_CNT
&& !silent
) {
8699 dev_err(&tp
->pdev
->dev
,
8700 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8708 /* tp->lock is held. */
8709 static int tg3_abort_hw(struct tg3
*tp
, bool silent
)
8713 tg3_disable_ints(tp
);
8715 if (pci_channel_offline(tp
->pdev
)) {
8716 tp
->rx_mode
&= ~(RX_MODE_ENABLE
| TX_MODE_ENABLE
);
8717 tp
->mac_mode
&= ~MAC_MODE_TDE_ENABLE
;
8722 tp
->rx_mode
&= ~RX_MODE_ENABLE
;
8723 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
8726 err
= tg3_stop_block(tp
, RCVBDI_MODE
, RCVBDI_MODE_ENABLE
, silent
);
8727 err
|= tg3_stop_block(tp
, RCVLPC_MODE
, RCVLPC_MODE_ENABLE
, silent
);
8728 err
|= tg3_stop_block(tp
, RCVLSC_MODE
, RCVLSC_MODE_ENABLE
, silent
);
8729 err
|= tg3_stop_block(tp
, RCVDBDI_MODE
, RCVDBDI_MODE_ENABLE
, silent
);
8730 err
|= tg3_stop_block(tp
, RCVDCC_MODE
, RCVDCC_MODE_ENABLE
, silent
);
8731 err
|= tg3_stop_block(tp
, RCVCC_MODE
, RCVCC_MODE_ENABLE
, silent
);
8733 err
|= tg3_stop_block(tp
, SNDBDS_MODE
, SNDBDS_MODE_ENABLE
, silent
);
8734 err
|= tg3_stop_block(tp
, SNDBDI_MODE
, SNDBDI_MODE_ENABLE
, silent
);
8735 err
|= tg3_stop_block(tp
, SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
, silent
);
8736 err
|= tg3_stop_block(tp
, RDMAC_MODE
, RDMAC_MODE_ENABLE
, silent
);
8737 err
|= tg3_stop_block(tp
, SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
, silent
);
8738 err
|= tg3_stop_block(tp
, DMAC_MODE
, DMAC_MODE_ENABLE
, silent
);
8739 err
|= tg3_stop_block(tp
, SNDBDC_MODE
, SNDBDC_MODE_ENABLE
, silent
);
8741 tp
->mac_mode
&= ~MAC_MODE_TDE_ENABLE
;
8742 tw32_f(MAC_MODE
, tp
->mac_mode
);
8745 tp
->tx_mode
&= ~TX_MODE_ENABLE
;
8746 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
8748 for (i
= 0; i
< MAX_WAIT_CNT
; i
++) {
8750 if (!(tr32(MAC_TX_MODE
) & TX_MODE_ENABLE
))
8753 if (i
>= MAX_WAIT_CNT
) {
8754 dev_err(&tp
->pdev
->dev
,
8755 "%s timed out, TX_MODE_ENABLE will not clear "
8756 "MAC_TX_MODE=%08x\n", __func__
, tr32(MAC_TX_MODE
));
8760 err
|= tg3_stop_block(tp
, HOSTCC_MODE
, HOSTCC_MODE_ENABLE
, silent
);
8761 err
|= tg3_stop_block(tp
, WDMAC_MODE
, WDMAC_MODE_ENABLE
, silent
);
8762 err
|= tg3_stop_block(tp
, MBFREE_MODE
, MBFREE_MODE_ENABLE
, silent
);
8764 tw32(FTQ_RESET
, 0xffffffff);
8765 tw32(FTQ_RESET
, 0x00000000);
8767 err
|= tg3_stop_block(tp
, BUFMGR_MODE
, BUFMGR_MODE_ENABLE
, silent
);
8768 err
|= tg3_stop_block(tp
, MEMARB_MODE
, MEMARB_MODE_ENABLE
, silent
);
8771 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
8772 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
8773 if (tnapi
->hw_status
)
8774 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
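/* Only the PCI command word is cached here; per the comments in
 * tg3_chip_reset(), the GRC core-clock reset can clear the memory-enable
 * bit (and the MSI-enable bit on some chips), and tg3_restore_pci_state()
 * rewrites the rest of the relevant config space from driver state.
 */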
8786 /* Restore PCI state after chip reset */
8787 static void tg3_restore_pci_state(struct tg3
*tp
)
8791 /* Re-enable indirect register accesses. */
8792 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
8793 tp
->misc_host_ctrl
);
8795 /* Set MAX PCI retry to zero. */
8796 val
= (PCISTATE_ROM_ENABLE
| PCISTATE_ROM_RETRY_ENABLE
);
8797 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5704_A0
&&
8798 tg3_flag(tp
, PCIX_MODE
))
8799 val
|= PCISTATE_RETRY_SAME_DMA
;
8800 /* Allow reads and writes to the APE register and memory space. */
8801 if (tg3_flag(tp
, ENABLE_APE
))
8802 val
|= PCISTATE_ALLOW_APE_CTLSPC_WR
|
8803 PCISTATE_ALLOW_APE_SHMEM_WR
|
8804 PCISTATE_ALLOW_APE_PSPACE_WR
;
8805 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, val
);
8807 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, tp
->pci_cmd
);
8809 if (!tg3_flag(tp
, PCI_EXPRESS
)) {
8810 pci_write_config_byte(tp
->pdev
, PCI_CACHE_LINE_SIZE
,
8811 tp
->pci_cacheline_sz
);
8812 pci_write_config_byte(tp
->pdev
, PCI_LATENCY_TIMER
,
8816 /* Make sure PCI-X relaxed ordering bit is clear. */
8817 if (tg3_flag(tp
, PCIX_MODE
)) {
8820 pci_read_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
8822 pcix_cmd
&= ~PCI_X_CMD_ERO
;
8823 pci_write_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
8827 if (tg3_flag(tp
, 5780_CLASS
)) {
8829 /* Chip reset on 5780 will reset MSI enable bit,
8830 * so need to restore it.
8832 if (tg3_flag(tp
, USING_MSI
)) {
8835 pci_read_config_word(tp
->pdev
,
8836 tp
->msi_cap
+ PCI_MSI_FLAGS
,
8838 pci_write_config_word(tp
->pdev
,
8839 tp
->msi_cap
+ PCI_MSI_FLAGS
,
8840 ctrl
| PCI_MSI_FLAGS_ENABLE
);
8841 val
= tr32(MSGINT_MODE
);
8842 tw32(MSGINT_MODE
, val
| MSGINT_MODE_ENABLE
);
8847 /* tp->lock is held. */
8848 static int tg3_chip_reset(struct tg3
*tp
)
8851 void (*write_op
)(struct tg3
*, u32
, u32
);
8856 tg3_ape_lock(tp
, TG3_APE_LOCK_GRC
);
8858 /* No matching tg3_nvram_unlock() after this because
8859 * chip reset below will undo the nvram lock.
8861 tp
->nvram_lock_cnt
= 0;
8863 /* GRC_MISC_CFG core clock reset will clear the memory
8864 * enable bit in PCI register 4 and the MSI enable bit
8865 * on some chips, so we save relevant registers here.
8867 tg3_save_pci_state(tp
);
8869 if (tg3_asic_rev(tp
) == ASIC_REV_5752
||
8870 tg3_flag(tp
, 5755_PLUS
))
8871 tw32(GRC_FASTBOOT_PC
, 0);
8874 * We must avoid the readl() that normally takes place.
8875 * It locks machines, causes machine checks, and other
8876 * fun things. So, temporarily disable the 5701
8877 * hardware workaround, while we do the reset.
8879 write_op
= tp
->write32
;
8880 if (write_op
== tg3_write_flush_reg32
)
8881 tp
->write32
= tg3_write32
;
8883 /* Prevent the irq handler from reading or writing PCI registers
8884 * during chip reset when the memory enable bit in the PCI command
8885 * register may be cleared. The chip does not generate interrupt
8886 * at this time, but the irq handler may still be called due to irq
8887 * sharing or irqpoll.
8889 tg3_flag_set(tp
, CHIP_RESETTING
);
8890 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
8891 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
8892 if (tnapi
->hw_status
) {
8893 tnapi
->hw_status
->status
= 0;
8894 tnapi
->hw_status
->status_tag
= 0;
8896 tnapi
->last_tag
= 0;
8897 tnapi
->last_irq_tag
= 0;
8901 for (i
= 0; i
< tp
->irq_cnt
; i
++)
8902 synchronize_irq(tp
->napi
[i
].irq_vec
);
8904 if (tg3_asic_rev(tp
) == ASIC_REV_57780
) {
8905 val
= tr32(TG3_PCIE_LNKCTL
) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN
;
8906 tw32(TG3_PCIE_LNKCTL
, val
| TG3_PCIE_LNKCTL_L1_PLL_PD_DIS
);
8910 val
= GRC_MISC_CFG_CORECLK_RESET
;
8912 if (tg3_flag(tp
, PCI_EXPRESS
)) {
8913 /* Force PCIe 1.0a mode */
8914 if (tg3_asic_rev(tp
) != ASIC_REV_5785
&&
8915 !tg3_flag(tp
, 57765_PLUS
) &&
8916 tr32(TG3_PCIE_PHY_TSTCTL
) ==
8917 (TG3_PCIE_PHY_TSTCTL_PCIE10
| TG3_PCIE_PHY_TSTCTL_PSCRAM
))
8918 tw32(TG3_PCIE_PHY_TSTCTL
, TG3_PCIE_PHY_TSTCTL_PSCRAM
);
8920 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5750_A0
) {
8921 tw32(GRC_MISC_CFG
, (1 << 29));
8926 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
8927 tw32(VCPU_STATUS
, tr32(VCPU_STATUS
) | VCPU_STATUS_DRV_RESET
);
8928 tw32(GRC_VCPU_EXT_CTRL
,
8929 tr32(GRC_VCPU_EXT_CTRL
) & ~GRC_VCPU_EXT_CTRL_HALT_CPU
);
8932 /* Manage gphy power for all CPMU absent PCIe devices. */
8933 if (tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, CPMU_PRESENT
))
8934 val
|= GRC_MISC_CFG_KEEP_GPHY_POWER
;
8936 tw32(GRC_MISC_CFG
, val
);
8938 /* restore 5701 hardware bug workaround write method */
8939 tp
->write32
= write_op
;
8941 /* Unfortunately, we have to delay before the PCI read back.
8942 * Some 575X chips even will not respond to a PCI cfg access
8943 * when the reset command is given to the chip.
8945 * How do these hardware designers expect things to work
8946 * properly if the PCI write is posted for a long period
8947 * of time? It is always necessary to have some method by
8948 * which a register read back can occur to push the write
8949 * out which does the reset.
8951 * For most tg3 variants the trick below was working.
8956 /* Flush PCI posted writes. The normal MMIO registers
8957 * are inaccessible at this time so this is the only
8958 * way to make this reliably (actually, this is no longer
8959 * the case, see above). I tried to use indirect
8960 * register read/write but this upset some 5701 variants.
8962 pci_read_config_dword(tp
->pdev
, PCI_COMMAND
, &val
);
8966 if (tg3_flag(tp
, PCI_EXPRESS
) && pci_is_pcie(tp
->pdev
)) {
8969 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5750_A0
) {
8973 /* Wait for link training to complete. */
8974 for (j
= 0; j
< 5000; j
++)
8977 pci_read_config_dword(tp
->pdev
, 0xc4, &cfg_val
);
8978 pci_write_config_dword(tp
->pdev
, 0xc4,
8979 cfg_val
| (1 << 15));
8982 /* Clear the "no snoop" and "relaxed ordering" bits. */
8983 val16
= PCI_EXP_DEVCTL_RELAX_EN
| PCI_EXP_DEVCTL_NOSNOOP_EN
;
8985 * Older PCIe devices only support the 128 byte
8986 * MPS setting. Enforce the restriction.
8988 if (!tg3_flag(tp
, CPMU_PRESENT
))
8989 val16
|= PCI_EXP_DEVCTL_PAYLOAD
;
8990 pcie_capability_clear_word(tp
->pdev
, PCI_EXP_DEVCTL
, val16
);
8992 /* Clear error status */
8993 pcie_capability_write_word(tp
->pdev
, PCI_EXP_DEVSTA
,
8994 PCI_EXP_DEVSTA_CED
|
8995 PCI_EXP_DEVSTA_NFED
|
8996 PCI_EXP_DEVSTA_FED
|
8997 PCI_EXP_DEVSTA_URD
);
9000 tg3_restore_pci_state(tp
);
9002 tg3_flag_clear(tp
, CHIP_RESETTING
);
9003 tg3_flag_clear(tp
, ERROR_PROCESSED
);
9006 if (tg3_flag(tp
, 5780_CLASS
))
9007 val
= tr32(MEMARB_MODE
);
9008 tw32(MEMARB_MODE
, val
| MEMARB_MODE_ENABLE
);
9010 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5750_A3
) {
9012 tw32(0x5000, 0x400);
9015 if (tg3_flag(tp
, IS_SSB_CORE
)) {
9017 * BCM4785: In order to avoid repercussions from using
9018 * potentially defective internal ROM, stop the Rx RISC CPU,
9019 * which is not required.
9022 tg3_halt_cpu(tp
, RX_CPU_BASE
);
9025 err
= tg3_poll_fw(tp
);
9029 tw32(GRC_MODE
, tp
->grc_mode
);
9031 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5705_A0
) {
9034 tw32(0xc4, val
| (1 << 15));
9037 if ((tp
->nic_sram_data_cfg
& NIC_SRAM_DATA_CFG_MINI_PCI
) != 0 &&
9038 tg3_asic_rev(tp
) == ASIC_REV_5705
) {
9039 tp
->pci_clock_ctrl
|= CLOCK_CTRL_CLKRUN_OENABLE
;
9040 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5705_A0
)
9041 tp
->pci_clock_ctrl
|= CLOCK_CTRL_FORCE_CLKRUN
;
9042 tw32(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
9045 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
9046 tp
->mac_mode
= MAC_MODE_PORT_MODE_TBI
;
9048 } else if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) {
9049 tp
->mac_mode
= MAC_MODE_PORT_MODE_GMII
;
9054 tw32_f(MAC_MODE
, val
);
9057 tg3_ape_unlock(tp
, TG3_APE_LOCK_GRC
);
9061 if (tg3_flag(tp
, PCI_EXPRESS
) &&
9062 tg3_chip_rev_id(tp
) != CHIPREV_ID_5750_A0
&&
9063 tg3_asic_rev(tp
) != ASIC_REV_5785
&&
9064 !tg3_flag(tp
, 57765_PLUS
)) {
9067 tw32(0x7c00, val
| (1 << 25));
9070 if (tg3_asic_rev(tp
) == ASIC_REV_5720
) {
9071 val
= tr32(TG3_CPMU_CLCK_ORIDE
);
9072 tw32(TG3_CPMU_CLCK_ORIDE
, val
& ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN
);
9075 /* Reprobe ASF enable state. */
9076 tg3_flag_clear(tp
, ENABLE_ASF
);
9077 tp
->phy_flags
&= ~(TG3_PHYFLG_1G_ON_VAUX_OK
|
9078 TG3_PHYFLG_KEEP_LINK_ON_PWRDN
);
9080 tg3_flag_clear(tp
, ASF_NEW_HANDSHAKE
);
9081 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
9082 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
9085 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
9086 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
9087 tg3_flag_set(tp
, ENABLE_ASF
);
9088 tp
->last_event_jiffies
= jiffies
;
9089 if (tg3_flag(tp
, 5750_PLUS
))
9090 tg3_flag_set(tp
, ASF_NEW_HANDSHAKE
);
9092 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_3
, &nic_cfg
);
9093 if (nic_cfg
& NIC_SRAM_1G_ON_VAUX_OK
)
9094 tp
->phy_flags
|= TG3_PHYFLG_1G_ON_VAUX_OK
;
9095 if (nic_cfg
& NIC_SRAM_LNK_FLAP_AVOID
)
9096 tp
->phy_flags
|= TG3_PHYFLG_KEEP_LINK_ON_PWRDN
;
static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);

/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, false);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	return err;
}
*dev
, void *p
)
9140 struct tg3
*tp
= netdev_priv(dev
);
9141 struct sockaddr
*addr
= p
;
9143 bool skip_mac_1
= false;
9145 if (!is_valid_ether_addr(addr
->sa_data
))
9146 return -EADDRNOTAVAIL
;
9148 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
9150 if (!netif_running(dev
))
9153 if (tg3_flag(tp
, ENABLE_ASF
)) {
9154 u32 addr0_high
, addr0_low
, addr1_high
, addr1_low
;
9156 addr0_high
= tr32(MAC_ADDR_0_HIGH
);
9157 addr0_low
= tr32(MAC_ADDR_0_LOW
);
9158 addr1_high
= tr32(MAC_ADDR_1_HIGH
);
9159 addr1_low
= tr32(MAC_ADDR_1_LOW
);
9161 /* Skip MAC addr 1 if ASF is using it. */
9162 if ((addr0_high
!= addr1_high
|| addr0_low
!= addr1_low
) &&
9163 !(addr1_high
== 0 && addr1_low
== 0))
9166 spin_lock_bh(&tp
->lock
);
9167 __tg3_set_mac_addr(tp
, skip_mac_1
);
9168 spin_unlock_bh(&tp
->lock
);
9173 /* tp->lock is held. */
9174 static void tg3_set_bdinfo(struct tg3
*tp
, u32 bdinfo_addr
,
9175 dma_addr_t mapping
, u32 maxlen_flags
,
9179 (bdinfo_addr
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
),
9180 ((u64
) mapping
>> 32));
9182 (bdinfo_addr
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
),
9183 ((u64
) mapping
& 0xffffffff));
9185 (bdinfo_addr
+ TG3_BDINFO_MAXLEN_FLAGS
),
9188 if (!tg3_flag(tp
, 5705_PLUS
))
9190 (bdinfo_addr
+ TG3_BDINFO_NIC_ADDR
),
9195 static void tg3_coal_tx_init(struct tg3
*tp
, struct ethtool_coalesce
*ec
)
9199 if (!tg3_flag(tp
, ENABLE_TSS
)) {
9200 tw32(HOSTCC_TXCOL_TICKS
, ec
->tx_coalesce_usecs
);
9201 tw32(HOSTCC_TXMAX_FRAMES
, ec
->tx_max_coalesced_frames
);
9202 tw32(HOSTCC_TXCOAL_MAXF_INT
, ec
->tx_max_coalesced_frames_irq
);
9204 tw32(HOSTCC_TXCOL_TICKS
, 0);
9205 tw32(HOSTCC_TXMAX_FRAMES
, 0);
9206 tw32(HOSTCC_TXCOAL_MAXF_INT
, 0);
9208 for (; i
< tp
->txq_cnt
; i
++) {
9211 reg
= HOSTCC_TXCOL_TICKS_VEC1
+ i
* 0x18;
9212 tw32(reg
, ec
->tx_coalesce_usecs
);
9213 reg
= HOSTCC_TXMAX_FRAMES_VEC1
+ i
* 0x18;
9214 tw32(reg
, ec
->tx_max_coalesced_frames
);
9215 reg
= HOSTCC_TXCOAL_MAXF_INT_VEC1
+ i
* 0x18;
9216 tw32(reg
, ec
->tx_max_coalesced_frames_irq
);
9220 for (; i
< tp
->irq_max
- 1; i
++) {
9221 tw32(HOSTCC_TXCOL_TICKS_VEC1
+ i
* 0x18, 0);
9222 tw32(HOSTCC_TXMAX_FRAMES_VEC1
+ i
* 0x18, 0);
9223 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1
+ i
* 0x18, 0);
9227 static void tg3_coal_rx_init(struct tg3
*tp
, struct ethtool_coalesce
*ec
)
9230 u32 limit
= tp
->rxq_cnt
;
9232 if (!tg3_flag(tp
, ENABLE_RSS
)) {
9233 tw32(HOSTCC_RXCOL_TICKS
, ec
->rx_coalesce_usecs
);
9234 tw32(HOSTCC_RXMAX_FRAMES
, ec
->rx_max_coalesced_frames
);
9235 tw32(HOSTCC_RXCOAL_MAXF_INT
, ec
->rx_max_coalesced_frames_irq
);
9238 tw32(HOSTCC_RXCOL_TICKS
, 0);
9239 tw32(HOSTCC_RXMAX_FRAMES
, 0);
9240 tw32(HOSTCC_RXCOAL_MAXF_INT
, 0);
9243 for (; i
< limit
; i
++) {
9246 reg
= HOSTCC_RXCOL_TICKS_VEC1
+ i
* 0x18;
9247 tw32(reg
, ec
->rx_coalesce_usecs
);
9248 reg
= HOSTCC_RXMAX_FRAMES_VEC1
+ i
* 0x18;
9249 tw32(reg
, ec
->rx_max_coalesced_frames
);
9250 reg
= HOSTCC_RXCOAL_MAXF_INT_VEC1
+ i
* 0x18;
9251 tw32(reg
, ec
->rx_max_coalesced_frames_irq
);
9254 for (; i
< tp
->irq_max
- 1; i
++) {
9255 tw32(HOSTCC_RXCOL_TICKS_VEC1
+ i
* 0x18, 0);
9256 tw32(HOSTCC_RXMAX_FRAMES_VEC1
+ i
* 0x18, 0);
9257 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1
+ i
* 0x18, 0);
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}
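/* Note: the per-vector HOSTCC_*_VEC1 coalescing registers are laid out as one
 * 0x18-byte block per additional MSI-X vector, which is why the loops in
 * tg3_coal_tx_init()/tg3_coal_rx_init() step the register offset by i * 0x18.
 */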
/* tp->lock is held. */
static void tg3_tx_rcbs_disable(struct tg3 *tp)
{
	u32 txrcb, limit;

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}
/* tp->lock is held. */
static void tg3_tx_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 txrcb = NIC_SRAM_SEND_RCB;

	if (tg3_flag(tp, ENABLE_TSS))
		i++, txrcb += TG3_BDINFO_SIZE;

	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->tx_ring)
			continue;

		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
	}
}
/* tp->lock is held. */
static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
{
	u32 rxrcb, limit;

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}
/* tp->lock is held. */
static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;

	if (tg3_flag(tp, ENABLE_RSS))
		i++, rxrcb += TG3_BDINFO_SIZE;

	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->rx_rcb)
			continue;

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
	}
}
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk;
	struct tg3_napi *tnapi = &tp->napi[0];

	tg3_tx_rcbs_disable(tp);

	tg3_rx_ret_rcbs_disable(tp);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	tg3_tx_rcbs_init(tp);
	tg3_rx_ret_rcbs_init(tp);
}
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
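/* The replenish thresholds programmed above are the smaller of half the
 * on-chip BD cache size and one eighth of the host's pending RX descriptor
 * count, so the chip requests fresh buffers before either side runs dry.
 */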
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}

static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
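/* The MAC multicast filter is a 128-bit hash split across MAC_HASH_REG_0..3.
 * tg3_set_multi() opens or closes every bit at once, while __tg3_set_rx_mode()
 * below sets individual bits derived from the CRC of each multicast address.
 */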
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
	}
}
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}

static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}

static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}
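/* Each 32-bit MAC_RSS_INDIR_TBL register packs eight indirection entries, so
 * the writer above gathers rss_ind_tbl[] in groups of eight before issuing a
 * single register write and advancing to the next register.
 */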
static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
	else
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
}
9618 /* tp->lock is held. */
9619 static int tg3_reset_hw(struct tg3
*tp
, bool reset_phy
)
9621 u32 val
, rdmac_mode
;
9623 struct tg3_rx_prodring_set
*tpr
= &tp
->napi
[0].prodring
;
9625 tg3_disable_ints(tp
);
9629 tg3_write_sig_pre_reset(tp
, RESET_KIND_INIT
);
9631 if (tg3_flag(tp
, INIT_COMPLETE
))
9632 tg3_abort_hw(tp
, 1);
9634 if ((tp
->phy_flags
& TG3_PHYFLG_KEEP_LINK_ON_PWRDN
) &&
9635 !(tp
->phy_flags
& TG3_PHYFLG_USER_CONFIGURED
)) {
9636 tg3_phy_pull_config(tp
);
9637 tg3_eee_pull_config(tp
, NULL
);
9638 tp
->phy_flags
|= TG3_PHYFLG_USER_CONFIGURED
;
9641 /* Enable MAC control of LPI */
9642 if (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
)
9648 err
= tg3_chip_reset(tp
);
9652 tg3_write_sig_legacy(tp
, RESET_KIND_INIT
);
9654 if (tg3_chip_rev(tp
) == CHIPREV_5784_AX
) {
9655 val
= tr32(TG3_CPMU_CTRL
);
9656 val
&= ~(CPMU_CTRL_LINK_AWARE_MODE
| CPMU_CTRL_LINK_IDLE_MODE
);
9657 tw32(TG3_CPMU_CTRL
, val
);
9659 val
= tr32(TG3_CPMU_LSPD_10MB_CLK
);
9660 val
&= ~CPMU_LSPD_10MB_MACCLK_MASK
;
9661 val
|= CPMU_LSPD_10MB_MACCLK_6_25
;
9662 tw32(TG3_CPMU_LSPD_10MB_CLK
, val
);
9664 val
= tr32(TG3_CPMU_LNK_AWARE_PWRMD
);
9665 val
&= ~CPMU_LNK_AWARE_MACCLK_MASK
;
9666 val
|= CPMU_LNK_AWARE_MACCLK_6_25
;
9667 tw32(TG3_CPMU_LNK_AWARE_PWRMD
, val
);
9669 val
= tr32(TG3_CPMU_HST_ACC
);
9670 val
&= ~CPMU_HST_ACC_MACCLK_MASK
;
9671 val
|= CPMU_HST_ACC_MACCLK_6_25
;
9672 tw32(TG3_CPMU_HST_ACC
, val
);
9675 if (tg3_asic_rev(tp
) == ASIC_REV_57780
) {
9676 val
= tr32(PCIE_PWR_MGMT_THRESH
) & ~PCIE_PWR_MGMT_L1_THRESH_MSK
;
9677 val
|= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN
|
9678 PCIE_PWR_MGMT_L1_THRESH_4MS
;
9679 tw32(PCIE_PWR_MGMT_THRESH
, val
);
9681 val
= tr32(TG3_PCIE_EIDLE_DELAY
) & ~TG3_PCIE_EIDLE_DELAY_MASK
;
9682 tw32(TG3_PCIE_EIDLE_DELAY
, val
| TG3_PCIE_EIDLE_DELAY_13_CLKS
);
9684 tw32(TG3_CORR_ERR_STAT
, TG3_CORR_ERR_STAT_CLEAR
);
9686 val
= tr32(TG3_PCIE_LNKCTL
) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN
;
9687 tw32(TG3_PCIE_LNKCTL
, val
| TG3_PCIE_LNKCTL_L1_PLL_PD_DIS
);
9690 if (tg3_flag(tp
, L1PLLPD_EN
)) {
9691 u32 grc_mode
= tr32(GRC_MODE
);
9693 /* Access the lower 1K of PL PCIE block registers. */
9694 val
= grc_mode
& ~GRC_MODE_PCIE_PORT_MASK
;
9695 tw32(GRC_MODE
, val
| GRC_MODE_PCIE_PL_SEL
);
9697 val
= tr32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL1
);
9698 tw32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL1
,
9699 val
| TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN
);
9701 tw32(GRC_MODE
, grc_mode
);
9704 if (tg3_flag(tp
, 57765_CLASS
)) {
9705 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_57765_A0
) {
9706 u32 grc_mode
= tr32(GRC_MODE
);
9708 /* Access the lower 1K of PL PCIE block registers. */
9709 val
= grc_mode
& ~GRC_MODE_PCIE_PORT_MASK
;
9710 tw32(GRC_MODE
, val
| GRC_MODE_PCIE_PL_SEL
);
9712 val
= tr32(TG3_PCIE_TLDLPL_PORT
+
9713 TG3_PCIE_PL_LO_PHYCTL5
);
9714 tw32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL5
,
9715 val
| TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ
);
9717 tw32(GRC_MODE
, grc_mode
);
9720 if (tg3_chip_rev(tp
) != CHIPREV_57765_AX
) {
9723 /* Fix transmit hangs */
9724 val
= tr32(TG3_CPMU_PADRNG_CTL
);
9725 val
|= TG3_CPMU_PADRNG_CTL_RDIV2
;
9726 tw32(TG3_CPMU_PADRNG_CTL
, val
);
9728 grc_mode
= tr32(GRC_MODE
);
9730 /* Access the lower 1K of DL PCIE block registers. */
9731 val
= grc_mode
& ~GRC_MODE_PCIE_PORT_MASK
;
9732 tw32(GRC_MODE
, val
| GRC_MODE_PCIE_DL_SEL
);
9734 val
= tr32(TG3_PCIE_TLDLPL_PORT
+
9735 TG3_PCIE_DL_LO_FTSMAX
);
9736 val
&= ~TG3_PCIE_DL_LO_FTSMAX_MSK
;
9737 tw32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_DL_LO_FTSMAX
,
9738 val
| TG3_PCIE_DL_LO_FTSMAX_VAL
);
9740 tw32(GRC_MODE
, grc_mode
);
9743 val
= tr32(TG3_CPMU_LSPD_10MB_CLK
);
9744 val
&= ~CPMU_LSPD_10MB_MACCLK_MASK
;
9745 val
|= CPMU_LSPD_10MB_MACCLK_6_25
;
9746 tw32(TG3_CPMU_LSPD_10MB_CLK
, val
);
9749 /* This works around an issue with Athlon chipsets on
9750 * B3 tigon3 silicon. This bit has no effect on any
9751 * other revision. But do not set this on PCI Express
9752 * chips and don't even touch the clocks if the CPMU is present.
9754 if (!tg3_flag(tp
, CPMU_PRESENT
)) {
9755 if (!tg3_flag(tp
, PCI_EXPRESS
))
9756 tp
->pci_clock_ctrl
|= CLOCK_CTRL_DELAY_PCI_GRANT
;
9757 tw32_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
9760 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5704_A0
&&
9761 tg3_flag(tp
, PCIX_MODE
)) {
9762 val
= tr32(TG3PCI_PCISTATE
);
9763 val
|= PCISTATE_RETRY_SAME_DMA
;
9764 tw32(TG3PCI_PCISTATE
, val
);
9767 if (tg3_flag(tp
, ENABLE_APE
)) {
9768 /* Allow reads and writes to the
9769 * APE register and memory space.
9771 val
= tr32(TG3PCI_PCISTATE
);
9772 val
|= PCISTATE_ALLOW_APE_CTLSPC_WR
|
9773 PCISTATE_ALLOW_APE_SHMEM_WR
|
9774 PCISTATE_ALLOW_APE_PSPACE_WR
;
9775 tw32(TG3PCI_PCISTATE
, val
);
9778 if (tg3_chip_rev(tp
) == CHIPREV_5704_BX
) {
9779 /* Enable some hw fixes. */
9780 val
= tr32(TG3PCI_MSI_DATA
);
9781 val
|= (1 << 26) | (1 << 28) | (1 << 29);
9782 tw32(TG3PCI_MSI_DATA
, val
);
9785 /* Descriptor ring init may make accesses to the
9786 * NIC SRAM area to setup the TX descriptors, so we
9787 * can only do this after the hardware has been
9788 * successfully reset.
9790 err
= tg3_init_rings(tp
);
9794 if (tg3_flag(tp
, 57765_PLUS
)) {
9795 val
= tr32(TG3PCI_DMA_RW_CTRL
) &
9796 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT
;
9797 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_57765_A0
)
9798 val
&= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK
;
9799 if (!tg3_flag(tp
, 57765_CLASS
) &&
9800 tg3_asic_rev(tp
) != ASIC_REV_5717
&&
9801 tg3_asic_rev(tp
) != ASIC_REV_5762
)
9802 val
|= DMA_RWCTRL_TAGGED_STAT_WA
;
9803 tw32(TG3PCI_DMA_RW_CTRL
, val
| tp
->dma_rwctrl
);
9804 } else if (tg3_asic_rev(tp
) != ASIC_REV_5784
&&
9805 tg3_asic_rev(tp
) != ASIC_REV_5761
) {
9806 /* This value is determined during the probe time DMA
9807 * engine test, tg3_test_dma.
9809 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
9812 tp
->grc_mode
&= ~(GRC_MODE_HOST_SENDBDS
|
9813 GRC_MODE_4X_NIC_SEND_RINGS
|
9814 GRC_MODE_NO_TX_PHDR_CSUM
|
9815 GRC_MODE_NO_RX_PHDR_CSUM
);
9816 tp
->grc_mode
|= GRC_MODE_HOST_SENDBDS
;
9818 /* Pseudo-header checksum is done by hardware logic and not
9819 * the offload processers, so make the chip do the pseudo-
9820 * header checksums on receive. For transmit it is more
9821 * convenient to do the pseudo-header checksum in software
9822 * as Linux does that on transmit for us in all cases.
9824 tp
->grc_mode
|= GRC_MODE_NO_TX_PHDR_CSUM
;
9826 val
= GRC_MODE_IRQ_ON_MAC_ATTN
| GRC_MODE_HOST_STACKUP
;
9828 tw32(TG3_RX_PTP_CTL
,
9829 tp
->rxptpctl
| TG3_RX_PTP_CTL_HWTS_INTERLOCK
);
9831 if (tg3_flag(tp
, PTP_CAPABLE
))
9832 val
|= GRC_MODE_TIME_SYNC_ENABLE
;
9834 tw32(GRC_MODE
, tp
->grc_mode
| val
);
9836 /* Setup the timer prescalar register. Clock is always 66Mhz. */
9837 val
= tr32(GRC_MISC_CFG
);
9839 val
|= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT
);
9840 tw32(GRC_MISC_CFG
, val
);
9842 /* Initialize MBUF/DESC pool. */
9843 if (tg3_flag(tp
, 5750_PLUS
)) {
9845 } else if (tg3_asic_rev(tp
) != ASIC_REV_5705
) {
9846 tw32(BUFMGR_MB_POOL_ADDR
, NIC_SRAM_MBUF_POOL_BASE
);
9847 if (tg3_asic_rev(tp
) == ASIC_REV_5704
)
9848 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE64
);
9850 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE96
);
9851 tw32(BUFMGR_DMA_DESC_POOL_ADDR
, NIC_SRAM_DMA_DESC_POOL_BASE
);
9852 tw32(BUFMGR_DMA_DESC_POOL_SIZE
, NIC_SRAM_DMA_DESC_POOL_SIZE
);
9853 } else if (tg3_flag(tp
, TSO_CAPABLE
)) {
9856 fw_len
= tp
->fw_len
;
9857 fw_len
= (fw_len
+ (0x80 - 1)) & ~(0x80 - 1);
9858 tw32(BUFMGR_MB_POOL_ADDR
,
9859 NIC_SRAM_MBUF_POOL_BASE5705
+ fw_len
);
9860 tw32(BUFMGR_MB_POOL_SIZE
,
9861 NIC_SRAM_MBUF_POOL_SIZE5705
- fw_len
- 0xa00);
9864 if (tp
->dev
->mtu
<= ETH_DATA_LEN
) {
9865 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
9866 tp
->bufmgr_config
.mbuf_read_dma_low_water
);
9867 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
9868 tp
->bufmgr_config
.mbuf_mac_rx_low_water
);
9869 tw32(BUFMGR_MB_HIGH_WATER
,
9870 tp
->bufmgr_config
.mbuf_high_water
);
9872 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
9873 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
);
9874 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
9875 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
);
9876 tw32(BUFMGR_MB_HIGH_WATER
,
9877 tp
->bufmgr_config
.mbuf_high_water_jumbo
);
9879 tw32(BUFMGR_DMA_LOW_WATER
,
9880 tp
->bufmgr_config
.dma_low_water
);
9881 tw32(BUFMGR_DMA_HIGH_WATER
,
9882 tp
->bufmgr_config
.dma_high_water
);
9884 val
= BUFMGR_MODE_ENABLE
| BUFMGR_MODE_ATTN_ENABLE
;
9885 if (tg3_asic_rev(tp
) == ASIC_REV_5719
)
9886 val
|= BUFMGR_MODE_NO_TX_UNDERRUN
;
9887 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
9888 tg3_chip_rev_id(tp
) == CHIPREV_ID_5719_A0
||
9889 tg3_chip_rev_id(tp
) == CHIPREV_ID_5720_A0
)
9890 val
|= BUFMGR_MODE_MBLOW_ATTN_ENAB
;
9891 tw32(BUFMGR_MODE
, val
);
9892 for (i
= 0; i
< 2000; i
++) {
9893 if (tr32(BUFMGR_MODE
) & BUFMGR_MODE_ENABLE
)
9898 netdev_err(tp
->dev
, "%s cannot enable BUFMGR\n", __func__
);
9902 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5906_A1
)
9903 tw32(ISO_PKT_TX
, (tr32(ISO_PKT_TX
) & ~0x3) | 0x2);
9905 tg3_setup_rxbd_thresholds(tp
);
9907 /* Initialize TG3_BDINFO's at:
9908 * RCVDBDI_STD_BD: standard eth size rx ring
9909 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9910 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9913 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9914 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9915 * ring attribute flags
9916 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9918 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9919 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9921 * The size of each ring is fixed in the firmware, but the location is
9924 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
9925 ((u64
) tpr
->rx_std_mapping
>> 32));
9926 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
9927 ((u64
) tpr
->rx_std_mapping
& 0xffffffff));
9928 if (!tg3_flag(tp
, 5717_PLUS
))
9929 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_NIC_ADDR
,
9930 NIC_SRAM_RX_BUFFER_DESC
);
9932 /* Disable the mini ring */
9933 if (!tg3_flag(tp
, 5705_PLUS
))
9934 tw32(RCVDBDI_MINI_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
9935 BDINFO_FLAGS_DISABLED
);
9937 /* Program the jumbo buffer descriptor ring control
9938 * blocks on those devices that have them.
9940 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5719_A0
||
9941 (tg3_flag(tp
, JUMBO_CAPABLE
) && !tg3_flag(tp
, 5780_CLASS
))) {
9943 if (tg3_flag(tp
, JUMBO_RING_ENABLE
)) {
9944 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
9945 ((u64
) tpr
->rx_jmb_mapping
>> 32));
9946 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
9947 ((u64
) tpr
->rx_jmb_mapping
& 0xffffffff));
9948 val
= TG3_RX_JMB_RING_SIZE(tp
) <<
9949 BDINFO_FLAGS_MAXLEN_SHIFT
;
9950 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
9951 val
| BDINFO_FLAGS_USE_EXT_RECV
);
9952 if (!tg3_flag(tp
, USE_JUMBO_BDFLAG
) ||
9953 tg3_flag(tp
, 57765_CLASS
) ||
9954 tg3_asic_rev(tp
) == ASIC_REV_5762
)
9955 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_NIC_ADDR
,
9956 NIC_SRAM_RX_JUMBO_BUFFER_DESC
);
9958 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
9959 BDINFO_FLAGS_DISABLED
);
9962 if (tg3_flag(tp
, 57765_PLUS
)) {
9963 val
= TG3_RX_STD_RING_SIZE(tp
);
9964 val
<<= BDINFO_FLAGS_MAXLEN_SHIFT
;
9965 val
|= (TG3_RX_STD_DMA_SZ
<< 2);
9967 val
= TG3_RX_STD_DMA_SZ
<< BDINFO_FLAGS_MAXLEN_SHIFT
;
9969 val
= TG3_RX_STD_MAX_SIZE_5700
<< BDINFO_FLAGS_MAXLEN_SHIFT
;
9971 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_MAXLEN_FLAGS
, val
);
9973 tpr
->rx_std_prod_idx
= tp
->rx_pending
;
9974 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
, tpr
->rx_std_prod_idx
);
9976 tpr
->rx_jmb_prod_idx
=
9977 tg3_flag(tp
, JUMBO_RING_ENABLE
) ? tp
->rx_jumbo_pending
: 0;
9978 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
, tpr
->rx_jmb_prod_idx
);
9980 tg3_rings_reset(tp
);
9982 /* Initialize MAC address and backoff seed. */
9983 __tg3_set_mac_addr(tp
, false);
9985 /* MTU + ethernet header + FCS + optional VLAN tag */
9986 tw32(MAC_RX_MTU_SIZE
,
9987 tp
->dev
->mtu
+ ETH_HLEN
+ ETH_FCS_LEN
+ VLAN_HLEN
);
9989 /* The slot time is changed by tg3_setup_phy if we
9990 * run at gigabit with half duplex.
9992 val
= (2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
9993 (6 << TX_LENGTHS_IPG_SHIFT
) |
9994 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
);
9996 if (tg3_asic_rev(tp
) == ASIC_REV_5720
||
9997 tg3_asic_rev(tp
) == ASIC_REV_5762
)
9998 val
|= tr32(MAC_TX_LENGTHS
) &
9999 (TX_LENGTHS_JMB_FRM_LEN_MSK
|
10000 TX_LENGTHS_CNT_DWN_VAL_MSK
);
10002 tw32(MAC_TX_LENGTHS
, val
);
10004 /* Receive rules. */
10005 tw32(MAC_RCV_RULE_CFG
, RCV_RULE_CFG_DEFAULT_CLASS
);
10006 tw32(RCVLPC_CONFIG
, 0x0181);
10008 /* Calculate RDMAC_MODE setting early, we need it to determine
10009 * the RCVLPC_STATE_ENABLE mask.
10011 rdmac_mode
= (RDMAC_MODE_ENABLE
| RDMAC_MODE_TGTABORT_ENAB
|
10012 RDMAC_MODE_MSTABORT_ENAB
| RDMAC_MODE_PARITYERR_ENAB
|
10013 RDMAC_MODE_ADDROFLOW_ENAB
| RDMAC_MODE_FIFOOFLOW_ENAB
|
10014 RDMAC_MODE_FIFOURUN_ENAB
| RDMAC_MODE_FIFOOREAD_ENAB
|
10015 RDMAC_MODE_LNGREAD_ENAB
);
10017 if (tg3_asic_rev(tp
) == ASIC_REV_5717
)
10018 rdmac_mode
|= RDMAC_MODE_MULT_DMA_RD_DIS
;
10020 if (tg3_asic_rev(tp
) == ASIC_REV_5784
||
10021 tg3_asic_rev(tp
) == ASIC_REV_5785
||
10022 tg3_asic_rev(tp
) == ASIC_REV_57780
)
10023 rdmac_mode
|= RDMAC_MODE_BD_SBD_CRPT_ENAB
|
10024 RDMAC_MODE_MBUF_RBD_CRPT_ENAB
|
10025 RDMAC_MODE_MBUF_SBD_CRPT_ENAB
;
10027 if (tg3_asic_rev(tp
) == ASIC_REV_5705
&&
10028 tg3_chip_rev_id(tp
) != CHIPREV_ID_5705_A0
) {
10029 if (tg3_flag(tp
, TSO_CAPABLE
) &&
10030 tg3_asic_rev(tp
) == ASIC_REV_5705
) {
10031 rdmac_mode
|= RDMAC_MODE_FIFO_SIZE_128
;
10032 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
10033 !tg3_flag(tp
, IS_5788
)) {
10034 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
10038 if (tg3_flag(tp
, PCI_EXPRESS
))
10039 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
10041 if (tg3_asic_rev(tp
) == ASIC_REV_57766
) {
10043 if (tp
->dev
->mtu
<= ETH_DATA_LEN
) {
10044 rdmac_mode
|= RDMAC_MODE_JMB_2K_MMRR
;
10045 tp
->dma_limit
= TG3_TX_BD_DMA_MAX_2K
;
10049 if (tg3_flag(tp
, HW_TSO_1
) ||
10050 tg3_flag(tp
, HW_TSO_2
) ||
10051 tg3_flag(tp
, HW_TSO_3
))
10052 rdmac_mode
|= RDMAC_MODE_IPV4_LSO_EN
;
10054 if (tg3_flag(tp
, 57765_PLUS
) ||
10055 tg3_asic_rev(tp
) == ASIC_REV_5785
||
10056 tg3_asic_rev(tp
) == ASIC_REV_57780
)
10057 rdmac_mode
|= RDMAC_MODE_IPV6_LSO_EN
;
10059 if (tg3_asic_rev(tp
) == ASIC_REV_5720
||
10060 tg3_asic_rev(tp
) == ASIC_REV_5762
)
10061 rdmac_mode
|= tr32(RDMAC_MODE
) & RDMAC_MODE_H2BNC_VLAN_DET
;
10063 if (tg3_asic_rev(tp
) == ASIC_REV_5761
||
10064 tg3_asic_rev(tp
) == ASIC_REV_5784
||
10065 tg3_asic_rev(tp
) == ASIC_REV_5785
||
10066 tg3_asic_rev(tp
) == ASIC_REV_57780
||
10067 tg3_flag(tp
, 57765_PLUS
)) {
10070 if (tg3_asic_rev(tp
) == ASIC_REV_5762
)
10071 tgtreg
= TG3_RDMA_RSRVCTRL_REG2
;
10073 tgtreg
= TG3_RDMA_RSRVCTRL_REG
;
10075 val
= tr32(tgtreg
);
10076 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5719_A0
||
10077 tg3_asic_rev(tp
) == ASIC_REV_5762
) {
10078 val
&= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK
|
10079 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK
|
10080 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK
);
10081 val
|= TG3_RDMA_RSRVCTRL_TXMRGN_320B
|
10082 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K
|
10083 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K
;
10085 tw32(tgtreg
, val
| TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX
);
10088 if (tg3_asic_rev(tp
) == ASIC_REV_5719
||
10089 tg3_asic_rev(tp
) == ASIC_REV_5720
||
10090 tg3_asic_rev(tp
) == ASIC_REV_5762
) {
10093 if (tg3_asic_rev(tp
) == ASIC_REV_5762
)
10094 tgtreg
= TG3_LSO_RD_DMA_CRPTEN_CTRL2
;
10096 tgtreg
= TG3_LSO_RD_DMA_CRPTEN_CTRL
;
10098 val
= tr32(tgtreg
);
10100 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K
|
10101 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K
);
10104 /* Receive/send statistics. */
10105 if (tg3_flag(tp
, 5750_PLUS
)) {
10106 val
= tr32(RCVLPC_STATS_ENABLE
);
10107 val
&= ~RCVLPC_STATSENAB_DACK_FIX
;
10108 tw32(RCVLPC_STATS_ENABLE
, val
);
10109 } else if ((rdmac_mode
& RDMAC_MODE_FIFO_SIZE_128
) &&
10110 tg3_flag(tp
, TSO_CAPABLE
)) {
10111 val
= tr32(RCVLPC_STATS_ENABLE
);
10112 val
&= ~RCVLPC_STATSENAB_LNGBRST_RFIX
;
10113 tw32(RCVLPC_STATS_ENABLE
, val
);
10115 tw32(RCVLPC_STATS_ENABLE
, 0xffffff);
10117 tw32(RCVLPC_STATSCTRL
, RCVLPC_STATSCTRL_ENABLE
);
10118 tw32(SNDDATAI_STATSENAB
, 0xffffff);
10119 tw32(SNDDATAI_STATSCTRL
,
10120 (SNDDATAI_SCTRL_ENABLE
|
10121 SNDDATAI_SCTRL_FASTUPD
));
10123 /* Setup host coalescing engine. */
10124 tw32(HOSTCC_MODE
, 0);
10125 for (i
= 0; i
< 2000; i
++) {
10126 if (!(tr32(HOSTCC_MODE
) & HOSTCC_MODE_ENABLE
))
10131 __tg3_set_coalesce(tp
, &tp
->coal
);
10133 if (!tg3_flag(tp
, 5705_PLUS
)) {
10134 /* Status/statistics block address. See tg3_timer,
10135 * the tg3_periodic_fetch_stats call there, and
10136 * tg3_get_stats to see how this works for 5705/5750 chips.
10138 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
10139 ((u64
) tp
->stats_mapping
>> 32));
10140 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
10141 ((u64
) tp
->stats_mapping
& 0xffffffff));
10142 tw32(HOSTCC_STATS_BLK_NIC_ADDR
, NIC_SRAM_STATS_BLK
);
10144 tw32(HOSTCC_STATUS_BLK_NIC_ADDR
, NIC_SRAM_STATUS_BLK
);
10146 /* Clear statistics and status block memory areas */
10147 for (i
= NIC_SRAM_STATS_BLK
;
10148 i
< NIC_SRAM_STATUS_BLK
+ TG3_HW_STATUS_SIZE
;
10149 i
+= sizeof(u32
)) {
10150 tg3_write_mem(tp
, i
, 0);
10155 tw32(HOSTCC_MODE
, HOSTCC_MODE_ENABLE
| tp
->coalesce_mode
);
10157 tw32(RCVCC_MODE
, RCVCC_MODE_ENABLE
| RCVCC_MODE_ATTN_ENABLE
);
10158 tw32(RCVLPC_MODE
, RCVLPC_MODE_ENABLE
);
10159 if (!tg3_flag(tp
, 5705_PLUS
))
10160 tw32(RCVLSC_MODE
, RCVLSC_MODE_ENABLE
| RCVLSC_MODE_ATTN_ENABLE
);
10162 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) {
10163 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
10164 /* reset to prevent losing 1st rx packet intermittently */
10165 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
10169 tp
->mac_mode
|= MAC_MODE_TXSTAT_ENABLE
| MAC_MODE_RXSTAT_ENABLE
|
10170 MAC_MODE_TDE_ENABLE
| MAC_MODE_RDE_ENABLE
|
10171 MAC_MODE_FHDE_ENABLE
;
10172 if (tg3_flag(tp
, ENABLE_APE
))
10173 tp
->mac_mode
|= MAC_MODE_APE_TX_EN
| MAC_MODE_APE_RX_EN
;
10174 if (!tg3_flag(tp
, 5705_PLUS
) &&
10175 !(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
10176 tg3_asic_rev(tp
) != ASIC_REV_5700
)
10177 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
10178 tw32_f(MAC_MODE
, tp
->mac_mode
| MAC_MODE_RXSTAT_CLEAR
| MAC_MODE_TXSTAT_CLEAR
);
10181 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10182 * If TG3_FLAG_IS_NIC is zero, we should read the
10183 * register to preserve the GPIO settings for LOMs. The GPIOs,
10184 * whether used as inputs or outputs, are set by boot code after
10187 if (!tg3_flag(tp
, IS_NIC
)) {
10190 gpio_mask
= GRC_LCLCTRL_GPIO_OE0
| GRC_LCLCTRL_GPIO_OE1
|
10191 GRC_LCLCTRL_GPIO_OE2
| GRC_LCLCTRL_GPIO_OUTPUT0
|
10192 GRC_LCLCTRL_GPIO_OUTPUT1
| GRC_LCLCTRL_GPIO_OUTPUT2
;
10194 if (tg3_asic_rev(tp
) == ASIC_REV_5752
)
10195 gpio_mask
|= GRC_LCLCTRL_GPIO_OE3
|
10196 GRC_LCLCTRL_GPIO_OUTPUT3
;
10198 if (tg3_asic_rev(tp
) == ASIC_REV_5755
)
10199 gpio_mask
|= GRC_LCLCTRL_GPIO_UART_SEL
;
10201 tp
->grc_local_ctrl
&= ~gpio_mask
;
10202 tp
->grc_local_ctrl
|= tr32(GRC_LOCAL_CTRL
) & gpio_mask
;
10204 /* GPIO1 must be driven high for eeprom write protect */
10205 if (tg3_flag(tp
, EEPROM_WRITE_PROT
))
10206 tp
->grc_local_ctrl
|= (GRC_LCLCTRL_GPIO_OE1
|
10207 GRC_LCLCTRL_GPIO_OUTPUT1
);
10209 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
10212 if (tg3_flag(tp
, USING_MSIX
)) {
10213 val
= tr32(MSGINT_MODE
);
10214 val
|= MSGINT_MODE_ENABLE
;
10215 if (tp
->irq_cnt
> 1)
10216 val
|= MSGINT_MODE_MULTIVEC_EN
;
10217 if (!tg3_flag(tp
, 1SHOT_MSI
))
10218 val
|= MSGINT_MODE_ONE_SHOT_DISABLE
;
10219 tw32(MSGINT_MODE
, val
);
10222 if (!tg3_flag(tp
, 5705_PLUS
)) {
10223 tw32_f(DMAC_MODE
, DMAC_MODE_ENABLE
);
10227 val
= (WDMAC_MODE_ENABLE
| WDMAC_MODE_TGTABORT_ENAB
|
10228 WDMAC_MODE_MSTABORT_ENAB
| WDMAC_MODE_PARITYERR_ENAB
|
10229 WDMAC_MODE_ADDROFLOW_ENAB
| WDMAC_MODE_FIFOOFLOW_ENAB
|
10230 WDMAC_MODE_FIFOURUN_ENAB
| WDMAC_MODE_FIFOOREAD_ENAB
|
10231 WDMAC_MODE_LNGREAD_ENAB
);
10233 if (tg3_asic_rev(tp
) == ASIC_REV_5705
&&
10234 tg3_chip_rev_id(tp
) != CHIPREV_ID_5705_A0
) {
10235 if (tg3_flag(tp
, TSO_CAPABLE
) &&
10236 (tg3_chip_rev_id(tp
) == CHIPREV_ID_5705_A1
||
10237 tg3_chip_rev_id(tp
) == CHIPREV_ID_5705_A2
)) {
10239 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
10240 !tg3_flag(tp
, IS_5788
)) {
10241 val
|= WDMAC_MODE_RX_ACCEL
;
10245 /* Enable host coalescing bug fix */
10246 if (tg3_flag(tp
, 5755_PLUS
))
10247 val
|= WDMAC_MODE_STATUS_TAG_FIX
;
10249 if (tg3_asic_rev(tp
) == ASIC_REV_5785
)
10250 val
|= WDMAC_MODE_BURST_ALL_DATA
;
10252 tw32_f(WDMAC_MODE
, val
);
10255 if (tg3_flag(tp
, PCIX_MODE
)) {
10258 pci_read_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
10260 if (tg3_asic_rev(tp
) == ASIC_REV_5703
) {
10261 pcix_cmd
&= ~PCI_X_CMD_MAX_READ
;
10262 pcix_cmd
|= PCI_X_CMD_READ_2K
;
10263 } else if (tg3_asic_rev(tp
) == ASIC_REV_5704
) {
10264 pcix_cmd
&= ~(PCI_X_CMD_MAX_SPLIT
| PCI_X_CMD_MAX_READ
);
10265 pcix_cmd
|= PCI_X_CMD_READ_2K
;
10267 pci_write_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
10271 tw32_f(RDMAC_MODE
, rdmac_mode
);
10274 if (tg3_asic_rev(tp
) == ASIC_REV_5719
||
10275 tg3_asic_rev(tp
) == ASIC_REV_5720
) {
10276 for (i
= 0; i
< TG3_NUM_RDMA_CHANNELS
; i
++) {
10277 if (tr32(TG3_RDMA_LENGTH
+ (i
<< 2)) > TG3_MAX_MTU(tp
))
10280 if (i
< TG3_NUM_RDMA_CHANNELS
) {
10281 val
= tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL
);
10282 val
|= tg3_lso_rd_dma_workaround_bit(tp
);
10283 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL
, val
);
10284 tg3_flag_set(tp
, 5719_5720_RDMA_BUG
);
10288 tw32(RCVDCC_MODE
, RCVDCC_MODE_ENABLE
| RCVDCC_MODE_ATTN_ENABLE
);
10289 if (!tg3_flag(tp
, 5705_PLUS
))
10290 tw32(MBFREE_MODE
, MBFREE_MODE_ENABLE
);
10292 if (tg3_asic_rev(tp
) == ASIC_REV_5761
)
10293 tw32(SNDDATAC_MODE
,
10294 SNDDATAC_MODE_ENABLE
| SNDDATAC_MODE_CDELAY
);
10296 tw32(SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
);
10298 tw32(SNDBDC_MODE
, SNDBDC_MODE_ENABLE
| SNDBDC_MODE_ATTN_ENABLE
);
10299 tw32(RCVBDI_MODE
, RCVBDI_MODE_ENABLE
| RCVBDI_MODE_RCB_ATTN_ENAB
);
10300 val
= RCVDBDI_MODE_ENABLE
| RCVDBDI_MODE_INV_RING_SZ
;
10301 if (tg3_flag(tp
, LRG_PROD_RING_CAP
))
10302 val
|= RCVDBDI_MODE_LRG_RING_SZ
;
10303 tw32(RCVDBDI_MODE
, val
);
10304 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
);
10305 if (tg3_flag(tp
, HW_TSO_1
) ||
10306 tg3_flag(tp
, HW_TSO_2
) ||
10307 tg3_flag(tp
, HW_TSO_3
))
10308 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
| 0x8);
10309 val
= SNDBDI_MODE_ENABLE
| SNDBDI_MODE_ATTN_ENABLE
;
10310 if (tg3_flag(tp
, ENABLE_TSS
))
10311 val
|= SNDBDI_MODE_MULTI_TXQ_EN
;
10312 tw32(SNDBDI_MODE
, val
);
10313 tw32(SNDBDS_MODE
, SNDBDS_MODE_ENABLE
| SNDBDS_MODE_ATTN_ENABLE
);
10315 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
) {
10316 err
= tg3_load_5701_a0_firmware_fix(tp
);
10321 if (tg3_asic_rev(tp
) == ASIC_REV_57766
) {
10322 /* Ignore any errors for the firmware download. If download
10323 * fails, the device will operate with EEE disabled
10325 tg3_load_57766_firmware(tp
);
10328 if (tg3_flag(tp
, TSO_CAPABLE
)) {
10329 err
= tg3_load_tso_firmware(tp
);
10334 tp
->tx_mode
= TX_MODE_ENABLE
;
10336 if (tg3_flag(tp
, 5755_PLUS
) ||
10337 tg3_asic_rev(tp
) == ASIC_REV_5906
)
10338 tp
->tx_mode
|= TX_MODE_MBUF_LOCKUP_FIX
;
10340 if (tg3_asic_rev(tp
) == ASIC_REV_5720
||
10341 tg3_asic_rev(tp
) == ASIC_REV_5762
) {
10342 val
= TX_MODE_JMB_FRM_LEN
| TX_MODE_CNT_DN_MODE
;
10343 tp
->tx_mode
&= ~val
;
10344 tp
->tx_mode
|= tr32(MAC_TX_MODE
) & val
;
10347 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
10350 if (tg3_flag(tp
, ENABLE_RSS
)) {
10351 tg3_rss_write_indir_tbl(tp
);
10353 /* Setup the "secret" hash key. */
10354 tw32(MAC_RSS_HASH_KEY_0
, 0x5f865437);
10355 tw32(MAC_RSS_HASH_KEY_1
, 0xe4ac62cc);
10356 tw32(MAC_RSS_HASH_KEY_2
, 0x50103a45);
10357 tw32(MAC_RSS_HASH_KEY_3
, 0x36621985);
10358 tw32(MAC_RSS_HASH_KEY_4
, 0xbf14c0e8);
10359 tw32(MAC_RSS_HASH_KEY_5
, 0x1bc27a1e);
10360 tw32(MAC_RSS_HASH_KEY_6
, 0x84f4b556);
10361 tw32(MAC_RSS_HASH_KEY_7
, 0x094ea6fe);
10362 tw32(MAC_RSS_HASH_KEY_8
, 0x7dda01e7);
10363 tw32(MAC_RSS_HASH_KEY_9
, 0xc04d7481);
10366 tp
->rx_mode
= RX_MODE_ENABLE
;
10367 if (tg3_flag(tp
, 5755_PLUS
))
10368 tp
->rx_mode
|= RX_MODE_IPV6_CSUM_ENABLE
;
10370 if (tg3_flag(tp
, ENABLE_RSS
))
10371 tp
->rx_mode
|= RX_MODE_RSS_ENABLE
|
10372 RX_MODE_RSS_ITBL_HASH_BITS_7
|
10373 RX_MODE_RSS_IPV6_HASH_EN
|
10374 RX_MODE_RSS_TCP_IPV6_HASH_EN
|
10375 RX_MODE_RSS_IPV4_HASH_EN
|
10376 RX_MODE_RSS_TCP_IPV4_HASH_EN
;
10378 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
10381 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
10383 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
10384 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
10385 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
10388 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
10391 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
10392 if ((tg3_asic_rev(tp
) == ASIC_REV_5704
) &&
10393 !(tp
->phy_flags
& TG3_PHYFLG_SERDES_PREEMPHASIS
)) {
10394 /* Set drive transmission level to 1.2V */
10395 /* only if the signal pre-emphasis bit is not set */
10396 val
= tr32(MAC_SERDES_CFG
);
10399 tw32(MAC_SERDES_CFG
, val
);
10401 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5703_A1
)
10402 tw32(MAC_SERDES_CFG
, 0x616000);
10405 /* Prevent chip from dropping frames when flow control
10408 if (tg3_flag(tp
, 57765_CLASS
))
10412 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME
, val
);
10414 if (tg3_asic_rev(tp
) == ASIC_REV_5704
&&
10415 (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
10416 /* Use hardware link auto-negotiation */
10417 tg3_flag_set(tp
, HW_AUTONEG
);
10420 if ((tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) &&
10421 tg3_asic_rev(tp
) == ASIC_REV_5714
) {
10424 tmp
= tr32(SERDES_RX_CTRL
);
10425 tw32(SERDES_RX_CTRL
, tmp
| SERDES_RX_SIG_DETECT
);
10426 tp
->grc_local_ctrl
&= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT
;
10427 tp
->grc_local_ctrl
|= GRC_LCLCTRL_USE_SIG_DETECT
;
10428 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
10431 if (!tg3_flag(tp
, USE_PHYLIB
)) {
10432 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
10433 tp
->phy_flags
&= ~TG3_PHYFLG_IS_LOW_POWER
;
10435 err
= tg3_setup_phy(tp
, false);
10439 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
10440 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
)) {
10443 /* Clear CRC stats. */
10444 if (!tg3_readphy(tp
, MII_TG3_TEST1
, &tmp
)) {
10445 tg3_writephy(tp
, MII_TG3_TEST1
,
10446 tmp
| MII_TG3_TEST1_CRC_EN
);
10447 tg3_readphy(tp
, MII_TG3_RXR_COUNTERS
, &tmp
);
10452 __tg3_set_rx_mode(tp
->dev
);
10454 /* Initialize receive rules. */
10455 tw32(MAC_RCV_RULE_0
, 0xc2000000 & RCV_RULE_DISABLE_MASK
);
10456 tw32(MAC_RCV_VALUE_0
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
10457 tw32(MAC_RCV_RULE_1
, 0x86000004 & RCV_RULE_DISABLE_MASK
);
10458 tw32(MAC_RCV_VALUE_1
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
10460 if (tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, 5780_CLASS
))
10464 if (tg3_flag(tp
, ENABLE_ASF
))
10468 tw32(MAC_RCV_RULE_15
, 0); tw32(MAC_RCV_VALUE_15
, 0);
10470 tw32(MAC_RCV_RULE_14
, 0); tw32(MAC_RCV_VALUE_14
, 0);
10472 tw32(MAC_RCV_RULE_13
, 0); tw32(MAC_RCV_VALUE_13
, 0);
10474 tw32(MAC_RCV_RULE_12
, 0); tw32(MAC_RCV_VALUE_12
, 0);
10476 tw32(MAC_RCV_RULE_11
, 0); tw32(MAC_RCV_VALUE_11
, 0);
10478 tw32(MAC_RCV_RULE_10
, 0); tw32(MAC_RCV_VALUE_10
, 0);
10480 tw32(MAC_RCV_RULE_9
, 0); tw32(MAC_RCV_VALUE_9
, 0);
10482 tw32(MAC_RCV_RULE_8
, 0); tw32(MAC_RCV_VALUE_8
, 0);
10484 tw32(MAC_RCV_RULE_7
, 0); tw32(MAC_RCV_VALUE_7
, 0);
10486 tw32(MAC_RCV_RULE_6
, 0); tw32(MAC_RCV_VALUE_6
, 0);
10488 tw32(MAC_RCV_RULE_5
, 0); tw32(MAC_RCV_VALUE_5
, 0);
10490 tw32(MAC_RCV_RULE_4
, 0); tw32(MAC_RCV_VALUE_4
, 0);
10492 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10494 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10502 if (tg3_flag(tp
, ENABLE_APE
))
10503 /* Write our heartbeat update interval to APE. */
10504 tg3_ape_write32(tp
, TG3_APE_HOST_HEARTBEAT_INT_MS
,
10505 APE_HOST_HEARTBEAT_INT_DISABLE
);
10507 tg3_write_sig_post_reset(tp
, RESET_KIND_INIT
);
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
	 */
	tg3_enable_register_access(tp);

	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
	int i;

	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;

		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);

		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
			memset(ocir, 0, TG3_OCIR_LEN);
	}
}
10547 /* sysfs attributes for hwmon */
10548 static ssize_t
tg3_show_temp(struct device
*dev
,
10549 struct device_attribute
*devattr
, char *buf
)
10551 struct pci_dev
*pdev
= to_pci_dev(dev
);
10552 struct net_device
*netdev
= pci_get_drvdata(pdev
);
10553 struct tg3
*tp
= netdev_priv(netdev
);
10554 struct sensor_device_attribute
*attr
= to_sensor_dev_attr(devattr
);
10557 spin_lock_bh(&tp
->lock
);
10558 tg3_ape_scratchpad_read(tp
, &temperature
, attr
->index
,
10559 sizeof(temperature
));
10560 spin_unlock_bh(&tp
->lock
);
10561 return sprintf(buf
, "%u\n", temperature
);
10565 static SENSOR_DEVICE_ATTR(temp1_input
, S_IRUGO
, tg3_show_temp
, NULL
,
10566 TG3_TEMP_SENSOR_OFFSET
);
10567 static SENSOR_DEVICE_ATTR(temp1_crit
, S_IRUGO
, tg3_show_temp
, NULL
,
10568 TG3_TEMP_CAUTION_OFFSET
);
10569 static SENSOR_DEVICE_ATTR(temp1_max
, S_IRUGO
, tg3_show_temp
, NULL
,
10570 TG3_TEMP_MAX_OFFSET
);
10572 static struct attribute
*tg3_attributes
[] = {
10573 &sensor_dev_attr_temp1_input
.dev_attr
.attr
,
10574 &sensor_dev_attr_temp1_crit
.dev_attr
.attr
,
10575 &sensor_dev_attr_temp1_max
.dev_attr
.attr
,
10579 static const struct attribute_group tg3_group
= {
10580 .attrs
= tg3_attributes
,
10583 static void tg3_hwmon_close(struct tg3
*tp
)
10585 if (tp
->hwmon_dev
) {
10586 hwmon_device_unregister(tp
->hwmon_dev
);
10587 tp
->hwmon_dev
= NULL
;
10588 sysfs_remove_group(&tp
->pdev
->dev
.kobj
, &tg3_group
);
10592 static void tg3_hwmon_open(struct tg3
*tp
)
10596 struct pci_dev
*pdev
= tp
->pdev
;
10597 struct tg3_ocir ocirs
[TG3_SD_NUM_RECS
];
10599 tg3_sd_scan_scratchpad(tp
, ocirs
);
10601 for (i
= 0; i
< TG3_SD_NUM_RECS
; i
++) {
10602 if (!ocirs
[i
].src_data_length
)
10605 size
+= ocirs
[i
].src_hdr_length
;
10606 size
+= ocirs
[i
].src_data_length
;
10612 /* Register hwmon sysfs hooks */
10613 err
= sysfs_create_group(&pdev
->dev
.kobj
, &tg3_group
);
10615 dev_err(&pdev
->dev
, "Cannot create sysfs group, aborting\n");
10619 tp
->hwmon_dev
= hwmon_device_register(&pdev
->dev
);
10620 if (IS_ERR(tp
->hwmon_dev
)) {
10621 tp
->hwmon_dev
= NULL
;
10622 dev_err(&pdev
->dev
, "Cannot register hwmon device, aborting\n");
10623 sysfs_remove_group(&pdev
->dev
.kobj
, &tg3_group
);
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
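/* The MAC statistics counters are only 32 bits wide in hardware; the macro
 * above accumulates the values read from the chip into 64-bit software
 * counters and bumps the high word whenever the low-word addition wraps.
 */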
10635 static void tg3_periodic_fetch_stats(struct tg3
*tp
)
10637 struct tg3_hw_stats
*sp
= tp
->hw_stats
;
10642 TG3_STAT_ADD32(&sp
->tx_octets
, MAC_TX_STATS_OCTETS
);
10643 TG3_STAT_ADD32(&sp
->tx_collisions
, MAC_TX_STATS_COLLISIONS
);
10644 TG3_STAT_ADD32(&sp
->tx_xon_sent
, MAC_TX_STATS_XON_SENT
);
10645 TG3_STAT_ADD32(&sp
->tx_xoff_sent
, MAC_TX_STATS_XOFF_SENT
);
10646 TG3_STAT_ADD32(&sp
->tx_mac_errors
, MAC_TX_STATS_MAC_ERRORS
);
10647 TG3_STAT_ADD32(&sp
->tx_single_collisions
, MAC_TX_STATS_SINGLE_COLLISIONS
);
10648 TG3_STAT_ADD32(&sp
->tx_mult_collisions
, MAC_TX_STATS_MULT_COLLISIONS
);
10649 TG3_STAT_ADD32(&sp
->tx_deferred
, MAC_TX_STATS_DEFERRED
);
10650 TG3_STAT_ADD32(&sp
->tx_excessive_collisions
, MAC_TX_STATS_EXCESSIVE_COL
);
10651 TG3_STAT_ADD32(&sp
->tx_late_collisions
, MAC_TX_STATS_LATE_COL
);
10652 TG3_STAT_ADD32(&sp
->tx_ucast_packets
, MAC_TX_STATS_UCAST
);
10653 TG3_STAT_ADD32(&sp
->tx_mcast_packets
, MAC_TX_STATS_MCAST
);
10654 TG3_STAT_ADD32(&sp
->tx_bcast_packets
, MAC_TX_STATS_BCAST
);
10655 if (unlikely(tg3_flag(tp
, 5719_5720_RDMA_BUG
) &&
10656 (sp
->tx_ucast_packets
.low
+ sp
->tx_mcast_packets
.low
+
10657 sp
->tx_bcast_packets
.low
) > TG3_NUM_RDMA_CHANNELS
)) {
10660 val
= tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL
);
10661 val
&= ~tg3_lso_rd_dma_workaround_bit(tp
);
10662 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL
, val
);
10663 tg3_flag_clear(tp
, 5719_5720_RDMA_BUG
);
10666 TG3_STAT_ADD32(&sp
->rx_octets
, MAC_RX_STATS_OCTETS
);
10667 TG3_STAT_ADD32(&sp
->rx_fragments
, MAC_RX_STATS_FRAGMENTS
);
10668 TG3_STAT_ADD32(&sp
->rx_ucast_packets
, MAC_RX_STATS_UCAST
);
10669 TG3_STAT_ADD32(&sp
->rx_mcast_packets
, MAC_RX_STATS_MCAST
);
10670 TG3_STAT_ADD32(&sp
->rx_bcast_packets
, MAC_RX_STATS_BCAST
);
10671 TG3_STAT_ADD32(&sp
->rx_fcs_errors
, MAC_RX_STATS_FCS_ERRORS
);
10672 TG3_STAT_ADD32(&sp
->rx_align_errors
, MAC_RX_STATS_ALIGN_ERRORS
);
10673 TG3_STAT_ADD32(&sp
->rx_xon_pause_rcvd
, MAC_RX_STATS_XON_PAUSE_RECVD
);
10674 TG3_STAT_ADD32(&sp
->rx_xoff_pause_rcvd
, MAC_RX_STATS_XOFF_PAUSE_RECVD
);
10675 TG3_STAT_ADD32(&sp
->rx_mac_ctrl_rcvd
, MAC_RX_STATS_MAC_CTRL_RECVD
);
10676 TG3_STAT_ADD32(&sp
->rx_xoff_entered
, MAC_RX_STATS_XOFF_ENTERED
);
10677 TG3_STAT_ADD32(&sp
->rx_frame_too_long_errors
, MAC_RX_STATS_FRAME_TOO_LONG
);
10678 TG3_STAT_ADD32(&sp
->rx_jabbers
, MAC_RX_STATS_JABBERS
);
10679 TG3_STAT_ADD32(&sp
->rx_undersize_packets
, MAC_RX_STATS_UNDERSIZE
);
10681 TG3_STAT_ADD32(&sp
->rxbds_empty
, RCVLPC_NO_RCV_BD_CNT
);
10682 if (tg3_asic_rev(tp
) != ASIC_REV_5717
&&
10683 tg3_chip_rev_id(tp
) != CHIPREV_ID_5719_A0
&&
10684 tg3_chip_rev_id(tp
) != CHIPREV_ID_5720_A0
) {
10685 TG3_STAT_ADD32(&sp
->rx_discards
, RCVLPC_IN_DISCARDS_CNT
);
10687 u32 val
= tr32(HOSTCC_FLOW_ATTN
);
10688 val
= (val
& HOSTCC_FLOW_ATTN_MBUF_LWM
) ? 1 : 0;
10690 tw32(HOSTCC_FLOW_ATTN
, HOSTCC_FLOW_ATTN_MBUF_LWM
);
10691 sp
->rx_discards
.low
+= val
;
10692 if (sp
->rx_discards
.low
< val
)
10693 sp
->rx_discards
.high
+= 1;
10695 sp
->mbuf_lwm_thresh_hit
= sp
->rx_discards
;
10697 TG3_STAT_ADD32(&sp
->rx_errors
, RCVLPC_IN_ERRORS_CNT
);
10700 static void tg3_chk_missed_msi(struct tg3
*tp
)
10704 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
10705 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
10707 if (tg3_has_work(tnapi
)) {
10708 if (tnapi
->last_rx_cons
== tnapi
->rx_rcb_ptr
&&
10709 tnapi
->last_tx_cons
== tnapi
->tx_cons
) {
10710 if (tnapi
->chk_msi_cnt
< 1) {
10711 tnapi
->chk_msi_cnt
++;
10717 tnapi
->chk_msi_cnt
= 0;
10718 tnapi
->last_rx_cons
= tnapi
->rx_rcb_ptr
;
10719 tnapi
->last_tx_cons
= tnapi
->tx_cons
;
10723 static void tg3_timer(unsigned long __opaque
)
10725 struct tg3
*tp
= (struct tg3
*) __opaque
;
10727 if (tp
->irq_sync
|| tg3_flag(tp
, RESET_TASK_PENDING
))
10728 goto restart_timer
;
10730 spin_lock(&tp
->lock
);
10732 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
10733 tg3_flag(tp
, 57765_CLASS
))
10734 tg3_chk_missed_msi(tp
);
10736 if (tg3_flag(tp
, FLUSH_POSTED_WRITES
)) {
10737 /* BCM4785: Flush posted writes from GbE to host memory. */
10741 if (!tg3_flag(tp
, TAGGED_STATUS
)) {
10742 /* All of this garbage is because when using non-tagged
10743 * IRQ status the mailbox/status_block protocol the chip
10744 * uses with the cpu is race prone.
10746 if (tp
->napi
[0].hw_status
->status
& SD_STATUS_UPDATED
) {
10747 tw32(GRC_LOCAL_CTRL
,
10748 tp
->grc_local_ctrl
| GRC_LCLCTRL_SETINT
);
10750 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
10751 HOSTCC_MODE_ENABLE
| HOSTCC_MODE_NOW
);
10754 if (!(tr32(WDMAC_MODE
) & WDMAC_MODE_ENABLE
)) {
10755 spin_unlock(&tp
->lock
);
10756 tg3_reset_task_schedule(tp
);
10757 goto restart_timer
;
10761 /* This part only runs once per second. */
10762 if (!--tp
->timer_counter
) {
10763 if (tg3_flag(tp
, 5705_PLUS
))
10764 tg3_periodic_fetch_stats(tp
);
10766 if (tp
->setlpicnt
&& !--tp
->setlpicnt
)
10767 tg3_phy_eee_enable(tp
);
10769 if (tg3_flag(tp
, USE_LINKCHG_REG
)) {
10773 mac_stat
= tr32(MAC_STATUS
);
10776 if (tp
->phy_flags
& TG3_PHYFLG_USE_MI_INTERRUPT
) {
10777 if (mac_stat
& MAC_STATUS_MI_INTERRUPT
)
10779 } else if (mac_stat
& MAC_STATUS_LNKSTATE_CHANGED
)
10783 tg3_setup_phy(tp
, false);
10784 } else if (tg3_flag(tp
, POLL_SERDES
)) {
10785 u32 mac_stat
= tr32(MAC_STATUS
);
10786 int need_setup
= 0;
10789 (mac_stat
& MAC_STATUS_LNKSTATE_CHANGED
)) {
10792 if (!tp
->link_up
&&
10793 (mac_stat
& (MAC_STATUS_PCS_SYNCED
|
10794 MAC_STATUS_SIGNAL_DET
))) {
10798 if (!tp
->serdes_counter
) {
10801 ~MAC_MODE_PORT_MODE_MASK
));
10803 tw32_f(MAC_MODE
, tp
->mac_mode
);
10806 tg3_setup_phy(tp
, false);
10808 } else if ((tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) &&
10809 tg3_flag(tp
, 5780_CLASS
)) {
10810 tg3_serdes_parallel_detect(tp
);
10813 tp
->timer_counter
= tp
->timer_multiplier
;
10816 /* Heartbeat is only sent once every 2 seconds.
10818 * The heartbeat is to tell the ASF firmware that the host
10819 * driver is still alive. In the event that the OS crashes,
10820 * ASF needs to reset the hardware to free up the FIFO space
10821 * that may be filled with rx packets destined for the host.
10822 * If the FIFO is full, ASF will no longer function properly.
10824 * Unintended resets have been reported on real time kernels
10825 * where the timer doesn't run on time. Netpoll will also have
10828 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10829 * to check the ring condition when the heartbeat is expiring
10830 * before doing the reset. This will prevent most unintended
10833 if (!--tp
->asf_counter
) {
10834 if (tg3_flag(tp
, ENABLE_ASF
) && !tg3_flag(tp
, ENABLE_APE
)) {
10835 tg3_wait_for_event_ack(tp
);
10837 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
,
10838 FWCMD_NICDRV_ALIVE3
);
10839 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_LEN_MBOX
, 4);
10840 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
,
10841 TG3_FW_UPDATE_TIMEOUT_SEC
);
10843 tg3_generate_fw_event(tp
);
10845 tp
->asf_counter
= tp
->asf_multiplier
;
10848 spin_unlock(&tp
->lock
);
10851 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
10852 add_timer(&tp
->timer
);
static void tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) tp;
	tp->timer.function = tg3_timer;
}

static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter   = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
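/* With tagged status the timer only needs to fire once per second
 * (timer_offset == HZ); otherwise it fires ten times per second, and
 * timer_multiplier/asf_multiplier convert that tick rate back into the
 * one-second housekeeping interval and the ASF heartbeat period.
 */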
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
10913 static void tg3_reset_task(struct work_struct
*work
)
10915 struct tg3
*tp
= container_of(work
, struct tg3
, reset_task
);
10918 tg3_full_lock(tp
, 0);
10920 if (!netif_running(tp
->dev
)) {
10921 tg3_flag_clear(tp
, RESET_TASK_PENDING
);
10922 tg3_full_unlock(tp
);
10926 tg3_full_unlock(tp
);
10930 tg3_netif_stop(tp
);
10932 tg3_full_lock(tp
, 1);
10934 if (tg3_flag(tp
, TX_RECOVERY_PENDING
)) {
10935 tp
->write32_tx_mbox
= tg3_write32_tx_mbox
;
10936 tp
->write32_rx_mbox
= tg3_write_flush_reg32
;
10937 tg3_flag_set(tp
, MBOX_WRITE_REORDER
);
10938 tg3_flag_clear(tp
, TX_RECOVERY_PENDING
);
10941 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 0);
10942 err
= tg3_init_hw(tp
, true);
10946 tg3_netif_start(tp
);
10949 tg3_full_unlock(tp
);
10954 tg3_flag_clear(tp
, RESET_TASK_PENDING
);
10957 static int tg3_request_irq(struct tg3
*tp
, int irq_num
)
10960 unsigned long flags
;
10962 struct tg3_napi
*tnapi
= &tp
->napi
[irq_num
];
10964 if (tp
->irq_cnt
== 1)
10965 name
= tp
->dev
->name
;
10967 name
= &tnapi
->irq_lbl
[0];
10968 snprintf(name
, IFNAMSIZ
, "%s-%d", tp
->dev
->name
, irq_num
);
10969 name
[IFNAMSIZ
-1] = 0;
10972 if (tg3_flag(tp
, USING_MSI
) || tg3_flag(tp
, USING_MSIX
)) {
10974 if (tg3_flag(tp
, 1SHOT_MSI
))
10975 fn
= tg3_msi_1shot
;
10978 fn
= tg3_interrupt
;
10979 if (tg3_flag(tp
, TAGGED_STATUS
))
10980 fn
= tg3_interrupt_tagged
;
10981 flags
= IRQF_SHARED
;
10984 return request_irq(tnapi
->irq_vec
, fn
, flags
, name
, tnapi
);
10987 static int tg3_test_interrupt(struct tg3
*tp
)
10989 struct tg3_napi
*tnapi
= &tp
->napi
[0];
10990 struct net_device
*dev
= tp
->dev
;
10991 int err
, i
, intr_ok
= 0;
10994 if (!netif_running(dev
))
10997 tg3_disable_ints(tp
);
10999 free_irq(tnapi
->irq_vec
, tnapi
);
11002 * Turn off MSI one shot mode. Otherwise this test has no
11003 * observable way to know whether the interrupt was delivered.
11005 if (tg3_flag(tp
, 57765_PLUS
)) {
11006 val
= tr32(MSGINT_MODE
) | MSGINT_MODE_ONE_SHOT_DISABLE
;
11007 tw32(MSGINT_MODE
, val
);
11010 err
= request_irq(tnapi
->irq_vec
, tg3_test_isr
,
11011 IRQF_SHARED
, dev
->name
, tnapi
);
11015 tnapi
->hw_status
->status
&= ~SD_STATUS_UPDATED
;
11016 tg3_enable_ints(tp
);
11018 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
11021 for (i
= 0; i
< 5; i
++) {
11022 u32 int_mbox
, misc_host_ctrl
;
11024 int_mbox
= tr32_mailbox(tnapi
->int_mbox
);
11025 misc_host_ctrl
= tr32(TG3PCI_MISC_HOST_CTRL
);
11027 if ((int_mbox
!= 0) ||
11028 (misc_host_ctrl
& MISC_HOST_CTRL_MASK_PCI_INT
)) {
11033 if (tg3_flag(tp
, 57765_PLUS
) &&
11034 tnapi
->hw_status
->status_tag
!= tnapi
->last_tag
)
11035 tw32_mailbox_f(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
11040 tg3_disable_ints(tp
);
11042 free_irq(tnapi
->irq_vec
, tnapi
);
11044 err
= tg3_request_irq(tp
, 0);
11050 /* Reenable MSI one shot mode. */
11051 if (tg3_flag(tp
, 57765_PLUS
) && tg3_flag(tp
, 1SHOT_MSI
)) {
11052 val
= tr32(MSGINT_MODE
) & ~MSGINT_MODE_ONE_SHOT_DISABLE
;
11053 tw32(MSGINT_MODE
, val
);
11061 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11062 * successfully restored
11064 static int tg3_test_msi(struct tg3
*tp
)
11069 if (!tg3_flag(tp
, USING_MSI
))
11072 /* Turn off SERR reporting in case MSI terminates with Master
11075 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
11076 pci_write_config_word(tp
->pdev
, PCI_COMMAND
,
11077 pci_cmd
& ~PCI_COMMAND_SERR
);
11079 err
= tg3_test_interrupt(tp
);
11081 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
11086 /* other failures */
11090 /* MSI test failed, go back to INTx mode */
11091 netdev_warn(tp
->dev
, "No interrupt was generated using MSI. Switching "
11092 "to INTx mode. Please report this failure to the PCI "
11093 "maintainer and include system chipset information\n");
11095 free_irq(tp
->napi
[0].irq_vec
, &tp
->napi
[0]);
11097 pci_disable_msi(tp
->pdev
);
11099 tg3_flag_clear(tp
, USING_MSI
);
11100 tp
->napi
[0].irq_vec
= tp
->pdev
->irq
;
11102 err
= tg3_request_irq(tp
, 0);
11106 /* Need to reset the chip because the MSI cycle may have terminated
11107 * with Master Abort.
11109 tg3_full_lock(tp
, 1);
11111 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
11112 err
= tg3_init_hw(tp
, true);
11114 tg3_full_unlock(tp
);
11117 free_irq(tp
->napi
[0].irq_vec
, &tp
->napi
[0]);
11122 static int tg3_request_firmware(struct tg3
*tp
)
11124 const struct tg3_firmware_hdr
*fw_hdr
;
11126 if (request_firmware(&tp
->fw
, tp
->fw_needed
, &tp
->pdev
->dev
)) {
11127 netdev_err(tp
->dev
, "Failed to load firmware \"%s\"\n",
11132 fw_hdr
= (struct tg3_firmware_hdr
*)tp
->fw
->data
;
11134 /* Firmware blob starts with version numbers, followed by
11135 * start address and _full_ length including BSS sections
11136 * (which must be longer than the actual data, of course
11139 tp
->fw_len
= be32_to_cpu(fw_hdr
->len
); /* includes bss */
11140 if (tp
->fw_len
< (tp
->fw
->size
- TG3_FW_HDR_LEN
)) {
11141 netdev_err(tp
->dev
, "bogus length %d in \"%s\"\n",
11142 tp
->fw_len
, tp
->fw_needed
);
11143 release_firmware(tp
->fw
);
11148 /* We no longer need firmware; we have it. */
11149 tp
->fw_needed
= NULL
;
static u32 tg3_irq_count(struct tg3 *tp)
{
	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);

	if (irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
	}

	return irq_cnt;
}
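/* The vector count starts from the larger of the RX and TX queue counts; in
 * multiqueue MSI-X mode one extra vector is requested because the first
 * vector only deals with link interrupts and the like, and the total is
 * capped at tp->irq_max.
 */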
11169 static bool tg3_enable_msix(struct tg3
*tp
)
11172 struct msix_entry msix_ent
[TG3_IRQ_MAX_VECS
];
11174 tp
->txq_cnt
= tp
->txq_req
;
11175 tp
->rxq_cnt
= tp
->rxq_req
;
11177 tp
->rxq_cnt
= netif_get_num_default_rss_queues();
11178 if (tp
->rxq_cnt
> tp
->rxq_max
)
11179 tp
->rxq_cnt
= tp
->rxq_max
;
11181 /* Disable multiple TX rings by default. Simple round-robin hardware
11182 * scheduling of the TX rings can cause starvation of rings with
11183 * small packets when other rings have TSO or jumbo packets.
11188 tp
->irq_cnt
= tg3_irq_count(tp
);
11190 for (i
= 0; i
< tp
->irq_max
; i
++) {
11191 msix_ent
[i
].entry
= i
;
11192 msix_ent
[i
].vector
= 0;
11195 rc
= pci_enable_msix(tp
->pdev
, msix_ent
, tp
->irq_cnt
);
11198 } else if (rc
!= 0) {
11199 if (pci_enable_msix(tp
->pdev
, msix_ent
, rc
))
11201 netdev_notice(tp
->dev
, "Requested %d MSI-X vectors, received %d\n",
11204 tp
->rxq_cnt
= max(rc
- 1, 1);
11206 tp
->txq_cnt
= min(tp
->rxq_cnt
, tp
->txq_max
);
11209 for (i
= 0; i
< tp
->irq_max
; i
++)
11210 tp
->napi
[i
].irq_vec
= msix_ent
[i
].vector
;
11212 if (netif_set_real_num_rx_queues(tp
->dev
, tp
->rxq_cnt
)) {
11213 pci_disable_msix(tp
->pdev
);
11217 if (tp
->irq_cnt
== 1)
11220 tg3_flag_set(tp
, ENABLE_RSS
);
11222 if (tp
->txq_cnt
> 1)
11223 tg3_flag_set(tp
, ENABLE_TSS
);
11225 netif_set_real_num_tx_queues(tp
->dev
, tp
->txq_cnt
);
11230 static void tg3_ints_init(struct tg3
*tp
)
11232 if ((tg3_flag(tp
, SUPPORT_MSI
) || tg3_flag(tp
, SUPPORT_MSIX
)) &&
11233 !tg3_flag(tp
, TAGGED_STATUS
)) {
11234 /* All MSI supporting chips should support tagged
11235 * status. Assert that this is the case.
11237 netdev_warn(tp
->dev
,
11238 "MSI without TAGGED_STATUS? Not using MSI\n");
11242 if (tg3_flag(tp
, SUPPORT_MSIX
) && tg3_enable_msix(tp
))
11243 tg3_flag_set(tp
, USING_MSIX
);
11244 else if (tg3_flag(tp
, SUPPORT_MSI
) && pci_enable_msi(tp
->pdev
) == 0)
11245 tg3_flag_set(tp
, USING_MSI
);
11247 if (tg3_flag(tp
, USING_MSI
) || tg3_flag(tp
, USING_MSIX
)) {
11248 u32 msi_mode
= tr32(MSGINT_MODE
);
11249 if (tg3_flag(tp
, USING_MSIX
) && tp
->irq_cnt
> 1)
11250 msi_mode
|= MSGINT_MODE_MULTIVEC_EN
;
11251 if (!tg3_flag(tp
, 1SHOT_MSI
))
11252 msi_mode
|= MSGINT_MODE_ONE_SHOT_DISABLE
;
11253 tw32(MSGINT_MODE
, msi_mode
| MSGINT_MODE_ENABLE
);
11256 if (!tg3_flag(tp
, USING_MSIX
)) {
11258 tp
->napi
[0].irq_vec
= tp
->pdev
->irq
;
11261 if (tp
->irq_cnt
== 1) {
11264 netif_set_real_num_tx_queues(tp
->dev
, 1);
11265 netif_set_real_num_rx_queues(tp
->dev
, 1);
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto out_ints_fini;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto out_napi_fini;
		}
	}

	tg3_full_lock(tp, 0);

	if (init)
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto out_free_irq;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto out_napi_fini;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	if (init)
		tg3_ptp_init(tp);
	else
		tg3_ptp_resume(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

out_free_irq:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

out_napi_fini:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

out_ints_fini:
	tg3_ints_fini(tp);

	return err;
}
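/* Tear down everything tg3_start() set up: stop the timer and NAPI, halt
 * the hardware, release the IRQs and free the DMA-consistent memory.
 */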
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
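/* net_device open/close callbacks.  tg3_open() also (re)loads firmware when
 * the chip needs it and registers the PTP clock on PTP-capable devices.
 */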
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
			if (err) {
				netdev_warn(tp->dev, "EEE capability disabled\n");
				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
				netdev_warn(tp->dev, "EEE capability restored\n");
				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
			}
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp,
			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
			true, true);
	if (err) {
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	return err;
}
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_ptp_fini(tp);

	tg3_stop(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_power_down(tp);

	tg3_carrier_off(tp);

	return 0;
}
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}
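/* Hardware statistics counters are kept as two 32-bit halves that
 * get_stat64() merges into one 64-bit value.  On 5700/5701 copper devices
 * the CRC error count is instead read from a PHY test register, which is
 * what tg3_calc_crc_errors() below handles.
 */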
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
			 get_stat64(&hw_stats->member)
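/* ESTAT_ADD() folds the current hardware counter for 'member' on top of the
 * snapshot saved in tp->estats_prev, so reported totals keep accumulating
 * on top of the previously saved values.
 */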
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);

	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);

	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);

	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);

	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);

	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);

	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
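/* ethtool register dump support: the full legacy register block is
 * TG3_REG_BLK_SIZE bytes and is only read out when the PHY is not in a
 * low-power state.
 */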
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}

static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}

static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
11716 static int tg3_get_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
11718 struct tg3
*tp
= netdev_priv(dev
);
11721 u32 i
, offset
, len
, b_offset
, b_count
;
11724 if (tg3_flag(tp
, NO_NVRAM
))
11727 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
11730 offset
= eeprom
->offset
;
11734 eeprom
->magic
= TG3_EEPROM_MAGIC
;
11737 /* adjustments to start on required 4 byte boundary */
11738 b_offset
= offset
& 3;
11739 b_count
= 4 - b_offset
;
11740 if (b_count
> len
) {
11741 /* i.e. offset=1 len=2 */
11744 ret
= tg3_nvram_read_be32(tp
, offset
-b_offset
, &val
);
11747 memcpy(data
, ((char *)&val
) + b_offset
, b_count
);
11750 eeprom
->len
+= b_count
;
11753 /* read bytes up to the last 4 byte boundary */
11754 pd
= &data
[eeprom
->len
];
11755 for (i
= 0; i
< (len
- (len
& 3)); i
+= 4) {
11756 ret
= tg3_nvram_read_be32(tp
, offset
+ i
, &val
);
11761 memcpy(pd
+ i
, &val
, 4);
11766 /* read last bytes not ending on 4 byte boundary */
11767 pd
= &data
[eeprom
->len
];
11769 b_offset
= offset
+ len
- b_count
;
11770 ret
= tg3_nvram_read_be32(tp
, b_offset
, &val
);
11773 memcpy(pd
, &val
, b_count
);
11774 eeprom
->len
+= b_count
;
11779 static int tg3_set_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
11781 struct tg3
*tp
= netdev_priv(dev
);
11783 u32 offset
, len
, b_offset
, odd_len
;
11787 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
11790 if (tg3_flag(tp
, NO_NVRAM
) ||
11791 eeprom
->magic
!= TG3_EEPROM_MAGIC
)
11794 offset
= eeprom
->offset
;
11797 if ((b_offset
= (offset
& 3))) {
11798 /* adjustments to start on required 4 byte boundary */
11799 ret
= tg3_nvram_read_be32(tp
, offset
-b_offset
, &start
);
11810 /* adjustments to end on required 4 byte boundary */
11812 len
= (len
+ 3) & ~3;
11813 ret
= tg3_nvram_read_be32(tp
, offset
+len
-4, &end
);
11819 if (b_offset
|| odd_len
) {
11820 buf
= kmalloc(len
, GFP_KERNEL
);
11824 memcpy(buf
, &start
, 4);
11826 memcpy(buf
+len
-4, &end
, 4);
11827 memcpy(buf
+ b_offset
, data
, eeprom
->len
);
11830 ret
= tg3_nvram_write_block(tp
, offset
, len
, buf
);
11838 static int tg3_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
11840 struct tg3
*tp
= netdev_priv(dev
);
11842 if (tg3_flag(tp
, USE_PHYLIB
)) {
11843 struct phy_device
*phydev
;
11844 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
11846 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
11847 return phy_ethtool_gset(phydev
, cmd
);
11850 cmd
->supported
= (SUPPORTED_Autoneg
);
11852 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
11853 cmd
->supported
|= (SUPPORTED_1000baseT_Half
|
11854 SUPPORTED_1000baseT_Full
);
11856 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
11857 cmd
->supported
|= (SUPPORTED_100baseT_Half
|
11858 SUPPORTED_100baseT_Full
|
11859 SUPPORTED_10baseT_Half
|
11860 SUPPORTED_10baseT_Full
|
11862 cmd
->port
= PORT_TP
;
11864 cmd
->supported
|= SUPPORTED_FIBRE
;
11865 cmd
->port
= PORT_FIBRE
;
11868 cmd
->advertising
= tp
->link_config
.advertising
;
11869 if (tg3_flag(tp
, PAUSE_AUTONEG
)) {
11870 if (tp
->link_config
.flowctrl
& FLOW_CTRL_RX
) {
11871 if (tp
->link_config
.flowctrl
& FLOW_CTRL_TX
) {
11872 cmd
->advertising
|= ADVERTISED_Pause
;
11874 cmd
->advertising
|= ADVERTISED_Pause
|
11875 ADVERTISED_Asym_Pause
;
11877 } else if (tp
->link_config
.flowctrl
& FLOW_CTRL_TX
) {
11878 cmd
->advertising
|= ADVERTISED_Asym_Pause
;
11881 if (netif_running(dev
) && tp
->link_up
) {
11882 ethtool_cmd_speed_set(cmd
, tp
->link_config
.active_speed
);
11883 cmd
->duplex
= tp
->link_config
.active_duplex
;
11884 cmd
->lp_advertising
= tp
->link_config
.rmt_adv
;
11885 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
11886 if (tp
->phy_flags
& TG3_PHYFLG_MDIX_STATE
)
11887 cmd
->eth_tp_mdix
= ETH_TP_MDI_X
;
11889 cmd
->eth_tp_mdix
= ETH_TP_MDI
;
11892 ethtool_cmd_speed_set(cmd
, SPEED_UNKNOWN
);
11893 cmd
->duplex
= DUPLEX_UNKNOWN
;
11894 cmd
->eth_tp_mdix
= ETH_TP_MDI_INVALID
;
11896 cmd
->phy_address
= tp
->phy_addr
;
11897 cmd
->transceiver
= XCVR_INTERNAL
;
11898 cmd
->autoneg
= tp
->link_config
.autoneg
;
11904 static int tg3_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
11906 struct tg3
*tp
= netdev_priv(dev
);
11907 u32 speed
= ethtool_cmd_speed(cmd
);
11909 if (tg3_flag(tp
, USE_PHYLIB
)) {
11910 struct phy_device
*phydev
;
11911 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
11913 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
11914 return phy_ethtool_sset(phydev
, cmd
);
11917 if (cmd
->autoneg
!= AUTONEG_ENABLE
&&
11918 cmd
->autoneg
!= AUTONEG_DISABLE
)
11921 if (cmd
->autoneg
== AUTONEG_DISABLE
&&
11922 cmd
->duplex
!= DUPLEX_FULL
&&
11923 cmd
->duplex
!= DUPLEX_HALF
)
11926 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
11927 u32 mask
= ADVERTISED_Autoneg
|
11929 ADVERTISED_Asym_Pause
;
11931 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
11932 mask
|= ADVERTISED_1000baseT_Half
|
11933 ADVERTISED_1000baseT_Full
;
11935 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
11936 mask
|= ADVERTISED_100baseT_Half
|
11937 ADVERTISED_100baseT_Full
|
11938 ADVERTISED_10baseT_Half
|
11939 ADVERTISED_10baseT_Full
|
11942 mask
|= ADVERTISED_FIBRE
;
11944 if (cmd
->advertising
& ~mask
)
11947 mask
&= (ADVERTISED_1000baseT_Half
|
11948 ADVERTISED_1000baseT_Full
|
11949 ADVERTISED_100baseT_Half
|
11950 ADVERTISED_100baseT_Full
|
11951 ADVERTISED_10baseT_Half
|
11952 ADVERTISED_10baseT_Full
);
11954 cmd
->advertising
&= mask
;
11956 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) {
11957 if (speed
!= SPEED_1000
)
11960 if (cmd
->duplex
!= DUPLEX_FULL
)
11963 if (speed
!= SPEED_100
&&
11969 tg3_full_lock(tp
, 0);
11971 tp
->link_config
.autoneg
= cmd
->autoneg
;
11972 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
11973 tp
->link_config
.advertising
= (cmd
->advertising
|
11974 ADVERTISED_Autoneg
);
11975 tp
->link_config
.speed
= SPEED_UNKNOWN
;
11976 tp
->link_config
.duplex
= DUPLEX_UNKNOWN
;
11978 tp
->link_config
.advertising
= 0;
11979 tp
->link_config
.speed
= speed
;
11980 tp
->link_config
.duplex
= cmd
->duplex
;
11983 tp
->phy_flags
|= TG3_PHYFLG_USER_CONFIGURED
;
11985 tg3_warn_mgmt_link_flap(tp
);
11987 if (netif_running(dev
))
11988 tg3_setup_phy(tp
, true);
11990 tg3_full_unlock(tp
);
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
12005 static void tg3_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
12007 struct tg3
*tp
= netdev_priv(dev
);
12009 if (tg3_flag(tp
, WOL_CAP
) && device_can_wakeup(&tp
->pdev
->dev
))
12010 wol
->supported
= WAKE_MAGIC
;
12012 wol
->supported
= 0;
12014 if (tg3_flag(tp
, WOL_ENABLE
) && device_can_wakeup(&tp
->pdev
->dev
))
12015 wol
->wolopts
= WAKE_MAGIC
;
12016 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
12019 static int tg3_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
12021 struct tg3
*tp
= netdev_priv(dev
);
12022 struct device
*dp
= &tp
->pdev
->dev
;
12024 if (wol
->wolopts
& ~WAKE_MAGIC
)
12026 if ((wol
->wolopts
& WAKE_MAGIC
) &&
12027 !(tg3_flag(tp
, WOL_CAP
) && device_can_wakeup(dp
)))
12030 device_set_wakeup_enable(dp
, wol
->wolopts
& WAKE_MAGIC
);
12032 spin_lock_bh(&tp
->lock
);
12033 if (device_may_wakeup(dp
))
12034 tg3_flag_set(tp
, WOL_ENABLE
);
12036 tg3_flag_clear(tp
, WOL_ENABLE
);
12037 spin_unlock_bh(&tp
->lock
);
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}

static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
12054 static int tg3_nway_reset(struct net_device
*dev
)
12056 struct tg3
*tp
= netdev_priv(dev
);
12059 if (!netif_running(dev
))
12062 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
12065 tg3_warn_mgmt_link_flap(tp
);
12067 if (tg3_flag(tp
, USE_PHYLIB
)) {
12068 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
12070 r
= phy_start_aneg(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
12074 spin_lock_bh(&tp
->lock
);
12076 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
12077 if (!tg3_readphy(tp
, MII_BMCR
, &bmcr
) &&
12078 ((bmcr
& BMCR_ANENABLE
) ||
12079 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
))) {
12080 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANRESTART
|
12084 spin_unlock_bh(&tp
->lock
);
12090 static void tg3_get_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
12092 struct tg3
*tp
= netdev_priv(dev
);
12094 ering
->rx_max_pending
= tp
->rx_std_ring_mask
;
12095 if (tg3_flag(tp
, JUMBO_RING_ENABLE
))
12096 ering
->rx_jumbo_max_pending
= tp
->rx_jmb_ring_mask
;
12098 ering
->rx_jumbo_max_pending
= 0;
12100 ering
->tx_max_pending
= TG3_TX_RING_SIZE
- 1;
12102 ering
->rx_pending
= tp
->rx_pending
;
12103 if (tg3_flag(tp
, JUMBO_RING_ENABLE
))
12104 ering
->rx_jumbo_pending
= tp
->rx_jumbo_pending
;
12106 ering
->rx_jumbo_pending
= 0;
12108 ering
->tx_pending
= tp
->napi
[0].tx_pending
;
12111 static int tg3_set_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
12113 struct tg3
*tp
= netdev_priv(dev
);
12114 int i
, irq_sync
= 0, err
= 0;
12116 if ((ering
->rx_pending
> tp
->rx_std_ring_mask
) ||
12117 (ering
->rx_jumbo_pending
> tp
->rx_jmb_ring_mask
) ||
12118 (ering
->tx_pending
> TG3_TX_RING_SIZE
- 1) ||
12119 (ering
->tx_pending
<= MAX_SKB_FRAGS
) ||
12120 (tg3_flag(tp
, TSO_BUG
) &&
12121 (ering
->tx_pending
<= (MAX_SKB_FRAGS
* 3))))
12124 if (netif_running(dev
)) {
12126 tg3_netif_stop(tp
);
12130 tg3_full_lock(tp
, irq_sync
);
12132 tp
->rx_pending
= ering
->rx_pending
;
12134 if (tg3_flag(tp
, MAX_RXPEND_64
) &&
12135 tp
->rx_pending
> 63)
12136 tp
->rx_pending
= 63;
12137 tp
->rx_jumbo_pending
= ering
->rx_jumbo_pending
;
12139 for (i
= 0; i
< tp
->irq_max
; i
++)
12140 tp
->napi
[i
].tx_pending
= ering
->tx_pending
;
12142 if (netif_running(dev
)) {
12143 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
12144 err
= tg3_restart_hw(tp
, false);
12146 tg3_netif_start(tp
);
12149 tg3_full_unlock(tp
);
12151 if (irq_sync
&& !err
)
12157 static void tg3_get_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
12159 struct tg3
*tp
= netdev_priv(dev
);
12161 epause
->autoneg
= !!tg3_flag(tp
, PAUSE_AUTONEG
);
12163 if (tp
->link_config
.flowctrl
& FLOW_CTRL_RX
)
12164 epause
->rx_pause
= 1;
12166 epause
->rx_pause
= 0;
12168 if (tp
->link_config
.flowctrl
& FLOW_CTRL_TX
)
12169 epause
->tx_pause
= 1;
12171 epause
->tx_pause
= 0;
12174 static int tg3_set_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
12176 struct tg3
*tp
= netdev_priv(dev
);
12179 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
)
12180 tg3_warn_mgmt_link_flap(tp
);
12182 if (tg3_flag(tp
, USE_PHYLIB
)) {
12184 struct phy_device
*phydev
;
12186 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
12188 if (!(phydev
->supported
& SUPPORTED_Pause
) ||
12189 (!(phydev
->supported
& SUPPORTED_Asym_Pause
) &&
12190 (epause
->rx_pause
!= epause
->tx_pause
)))
12193 tp
->link_config
.flowctrl
= 0;
12194 if (epause
->rx_pause
) {
12195 tp
->link_config
.flowctrl
|= FLOW_CTRL_RX
;
12197 if (epause
->tx_pause
) {
12198 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
12199 newadv
= ADVERTISED_Pause
;
12201 newadv
= ADVERTISED_Pause
|
12202 ADVERTISED_Asym_Pause
;
12203 } else if (epause
->tx_pause
) {
12204 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
12205 newadv
= ADVERTISED_Asym_Pause
;
12209 if (epause
->autoneg
)
12210 tg3_flag_set(tp
, PAUSE_AUTONEG
);
12212 tg3_flag_clear(tp
, PAUSE_AUTONEG
);
12214 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
12215 u32 oldadv
= phydev
->advertising
&
12216 (ADVERTISED_Pause
| ADVERTISED_Asym_Pause
);
12217 if (oldadv
!= newadv
) {
12218 phydev
->advertising
&=
12219 ~(ADVERTISED_Pause
|
12220 ADVERTISED_Asym_Pause
);
12221 phydev
->advertising
|= newadv
;
12222 if (phydev
->autoneg
) {
12224 * Always renegotiate the link to
12225 * inform our link partner of our
12226 * flow control settings, even if the
12227 * flow control is forced. Let
12228 * tg3_adjust_link() do the final
12229 * flow control setup.
12231 return phy_start_aneg(phydev
);
12235 if (!epause
->autoneg
)
12236 tg3_setup_flow_control(tp
, 0, 0);
12238 tp
->link_config
.advertising
&=
12239 ~(ADVERTISED_Pause
|
12240 ADVERTISED_Asym_Pause
);
12241 tp
->link_config
.advertising
|= newadv
;
12246 if (netif_running(dev
)) {
12247 tg3_netif_stop(tp
);
12251 tg3_full_lock(tp
, irq_sync
);
12253 if (epause
->autoneg
)
12254 tg3_flag_set(tp
, PAUSE_AUTONEG
);
12256 tg3_flag_clear(tp
, PAUSE_AUTONEG
);
12257 if (epause
->rx_pause
)
12258 tp
->link_config
.flowctrl
|= FLOW_CTRL_RX
;
12260 tp
->link_config
.flowctrl
&= ~FLOW_CTRL_RX
;
12261 if (epause
->tx_pause
)
12262 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
12264 tp
->link_config
.flowctrl
&= ~FLOW_CTRL_TX
;
12266 if (netif_running(dev
)) {
12267 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
12268 err
= tg3_restart_hw(tp
, false);
12270 tg3_netif_start(tp
);
12273 tg3_full_unlock(tp
);
12276 tp
->phy_flags
|= TG3_PHYFLG_USER_CONFIGURED
;
12281 static int tg3_get_sset_count(struct net_device
*dev
, int sset
)
12285 return TG3_NUM_TEST
;
12287 return TG3_NUM_STATS
;
12289 return -EOPNOTSUPP
;
12293 static int tg3_get_rxnfc(struct net_device
*dev
, struct ethtool_rxnfc
*info
,
12294 u32
*rules __always_unused
)
12296 struct tg3
*tp
= netdev_priv(dev
);
12298 if (!tg3_flag(tp
, SUPPORT_MSIX
))
12299 return -EOPNOTSUPP
;
12301 switch (info
->cmd
) {
12302 case ETHTOOL_GRXRINGS
:
12303 if (netif_running(tp
->dev
))
12304 info
->data
= tp
->rxq_cnt
;
12306 info
->data
= num_online_cpus();
12307 if (info
->data
> TG3_RSS_MAX_NUM_QS
)
12308 info
->data
= TG3_RSS_MAX_NUM_QS
;
12311 /* The first interrupt vector only
12312 * handles link interrupts.
12318 return -EOPNOTSUPP
;
12322 static u32
tg3_get_rxfh_indir_size(struct net_device
*dev
)
12325 struct tg3
*tp
= netdev_priv(dev
);
12327 if (tg3_flag(tp
, SUPPORT_MSIX
))
12328 size
= TG3_RSS_INDIR_TBL_SIZE
;
12333 static int tg3_get_rxfh_indir(struct net_device
*dev
, u32
*indir
)
12335 struct tg3
*tp
= netdev_priv(dev
);
12338 for (i
= 0; i
< TG3_RSS_INDIR_TBL_SIZE
; i
++)
12339 indir
[i
] = tp
->rss_ind_tbl
[i
];
12344 static int tg3_set_rxfh_indir(struct net_device
*dev
, const u32
*indir
)
12346 struct tg3
*tp
= netdev_priv(dev
);
12349 for (i
= 0; i
< TG3_RSS_INDIR_TBL_SIZE
; i
++)
12350 tp
->rss_ind_tbl
[i
] = indir
[i
];
12352 if (!netif_running(dev
) || !tg3_flag(tp
, ENABLE_RSS
))
12355 /* It is legal to write the indirection
12356 * table while the device is running.
12358 tg3_full_lock(tp
, 0);
12359 tg3_rss_write_indir_tbl(tp
);
12360 tg3_full_unlock(tp
);
12365 static void tg3_get_channels(struct net_device
*dev
,
12366 struct ethtool_channels
*channel
)
12368 struct tg3
*tp
= netdev_priv(dev
);
12369 u32 deflt_qs
= netif_get_num_default_rss_queues();
12371 channel
->max_rx
= tp
->rxq_max
;
12372 channel
->max_tx
= tp
->txq_max
;
12374 if (netif_running(dev
)) {
12375 channel
->rx_count
= tp
->rxq_cnt
;
12376 channel
->tx_count
= tp
->txq_cnt
;
12379 channel
->rx_count
= tp
->rxq_req
;
12381 channel
->rx_count
= min(deflt_qs
, tp
->rxq_max
);
12384 channel
->tx_count
= tp
->txq_req
;
12386 channel
->tx_count
= min(deflt_qs
, tp
->txq_max
);
12390 static int tg3_set_channels(struct net_device
*dev
,
12391 struct ethtool_channels
*channel
)
12393 struct tg3
*tp
= netdev_priv(dev
);
12395 if (!tg3_flag(tp
, SUPPORT_MSIX
))
12396 return -EOPNOTSUPP
;
12398 if (channel
->rx_count
> tp
->rxq_max
||
12399 channel
->tx_count
> tp
->txq_max
)
12402 tp
->rxq_req
= channel
->rx_count
;
12403 tp
->txq_req
= channel
->tx_count
;
12405 if (!netif_running(dev
))
12410 tg3_carrier_off(tp
);
12412 tg3_start(tp
, true, false, false);
12417 static void tg3_get_strings(struct net_device
*dev
, u32 stringset
, u8
*buf
)
12419 switch (stringset
) {
12421 memcpy(buf
, ðtool_stats_keys
, sizeof(ethtool_stats_keys
));
12424 memcpy(buf
, ðtool_test_keys
, sizeof(ethtool_test_keys
));
12427 WARN_ON(1); /* we need a WARN() */
12432 static int tg3_set_phys_id(struct net_device
*dev
,
12433 enum ethtool_phys_id_state state
)
12435 struct tg3
*tp
= netdev_priv(dev
);
12437 if (!netif_running(tp
->dev
))
12441 case ETHTOOL_ID_ACTIVE
:
12442 return 1; /* cycle on/off once per second */
12444 case ETHTOOL_ID_ON
:
12445 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
12446 LED_CTRL_1000MBPS_ON
|
12447 LED_CTRL_100MBPS_ON
|
12448 LED_CTRL_10MBPS_ON
|
12449 LED_CTRL_TRAFFIC_OVERRIDE
|
12450 LED_CTRL_TRAFFIC_BLINK
|
12451 LED_CTRL_TRAFFIC_LED
);
12454 case ETHTOOL_ID_OFF
:
12455 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
12456 LED_CTRL_TRAFFIC_OVERRIDE
);
12459 case ETHTOOL_ID_INACTIVE
:
12460 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
12467 static void tg3_get_ethtool_stats(struct net_device
*dev
,
12468 struct ethtool_stats
*estats
, u64
*tmp_stats
)
12470 struct tg3
*tp
= netdev_priv(dev
);
12473 tg3_get_estats(tp
, (struct tg3_ethtool_stats
*)tmp_stats
);
12475 memset(tmp_stats
, 0, sizeof(struct tg3_ethtool_stats
));
12478 static __be32
*tg3_vpd_readblock(struct tg3
*tp
, u32
*vpdlen
)
12482 u32 offset
= 0, len
= 0;
12485 if (tg3_flag(tp
, NO_NVRAM
) || tg3_nvram_read(tp
, 0, &magic
))
12488 if (magic
== TG3_EEPROM_MAGIC
) {
12489 for (offset
= TG3_NVM_DIR_START
;
12490 offset
< TG3_NVM_DIR_END
;
12491 offset
+= TG3_NVM_DIRENT_SIZE
) {
12492 if (tg3_nvram_read(tp
, offset
, &val
))
12495 if ((val
>> TG3_NVM_DIRTYPE_SHIFT
) ==
12496 TG3_NVM_DIRTYPE_EXTVPD
)
12500 if (offset
!= TG3_NVM_DIR_END
) {
12501 len
= (val
& TG3_NVM_DIRTYPE_LENMSK
) * 4;
12502 if (tg3_nvram_read(tp
, offset
+ 4, &offset
))
12505 offset
= tg3_nvram_logical_addr(tp
, offset
);
12509 if (!offset
|| !len
) {
12510 offset
= TG3_NVM_VPD_OFF
;
12511 len
= TG3_NVM_VPD_LEN
;
12514 buf
= kmalloc(len
, GFP_KERNEL
);
12518 if (magic
== TG3_EEPROM_MAGIC
) {
12519 for (i
= 0; i
< len
; i
+= 4) {
12520 /* The data is in little-endian format in NVRAM.
12521 * Use the big-endian read routines to preserve
12522 * the byte order as it exists in NVRAM.
12524 if (tg3_nvram_read_be32(tp
, offset
+ i
, &buf
[i
/4]))
12530 unsigned int pos
= 0;
12532 ptr
= (u8
*)&buf
[0];
12533 for (i
= 0; pos
< len
&& i
< 3; i
++, pos
+= cnt
, ptr
+= cnt
) {
12534 cnt
= pci_read_vpd(tp
->pdev
, pos
,
12536 if (cnt
== -ETIMEDOUT
|| cnt
== -EINTR
)
#define NVRAM_TEST_SIZE			0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE		0x20
#define NVRAM_SELFBOOT_DATA_SIZE	0x1c
12564 static int tg3_test_nvram(struct tg3
*tp
)
12566 u32 csum
, magic
, len
;
12568 int i
, j
, k
, err
= 0, size
;
12570 if (tg3_flag(tp
, NO_NVRAM
))
12573 if (tg3_nvram_read(tp
, 0, &magic
) != 0)
12576 if (magic
== TG3_EEPROM_MAGIC
)
12577 size
= NVRAM_TEST_SIZE
;
12578 else if ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) == TG3_EEPROM_MAGIC_FW
) {
12579 if ((magic
& TG3_EEPROM_SB_FORMAT_MASK
) ==
12580 TG3_EEPROM_SB_FORMAT_1
) {
12581 switch (magic
& TG3_EEPROM_SB_REVISION_MASK
) {
12582 case TG3_EEPROM_SB_REVISION_0
:
12583 size
= NVRAM_SELFBOOT_FORMAT1_0_SIZE
;
12585 case TG3_EEPROM_SB_REVISION_2
:
12586 size
= NVRAM_SELFBOOT_FORMAT1_2_SIZE
;
12588 case TG3_EEPROM_SB_REVISION_3
:
12589 size
= NVRAM_SELFBOOT_FORMAT1_3_SIZE
;
12591 case TG3_EEPROM_SB_REVISION_4
:
12592 size
= NVRAM_SELFBOOT_FORMAT1_4_SIZE
;
12594 case TG3_EEPROM_SB_REVISION_5
:
12595 size
= NVRAM_SELFBOOT_FORMAT1_5_SIZE
;
12597 case TG3_EEPROM_SB_REVISION_6
:
12598 size
= NVRAM_SELFBOOT_FORMAT1_6_SIZE
;
12605 } else if ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) == TG3_EEPROM_MAGIC_HW
)
12606 size
= NVRAM_SELFBOOT_HW_SIZE
;
12610 buf
= kmalloc(size
, GFP_KERNEL
);
12615 for (i
= 0, j
= 0; i
< size
; i
+= 4, j
++) {
12616 err
= tg3_nvram_read_be32(tp
, i
, &buf
[j
]);
12623 /* Selfboot format */
12624 magic
= be32_to_cpu(buf
[0]);
12625 if ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) ==
12626 TG3_EEPROM_MAGIC_FW
) {
12627 u8
*buf8
= (u8
*) buf
, csum8
= 0;
12629 if ((magic
& TG3_EEPROM_SB_REVISION_MASK
) ==
12630 TG3_EEPROM_SB_REVISION_2
) {
12631 /* For rev 2, the csum doesn't include the MBA. */
12632 for (i
= 0; i
< TG3_EEPROM_SB_F1R2_MBA_OFF
; i
++)
12634 for (i
= TG3_EEPROM_SB_F1R2_MBA_OFF
+ 4; i
< size
; i
++)
12637 for (i
= 0; i
< size
; i
++)
12650 if ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) ==
12651 TG3_EEPROM_MAGIC_HW
) {
12652 u8 data
[NVRAM_SELFBOOT_DATA_SIZE
];
12653 u8 parity
[NVRAM_SELFBOOT_DATA_SIZE
];
12654 u8
*buf8
= (u8
*) buf
;
12656 /* Separate the parity bits and the data bytes. */
12657 for (i
= 0, j
= 0, k
= 0; i
< NVRAM_SELFBOOT_HW_SIZE
; i
++) {
12658 if ((i
== 0) || (i
== 8)) {
12662 for (l
= 0, msk
= 0x80; l
< 7; l
++, msk
>>= 1)
12663 parity
[k
++] = buf8
[i
] & msk
;
12665 } else if (i
== 16) {
12669 for (l
= 0, msk
= 0x20; l
< 6; l
++, msk
>>= 1)
12670 parity
[k
++] = buf8
[i
] & msk
;
12673 for (l
= 0, msk
= 0x80; l
< 8; l
++, msk
>>= 1)
12674 parity
[k
++] = buf8
[i
] & msk
;
12677 data
[j
++] = buf8
[i
];
12681 for (i
= 0; i
< NVRAM_SELFBOOT_DATA_SIZE
; i
++) {
12682 u8 hw8
= hweight8(data
[i
]);
12684 if ((hw8
& 0x1) && parity
[i
])
12686 else if (!(hw8
& 0x1) && !parity
[i
])
12695 /* Bootstrap checksum at offset 0x10 */
12696 csum
= calc_crc((unsigned char *) buf
, 0x10);
12697 if (csum
!= le32_to_cpu(buf
[0x10/4]))
12700 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12701 csum
= calc_crc((unsigned char *) &buf
[0x74/4], 0x88);
12702 if (csum
!= le32_to_cpu(buf
[0xfc/4]))
12707 buf
= tg3_vpd_readblock(tp
, &len
);
12711 i
= pci_vpd_find_tag((u8
*)buf
, 0, len
, PCI_VPD_LRDT_RO_DATA
);
12713 j
= pci_vpd_lrdt_size(&((u8
*)buf
)[i
]);
12717 if (i
+ PCI_VPD_LRDT_TAG_SIZE
+ j
> len
)
12720 i
+= PCI_VPD_LRDT_TAG_SIZE
;
12721 j
= pci_vpd_find_info_keyword((u8
*)buf
, i
, j
,
12722 PCI_VPD_RO_KEYWORD_CHKSUM
);
12726 j
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
12728 for (i
= 0; i
<= j
; i
++)
12729 csum8
+= ((u8
*)buf
)[i
];
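/* Link self-test: poll for link-up for a few seconds (longer on copper than
 * on SerDes) before declaring the test failed.
 */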
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (tp->link_up)
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
12769 /* Only test the commonly used registers */
12770 static int tg3_test_registers(struct tg3
*tp
)
12772 int i
, is_5705
, is_5750
;
12773 u32 offset
, read_mask
, write_mask
, val
, save_val
, read_val
;
12777 #define TG3_FL_5705 0x1
12778 #define TG3_FL_NOT_5705 0x2
12779 #define TG3_FL_NOT_5788 0x4
12780 #define TG3_FL_NOT_5750 0x8
12784 /* MAC Control Registers */
12785 { MAC_MODE
, TG3_FL_NOT_5705
,
12786 0x00000000, 0x00ef6f8c },
12787 { MAC_MODE
, TG3_FL_5705
,
12788 0x00000000, 0x01ef6b8c },
12789 { MAC_STATUS
, TG3_FL_NOT_5705
,
12790 0x03800107, 0x00000000 },
12791 { MAC_STATUS
, TG3_FL_5705
,
12792 0x03800100, 0x00000000 },
12793 { MAC_ADDR_0_HIGH
, 0x0000,
12794 0x00000000, 0x0000ffff },
12795 { MAC_ADDR_0_LOW
, 0x0000,
12796 0x00000000, 0xffffffff },
12797 { MAC_RX_MTU_SIZE
, 0x0000,
12798 0x00000000, 0x0000ffff },
12799 { MAC_TX_MODE
, 0x0000,
12800 0x00000000, 0x00000070 },
12801 { MAC_TX_LENGTHS
, 0x0000,
12802 0x00000000, 0x00003fff },
12803 { MAC_RX_MODE
, TG3_FL_NOT_5705
,
12804 0x00000000, 0x000007fc },
12805 { MAC_RX_MODE
, TG3_FL_5705
,
12806 0x00000000, 0x000007dc },
12807 { MAC_HASH_REG_0
, 0x0000,
12808 0x00000000, 0xffffffff },
12809 { MAC_HASH_REG_1
, 0x0000,
12810 0x00000000, 0xffffffff },
12811 { MAC_HASH_REG_2
, 0x0000,
12812 0x00000000, 0xffffffff },
12813 { MAC_HASH_REG_3
, 0x0000,
12814 0x00000000, 0xffffffff },
12816 /* Receive Data and Receive BD Initiator Control Registers. */
12817 { RCVDBDI_JUMBO_BD
+0, TG3_FL_NOT_5705
,
12818 0x00000000, 0xffffffff },
12819 { RCVDBDI_JUMBO_BD
+4, TG3_FL_NOT_5705
,
12820 0x00000000, 0xffffffff },
12821 { RCVDBDI_JUMBO_BD
+8, TG3_FL_NOT_5705
,
12822 0x00000000, 0x00000003 },
12823 { RCVDBDI_JUMBO_BD
+0xc, TG3_FL_NOT_5705
,
12824 0x00000000, 0xffffffff },
12825 { RCVDBDI_STD_BD
+0, 0x0000,
12826 0x00000000, 0xffffffff },
12827 { RCVDBDI_STD_BD
+4, 0x0000,
12828 0x00000000, 0xffffffff },
12829 { RCVDBDI_STD_BD
+8, 0x0000,
12830 0x00000000, 0xffff0002 },
12831 { RCVDBDI_STD_BD
+0xc, 0x0000,
12832 0x00000000, 0xffffffff },
12834 /* Receive BD Initiator Control Registers. */
12835 { RCVBDI_STD_THRESH
, TG3_FL_NOT_5705
,
12836 0x00000000, 0xffffffff },
12837 { RCVBDI_STD_THRESH
, TG3_FL_5705
,
12838 0x00000000, 0x000003ff },
12839 { RCVBDI_JUMBO_THRESH
, TG3_FL_NOT_5705
,
12840 0x00000000, 0xffffffff },
12842 /* Host Coalescing Control Registers. */
12843 { HOSTCC_MODE
, TG3_FL_NOT_5705
,
12844 0x00000000, 0x00000004 },
12845 { HOSTCC_MODE
, TG3_FL_5705
,
12846 0x00000000, 0x000000f6 },
12847 { HOSTCC_RXCOL_TICKS
, TG3_FL_NOT_5705
,
12848 0x00000000, 0xffffffff },
12849 { HOSTCC_RXCOL_TICKS
, TG3_FL_5705
,
12850 0x00000000, 0x000003ff },
12851 { HOSTCC_TXCOL_TICKS
, TG3_FL_NOT_5705
,
12852 0x00000000, 0xffffffff },
12853 { HOSTCC_TXCOL_TICKS
, TG3_FL_5705
,
12854 0x00000000, 0x000003ff },
12855 { HOSTCC_RXMAX_FRAMES
, TG3_FL_NOT_5705
,
12856 0x00000000, 0xffffffff },
12857 { HOSTCC_RXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
12858 0x00000000, 0x000000ff },
12859 { HOSTCC_TXMAX_FRAMES
, TG3_FL_NOT_5705
,
12860 0x00000000, 0xffffffff },
12861 { HOSTCC_TXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
12862 0x00000000, 0x000000ff },
12863 { HOSTCC_RXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
12864 0x00000000, 0xffffffff },
12865 { HOSTCC_TXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
12866 0x00000000, 0xffffffff },
12867 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
12868 0x00000000, 0xffffffff },
12869 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
12870 0x00000000, 0x000000ff },
12871 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
12872 0x00000000, 0xffffffff },
12873 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
12874 0x00000000, 0x000000ff },
12875 { HOSTCC_STAT_COAL_TICKS
, TG3_FL_NOT_5705
,
12876 0x00000000, 0xffffffff },
12877 { HOSTCC_STATS_BLK_HOST_ADDR
, TG3_FL_NOT_5705
,
12878 0x00000000, 0xffffffff },
12879 { HOSTCC_STATS_BLK_HOST_ADDR
+4, TG3_FL_NOT_5705
,
12880 0x00000000, 0xffffffff },
12881 { HOSTCC_STATUS_BLK_HOST_ADDR
, 0x0000,
12882 0x00000000, 0xffffffff },
12883 { HOSTCC_STATUS_BLK_HOST_ADDR
+4, 0x0000,
12884 0x00000000, 0xffffffff },
12885 { HOSTCC_STATS_BLK_NIC_ADDR
, 0x0000,
12886 0xffffffff, 0x00000000 },
12887 { HOSTCC_STATUS_BLK_NIC_ADDR
, 0x0000,
12888 0xffffffff, 0x00000000 },
12890 /* Buffer Manager Control Registers. */
12891 { BUFMGR_MB_POOL_ADDR
, TG3_FL_NOT_5750
,
12892 0x00000000, 0x007fff80 },
12893 { BUFMGR_MB_POOL_SIZE
, TG3_FL_NOT_5750
,
12894 0x00000000, 0x007fffff },
12895 { BUFMGR_MB_RDMA_LOW_WATER
, 0x0000,
12896 0x00000000, 0x0000003f },
12897 { BUFMGR_MB_MACRX_LOW_WATER
, 0x0000,
12898 0x00000000, 0x000001ff },
12899 { BUFMGR_MB_HIGH_WATER
, 0x0000,
12900 0x00000000, 0x000001ff },
12901 { BUFMGR_DMA_DESC_POOL_ADDR
, TG3_FL_NOT_5705
,
12902 0xffffffff, 0x00000000 },
12903 { BUFMGR_DMA_DESC_POOL_SIZE
, TG3_FL_NOT_5705
,
12904 0xffffffff, 0x00000000 },
12906 /* Mailbox Registers */
12907 { GRCMBOX_RCVSTD_PROD_IDX
+4, 0x0000,
12908 0x00000000, 0x000001ff },
12909 { GRCMBOX_RCVJUMBO_PROD_IDX
+4, TG3_FL_NOT_5705
,
12910 0x00000000, 0x000001ff },
12911 { GRCMBOX_RCVRET_CON_IDX_0
+4, 0x0000,
12912 0x00000000, 0x000007ff },
12913 { GRCMBOX_SNDHOST_PROD_IDX_0
+4, 0x0000,
12914 0x00000000, 0x000001ff },
12916 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12919 is_5705
= is_5750
= 0;
12920 if (tg3_flag(tp
, 5705_PLUS
)) {
12922 if (tg3_flag(tp
, 5750_PLUS
))
12926 for (i
= 0; reg_tbl
[i
].offset
!= 0xffff; i
++) {
12927 if (is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5705
))
12930 if (!is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_5705
))
12933 if (tg3_flag(tp
, IS_5788
) &&
12934 (reg_tbl
[i
].flags
& TG3_FL_NOT_5788
))
12937 if (is_5750
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5750
))
12940 offset
= (u32
) reg_tbl
[i
].offset
;
12941 read_mask
= reg_tbl
[i
].read_mask
;
12942 write_mask
= reg_tbl
[i
].write_mask
;
12944 /* Save the original register content */
12945 save_val
= tr32(offset
);
12947 /* Determine the read-only value. */
12948 read_val
= save_val
& read_mask
;
12950 /* Write zero to the register, then make sure the read-only bits
12951 * are not changed and the read/write bits are all zeros.
12955 val
= tr32(offset
);
12957 /* Test the read-only and read/write bits. */
12958 if (((val
& read_mask
) != read_val
) || (val
& write_mask
))
12961 /* Write ones to all the bits defined by RdMask and WrMask, then
12962 * make sure the read-only bits are not changed and the
12963 * read/write bits are all ones.
12965 tw32(offset
, read_mask
| write_mask
);
12967 val
= tr32(offset
);
12969 /* Test the read-only bits. */
12970 if ((val
& read_mask
) != read_val
)
12973 /* Test the read/write bits. */
12974 if ((val
& write_mask
) != write_mask
)
12977 tw32(offset
, save_val
);
12983 if (netif_msg_hw(tp
))
12984 netdev_err(tp
->dev
,
12985 "Register test failed at offset %x\n", offset
);
12986 tw32(offset
, save_val
);
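/* Internal memory self-test helper: write each test pattern across the given
 * region and read every word back for comparison.
 */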
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}

	return 0;
}
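/* tg3_test_memory() below walks an ASIC-specific table of internal memory
 * regions and runs tg3_do_mem_test() on each entry.
 */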
13009 static int tg3_test_memory(struct tg3
*tp
)
13011 static struct mem_entry
{
13014 } mem_tbl_570x
[] = {
13015 { 0x00000000, 0x00b50},
13016 { 0x00002000, 0x1c000},
13017 { 0xffffffff, 0x00000}
13018 }, mem_tbl_5705
[] = {
13019 { 0x00000100, 0x0000c},
13020 { 0x00000200, 0x00008},
13021 { 0x00004000, 0x00800},
13022 { 0x00006000, 0x01000},
13023 { 0x00008000, 0x02000},
13024 { 0x00010000, 0x0e000},
13025 { 0xffffffff, 0x00000}
13026 }, mem_tbl_5755
[] = {
13027 { 0x00000200, 0x00008},
13028 { 0x00004000, 0x00800},
13029 { 0x00006000, 0x00800},
13030 { 0x00008000, 0x02000},
13031 { 0x00010000, 0x0c000},
13032 { 0xffffffff, 0x00000}
13033 }, mem_tbl_5906
[] = {
13034 { 0x00000200, 0x00008},
13035 { 0x00004000, 0x00400},
13036 { 0x00006000, 0x00400},
13037 { 0x00008000, 0x01000},
13038 { 0x00010000, 0x01000},
13039 { 0xffffffff, 0x00000}
13040 }, mem_tbl_5717
[] = {
13041 { 0x00000200, 0x00008},
13042 { 0x00010000, 0x0a000},
13043 { 0x00020000, 0x13c00},
13044 { 0xffffffff, 0x00000}
13045 }, mem_tbl_57765
[] = {
13046 { 0x00000200, 0x00008},
13047 { 0x00004000, 0x00800},
13048 { 0x00006000, 0x09800},
13049 { 0x00010000, 0x0a000},
13050 { 0xffffffff, 0x00000}
13052 struct mem_entry
*mem_tbl
;
13056 if (tg3_flag(tp
, 5717_PLUS
))
13057 mem_tbl
= mem_tbl_5717
;
13058 else if (tg3_flag(tp
, 57765_CLASS
) ||
13059 tg3_asic_rev(tp
) == ASIC_REV_5762
)
13060 mem_tbl
= mem_tbl_57765
;
13061 else if (tg3_flag(tp
, 5755_PLUS
))
13062 mem_tbl
= mem_tbl_5755
;
13063 else if (tg3_asic_rev(tp
) == ASIC_REV_5906
)
13064 mem_tbl
= mem_tbl_5906
;
13065 else if (tg3_flag(tp
, 5705_PLUS
))
13066 mem_tbl
= mem_tbl_5705
;
13068 mem_tbl
= mem_tbl_570x
;
13070 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++) {
13071 err
= tg3_do_mem_test(tp
, mem_tbl
[i
].offset
, mem_tbl
[i
].len
);
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
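/* The template above is a canned EtherType + IPv4 + TCP header used to build
 * the oversized frame for the TSO loopback test; tg3_run_loopback() patches
 * in the real total-length field before transmitting.
 */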
13102 static int tg3_run_loopback(struct tg3
*tp
, u32 pktsz
, bool tso_loopback
)
13104 u32 rx_start_idx
, rx_idx
, tx_idx
, opaque_key
;
13105 u32 base_flags
= 0, mss
= 0, desc_idx
, coal_now
, data_off
, val
;
13107 struct sk_buff
*skb
;
13108 u8
*tx_data
, *rx_data
;
13110 int num_pkts
, tx_len
, rx_len
, i
, err
;
13111 struct tg3_rx_buffer_desc
*desc
;
13112 struct tg3_napi
*tnapi
, *rnapi
;
13113 struct tg3_rx_prodring_set
*tpr
= &tp
->napi
[0].prodring
;
13115 tnapi
= &tp
->napi
[0];
13116 rnapi
= &tp
->napi
[0];
13117 if (tp
->irq_cnt
> 1) {
13118 if (tg3_flag(tp
, ENABLE_RSS
))
13119 rnapi
= &tp
->napi
[1];
13120 if (tg3_flag(tp
, ENABLE_TSS
))
13121 tnapi
= &tp
->napi
[1];
13123 coal_now
= tnapi
->coal_now
| rnapi
->coal_now
;
13128 skb
= netdev_alloc_skb(tp
->dev
, tx_len
);
13132 tx_data
= skb_put(skb
, tx_len
);
13133 memcpy(tx_data
, tp
->dev
->dev_addr
, 6);
13134 memset(tx_data
+ 6, 0x0, 8);
13136 tw32(MAC_RX_MTU_SIZE
, tx_len
+ ETH_FCS_LEN
);
13138 if (tso_loopback
) {
13139 struct iphdr
*iph
= (struct iphdr
*)&tx_data
[ETH_HLEN
];
13141 u32 hdr_len
= TG3_TSO_IP_HDR_LEN
+ TG3_TSO_TCP_HDR_LEN
+
13142 TG3_TSO_TCP_OPT_LEN
;
13144 memcpy(tx_data
+ ETH_ALEN
* 2, tg3_tso_header
,
13145 sizeof(tg3_tso_header
));
13148 val
= tx_len
- ETH_ALEN
* 2 - sizeof(tg3_tso_header
);
13149 num_pkts
= DIV_ROUND_UP(val
, TG3_TSO_MSS
);
13151 /* Set the total length field in the IP header */
13152 iph
->tot_len
= htons((u16
)(mss
+ hdr_len
));
13154 base_flags
= (TXD_FLAG_CPU_PRE_DMA
|
13155 TXD_FLAG_CPU_POST_DMA
);
13157 if (tg3_flag(tp
, HW_TSO_1
) ||
13158 tg3_flag(tp
, HW_TSO_2
) ||
13159 tg3_flag(tp
, HW_TSO_3
)) {
13161 val
= ETH_HLEN
+ TG3_TSO_IP_HDR_LEN
;
13162 th
= (struct tcphdr
*)&tx_data
[val
];
13165 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
13167 if (tg3_flag(tp
, HW_TSO_3
)) {
13168 mss
|= (hdr_len
& 0xc) << 12;
13169 if (hdr_len
& 0x10)
13170 base_flags
|= 0x00000010;
13171 base_flags
|= (hdr_len
& 0x3e0) << 5;
13172 } else if (tg3_flag(tp
, HW_TSO_2
))
13173 mss
|= hdr_len
<< 9;
13174 else if (tg3_flag(tp
, HW_TSO_1
) ||
13175 tg3_asic_rev(tp
) == ASIC_REV_5705
) {
13176 mss
|= (TG3_TSO_TCP_OPT_LEN
<< 9);
13178 base_flags
|= (TG3_TSO_TCP_OPT_LEN
<< 10);
13181 data_off
= ETH_ALEN
* 2 + sizeof(tg3_tso_header
);
13184 data_off
= ETH_HLEN
;
13186 if (tg3_flag(tp
, USE_JUMBO_BDFLAG
) &&
13187 tx_len
> VLAN_ETH_FRAME_LEN
)
13188 base_flags
|= TXD_FLAG_JMB_PKT
;
13191 for (i
= data_off
; i
< tx_len
; i
++)
13192 tx_data
[i
] = (u8
) (i
& 0xff);
13194 map
= pci_map_single(tp
->pdev
, skb
->data
, tx_len
, PCI_DMA_TODEVICE
);
13195 if (pci_dma_mapping_error(tp
->pdev
, map
)) {
13196 dev_kfree_skb(skb
);
13200 val
= tnapi
->tx_prod
;
13201 tnapi
->tx_buffers
[val
].skb
= skb
;
13202 dma_unmap_addr_set(&tnapi
->tx_buffers
[val
], mapping
, map
);
13204 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
13209 rx_start_idx
= rnapi
->hw_status
->idx
[0].rx_producer
;
13211 budget
= tg3_tx_avail(tnapi
);
13212 if (tg3_tx_frag_set(tnapi
, &val
, &budget
, map
, tx_len
,
13213 base_flags
| TXD_FLAG_END
, mss
, 0)) {
13214 tnapi
->tx_buffers
[val
].skb
= NULL
;
13215 dev_kfree_skb(skb
);
13221 /* Sync BD data before updating mailbox */
13224 tw32_tx_mbox(tnapi
->prodmbox
, tnapi
->tx_prod
);
13225 tr32_mailbox(tnapi
->prodmbox
);
13229 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13230 for (i
= 0; i
< 35; i
++) {
13231 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
13236 tx_idx
= tnapi
->hw_status
->idx
[0].tx_consumer
;
13237 rx_idx
= rnapi
->hw_status
->idx
[0].rx_producer
;
13238 if ((tx_idx
== tnapi
->tx_prod
) &&
13239 (rx_idx
== (rx_start_idx
+ num_pkts
)))
13243 tg3_tx_skb_unmap(tnapi
, tnapi
->tx_prod
- 1, -1);
13244 dev_kfree_skb(skb
);
13246 if (tx_idx
!= tnapi
->tx_prod
)
13249 if (rx_idx
!= rx_start_idx
+ num_pkts
)
13253 while (rx_idx
!= rx_start_idx
) {
13254 desc
= &rnapi
->rx_rcb
[rx_start_idx
++];
13255 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
13256 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
13258 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
13259 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
))
13262 rx_len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
)
13265 if (!tso_loopback
) {
13266 if (rx_len
!= tx_len
)
13269 if (pktsz
<= TG3_RX_STD_DMA_SZ
- ETH_FCS_LEN
) {
13270 if (opaque_key
!= RXD_OPAQUE_RING_STD
)
13273 if (opaque_key
!= RXD_OPAQUE_RING_JUMBO
)
13276 } else if ((desc
->type_flags
& RXD_FLAG_TCPUDP_CSUM
) &&
13277 (desc
->ip_tcp_csum
& RXD_TCPCSUM_MASK
)
13278 >> RXD_TCPCSUM_SHIFT
!= 0xffff) {
13282 if (opaque_key
== RXD_OPAQUE_RING_STD
) {
13283 rx_data
= tpr
->rx_std_buffers
[desc_idx
].data
;
13284 map
= dma_unmap_addr(&tpr
->rx_std_buffers
[desc_idx
],
13286 } else if (opaque_key
== RXD_OPAQUE_RING_JUMBO
) {
13287 rx_data
= tpr
->rx_jmb_buffers
[desc_idx
].data
;
13288 map
= dma_unmap_addr(&tpr
->rx_jmb_buffers
[desc_idx
],
13293 pci_dma_sync_single_for_cpu(tp
->pdev
, map
, rx_len
,
13294 PCI_DMA_FROMDEVICE
);
13296 rx_data
+= TG3_RX_OFFSET(tp
);
13297 for (i
= data_off
; i
< rx_len
; i
++, val
++) {
13298 if (*(rx_data
+ i
) != (u8
) (val
& 0xff))
13305 /* tg3_free_rings will unmap and free the rx_data */
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)
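/* Each loopback variant (MAC, internal PHY, external) reports its result as
 * a combination of the STD/JMB/TSO failure bits above in the corresponding
 * data[] slot of the ethtool self-test.
 */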
13318 static int tg3_test_loopback(struct tg3
*tp
, u64
*data
, bool do_extlpbk
)
13322 u32 jmb_pkt_sz
= 9000;
13325 jmb_pkt_sz
= tp
->dma_limit
- ETH_HLEN
;
13327 eee_cap
= tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
;
13328 tp
->phy_flags
&= ~TG3_PHYFLG_EEE_CAP
;
13330 if (!netif_running(tp
->dev
)) {
13331 data
[TG3_MAC_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
13332 data
[TG3_PHY_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
13334 data
[TG3_EXT_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
13338 err
= tg3_reset_hw(tp
, true);
13340 data
[TG3_MAC_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
13341 data
[TG3_PHY_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
13343 data
[TG3_EXT_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
13347 if (tg3_flag(tp
, ENABLE_RSS
)) {
13350 /* Reroute all rx packets to the 1st queue */
13351 for (i
= MAC_RSS_INDIR_TBL_0
;
13352 i
< MAC_RSS_INDIR_TBL_0
+ TG3_RSS_INDIR_TBL_SIZE
; i
+= 4)
13356 /* HW errata - mac loopback fails in some cases on 5780.
13357 * Normal traffic and PHY loopback are not affected by
13358 * errata. Also, the MAC loopback test is deprecated for
13359 * all newer ASIC revisions.
13361 if (tg3_asic_rev(tp
) != ASIC_REV_5780
&&
13362 !tg3_flag(tp
, CPMU_PRESENT
)) {
13363 tg3_mac_loopback(tp
, true);
13365 if (tg3_run_loopback(tp
, ETH_FRAME_LEN
, false))
13366 data
[TG3_MAC_LOOPB_TEST
] |= TG3_STD_LOOPBACK_FAILED
;
13368 if (tg3_flag(tp
, JUMBO_RING_ENABLE
) &&
13369 tg3_run_loopback(tp
, jmb_pkt_sz
+ ETH_HLEN
, false))
13370 data
[TG3_MAC_LOOPB_TEST
] |= TG3_JMB_LOOPBACK_FAILED
;
13372 tg3_mac_loopback(tp
, false);
13375 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
13376 !tg3_flag(tp
, USE_PHYLIB
)) {
13379 tg3_phy_lpbk_set(tp
, 0, false);
13381 /* Wait for link */
13382 for (i
= 0; i
< 100; i
++) {
13383 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
13388 if (tg3_run_loopback(tp
, ETH_FRAME_LEN
, false))
13389 data
[TG3_PHY_LOOPB_TEST
] |= TG3_STD_LOOPBACK_FAILED
;
13390 if (tg3_flag(tp
, TSO_CAPABLE
) &&
13391 tg3_run_loopback(tp
, ETH_FRAME_LEN
, true))
13392 data
[TG3_PHY_LOOPB_TEST
] |= TG3_TSO_LOOPBACK_FAILED
;
13393 if (tg3_flag(tp
, JUMBO_RING_ENABLE
) &&
13394 tg3_run_loopback(tp
, jmb_pkt_sz
+ ETH_HLEN
, false))
13395 data
[TG3_PHY_LOOPB_TEST
] |= TG3_JMB_LOOPBACK_FAILED
;
13398 tg3_phy_lpbk_set(tp
, 0, true);
13400 /* All link indications report up, but the hardware
13401 * isn't really ready for about 20 msec. Double it
13406 if (tg3_run_loopback(tp
, ETH_FRAME_LEN
, false))
13407 data
[TG3_EXT_LOOPB_TEST
] |=
13408 TG3_STD_LOOPBACK_FAILED
;
13409 if (tg3_flag(tp
, TSO_CAPABLE
) &&
13410 tg3_run_loopback(tp
, ETH_FRAME_LEN
, true))
13411 data
[TG3_EXT_LOOPB_TEST
] |=
13412 TG3_TSO_LOOPBACK_FAILED
;
13413 if (tg3_flag(tp
, JUMBO_RING_ENABLE
) &&
13414 tg3_run_loopback(tp
, jmb_pkt_sz
+ ETH_HLEN
, false))
13415 data
[TG3_EXT_LOOPB_TEST
] |=
13416 TG3_JMB_LOOPBACK_FAILED
;
13419 /* Re-enable gphy autopowerdown. */
13420 if (tp
->phy_flags
& TG3_PHYFLG_ENABLE_APD
)
13421 tg3_phy_toggle_apd(tp
, true);
13424 err
= (data
[TG3_MAC_LOOPB_TEST
] | data
[TG3_PHY_LOOPB_TEST
] |
13425 data
[TG3_EXT_LOOPB_TEST
]) ? -EIO
: 0;
13428 tp
->phy_flags
|= eee_cap
;
13433 static void tg3_self_test(struct net_device
*dev
, struct ethtool_test
*etest
,
13436 struct tg3
*tp
= netdev_priv(dev
);
13437 bool doextlpbk
= etest
->flags
& ETH_TEST_FL_EXTERNAL_LB
;
13439 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) {
13440 if (tg3_power_up(tp
)) {
13441 etest
->flags
|= ETH_TEST_FL_FAILED
;
13442 memset(data
, 1, sizeof(u64
) * TG3_NUM_TEST
);
13445 tg3_ape_driver_state_change(tp
, RESET_KIND_INIT
);
13448 memset(data
, 0, sizeof(u64
) * TG3_NUM_TEST
);
13450 if (tg3_test_nvram(tp
) != 0) {
13451 etest
->flags
|= ETH_TEST_FL_FAILED
;
13452 data
[TG3_NVRAM_TEST
] = 1;
13454 if (!doextlpbk
&& tg3_test_link(tp
)) {
13455 etest
->flags
|= ETH_TEST_FL_FAILED
;
13456 data
[TG3_LINK_TEST
] = 1;
13458 if (etest
->flags
& ETH_TEST_FL_OFFLINE
) {
13459 int err
, err2
= 0, irq_sync
= 0;
13461 if (netif_running(dev
)) {
13463 tg3_netif_stop(tp
);
13467 tg3_full_lock(tp
, irq_sync
);
13468 tg3_halt(tp
, RESET_KIND_SUSPEND
, 1);
13469 err
= tg3_nvram_lock(tp
);
13470 tg3_halt_cpu(tp
, RX_CPU_BASE
);
13471 if (!tg3_flag(tp
, 5705_PLUS
))
13472 tg3_halt_cpu(tp
, TX_CPU_BASE
);
13474 tg3_nvram_unlock(tp
);
13476 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
13479 if (tg3_test_registers(tp
) != 0) {
13480 etest
->flags
|= ETH_TEST_FL_FAILED
;
13481 data
[TG3_REGISTER_TEST
] = 1;
13484 if (tg3_test_memory(tp
) != 0) {
13485 etest
->flags
|= ETH_TEST_FL_FAILED
;
13486 data
[TG3_MEMORY_TEST
] = 1;
13490 etest
->flags
|= ETH_TEST_FL_EXTERNAL_LB_DONE
;
13492 if (tg3_test_loopback(tp
, data
, doextlpbk
))
13493 etest
->flags
|= ETH_TEST_FL_FAILED
;
13495 tg3_full_unlock(tp
);
13497 if (tg3_test_interrupt(tp
) != 0) {
13498 etest
->flags
|= ETH_TEST_FL_FAILED
;
13499 data
[TG3_INTERRUPT_TEST
] = 1;
13502 tg3_full_lock(tp
, 0);
13504 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
13505 if (netif_running(dev
)) {
13506 tg3_flag_set(tp
, INIT_COMPLETE
);
13507 err2
= tg3_restart_hw(tp
, true);
13509 tg3_netif_start(tp
);
13512 tg3_full_unlock(tp
);
13514 if (irq_sync
&& !err2
)
13517 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
13518 tg3_power_down(tp
);
13522 static int tg3_hwtstamp_ioctl(struct net_device
*dev
,
13523 struct ifreq
*ifr
, int cmd
)
13525 struct tg3
*tp
= netdev_priv(dev
);
13526 struct hwtstamp_config stmpconf
;
13528 if (!tg3_flag(tp
, PTP_CAPABLE
))
13531 if (copy_from_user(&stmpconf
, ifr
->ifr_data
, sizeof(stmpconf
)))
13534 if (stmpconf
.flags
)
13537 switch (stmpconf
.tx_type
) {
13538 case HWTSTAMP_TX_ON
:
13539 tg3_flag_set(tp
, TX_TSTAMP_EN
);
13541 case HWTSTAMP_TX_OFF
:
13542 tg3_flag_clear(tp
, TX_TSTAMP_EN
);
13548 switch (stmpconf
.rx_filter
) {
13549 case HWTSTAMP_FILTER_NONE
:
13552 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT
:
13553 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V1_EN
|
13554 TG3_RX_PTP_CTL_ALL_V1_EVENTS
;
13556 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC
:
13557 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V1_EN
|
13558 TG3_RX_PTP_CTL_SYNC_EVNT
;
13560 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ
:
13561 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V1_EN
|
13562 TG3_RX_PTP_CTL_DELAY_REQ
;
13564 case HWTSTAMP_FILTER_PTP_V2_EVENT
:
13565 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_EN
|
13566 TG3_RX_PTP_CTL_ALL_V2_EVENTS
;
13568 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT
:
13569 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN
|
13570 TG3_RX_PTP_CTL_ALL_V2_EVENTS
;
13572 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT
:
13573 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN
|
13574 TG3_RX_PTP_CTL_ALL_V2_EVENTS
;
13576 case HWTSTAMP_FILTER_PTP_V2_SYNC
:
13577 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_EN
|
13578 TG3_RX_PTP_CTL_SYNC_EVNT
;
13580 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC
:
13581 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN
|
13582 TG3_RX_PTP_CTL_SYNC_EVNT
;
13584 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC
:
13585 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN
|
13586 TG3_RX_PTP_CTL_SYNC_EVNT
;
13588 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ
:
13589 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_EN
|
13590 TG3_RX_PTP_CTL_DELAY_REQ
;
13592 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ
:
13593 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN
|
13594 TG3_RX_PTP_CTL_DELAY_REQ
;
13596 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ
:
13597 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN
|
13598 TG3_RX_PTP_CTL_DELAY_REQ
;
13604 if (netif_running(dev
) && tp
->rxptpctl
)
13605 tw32(TG3_RX_PTP_CTL
,
13606 tp
->rxptpctl
| TG3_RX_PTP_CTL_HWTS_INTERLOCK
);
13608 return copy_to_user(ifr
->ifr_data
, &stmpconf
, sizeof(stmpconf
)) ?
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_ioctl(dev, ifr, cmd);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
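/*
 * Illustrative sketch, not part of the driver: tg3_get_coalesce() and
 * tg3_set_coalesce() above are reached from user space through the
 * ETHTOOL_GCOALESCE/ETHTOOL_SCOALESCE ioctls (what "ethtool -c/-C" issues).
 * Reading the current values before writing matters here, because the
 * handler rejects a configuration whose usecs and frames are both zero for a
 * direction.  The helper name and "eth0" are assumptions for this example.
 *
 *	#include <linux/ethtool.h>
 *	#include <linux/sockios.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *
 *	static int tune_rx_coalesce(int sock, unsigned int usecs,
 *				    unsigned int frames)
 *	{
 *		struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
 *		struct ifreq ifr;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ifr.ifr_data = (void *)&ec;
 *
 *		if (ioctl(sock, SIOCETHTOOL, &ifr))	// read current values
 *			return -1;
 *
 *		ec.cmd = ETHTOOL_SCOALESCE;		// write back a tweak
 *		ec.rx_coalesce_usecs = usecs;
 *		ec.rx_max_coalesced_frames = frames;
 *		return ioctl(sock, SIOCETHTOOL, &ifr);
 *	}
 */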
static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev, "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	if (edata->advertised != tp->eee.advertised) {
		netdev_warn(tp->dev,
			    "Direct manipulation of EEE advertisement is not supported\n");
		return -EINVAL;
	}

	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
		netdev_warn(tp->dev,
			    "Maximal Tx Lpi timer supported is %#x(u)\n",
			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
		return -EINVAL;
	}

	tp->eee = *edata;

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(tp->dev)) {
		tg3_full_lock(tp, 0);
		tg3_setup_eee(tp);
		tg3_phy_reset(tp);
		tg3_full_unlock(tp);
	}

	return 0;
}

static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev,
			    "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	*edata = tp->eee;
	return 0;
}
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
	.get_eee		= tg3_get_eee,
	.set_eee		= tg3_set_eee,
};
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats) {
		spin_unlock_bh(&tp->lock);
		return &tp->net_stats_prev;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);

	return stats;
}

static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}

static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
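/*
 * For orientation (a sketch under the usual netdev conventions, not new
 * functionality): the two ops tables above are hooked up to the net_device
 * in this driver's PCI probe path, roughly as
 *
 *	dev->netdev_ops = &tg3_netdev_ops;
 *	dev->ethtool_ops = &tg3_ethtool_ops;
 *
 * after which the networking core dispatches the ndo_* and ethtool callbacks
 * straight into the handlers defined in this file.
 */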
static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}

static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing. We want to operate on the
			 * 16-bit value at offset 0xf2. The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses. This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU. The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
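/*
 * The sizing loop in tg3_get_eeprom_size() above relies on address
 * wrap-around: once the probe offset exceeds the real part size, reads alias
 * back to offset 0 and return the magic signature again.  A condensed sketch
 * of the same idea (illustrative only, with hypothetical read_word() and
 * max_size stand-ins):
 *
 *	u32 size = 0x10;			// smallest probe offset
 *	while (size < max_size) {
 *		u32 probe;
 *
 *		if (read_word(size, &probe))	// read at the probe offset
 *			break;
 *		if (probe == magic)		// wrapped around to offset 0,
 *			break;			//  so 'size' is the part size
 *		size <<= 1;			// otherwise try the next power of two
 *	}
 */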
14005 static void tg3_get_nvram_info(struct tg3
*tp
)
14009 nvcfg1
= tr32(NVRAM_CFG1
);
14010 if (nvcfg1
& NVRAM_CFG1_FLASHIF_ENAB
) {
14011 tg3_flag_set(tp
, FLASH
);
14013 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
14014 tw32(NVRAM_CFG1
, nvcfg1
);
14017 if (tg3_asic_rev(tp
) == ASIC_REV_5750
||
14018 tg3_flag(tp
, 5780_CLASS
)) {
14019 switch (nvcfg1
& NVRAM_CFG1_VENDOR_MASK
) {
14020 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED
:
14021 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14022 tp
->nvram_pagesize
= ATMEL_AT45DB0X1B_PAGE_SIZE
;
14023 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14025 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED
:
14026 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14027 tp
->nvram_pagesize
= ATMEL_AT25F512_PAGE_SIZE
;
14029 case FLASH_VENDOR_ATMEL_EEPROM
:
14030 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14031 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
14032 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14034 case FLASH_VENDOR_ST
:
14035 tp
->nvram_jedecnum
= JEDEC_ST
;
14036 tp
->nvram_pagesize
= ST_M45PEX0_PAGE_SIZE
;
14037 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14039 case FLASH_VENDOR_SAIFUN
:
14040 tp
->nvram_jedecnum
= JEDEC_SAIFUN
;
14041 tp
->nvram_pagesize
= SAIFUN_SA25F0XX_PAGE_SIZE
;
14043 case FLASH_VENDOR_SST_SMALL
:
14044 case FLASH_VENDOR_SST_LARGE
:
14045 tp
->nvram_jedecnum
= JEDEC_SST
;
14046 tp
->nvram_pagesize
= SST_25VF0X0_PAGE_SIZE
;
14050 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14051 tp
->nvram_pagesize
= ATMEL_AT45DB0X1B_PAGE_SIZE
;
14052 tg3_flag_set(tp
, NVRAM_BUFFERED
);
static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
14083 static void tg3_get_5752_nvram_info(struct tg3
*tp
)
14087 nvcfg1
= tr32(NVRAM_CFG1
);
14089 /* NVRAM protection for TPM */
14090 if (nvcfg1
& (1 << 27))
14091 tg3_flag_set(tp
, PROTECTED_NVRAM
);
14093 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
14094 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ
:
14095 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ
:
14096 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14097 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14099 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
14100 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14101 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14102 tg3_flag_set(tp
, FLASH
);
14104 case FLASH_5752VENDOR_ST_M45PE10
:
14105 case FLASH_5752VENDOR_ST_M45PE20
:
14106 case FLASH_5752VENDOR_ST_M45PE40
:
14107 tp
->nvram_jedecnum
= JEDEC_ST
;
14108 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14109 tg3_flag_set(tp
, FLASH
);
14113 if (tg3_flag(tp
, FLASH
)) {
14114 tg3_nvram_get_pagesize(tp
, nvcfg1
);
14116 /* For eeprom, set pagesize to maximum eeprom size */
14117 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
14119 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
14120 tw32(NVRAM_CFG1
, nvcfg1
);
14124 static void tg3_get_5755_nvram_info(struct tg3
*tp
)
14126 u32 nvcfg1
, protect
= 0;
14128 nvcfg1
= tr32(NVRAM_CFG1
);
14130 /* NVRAM protection for TPM */
14131 if (nvcfg1
& (1 << 27)) {
14132 tg3_flag_set(tp
, PROTECTED_NVRAM
);
14136 nvcfg1
&= NVRAM_CFG1_5752VENDOR_MASK
;
14138 case FLASH_5755VENDOR_ATMEL_FLASH_1
:
14139 case FLASH_5755VENDOR_ATMEL_FLASH_2
:
14140 case FLASH_5755VENDOR_ATMEL_FLASH_3
:
14141 case FLASH_5755VENDOR_ATMEL_FLASH_5
:
14142 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14143 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14144 tg3_flag_set(tp
, FLASH
);
14145 tp
->nvram_pagesize
= 264;
14146 if (nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_1
||
14147 nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_5
)
14148 tp
->nvram_size
= (protect
? 0x3e200 :
14149 TG3_NVRAM_SIZE_512KB
);
14150 else if (nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_2
)
14151 tp
->nvram_size
= (protect
? 0x1f200 :
14152 TG3_NVRAM_SIZE_256KB
);
14154 tp
->nvram_size
= (protect
? 0x1f200 :
14155 TG3_NVRAM_SIZE_128KB
);
14157 case FLASH_5752VENDOR_ST_M45PE10
:
14158 case FLASH_5752VENDOR_ST_M45PE20
:
14159 case FLASH_5752VENDOR_ST_M45PE40
:
14160 tp
->nvram_jedecnum
= JEDEC_ST
;
14161 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14162 tg3_flag_set(tp
, FLASH
);
14163 tp
->nvram_pagesize
= 256;
14164 if (nvcfg1
== FLASH_5752VENDOR_ST_M45PE10
)
14165 tp
->nvram_size
= (protect
?
14166 TG3_NVRAM_SIZE_64KB
:
14167 TG3_NVRAM_SIZE_128KB
);
14168 else if (nvcfg1
== FLASH_5752VENDOR_ST_M45PE20
)
14169 tp
->nvram_size
= (protect
?
14170 TG3_NVRAM_SIZE_64KB
:
14171 TG3_NVRAM_SIZE_256KB
);
14173 tp
->nvram_size
= (protect
?
14174 TG3_NVRAM_SIZE_128KB
:
14175 TG3_NVRAM_SIZE_512KB
);
14180 static void tg3_get_5787_nvram_info(struct tg3
*tp
)
14184 nvcfg1
= tr32(NVRAM_CFG1
);
14186 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
14187 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ
:
14188 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ
:
14189 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ
:
14190 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ
:
14191 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14192 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14193 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
14195 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
14196 tw32(NVRAM_CFG1
, nvcfg1
);
14198 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
14199 case FLASH_5755VENDOR_ATMEL_FLASH_1
:
14200 case FLASH_5755VENDOR_ATMEL_FLASH_2
:
14201 case FLASH_5755VENDOR_ATMEL_FLASH_3
:
14202 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14203 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14204 tg3_flag_set(tp
, FLASH
);
14205 tp
->nvram_pagesize
= 264;
14207 case FLASH_5752VENDOR_ST_M45PE10
:
14208 case FLASH_5752VENDOR_ST_M45PE20
:
14209 case FLASH_5752VENDOR_ST_M45PE40
:
14210 tp
->nvram_jedecnum
= JEDEC_ST
;
14211 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14212 tg3_flag_set(tp
, FLASH
);
14213 tp
->nvram_pagesize
= 256;
14218 static void tg3_get_5761_nvram_info(struct tg3
*tp
)
14220 u32 nvcfg1
, protect
= 0;
14222 nvcfg1
= tr32(NVRAM_CFG1
);
14224 /* NVRAM protection for TPM */
14225 if (nvcfg1
& (1 << 27)) {
14226 tg3_flag_set(tp
, PROTECTED_NVRAM
);
14230 nvcfg1
&= NVRAM_CFG1_5752VENDOR_MASK
;
14232 case FLASH_5761VENDOR_ATMEL_ADB021D
:
14233 case FLASH_5761VENDOR_ATMEL_ADB041D
:
14234 case FLASH_5761VENDOR_ATMEL_ADB081D
:
14235 case FLASH_5761VENDOR_ATMEL_ADB161D
:
14236 case FLASH_5761VENDOR_ATMEL_MDB021D
:
14237 case FLASH_5761VENDOR_ATMEL_MDB041D
:
14238 case FLASH_5761VENDOR_ATMEL_MDB081D
:
14239 case FLASH_5761VENDOR_ATMEL_MDB161D
:
14240 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14241 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14242 tg3_flag_set(tp
, FLASH
);
14243 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
14244 tp
->nvram_pagesize
= 256;
14246 case FLASH_5761VENDOR_ST_A_M45PE20
:
14247 case FLASH_5761VENDOR_ST_A_M45PE40
:
14248 case FLASH_5761VENDOR_ST_A_M45PE80
:
14249 case FLASH_5761VENDOR_ST_A_M45PE16
:
14250 case FLASH_5761VENDOR_ST_M_M45PE20
:
14251 case FLASH_5761VENDOR_ST_M_M45PE40
:
14252 case FLASH_5761VENDOR_ST_M_M45PE80
:
14253 case FLASH_5761VENDOR_ST_M_M45PE16
:
14254 tp
->nvram_jedecnum
= JEDEC_ST
;
14255 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14256 tg3_flag_set(tp
, FLASH
);
14257 tp
->nvram_pagesize
= 256;
14262 tp
->nvram_size
= tr32(NVRAM_ADDR_LOCKOUT
);
14265 case FLASH_5761VENDOR_ATMEL_ADB161D
:
14266 case FLASH_5761VENDOR_ATMEL_MDB161D
:
14267 case FLASH_5761VENDOR_ST_A_M45PE16
:
14268 case FLASH_5761VENDOR_ST_M_M45PE16
:
14269 tp
->nvram_size
= TG3_NVRAM_SIZE_2MB
;
14271 case FLASH_5761VENDOR_ATMEL_ADB081D
:
14272 case FLASH_5761VENDOR_ATMEL_MDB081D
:
14273 case FLASH_5761VENDOR_ST_A_M45PE80
:
14274 case FLASH_5761VENDOR_ST_M_M45PE80
:
14275 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
14277 case FLASH_5761VENDOR_ATMEL_ADB041D
:
14278 case FLASH_5761VENDOR_ATMEL_MDB041D
:
14279 case FLASH_5761VENDOR_ST_A_M45PE40
:
14280 case FLASH_5761VENDOR_ST_M_M45PE40
:
14281 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
14283 case FLASH_5761VENDOR_ATMEL_ADB021D
:
14284 case FLASH_5761VENDOR_ATMEL_MDB021D
:
14285 case FLASH_5761VENDOR_ST_A_M45PE20
:
14286 case FLASH_5761VENDOR_ST_M_M45PE20
:
14287 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
14300 static void tg3_get_57780_nvram_info(struct tg3
*tp
)
14304 nvcfg1
= tr32(NVRAM_CFG1
);
14306 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
14307 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ
:
14308 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ
:
14309 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14310 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14311 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
14313 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
14314 tw32(NVRAM_CFG1
, nvcfg1
);
14316 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
14317 case FLASH_57780VENDOR_ATMEL_AT45DB011D
:
14318 case FLASH_57780VENDOR_ATMEL_AT45DB011B
:
14319 case FLASH_57780VENDOR_ATMEL_AT45DB021D
:
14320 case FLASH_57780VENDOR_ATMEL_AT45DB021B
:
14321 case FLASH_57780VENDOR_ATMEL_AT45DB041D
:
14322 case FLASH_57780VENDOR_ATMEL_AT45DB041B
:
14323 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14324 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14325 tg3_flag_set(tp
, FLASH
);
14327 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
14328 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
14329 case FLASH_57780VENDOR_ATMEL_AT45DB011D
:
14330 case FLASH_57780VENDOR_ATMEL_AT45DB011B
:
14331 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
14333 case FLASH_57780VENDOR_ATMEL_AT45DB021D
:
14334 case FLASH_57780VENDOR_ATMEL_AT45DB021B
:
14335 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
14337 case FLASH_57780VENDOR_ATMEL_AT45DB041D
:
14338 case FLASH_57780VENDOR_ATMEL_AT45DB041B
:
14339 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
14343 case FLASH_5752VENDOR_ST_M45PE10
:
14344 case FLASH_5752VENDOR_ST_M45PE20
:
14345 case FLASH_5752VENDOR_ST_M45PE40
:
14346 tp
->nvram_jedecnum
= JEDEC_ST
;
14347 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14348 tg3_flag_set(tp
, FLASH
);
14350 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
14351 case FLASH_5752VENDOR_ST_M45PE10
:
14352 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
14354 case FLASH_5752VENDOR_ST_M45PE20
:
14355 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
14357 case FLASH_5752VENDOR_ST_M45PE40
:
14358 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
14363 tg3_flag_set(tp
, NO_NVRAM
);
14367 tg3_nvram_get_pagesize(tp
, nvcfg1
);
14368 if (tp
->nvram_pagesize
!= 264 && tp
->nvram_pagesize
!= 528)
14369 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
14373 static void tg3_get_5717_nvram_info(struct tg3
*tp
)
14377 nvcfg1
= tr32(NVRAM_CFG1
);
14379 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
14380 case FLASH_5717VENDOR_ATMEL_EEPROM
:
14381 case FLASH_5717VENDOR_MICRO_EEPROM
:
14382 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14383 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14384 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
14386 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
14387 tw32(NVRAM_CFG1
, nvcfg1
);
14389 case FLASH_5717VENDOR_ATMEL_MDB011D
:
14390 case FLASH_5717VENDOR_ATMEL_ADB011B
:
14391 case FLASH_5717VENDOR_ATMEL_ADB011D
:
14392 case FLASH_5717VENDOR_ATMEL_MDB021D
:
14393 case FLASH_5717VENDOR_ATMEL_ADB021B
:
14394 case FLASH_5717VENDOR_ATMEL_ADB021D
:
14395 case FLASH_5717VENDOR_ATMEL_45USPT
:
14396 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14397 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14398 tg3_flag_set(tp
, FLASH
);
14400 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
14401 case FLASH_5717VENDOR_ATMEL_MDB021D
:
14402 /* Detect size with tg3_nvram_get_size() */
14404 case FLASH_5717VENDOR_ATMEL_ADB021B
:
14405 case FLASH_5717VENDOR_ATMEL_ADB021D
:
14406 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
14409 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
14413 case FLASH_5717VENDOR_ST_M_M25PE10
:
14414 case FLASH_5717VENDOR_ST_A_M25PE10
:
14415 case FLASH_5717VENDOR_ST_M_M45PE10
:
14416 case FLASH_5717VENDOR_ST_A_M45PE10
:
14417 case FLASH_5717VENDOR_ST_M_M25PE20
:
14418 case FLASH_5717VENDOR_ST_A_M25PE20
:
14419 case FLASH_5717VENDOR_ST_M_M45PE20
:
14420 case FLASH_5717VENDOR_ST_A_M45PE20
:
14421 case FLASH_5717VENDOR_ST_25USPT
:
14422 case FLASH_5717VENDOR_ST_45USPT
:
14423 tp
->nvram_jedecnum
= JEDEC_ST
;
14424 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14425 tg3_flag_set(tp
, FLASH
);
14427 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
14428 case FLASH_5717VENDOR_ST_M_M25PE20
:
14429 case FLASH_5717VENDOR_ST_M_M45PE20
:
14430 /* Detect size with tg3_nvram_get_size() */
14432 case FLASH_5717VENDOR_ST_A_M25PE20
:
14433 case FLASH_5717VENDOR_ST_A_M45PE20
:
14434 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
14437 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
14442 tg3_flag_set(tp
, NO_NVRAM
);
14446 tg3_nvram_get_pagesize(tp
, nvcfg1
);
14447 if (tp
->nvram_pagesize
!= 264 && tp
->nvram_pagesize
!= 528)
14448 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
14451 static void tg3_get_5720_nvram_info(struct tg3
*tp
)
14453 u32 nvcfg1
, nvmpinstrp
;
14455 nvcfg1
= tr32(NVRAM_CFG1
);
14456 nvmpinstrp
= nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
;
14458 if (tg3_asic_rev(tp
) == ASIC_REV_5762
) {
14459 if (!(nvcfg1
& NVRAM_CFG1_5762VENDOR_MASK
)) {
14460 tg3_flag_set(tp
, NO_NVRAM
);
14464 switch (nvmpinstrp
) {
14465 case FLASH_5762_EEPROM_HD
:
14466 nvmpinstrp
= FLASH_5720_EEPROM_HD
;
14468 case FLASH_5762_EEPROM_LD
:
14469 nvmpinstrp
= FLASH_5720_EEPROM_LD
;
14471 case FLASH_5720VENDOR_M_ST_M45PE20
:
14472 /* This pinstrap supports multiple sizes, so force it
14473 * to read the actual size from location 0xf0.
14475 nvmpinstrp
= FLASH_5720VENDOR_ST_45USPT
;
14480 switch (nvmpinstrp
) {
14481 case FLASH_5720_EEPROM_HD
:
14482 case FLASH_5720_EEPROM_LD
:
14483 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14484 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14486 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
14487 tw32(NVRAM_CFG1
, nvcfg1
);
14488 if (nvmpinstrp
== FLASH_5720_EEPROM_HD
)
14489 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
14491 tp
->nvram_pagesize
= ATMEL_AT24C02_CHIP_SIZE
;
14493 case FLASH_5720VENDOR_M_ATMEL_DB011D
:
14494 case FLASH_5720VENDOR_A_ATMEL_DB011B
:
14495 case FLASH_5720VENDOR_A_ATMEL_DB011D
:
14496 case FLASH_5720VENDOR_M_ATMEL_DB021D
:
14497 case FLASH_5720VENDOR_A_ATMEL_DB021B
:
14498 case FLASH_5720VENDOR_A_ATMEL_DB021D
:
14499 case FLASH_5720VENDOR_M_ATMEL_DB041D
:
14500 case FLASH_5720VENDOR_A_ATMEL_DB041B
:
14501 case FLASH_5720VENDOR_A_ATMEL_DB041D
:
14502 case FLASH_5720VENDOR_M_ATMEL_DB081D
:
14503 case FLASH_5720VENDOR_A_ATMEL_DB081D
:
14504 case FLASH_5720VENDOR_ATMEL_45USPT
:
14505 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14506 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14507 tg3_flag_set(tp
, FLASH
);
14509 switch (nvmpinstrp
) {
14510 case FLASH_5720VENDOR_M_ATMEL_DB021D
:
14511 case FLASH_5720VENDOR_A_ATMEL_DB021B
:
14512 case FLASH_5720VENDOR_A_ATMEL_DB021D
:
14513 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
14515 case FLASH_5720VENDOR_M_ATMEL_DB041D
:
14516 case FLASH_5720VENDOR_A_ATMEL_DB041B
:
14517 case FLASH_5720VENDOR_A_ATMEL_DB041D
:
14518 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
14520 case FLASH_5720VENDOR_M_ATMEL_DB081D
:
14521 case FLASH_5720VENDOR_A_ATMEL_DB081D
:
14522 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
14525 if (tg3_asic_rev(tp
) != ASIC_REV_5762
)
14526 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
14530 case FLASH_5720VENDOR_M_ST_M25PE10
:
14531 case FLASH_5720VENDOR_M_ST_M45PE10
:
14532 case FLASH_5720VENDOR_A_ST_M25PE10
:
14533 case FLASH_5720VENDOR_A_ST_M45PE10
:
14534 case FLASH_5720VENDOR_M_ST_M25PE20
:
14535 case FLASH_5720VENDOR_M_ST_M45PE20
:
14536 case FLASH_5720VENDOR_A_ST_M25PE20
:
14537 case FLASH_5720VENDOR_A_ST_M45PE20
:
14538 case FLASH_5720VENDOR_M_ST_M25PE40
:
14539 case FLASH_5720VENDOR_M_ST_M45PE40
:
14540 case FLASH_5720VENDOR_A_ST_M25PE40
:
14541 case FLASH_5720VENDOR_A_ST_M45PE40
:
14542 case FLASH_5720VENDOR_M_ST_M25PE80
:
14543 case FLASH_5720VENDOR_M_ST_M45PE80
:
14544 case FLASH_5720VENDOR_A_ST_M25PE80
:
14545 case FLASH_5720VENDOR_A_ST_M45PE80
:
14546 case FLASH_5720VENDOR_ST_25USPT
:
14547 case FLASH_5720VENDOR_ST_45USPT
:
14548 tp
->nvram_jedecnum
= JEDEC_ST
;
14549 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14550 tg3_flag_set(tp
, FLASH
);
14552 switch (nvmpinstrp
) {
14553 case FLASH_5720VENDOR_M_ST_M25PE20
:
14554 case FLASH_5720VENDOR_M_ST_M45PE20
:
14555 case FLASH_5720VENDOR_A_ST_M25PE20
:
14556 case FLASH_5720VENDOR_A_ST_M45PE20
:
14557 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
14559 case FLASH_5720VENDOR_M_ST_M25PE40
:
14560 case FLASH_5720VENDOR_M_ST_M45PE40
:
14561 case FLASH_5720VENDOR_A_ST_M25PE40
:
14562 case FLASH_5720VENDOR_A_ST_M45PE40
:
14563 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
14565 case FLASH_5720VENDOR_M_ST_M25PE80
:
14566 case FLASH_5720VENDOR_M_ST_M45PE80
:
14567 case FLASH_5720VENDOR_A_ST_M25PE80
:
14568 case FLASH_5720VENDOR_A_ST_M45PE80
:
14569 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
14572 if (tg3_asic_rev(tp
) != ASIC_REV_5762
)
14573 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
14578 tg3_flag_set(tp
, NO_NVRAM
);
14582 tg3_nvram_get_pagesize(tp
, nvcfg1
);
14583 if (tp
->nvram_pagesize
!= 264 && tp
->nvram_pagesize
!= 528)
14584 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
14586 if (tg3_asic_rev(tp
) == ASIC_REV_5762
) {
14589 if (tg3_nvram_read(tp
, 0, &val
))
14592 if (val
!= TG3_EEPROM_MAGIC
&&
14593 (val
& TG3_EEPROM_MAGIC_FW_MSK
) != TG3_EEPROM_MAGIC_FW
)
14594 tg3_flag_set(tp
, NO_NVRAM
);
14598 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14599 static void tg3_nvram_init(struct tg3
*tp
)
14601 if (tg3_flag(tp
, IS_SSB_CORE
)) {
14602 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14603 tg3_flag_clear(tp
, NVRAM
);
14604 tg3_flag_clear(tp
, NVRAM_BUFFERED
);
14605 tg3_flag_set(tp
, NO_NVRAM
);
14609 tw32_f(GRC_EEPROM_ADDR
,
14610 (EEPROM_ADDR_FSM_RESET
|
14611 (EEPROM_DEFAULT_CLOCK_PERIOD
<<
14612 EEPROM_ADDR_CLKPERD_SHIFT
)));
14616 /* Enable seeprom accesses. */
14617 tw32_f(GRC_LOCAL_CTRL
,
14618 tr32(GRC_LOCAL_CTRL
) | GRC_LCLCTRL_AUTO_SEEPROM
);
14621 if (tg3_asic_rev(tp
) != ASIC_REV_5700
&&
14622 tg3_asic_rev(tp
) != ASIC_REV_5701
) {
14623 tg3_flag_set(tp
, NVRAM
);
14625 if (tg3_nvram_lock(tp
)) {
14626 netdev_warn(tp
->dev
,
14627 "Cannot get nvram lock, %s failed\n",
14631 tg3_enable_nvram_access(tp
);
14633 tp
->nvram_size
= 0;
14635 if (tg3_asic_rev(tp
) == ASIC_REV_5752
)
14636 tg3_get_5752_nvram_info(tp
);
14637 else if (tg3_asic_rev(tp
) == ASIC_REV_5755
)
14638 tg3_get_5755_nvram_info(tp
);
14639 else if (tg3_asic_rev(tp
) == ASIC_REV_5787
||
14640 tg3_asic_rev(tp
) == ASIC_REV_5784
||
14641 tg3_asic_rev(tp
) == ASIC_REV_5785
)
14642 tg3_get_5787_nvram_info(tp
);
14643 else if (tg3_asic_rev(tp
) == ASIC_REV_5761
)
14644 tg3_get_5761_nvram_info(tp
);
14645 else if (tg3_asic_rev(tp
) == ASIC_REV_5906
)
14646 tg3_get_5906_nvram_info(tp
);
14647 else if (tg3_asic_rev(tp
) == ASIC_REV_57780
||
14648 tg3_flag(tp
, 57765_CLASS
))
14649 tg3_get_57780_nvram_info(tp
);
14650 else if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
14651 tg3_asic_rev(tp
) == ASIC_REV_5719
)
14652 tg3_get_5717_nvram_info(tp
);
14653 else if (tg3_asic_rev(tp
) == ASIC_REV_5720
||
14654 tg3_asic_rev(tp
) == ASIC_REV_5762
)
14655 tg3_get_5720_nvram_info(tp
);
14657 tg3_get_nvram_info(tp
);
14659 if (tp
->nvram_size
== 0)
14660 tg3_get_nvram_size(tp
);
14662 tg3_disable_nvram_access(tp
);
14663 tg3_nvram_unlock(tp
);
14666 tg3_flag_clear(tp
, NVRAM
);
14667 tg3_flag_clear(tp
, NVRAM_BUFFERED
);
14669 tg3_get_eeprom_size(tp
);
14673 struct subsys_tbl_ent
{
14674 u16 subsys_vendor
, subsys_devid
;
14678 static struct subsys_tbl_ent subsys_id_to_phy_id
[] = {
14679 /* Broadcom boards. */
14680 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14681 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6
, TG3_PHY_ID_BCM5401
},
14682 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14683 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5
, TG3_PHY_ID_BCM5701
},
14684 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14685 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6
, TG3_PHY_ID_BCM8002
},
14686 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14687 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9
, 0 },
14688 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14689 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1
, TG3_PHY_ID_BCM5701
},
14690 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14691 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8
, TG3_PHY_ID_BCM5701
},
14692 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14693 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7
, 0 },
14694 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14695 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10
, TG3_PHY_ID_BCM5701
},
14696 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14697 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12
, TG3_PHY_ID_BCM5701
},
14698 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14699 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1
, TG3_PHY_ID_BCM5703
},
14700 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14701 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2
, TG3_PHY_ID_BCM5703
},
14704 { TG3PCI_SUBVENDOR_ID_3COM
,
14705 TG3PCI_SUBDEVICE_ID_3COM_3C996T
, TG3_PHY_ID_BCM5401
},
14706 { TG3PCI_SUBVENDOR_ID_3COM
,
14707 TG3PCI_SUBDEVICE_ID_3COM_3C996BT
, TG3_PHY_ID_BCM5701
},
14708 { TG3PCI_SUBVENDOR_ID_3COM
,
14709 TG3PCI_SUBDEVICE_ID_3COM_3C996SX
, 0 },
14710 { TG3PCI_SUBVENDOR_ID_3COM
,
14711 TG3PCI_SUBDEVICE_ID_3COM_3C1000T
, TG3_PHY_ID_BCM5701
},
14712 { TG3PCI_SUBVENDOR_ID_3COM
,
14713 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01
, TG3_PHY_ID_BCM5701
},
14716 { TG3PCI_SUBVENDOR_ID_DELL
,
14717 TG3PCI_SUBDEVICE_ID_DELL_VIPER
, TG3_PHY_ID_BCM5401
},
14718 { TG3PCI_SUBVENDOR_ID_DELL
,
14719 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR
, TG3_PHY_ID_BCM5401
},
14720 { TG3PCI_SUBVENDOR_ID_DELL
,
14721 TG3PCI_SUBDEVICE_ID_DELL_MERLOT
, TG3_PHY_ID_BCM5411
},
14722 { TG3PCI_SUBVENDOR_ID_DELL
,
14723 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT
, TG3_PHY_ID_BCM5411
},
14725 /* Compaq boards. */
14726 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
14727 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE
, TG3_PHY_ID_BCM5701
},
14728 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
14729 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2
, TG3_PHY_ID_BCM5701
},
14730 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
14731 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING
, 0 },
14732 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
14733 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780
, TG3_PHY_ID_BCM5701
},
14734 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
14735 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2
, TG3_PHY_ID_BCM5701
},
14738 { TG3PCI_SUBVENDOR_ID_IBM
,
14739 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2
, 0 }
14742 static struct subsys_tbl_ent
*tg3_lookup_by_subsys(struct tg3
*tp
)
14746 for (i
= 0; i
< ARRAY_SIZE(subsys_id_to_phy_id
); i
++) {
14747 if ((subsys_id_to_phy_id
[i
].subsys_vendor
==
14748 tp
->pdev
->subsystem_vendor
) &&
14749 (subsys_id_to_phy_id
[i
].subsys_devid
==
14750 tp
->pdev
->subsystem_device
))
14751 return &subsys_id_to_phy_id
[i
];
14756 static void tg3_get_eeprom_hw_cfg(struct tg3
*tp
)
14760 tp
->phy_id
= TG3_PHY_ID_INVALID
;
14761 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
14763 /* Assume an onboard device and WOL capable by default. */
14764 tg3_flag_set(tp
, EEPROM_WRITE_PROT
);
14765 tg3_flag_set(tp
, WOL_CAP
);
14767 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
14768 if (!(tr32(PCIE_TRANSACTION_CFG
) & PCIE_TRANS_CFG_LOM
)) {
14769 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
14770 tg3_flag_set(tp
, IS_NIC
);
14772 val
= tr32(VCPU_CFGSHDW
);
14773 if (val
& VCPU_CFGSHDW_ASPM_DBNC
)
14774 tg3_flag_set(tp
, ASPM_WORKAROUND
);
14775 if ((val
& VCPU_CFGSHDW_WOL_ENABLE
) &&
14776 (val
& VCPU_CFGSHDW_WOL_MAGPKT
)) {
14777 tg3_flag_set(tp
, WOL_ENABLE
);
14778 device_set_wakeup_enable(&tp
->pdev
->dev
, true);
14783 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
14784 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
14785 u32 nic_cfg
, led_cfg
;
14786 u32 nic_phy_id
, ver
, cfg2
= 0, cfg4
= 0, eeprom_phy_id
;
14787 int eeprom_phy_serdes
= 0;
14789 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
14790 tp
->nic_sram_data_cfg
= nic_cfg
;
14792 tg3_read_mem(tp
, NIC_SRAM_DATA_VER
, &ver
);
14793 ver
>>= NIC_SRAM_DATA_VER_SHIFT
;
14794 if (tg3_asic_rev(tp
) != ASIC_REV_5700
&&
14795 tg3_asic_rev(tp
) != ASIC_REV_5701
&&
14796 tg3_asic_rev(tp
) != ASIC_REV_5703
&&
14797 (ver
> 0) && (ver
< 0x100))
14798 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_2
, &cfg2
);
14800 if (tg3_asic_rev(tp
) == ASIC_REV_5785
)
14801 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_4
, &cfg4
);
14803 if ((nic_cfg
& NIC_SRAM_DATA_CFG_PHY_TYPE_MASK
) ==
14804 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER
)
14805 eeprom_phy_serdes
= 1;
14807 tg3_read_mem(tp
, NIC_SRAM_DATA_PHY_ID
, &nic_phy_id
);
14808 if (nic_phy_id
!= 0) {
14809 u32 id1
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID1_MASK
;
14810 u32 id2
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID2_MASK
;
14812 eeprom_phy_id
= (id1
>> 16) << 10;
14813 eeprom_phy_id
|= (id2
& 0xfc00) << 16;
14814 eeprom_phy_id
|= (id2
& 0x03ff) << 0;
14818 tp
->phy_id
= eeprom_phy_id
;
14819 if (eeprom_phy_serdes
) {
14820 if (!tg3_flag(tp
, 5705_PLUS
))
14821 tp
->phy_flags
|= TG3_PHYFLG_PHY_SERDES
;
14823 tp
->phy_flags
|= TG3_PHYFLG_MII_SERDES
;
14826 if (tg3_flag(tp
, 5750_PLUS
))
14827 led_cfg
= cfg2
& (NIC_SRAM_DATA_CFG_LED_MODE_MASK
|
14828 SHASTA_EXT_LED_MODE_MASK
);
14830 led_cfg
= nic_cfg
& NIC_SRAM_DATA_CFG_LED_MODE_MASK
;
14834 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1
:
14835 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
14838 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2
:
14839 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
14842 case NIC_SRAM_DATA_CFG_LED_MODE_MAC
:
14843 tp
->led_ctrl
= LED_CTRL_MODE_MAC
;
14845 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14846 * read on some older 5700/5701 bootcode.
14848 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
14849 tg3_asic_rev(tp
) == ASIC_REV_5701
)
14850 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
14854 case SHASTA_EXT_LED_SHARED
:
14855 tp
->led_ctrl
= LED_CTRL_MODE_SHARED
;
14856 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5750_A0
&&
14857 tg3_chip_rev_id(tp
) != CHIPREV_ID_5750_A1
)
14858 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
14859 LED_CTRL_MODE_PHY_2
);
14862 case SHASTA_EXT_LED_MAC
:
14863 tp
->led_ctrl
= LED_CTRL_MODE_SHASTA_MAC
;
14866 case SHASTA_EXT_LED_COMBO
:
14867 tp
->led_ctrl
= LED_CTRL_MODE_COMBO
;
14868 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5750_A0
)
14869 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
14870 LED_CTRL_MODE_PHY_2
);
14875 if ((tg3_asic_rev(tp
) == ASIC_REV_5700
||
14876 tg3_asic_rev(tp
) == ASIC_REV_5701
) &&
14877 tp
->pdev
->subsystem_vendor
== PCI_VENDOR_ID_DELL
)
14878 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
14880 if (tg3_chip_rev(tp
) == CHIPREV_5784_AX
)
14881 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
14883 if (nic_cfg
& NIC_SRAM_DATA_CFG_EEPROM_WP
) {
14884 tg3_flag_set(tp
, EEPROM_WRITE_PROT
);
14885 if ((tp
->pdev
->subsystem_vendor
==
14886 PCI_VENDOR_ID_ARIMA
) &&
14887 (tp
->pdev
->subsystem_device
== 0x205a ||
14888 tp
->pdev
->subsystem_device
== 0x2063))
14889 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
14891 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
14892 tg3_flag_set(tp
, IS_NIC
);
14895 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
14896 tg3_flag_set(tp
, ENABLE_ASF
);
14897 if (tg3_flag(tp
, 5750_PLUS
))
14898 tg3_flag_set(tp
, ASF_NEW_HANDSHAKE
);
14901 if ((nic_cfg
& NIC_SRAM_DATA_CFG_APE_ENABLE
) &&
14902 tg3_flag(tp
, 5750_PLUS
))
14903 tg3_flag_set(tp
, ENABLE_APE
);
14905 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
&&
14906 !(nic_cfg
& NIC_SRAM_DATA_CFG_FIBER_WOL
))
14907 tg3_flag_clear(tp
, WOL_CAP
);
14909 if (tg3_flag(tp
, WOL_CAP
) &&
14910 (nic_cfg
& NIC_SRAM_DATA_CFG_WOL_ENABLE
)) {
14911 tg3_flag_set(tp
, WOL_ENABLE
);
14912 device_set_wakeup_enable(&tp
->pdev
->dev
, true);
14915 if (cfg2
& (1 << 17))
14916 tp
->phy_flags
|= TG3_PHYFLG_CAPACITIVE_COUPLING
;
14918 /* serdes signal pre-emphasis in register 0x590 set by */
14919 /* bootcode if bit 18 is set */
14920 if (cfg2
& (1 << 18))
14921 tp
->phy_flags
|= TG3_PHYFLG_SERDES_PREEMPHASIS
;
14923 if ((tg3_flag(tp
, 57765_PLUS
) ||
14924 (tg3_asic_rev(tp
) == ASIC_REV_5784
&&
14925 tg3_chip_rev(tp
) != CHIPREV_5784_AX
)) &&
14926 (cfg2
& NIC_SRAM_DATA_CFG_2_APD_EN
))
14927 tp
->phy_flags
|= TG3_PHYFLG_ENABLE_APD
;
14929 if (tg3_flag(tp
, PCI_EXPRESS
)) {
14932 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_3
, &cfg3
);
14933 if (tg3_asic_rev(tp
) != ASIC_REV_5785
&&
14934 !tg3_flag(tp
, 57765_PLUS
) &&
14935 (cfg3
& NIC_SRAM_ASPM_DEBOUNCE
))
14936 tg3_flag_set(tp
, ASPM_WORKAROUND
);
14937 if (cfg3
& NIC_SRAM_LNK_FLAP_AVOID
)
14938 tp
->phy_flags
|= TG3_PHYFLG_KEEP_LINK_ON_PWRDN
;
14939 if (cfg3
& NIC_SRAM_1G_ON_VAUX_OK
)
14940 tp
->phy_flags
|= TG3_PHYFLG_1G_ON_VAUX_OK
;
14943 if (cfg4
& NIC_SRAM_RGMII_INBAND_DISABLE
)
14944 tg3_flag_set(tp
, RGMII_INBAND_DISABLE
);
14945 if (cfg4
& NIC_SRAM_RGMII_EXT_IBND_RX_EN
)
14946 tg3_flag_set(tp
, RGMII_EXT_IBND_RX_EN
);
14947 if (cfg4
& NIC_SRAM_RGMII_EXT_IBND_TX_EN
)
14948 tg3_flag_set(tp
, RGMII_EXT_IBND_TX_EN
);
14951 if (tg3_flag(tp
, WOL_CAP
))
14952 device_set_wakeup_enable(&tp
->pdev
->dev
,
14953 tg3_flag(tp
, WOL_ENABLE
));
14955 device_set_wakeup_capable(&tp
->pdev
->dev
, false);
14958 static int tg3_ape_otp_read(struct tg3
*tp
, u32 offset
, u32
*val
)
14961 u32 val2
, off
= offset
* 8;
14963 err
= tg3_nvram_lock(tp
);
14967 tg3_ape_write32(tp
, TG3_APE_OTP_ADDR
, off
| APE_OTP_ADDR_CPU_ENABLE
);
14968 tg3_ape_write32(tp
, TG3_APE_OTP_CTRL
, APE_OTP_CTRL_PROG_EN
|
14969 APE_OTP_CTRL_CMD_RD
| APE_OTP_CTRL_START
);
14970 tg3_ape_read32(tp
, TG3_APE_OTP_CTRL
);
14973 for (i
= 0; i
< 100; i
++) {
14974 val2
= tg3_ape_read32(tp
, TG3_APE_OTP_STATUS
);
14975 if (val2
& APE_OTP_STATUS_CMD_DONE
) {
14976 *val
= tg3_ape_read32(tp
, TG3_APE_OTP_RD_DATA
);
14982 tg3_ape_write32(tp
, TG3_APE_OTP_CTRL
, 0);
14984 tg3_nvram_unlock(tp
);
14985 if (val2
& APE_OTP_STATUS_CMD_DONE
)
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}

/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
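/*
 * Worked example for the merge above, with made-up values: if the MAGIC1
 * read returns thalf_otp = 0xAAAABBBB and the MAGIC2 read returns
 * bhalf_otp = 0xCCCCDDDD, the gphy config word is built from the low half of
 * the first read and the high half of the second:
 *
 *	((0xAAAABBBB & 0x0000ffff) << 16) | (0xCCCCDDDD >> 16)
 *		== 0xBBBB0000 | 0x0000CCCC
 *		== 0xBBBBCCCC
 */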
static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}
15067 static int tg3_phy_probe(struct tg3
*tp
)
15069 u32 hw_phy_id_1
, hw_phy_id_2
;
15070 u32 hw_phy_id
, hw_phy_id_masked
;
15073 /* flow control autonegotiation is default behavior */
15074 tg3_flag_set(tp
, PAUSE_AUTONEG
);
15075 tp
->link_config
.flowctrl
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
15077 if (tg3_flag(tp
, ENABLE_APE
)) {
15078 switch (tp
->pci_fn
) {
15080 tp
->phy_ape_lock
= TG3_APE_LOCK_PHY0
;
15083 tp
->phy_ape_lock
= TG3_APE_LOCK_PHY1
;
15086 tp
->phy_ape_lock
= TG3_APE_LOCK_PHY2
;
15089 tp
->phy_ape_lock
= TG3_APE_LOCK_PHY3
;
15094 if (!tg3_flag(tp
, ENABLE_ASF
) &&
15095 !(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) &&
15096 !(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
15097 tp
->phy_flags
&= ~(TG3_PHYFLG_1G_ON_VAUX_OK
|
15098 TG3_PHYFLG_KEEP_LINK_ON_PWRDN
);
15100 if (tg3_flag(tp
, USE_PHYLIB
))
15101 return tg3_phy_init(tp
);
15103 /* Reading the PHY ID register can conflict with ASF
15104 * firmware access to the PHY hardware.
15107 if (tg3_flag(tp
, ENABLE_ASF
) || tg3_flag(tp
, ENABLE_APE
)) {
15108 hw_phy_id
= hw_phy_id_masked
= TG3_PHY_ID_INVALID
;
15110 /* Now read the physical PHY_ID from the chip and verify
15111 * that it is sane. If it doesn't look good, we fall back
15112 * to either the hard-coded table based PHY_ID and failing
15113 * that the value found in the eeprom area.
15115 err
|= tg3_readphy(tp
, MII_PHYSID1
, &hw_phy_id_1
);
15116 err
|= tg3_readphy(tp
, MII_PHYSID2
, &hw_phy_id_2
);
15118 hw_phy_id
= (hw_phy_id_1
& 0xffff) << 10;
15119 hw_phy_id
|= (hw_phy_id_2
& 0xfc00) << 16;
15120 hw_phy_id
|= (hw_phy_id_2
& 0x03ff) << 0;
15122 hw_phy_id_masked
= hw_phy_id
& TG3_PHY_ID_MASK
;
15125 if (!err
&& TG3_KNOWN_PHY_ID(hw_phy_id_masked
)) {
15126 tp
->phy_id
= hw_phy_id
;
15127 if (hw_phy_id_masked
== TG3_PHY_ID_BCM8002
)
15128 tp
->phy_flags
|= TG3_PHYFLG_PHY_SERDES
;
15130 tp
->phy_flags
&= ~TG3_PHYFLG_PHY_SERDES
;
15132 if (tp
->phy_id
!= TG3_PHY_ID_INVALID
) {
15133 /* Do nothing, phy ID already set up in
15134 * tg3_get_eeprom_hw_cfg().
15137 struct subsys_tbl_ent
*p
;
15139 /* No eeprom signature? Try the hardcoded
15140 * subsys device table.
15142 p
= tg3_lookup_by_subsys(tp
);
15144 tp
->phy_id
= p
->phy_id
;
15145 } else if (!tg3_flag(tp
, IS_SSB_CORE
)) {
15146 /* For now we saw the IDs 0xbc050cd0,
15147 * 0xbc050f80 and 0xbc050c30 on devices
15148 * connected to an BCM4785 and there are
15149 * probably more. Just assume that the phy is
15150 * supported when it is connected to a SSB core
15157 tp
->phy_id
== TG3_PHY_ID_BCM8002
)
15158 tp
->phy_flags
|= TG3_PHYFLG_PHY_SERDES
;
15162 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) &&
15163 (tg3_asic_rev(tp
) == ASIC_REV_5719
||
15164 tg3_asic_rev(tp
) == ASIC_REV_5720
||
15165 tg3_asic_rev(tp
) == ASIC_REV_57766
||
15166 tg3_asic_rev(tp
) == ASIC_REV_5762
||
15167 (tg3_asic_rev(tp
) == ASIC_REV_5717
&&
15168 tg3_chip_rev_id(tp
) != CHIPREV_ID_5717_A0
) ||
15169 (tg3_asic_rev(tp
) == ASIC_REV_57765
&&
15170 tg3_chip_rev_id(tp
) != CHIPREV_ID_57765_A0
))) {
15171 tp
->phy_flags
|= TG3_PHYFLG_EEE_CAP
;
15173 tp
->eee
.supported
= SUPPORTED_100baseT_Full
|
15174 SUPPORTED_1000baseT_Full
;
15175 tp
->eee
.advertised
= ADVERTISED_100baseT_Full
|
15176 ADVERTISED_1000baseT_Full
;
15177 tp
->eee
.eee_enabled
= 1;
15178 tp
->eee
.tx_lpi_enabled
= 1;
15179 tp
->eee
.tx_lpi_timer
= TG3_CPMU_DBTMR1_LNKIDLE_2047US
;
15182 tg3_phy_init_link_config(tp
);
15184 if (!(tp
->phy_flags
& TG3_PHYFLG_KEEP_LINK_ON_PWRDN
) &&
15185 !(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) &&
15186 !tg3_flag(tp
, ENABLE_APE
) &&
15187 !tg3_flag(tp
, ENABLE_ASF
)) {
15190 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
15191 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
15192 (bmsr
& BMSR_LSTATUS
))
15193 goto skip_phy_reset
;
15195 err
= tg3_phy_reset(tp
);
15199 tg3_phy_set_wirespeed(tp
);
15201 if (!tg3_phy_copper_an_config_ok(tp
, &dummy
)) {
15202 tg3_phy_autoneg_cfg(tp
, tp
->link_config
.advertising
,
15203 tp
->link_config
.flowctrl
);
15205 tg3_writephy(tp
, MII_BMCR
,
15206 BMCR_ANENABLE
| BMCR_ANRESTART
);
15211 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
15212 err
= tg3_init_5401phy_dsp(tp
);
15216 err
= tg3_init_5401phy_dsp(tp
);
15222 static void tg3_read_vpd(struct tg3
*tp
)
15225 unsigned int block_end
, rosize
, len
;
15229 vpd_data
= (u8
*)tg3_vpd_readblock(tp
, &vpdlen
);
15233 i
= pci_vpd_find_tag(vpd_data
, 0, vpdlen
, PCI_VPD_LRDT_RO_DATA
);
15235 goto out_not_found
;
15237 rosize
= pci_vpd_lrdt_size(&vpd_data
[i
]);
15238 block_end
= i
+ PCI_VPD_LRDT_TAG_SIZE
+ rosize
;
15239 i
+= PCI_VPD_LRDT_TAG_SIZE
;
15241 if (block_end
> vpdlen
)
15242 goto out_not_found
;
15244 j
= pci_vpd_find_info_keyword(vpd_data
, i
, rosize
,
15245 PCI_VPD_RO_KEYWORD_MFR_ID
);
15247 len
= pci_vpd_info_field_size(&vpd_data
[j
]);
15249 j
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
15250 if (j
+ len
> block_end
|| len
!= 4 ||
15251 memcmp(&vpd_data
[j
], "1028", 4))
15254 j
= pci_vpd_find_info_keyword(vpd_data
, i
, rosize
,
15255 PCI_VPD_RO_KEYWORD_VENDOR0
);
15259 len
= pci_vpd_info_field_size(&vpd_data
[j
]);
15261 j
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
15262 if (j
+ len
> block_end
)
15265 if (len
>= sizeof(tp
->fw_ver
))
15266 len
= sizeof(tp
->fw_ver
) - 1;
15267 memset(tp
->fw_ver
, 0, sizeof(tp
->fw_ver
));
15268 snprintf(tp
->fw_ver
, sizeof(tp
->fw_ver
), "%.*s bc ", len
,
15273 i
= pci_vpd_find_info_keyword(vpd_data
, i
, rosize
,
15274 PCI_VPD_RO_KEYWORD_PARTNO
);
15276 goto out_not_found
;
15278 len
= pci_vpd_info_field_size(&vpd_data
[i
]);
15280 i
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
15281 if (len
> TG3_BPN_SIZE
||
15282 (len
+ i
) > vpdlen
)
15283 goto out_not_found
;
15285 memcpy(tp
->board_part_number
, &vpd_data
[i
], len
);
15289 if (tp
->board_part_number
[0])
15293 if (tg3_asic_rev(tp
) == ASIC_REV_5717
) {
15294 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717
||
15295 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717_C
)
15296 strcpy(tp
->board_part_number
, "BCM5717");
15297 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5718
)
15298 strcpy(tp
->board_part_number
, "BCM5718");
15301 } else if (tg3_asic_rev(tp
) == ASIC_REV_57780
) {
15302 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57780
)
15303 strcpy(tp
->board_part_number
, "BCM57780");
15304 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57760
)
15305 strcpy(tp
->board_part_number
, "BCM57760");
15306 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57790
)
15307 strcpy(tp
->board_part_number
, "BCM57790");
15308 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57788
)
15309 strcpy(tp
->board_part_number
, "BCM57788");
15312 } else if (tg3_asic_rev(tp
) == ASIC_REV_57765
) {
15313 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57761
)
15314 strcpy(tp
->board_part_number
, "BCM57761");
15315 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57765
)
15316 strcpy(tp
->board_part_number
, "BCM57765");
15317 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57781
)
15318 strcpy(tp
->board_part_number
, "BCM57781");
15319 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57785
)
15320 strcpy(tp
->board_part_number
, "BCM57785");
15321 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57791
)
15322 strcpy(tp
->board_part_number
, "BCM57791");
15323 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57795
)
15324 strcpy(tp
->board_part_number
, "BCM57795");
15327 } else if (tg3_asic_rev(tp
) == ASIC_REV_57766
) {
15328 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57762
)
15329 strcpy(tp
->board_part_number
, "BCM57762");
15330 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57766
)
15331 strcpy(tp
->board_part_number
, "BCM57766");
15332 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57782
)
15333 strcpy(tp
->board_part_number
, "BCM57782");
15334 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57786
)
15335 strcpy(tp
->board_part_number
, "BCM57786");
15338 } else if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
15339 strcpy(tp
->board_part_number
, "BCM95906");
15342 strcpy(tp
->board_part_number
, "none");
15346 static int tg3_fw_img_is_valid(struct tg3
*tp
, u32 offset
)
15350 if (tg3_nvram_read(tp
, offset
, &val
) ||
15351 (val
& 0xfc000000) != 0x0c000000 ||
15352 tg3_nvram_read(tp
, offset
+ 4, &val
) ||
15359 static void tg3_read_bc_ver(struct tg3
*tp
)
15361 u32 val
, offset
, start
, ver_offset
;
15363 bool newver
= false;
15365 if (tg3_nvram_read(tp
, 0xc, &offset
) ||
15366 tg3_nvram_read(tp
, 0x4, &start
))
15369 offset
= tg3_nvram_logical_addr(tp
, offset
);
15371 if (tg3_nvram_read(tp
, offset
, &val
))
15374 if ((val
& 0xfc000000) == 0x0c000000) {
15375 if (tg3_nvram_read(tp
, offset
+ 4, &val
))
15382 dst_off
= strlen(tp
->fw_ver
);
15385 if (TG3_VER_SIZE
- dst_off
< 16 ||
15386 tg3_nvram_read(tp
, offset
+ 8, &ver_offset
))
15389 offset
= offset
+ ver_offset
- start
;
15390 for (i
= 0; i
< 16; i
+= 4) {
15392 if (tg3_nvram_read_be32(tp
, offset
+ i
, &v
))
15395 memcpy(tp
->fw_ver
+ dst_off
+ i
, &v
, sizeof(v
));
15400 if (tg3_nvram_read(tp
, TG3_NVM_PTREV_BCVER
, &ver_offset
))
15403 major
= (ver_offset
& TG3_NVM_BCVER_MAJMSK
) >>
15404 TG3_NVM_BCVER_MAJSFT
;
15405 minor
= ver_offset
& TG3_NVM_BCVER_MINMSK
;
15406 snprintf(&tp
->fw_ver
[dst_off
], TG3_VER_SIZE
- dst_off
,
15407 "v%d.%02d", major
, minor
);
static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
15427 static void tg3_read_sb_ver(struct tg3
*tp
, u32 val
)
15429 u32 offset
, major
, minor
, build
;
15431 strncat(tp
->fw_ver
, "sb", TG3_VER_SIZE
- strlen(tp
->fw_ver
) - 1);
15433 if ((val
& TG3_EEPROM_SB_FORMAT_MASK
) != TG3_EEPROM_SB_FORMAT_1
)
15436 switch (val
& TG3_EEPROM_SB_REVISION_MASK
) {
15437 case TG3_EEPROM_SB_REVISION_0
:
15438 offset
= TG3_EEPROM_SB_F1R0_EDH_OFF
;
15440 case TG3_EEPROM_SB_REVISION_2
:
15441 offset
= TG3_EEPROM_SB_F1R2_EDH_OFF
;
15443 case TG3_EEPROM_SB_REVISION_3
:
15444 offset
= TG3_EEPROM_SB_F1R3_EDH_OFF
;
15446 case TG3_EEPROM_SB_REVISION_4
:
15447 offset
= TG3_EEPROM_SB_F1R4_EDH_OFF
;
15449 case TG3_EEPROM_SB_REVISION_5
:
15450 offset
= TG3_EEPROM_SB_F1R5_EDH_OFF
;
15452 case TG3_EEPROM_SB_REVISION_6
:
15453 offset
= TG3_EEPROM_SB_F1R6_EDH_OFF
;
15459 if (tg3_nvram_read(tp
, offset
, &val
))
15462 build
= (val
& TG3_EEPROM_SB_EDH_BLD_MASK
) >>
15463 TG3_EEPROM_SB_EDH_BLD_SHFT
;
15464 major
= (val
& TG3_EEPROM_SB_EDH_MAJ_MASK
) >>
15465 TG3_EEPROM_SB_EDH_MAJ_SHFT
;
15466 minor
= val
& TG3_EEPROM_SB_EDH_MIN_MASK
;
15468 if (minor
> 99 || build
> 26)
15471 offset
= strlen(tp
->fw_ver
);
15472 snprintf(&tp
->fw_ver
[offset
], TG3_VER_SIZE
- offset
,
15473 " v%d.%02d", major
, minor
);
15476 offset
= strlen(tp
->fw_ver
);
15477 if (offset
< TG3_VER_SIZE
- 1)
15478 tp
->fw_ver
[offset
] = 'a' + build
- 1;
15482 static void tg3_read_mgmtfw_ver(struct tg3
*tp
)
15484 u32 val
, offset
, start
;
15487 for (offset
= TG3_NVM_DIR_START
;
15488 offset
< TG3_NVM_DIR_END
;
15489 offset
+= TG3_NVM_DIRENT_SIZE
) {
15490 if (tg3_nvram_read(tp
, offset
, &val
))
15493 if ((val
>> TG3_NVM_DIRTYPE_SHIFT
) == TG3_NVM_DIRTYPE_ASFINI
)
15497 if (offset
== TG3_NVM_DIR_END
)
15500 if (!tg3_flag(tp
, 5705_PLUS
))
15501 start
= 0x08000000;
15502 else if (tg3_nvram_read(tp
, offset
- 4, &start
))
15505 if (tg3_nvram_read(tp
, offset
+ 4, &offset
) ||
15506 !tg3_fw_img_is_valid(tp
, offset
) ||
15507 tg3_nvram_read(tp
, offset
+ 8, &val
))
15510 offset
+= val
- start
;
15512 vlen
= strlen(tp
->fw_ver
);
15514 tp
->fw_ver
[vlen
++] = ',';
15515 tp
->fw_ver
[vlen
++] = ' ';
15517 for (i
= 0; i
< 4; i
++) {
15519 if (tg3_nvram_read_be32(tp
, offset
, &v
))
15522 offset
+= sizeof(v
);
15524 if (vlen
> TG3_VER_SIZE
- sizeof(v
)) {
15525 memcpy(&tp
->fw_ver
[vlen
], &v
, TG3_VER_SIZE
- vlen
);
15529 memcpy(&tp
->fw_ver
[vlen
], &v
, sizeof(v
));
static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}
15550 static void tg3_read_dash_ver(struct tg3
*tp
)
15556 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_VERSION
);
15558 if (tg3_flag(tp
, APE_HAS_NCSI
))
15560 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5725
)
15565 vlen
= strlen(tp
->fw_ver
);
15567 snprintf(&tp
->fw_ver
[vlen
], TG3_VER_SIZE
- vlen
, " %s v%d.%d.%d.%d",
15569 (apedata
& APE_FW_VERSION_MAJMSK
) >> APE_FW_VERSION_MAJSFT
,
15570 (apedata
& APE_FW_VERSION_MINMSK
) >> APE_FW_VERSION_MINSFT
,
15571 (apedata
& APE_FW_VERSION_REVMSK
) >> APE_FW_VERSION_REVSFT
,
15572 (apedata
& APE_FW_VERSION_BLDMSK
));
15575 static void tg3_read_otp_ver(struct tg3
*tp
)
15579 if (tg3_asic_rev(tp
) != ASIC_REV_5762
)
15582 if (!tg3_ape_otp_read(tp
, OTP_ADDRESS_MAGIC0
, &val
) &&
15583 !tg3_ape_otp_read(tp
, OTP_ADDRESS_MAGIC0
+ 4, &val2
) &&
15584 TG3_OTP_MAGIC0_VALID(val
)) {
15585 u64 val64
= (u64
) val
<< 32 | val2
;
15589 for (i
= 0; i
< 7; i
++) {
15590 if ((val64
& 0xff) == 0)
15592 ver
= val64
& 0xff;
15595 vlen
= strlen(tp
->fw_ver
);
15596 snprintf(&tp
->fw_ver
[vlen
], TG3_VER_SIZE
- vlen
, " .%02d", ver
);
15600 static void tg3_read_fw_ver(struct tg3
*tp
)
15603 bool vpd_vers
= false;
15605 if (tp
->fw_ver
[0] != 0)
15608 if (tg3_flag(tp
, NO_NVRAM
)) {
15609 strcat(tp
->fw_ver
, "sb");
15610 tg3_read_otp_ver(tp
);
15614 if (tg3_nvram_read(tp
, 0, &val
))
15617 if (val
== TG3_EEPROM_MAGIC
)
15618 tg3_read_bc_ver(tp
);
15619 else if ((val
& TG3_EEPROM_MAGIC_FW_MSK
) == TG3_EEPROM_MAGIC_FW
)
15620 tg3_read_sb_ver(tp
, val
);
15621 else if ((val
& TG3_EEPROM_MAGIC_HW_MSK
) == TG3_EEPROM_MAGIC_HW
)
15622 tg3_read_hwsb_ver(tp
);
15624 if (tg3_flag(tp
, ENABLE_ASF
)) {
15625 if (tg3_flag(tp
, ENABLE_APE
)) {
15626 tg3_probe_ncsi(tp
);
15628 tg3_read_dash_ver(tp
);
15629 } else if (!vpd_vers
) {
15630 tg3_read_mgmtfw_ver(tp
);
15634 tp
->fw_ver
[TG3_VER_SIZE
- 1] = 0;
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}

static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};

static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
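/*
 * Note on the refcounting above: pci_get_slot() returns its struct pci_dev
 * with the reference count raised, so a lookup is normally balanced with
 * pci_dev_put() once the caller is done, along the lines of this sketch:
 *
 *	struct pci_dev *peer = pci_get_slot(bus, devfn);
 *	if (peer) {
 *		// ... use peer ...
 *		pci_dev_put(peer);	// drop the reference when done
 *	}
 *
 * tg3_find_peer() drops the reference immediately because the other function
 * of a dual-port 5704 cannot be removed independently of this one, so the
 * bare pointer stays valid for the lifetime of the device.
 */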
15682 static void tg3_detect_asic_rev(struct tg3
*tp
, u32 misc_ctrl_reg
)
15684 tp
->pci_chip_rev_id
= misc_ctrl_reg
>> MISC_HOST_CTRL_CHIPREV_SHIFT
;
15685 if (tg3_asic_rev(tp
) == ASIC_REV_USE_PROD_ID_REG
) {
15688 /* All devices that use the alternate
15689 * ASIC REV location have a CPMU.
15691 tg3_flag_set(tp
, CPMU_PRESENT
);
15693 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717
||
15694 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717_C
||
15695 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5718
||
15696 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5719
||
15697 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5720
||
15698 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5762
||
15699 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5725
||
15700 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5727
)
15701 reg
= TG3PCI_GEN2_PRODID_ASICREV
;
15702 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57781
||
15703 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57785
||
15704 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57761
||
15705 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57765
||
15706 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57791
||
15707 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57795
||
15708 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57762
||
15709 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57766
||
15710 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57782
||
15711 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57786
)
15712 reg
= TG3PCI_GEN15_PRODID_ASICREV
;
15714 reg
= TG3PCI_PRODID_ASICREV
;
15716 pci_read_config_dword(tp
->pdev
, reg
, &tp
->pci_chip_rev_id
);
15719 /* Wrong chip ID in 5752 A0. This code can be removed later
15720 * as A0 is not in production.
15722 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5752_A0_HW
)
15723 tp
->pci_chip_rev_id
= CHIPREV_ID_5752_A0
;
15725 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5717_C0
)
15726 tp
->pci_chip_rev_id
= CHIPREV_ID_5720_A0
;
15728 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
15729 tg3_asic_rev(tp
) == ASIC_REV_5719
||
15730 tg3_asic_rev(tp
) == ASIC_REV_5720
)
15731 tg3_flag_set(tp
, 5717_PLUS
);
15733 if (tg3_asic_rev(tp
) == ASIC_REV_57765
||
15734 tg3_asic_rev(tp
) == ASIC_REV_57766
)
15735 tg3_flag_set(tp
, 57765_CLASS
);
15737 if (tg3_flag(tp
, 57765_CLASS
) || tg3_flag(tp
, 5717_PLUS
) ||
15738 tg3_asic_rev(tp
) == ASIC_REV_5762
)
15739 tg3_flag_set(tp
, 57765_PLUS
);
15741 /* Intentionally exclude ASIC_REV_5906 */
15742 if (tg3_asic_rev(tp
) == ASIC_REV_5755
||
15743 tg3_asic_rev(tp
) == ASIC_REV_5787
||
15744 tg3_asic_rev(tp
) == ASIC_REV_5784
||
15745 tg3_asic_rev(tp
) == ASIC_REV_5761
||
15746 tg3_asic_rev(tp
) == ASIC_REV_5785
||
15747 tg3_asic_rev(tp
) == ASIC_REV_57780
||
15748 tg3_flag(tp
, 57765_PLUS
))
15749 tg3_flag_set(tp
, 5755_PLUS
);
15751 if (tg3_asic_rev(tp
) == ASIC_REV_5780
||
15752 tg3_asic_rev(tp
) == ASIC_REV_5714
)
15753 tg3_flag_set(tp
, 5780_CLASS
);
15755 if (tg3_asic_rev(tp
) == ASIC_REV_5750
||
15756 tg3_asic_rev(tp
) == ASIC_REV_5752
||
15757 tg3_asic_rev(tp
) == ASIC_REV_5906
||
15758 tg3_flag(tp
, 5755_PLUS
) ||
15759 tg3_flag(tp
, 5780_CLASS
))
15760 tg3_flag_set(tp
, 5750_PLUS
);
15762 if (tg3_asic_rev(tp
) == ASIC_REV_5705
||
15763 tg3_flag(tp
, 5750_PLUS
))
15764 tg3_flag_set(tp
, 5705_PLUS
);
15767 static bool tg3_10_100_only_device(struct tg3
*tp
,
15768 const struct pci_device_id
*ent
)
15770 u32 grc_misc_cfg
= tr32(GRC_MISC_CFG
) & GRC_MISC_CFG_BOARD_ID_MASK
;
15772 if ((tg3_asic_rev(tp
) == ASIC_REV_5703
&&
15773 (grc_misc_cfg
== 0x8000 || grc_misc_cfg
== 0x4000)) ||
15774 (tp
->phy_flags
& TG3_PHYFLG_IS_FET
))
15777 if (ent
->driver_data
& TG3_DRV_DATA_FLAG_10_100_ONLY
) {
15778 if (tg3_asic_rev(tp
) == ASIC_REV_5705
) {
15779 if (ent
->driver_data
& TG3_DRV_DATA_FLAG_5705_10_100
)
15789 static int tg3_get_invariants(struct tg3
*tp
, const struct pci_device_id
*ent
)
15792 u32 pci_state_reg
, grc_misc_cfg
;
15797 /* Force memory write invalidate off. If we leave it on,
15798 * then on 5700_BX chips we have to enable a workaround.
15799 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15800 * to match the cacheline size. The Broadcom driver have this
15801 * workaround but turns MWI off all the times so never uses
15802 * it. This seems to suggest that the workaround is insufficient.
15804 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
15805 pci_cmd
&= ~PCI_COMMAND_INVALIDATE
;
15806 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
15808 /* Important! -- Make sure register accesses are byteswapped
15809 * correctly. Also, for those chips that require it, make
15810 * sure that indirect register accesses are enabled before
15811 * the first operation.
15813 pci_read_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
15815 tp
->misc_host_ctrl
|= (misc_ctrl_reg
&
15816 MISC_HOST_CTRL_CHIPREV
);
15817 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
15818 tp
->misc_host_ctrl
);
15820 tg3_detect_asic_rev(tp
, misc_ctrl_reg
);
15822 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15823 * we need to disable memory and use config. cycles
15824 * only to access all registers. The 5702/03 chips
15825 * can mistakenly decode the special cycles from the
15826 * ICH chipsets as memory write cycles, causing corruption
15827 * of register and memory space. Only certain ICH bridges
15828 * will drive special cycles with non-zero data during the
15829 * address phase which can fall within the 5703's address
15830 * range. This is not an ICH bug as the PCI spec allows
15831 * non-zero address during special cycles. However, only
15832 * these ICH bridges are known to drive non-zero addresses
15833 * during special cycles.
15835 * Since special cycles do not cross PCI bridges, we only
15836 * enable this workaround if the 5703 is on the secondary
15837 * bus of these ICH bridges.
15839 if ((tg3_chip_rev_id(tp
) == CHIPREV_ID_5703_A1
) ||
15840 (tg3_chip_rev_id(tp
) == CHIPREV_ID_5703_A2
)) {
15841 static struct tg3_dev_id
{
15845 } ich_chipsets
[] = {
15846 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AA_8
,
15848 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AB_8
,
15850 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_11
,
15852 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_6
,
15856 struct tg3_dev_id
*pci_id
= &ich_chipsets
[0];
15857 struct pci_dev
*bridge
= NULL
;
15859 while (pci_id
->vendor
!= 0) {
15860 bridge
= pci_get_device(pci_id
->vendor
, pci_id
->device
,
15866 if (pci_id
->rev
!= PCI_ANY_ID
) {
15867 if (bridge
->revision
> pci_id
->rev
)
15870 if (bridge
->subordinate
&&
15871 (bridge
->subordinate
->number
==
15872 tp
->pdev
->bus
->number
)) {
15873 tg3_flag_set(tp
, ICH_WORKAROUND
);
15874 pci_dev_put(bridge
);
15880 if (tg3_asic_rev(tp
) == ASIC_REV_5701
) {
15881 static struct tg3_dev_id
{
15884 } bridge_chipsets
[] = {
15885 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_PXH_0
},
15886 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_PXH_1
},
15889 struct tg3_dev_id
*pci_id
= &bridge_chipsets
[0];
15890 struct pci_dev
*bridge
= NULL
;
15892 while (pci_id
->vendor
!= 0) {
15893 bridge
= pci_get_device(pci_id
->vendor
,
15900 if (bridge
->subordinate
&&
15901 (bridge
->subordinate
->number
<=
15902 tp
->pdev
->bus
->number
) &&
15903 (bridge
->subordinate
->busn_res
.end
>=
15904 tp
->pdev
->bus
->number
)) {
15905 tg3_flag_set(tp
, 5701_DMA_BUG
);
15906 pci_dev_put(bridge
);
15912 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15913 * DMA addresses > 40-bit. This bridge may have other additional
15914 * 57xx devices behind it in some 4-port NIC designs for example.
15915 * Any tg3 device found behind the bridge will also need the 40-bit
15918 if (tg3_flag(tp
, 5780_CLASS
)) {
15919 tg3_flag_set(tp
, 40BIT_DMA_BUG
);
15920 tp
->msi_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_MSI
);
15922 struct pci_dev
*bridge
= NULL
;
15925 bridge
= pci_get_device(PCI_VENDOR_ID_SERVERWORKS
,
15926 PCI_DEVICE_ID_SERVERWORKS_EPB
,
15928 if (bridge
&& bridge
->subordinate
&&
15929 (bridge
->subordinate
->number
<=
15930 tp
->pdev
->bus
->number
) &&
15931 (bridge
->subordinate
->busn_res
.end
>=
15932 tp
->pdev
->bus
->number
)) {
15933 tg3_flag_set(tp
, 40BIT_DMA_BUG
);
15934 pci_dev_put(bridge
);
15940 if (tg3_asic_rev(tp
) == ASIC_REV_5704
||
15941 tg3_asic_rev(tp
) == ASIC_REV_5714
)
15942 tp
->pdev_peer
= tg3_find_peer(tp
);
15944 /* Determine TSO capabilities */
15945 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5719_A0
)
15946 ; /* Do nothing. HW bug. */
15947 else if (tg3_flag(tp
, 57765_PLUS
))
15948 tg3_flag_set(tp
, HW_TSO_3
);
15949 else if (tg3_flag(tp
, 5755_PLUS
) ||
15950 tg3_asic_rev(tp
) == ASIC_REV_5906
)
15951 tg3_flag_set(tp
, HW_TSO_2
);
15952 else if (tg3_flag(tp
, 5750_PLUS
)) {
15953 tg3_flag_set(tp
, HW_TSO_1
);
15954 tg3_flag_set(tp
, TSO_BUG
);
15955 if (tg3_asic_rev(tp
) == ASIC_REV_5750
&&
15956 tg3_chip_rev_id(tp
) >= CHIPREV_ID_5750_C2
)
15957 tg3_flag_clear(tp
, TSO_BUG
);
15958 } else if (tg3_asic_rev(tp
) != ASIC_REV_5700
&&
15959 tg3_asic_rev(tp
) != ASIC_REV_5701
&&
15960 tg3_chip_rev_id(tp
) != CHIPREV_ID_5705_A0
) {
15961 tg3_flag_set(tp
, FW_TSO
);
15962 tg3_flag_set(tp
, TSO_BUG
);
15963 if (tg3_asic_rev(tp
) == ASIC_REV_5705
)
15964 tp
->fw_needed
= FIRMWARE_TG3TSO5
;
15966 tp
->fw_needed
= FIRMWARE_TG3TSO
;
15969 /* Selectively allow TSO based on operating conditions */
15970 if (tg3_flag(tp
, HW_TSO_1
) ||
15971 tg3_flag(tp
, HW_TSO_2
) ||
15972 tg3_flag(tp
, HW_TSO_3
) ||
15973 tg3_flag(tp
, FW_TSO
)) {
15974 /* For firmware TSO, assume ASF is disabled.
15975 * We'll disable TSO later if we discover ASF
15976 * is enabled in tg3_get_eeprom_hw_cfg().
15978 tg3_flag_set(tp
, TSO_CAPABLE
);
15980 tg3_flag_clear(tp
, TSO_CAPABLE
);
15981 tg3_flag_clear(tp
, TSO_BUG
);
15982 tp
->fw_needed
= NULL
;
15985 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
)
15986 tp
->fw_needed
= FIRMWARE_TG3
;
15988 if (tg3_asic_rev(tp
) == ASIC_REV_57766
)
15989 tp
->fw_needed
= FIRMWARE_TG357766
;
15993 if (tg3_flag(tp
, 5750_PLUS
)) {
15994 tg3_flag_set(tp
, SUPPORT_MSI
);
15995 if (tg3_chip_rev(tp
) == CHIPREV_5750_AX
||
15996 tg3_chip_rev(tp
) == CHIPREV_5750_BX
||
15997 (tg3_asic_rev(tp
) == ASIC_REV_5714
&&
15998 tg3_chip_rev_id(tp
) <= CHIPREV_ID_5714_A2
&&
15999 tp
->pdev_peer
== tp
->pdev
))
16000 tg3_flag_clear(tp
, SUPPORT_MSI
);
16002 if (tg3_flag(tp
, 5755_PLUS
) ||
16003 tg3_asic_rev(tp
) == ASIC_REV_5906
) {
16004 tg3_flag_set(tp
, 1SHOT_MSI
);
16007 if (tg3_flag(tp
, 57765_PLUS
)) {
16008 tg3_flag_set(tp
, SUPPORT_MSIX
);
16009 tp
->irq_max
= TG3_IRQ_MAX_VECS
;
16015 if (tp
->irq_max
> 1) {
16016 tp
->rxq_max
= TG3_RSS_MAX_NUM_QS
;
16017 tg3_rss_init_dflt_indir_tbl(tp
, TG3_RSS_MAX_NUM_QS
);
16019 if (tg3_asic_rev(tp
) == ASIC_REV_5719
||
16020 tg3_asic_rev(tp
) == ASIC_REV_5720
)
16021 tp
->txq_max
= tp
->irq_max
- 1;
16024 if (tg3_flag(tp
, 5755_PLUS
) ||
16025 tg3_asic_rev(tp
) == ASIC_REV_5906
)
16026 tg3_flag_set(tp
, SHORT_DMA_BUG
);
16028 if (tg3_asic_rev(tp
) == ASIC_REV_5719
)
16029 tp
->dma_limit
= TG3_TX_BD_DMA_MAX_4K
;
16031 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
16032 tg3_asic_rev(tp
) == ASIC_REV_5719
||
16033 tg3_asic_rev(tp
) == ASIC_REV_5720
||
16034 tg3_asic_rev(tp
) == ASIC_REV_5762
)
16035 tg3_flag_set(tp
, LRG_PROD_RING_CAP
);
16037 if (tg3_flag(tp
, 57765_PLUS
) &&
16038 tg3_chip_rev_id(tp
) != CHIPREV_ID_5719_A0
)
16039 tg3_flag_set(tp
, USE_JUMBO_BDFLAG
);
16041 if (!tg3_flag(tp
, 5705_PLUS
) ||
16042 tg3_flag(tp
, 5780_CLASS
) ||
16043 tg3_flag(tp
, USE_JUMBO_BDFLAG
))
16044 tg3_flag_set(tp
, JUMBO_CAPABLE
);
16046 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
16049 if (pci_is_pcie(tp
->pdev
)) {
16052 tg3_flag_set(tp
, PCI_EXPRESS
);
16054 pcie_capability_read_word(tp
->pdev
, PCI_EXP_LNKCTL
, &lnkctl
);
16055 if (lnkctl
& PCI_EXP_LNKCTL_CLKREQ_EN
) {
16056 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
16057 tg3_flag_clear(tp
, HW_TSO_2
);
16058 tg3_flag_clear(tp
, TSO_CAPABLE
);
16060 if (tg3_asic_rev(tp
) == ASIC_REV_5784
||
16061 tg3_asic_rev(tp
) == ASIC_REV_5761
||
16062 tg3_chip_rev_id(tp
) == CHIPREV_ID_57780_A0
||
16063 tg3_chip_rev_id(tp
) == CHIPREV_ID_57780_A1
)
16064 tg3_flag_set(tp
, CLKREQ_BUG
);
16065 } else if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5717_A0
) {
16066 tg3_flag_set(tp
, L1PLLPD_EN
);
16068 } else if (tg3_asic_rev(tp
) == ASIC_REV_5785
) {
16069 /* BCM5785 devices are effectively PCIe devices, and should
16070 * follow PCIe codepaths, but do not have a PCIe capabilities
16073 tg3_flag_set(tp
, PCI_EXPRESS
);
16074 } else if (!tg3_flag(tp
, 5705_PLUS
) ||
16075 tg3_flag(tp
, 5780_CLASS
)) {
16076 tp
->pcix_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_PCIX
);
16077 if (!tp
->pcix_cap
) {
16078 dev_err(&tp
->pdev
->dev
,
16079 "Cannot find PCI-X capability, aborting\n");
16083 if (!(pci_state_reg
& PCISTATE_CONV_PCI_MODE
))
16084 tg3_flag_set(tp
, PCIX_MODE
);
16087 /* If we have an AMD 762 or VIA K8T800 chipset, write
16088 * reordering to the mailbox registers done by the host
16089 * controller can cause major troubles. We read back from
16090 * every mailbox register write to force the writes to be
16091 * posted to the chip in order.
16093 if (pci_dev_present(tg3_write_reorder_chipsets
) &&
16094 !tg3_flag(tp
, PCI_EXPRESS
))
16095 tg3_flag_set(tp
, MBOX_WRITE_REORDER
);
16097 pci_read_config_byte(tp
->pdev
, PCI_CACHE_LINE_SIZE
,
16098 &tp
->pci_cacheline_sz
);
16099 pci_read_config_byte(tp
->pdev
, PCI_LATENCY_TIMER
,
16100 &tp
->pci_lat_timer
);
16101 if (tg3_asic_rev(tp
) == ASIC_REV_5703
&&
16102 tp
->pci_lat_timer
< 64) {
16103 tp
->pci_lat_timer
= 64;
16104 pci_write_config_byte(tp
->pdev
, PCI_LATENCY_TIMER
,
16105 tp
->pci_lat_timer
);
16108 /* Important! -- It is critical that the PCI-X hw workaround
16109 * situation is decided before the first MMIO register access.
16111 if (tg3_chip_rev(tp
) == CHIPREV_5700_BX
) {
16112 /* 5700 BX chips need to have their TX producer index
16113 * mailboxes written twice to workaround a bug.
16115 tg3_flag_set(tp
, TXD_MBOX_HWBUG
);
16117 /* If we are in PCI-X mode, enable register write workaround.
16119 * The workaround is to use indirect register accesses
16120 * for all chip writes not to mailbox registers.
16122 if (tg3_flag(tp
, PCIX_MODE
)) {
16125 tg3_flag_set(tp
, PCIX_TARGET_HWBUG
);
16127 /* The chip can have it's power management PCI config
16128 * space registers clobbered due to this bug.
16129 * So explicitly force the chip into D0 here.
16131 pci_read_config_dword(tp
->pdev
,
16132 tp
->pm_cap
+ PCI_PM_CTRL
,
16134 pm_reg
&= ~PCI_PM_CTRL_STATE_MASK
;
16135 pm_reg
|= PCI_PM_CTRL_PME_ENABLE
| 0 /* D0 */;
16136 pci_write_config_dword(tp
->pdev
,
16137 tp
->pm_cap
+ PCI_PM_CTRL
,
16140 /* Also, force SERR#/PERR# in PCI command. */
16141 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
16142 pci_cmd
|= PCI_COMMAND_PARITY
| PCI_COMMAND_SERR
;
16143 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
16147 if ((pci_state_reg
& PCISTATE_BUS_SPEED_HIGH
) != 0)
16148 tg3_flag_set(tp
, PCI_HIGH_SPEED
);
16149 if ((pci_state_reg
& PCISTATE_BUS_32BIT
) != 0)
16150 tg3_flag_set(tp
, PCI_32BIT
);
16152 /* Chip-specific fixup from Broadcom driver */
16153 if ((tg3_chip_rev_id(tp
) == CHIPREV_ID_5704_A0
) &&
16154 (!(pci_state_reg
& PCISTATE_RETRY_SAME_DMA
))) {
16155 pci_state_reg
|= PCISTATE_RETRY_SAME_DMA
;
16156 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, pci_state_reg
);
16159 /* Default fast path register access methods */
16160 tp
->read32
= tg3_read32
;
16161 tp
->write32
= tg3_write32
;
16162 tp
->read32_mbox
= tg3_read32
;
16163 tp
->write32_mbox
= tg3_write32
;
16164 tp
->write32_tx_mbox
= tg3_write32
;
16165 tp
->write32_rx_mbox
= tg3_write32
;
16167 /* Various workaround register access methods */
16168 if (tg3_flag(tp
, PCIX_TARGET_HWBUG
))
16169 tp
->write32
= tg3_write_indirect_reg32
;
16170 else if (tg3_asic_rev(tp
) == ASIC_REV_5701
||
16171 (tg3_flag(tp
, PCI_EXPRESS
) &&
16172 tg3_chip_rev_id(tp
) == CHIPREV_ID_5750_A0
)) {
16174 * Back to back register writes can cause problems on these
16175 * chips, the workaround is to read back all reg writes
16176 * except those to mailbox regs.
16178 * See tg3_write_indirect_reg32().
16180 tp
->write32
= tg3_write_flush_reg32
;
16183 if (tg3_flag(tp
, TXD_MBOX_HWBUG
) || tg3_flag(tp
, MBOX_WRITE_REORDER
)) {
16184 tp
->write32_tx_mbox
= tg3_write32_tx_mbox
;
16185 if (tg3_flag(tp
, MBOX_WRITE_REORDER
))
16186 tp
->write32_rx_mbox
= tg3_write_flush_reg32
;
16189 if (tg3_flag(tp
, ICH_WORKAROUND
)) {
16190 tp
->read32
= tg3_read_indirect_reg32
;
16191 tp
->write32
= tg3_write_indirect_reg32
;
16192 tp
->read32_mbox
= tg3_read_indirect_mbox
;
16193 tp
->write32_mbox
= tg3_write_indirect_mbox
;
16194 tp
->write32_tx_mbox
= tg3_write_indirect_mbox
;
16195 tp
->write32_rx_mbox
= tg3_write_indirect_mbox
;
16200 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
16201 pci_cmd
&= ~PCI_COMMAND_MEMORY
;
16202 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
16204 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
16205 tp
->read32_mbox
= tg3_read32_mbox_5906
;
16206 tp
->write32_mbox
= tg3_write32_mbox_5906
;
16207 tp
->write32_tx_mbox
= tg3_write32_mbox_5906
;
16208 tp
->write32_rx_mbox
= tg3_write32_mbox_5906
;
16211 if (tp
->write32
== tg3_write_indirect_reg32
||
16212 (tg3_flag(tp
, PCIX_MODE
) &&
16213 (tg3_asic_rev(tp
) == ASIC_REV_5700
||
16214 tg3_asic_rev(tp
) == ASIC_REV_5701
)))
16215 tg3_flag_set(tp
, SRAM_USE_CONFIG
);
16217 /* The memory arbiter has to be enabled in order for SRAM accesses
16218 * to succeed. Normally on powerup the tg3 chip firmware will make
16219 * sure it is enabled, but other entities such as system netboot
16220 * code might disable it.
16222 val
= tr32(MEMARB_MODE
);
16223 tw32(MEMARB_MODE
, val
| MEMARB_MODE_ENABLE
);
16225 tp
->pci_fn
= PCI_FUNC(tp
->pdev
->devfn
) & 3;
16226 if (tg3_asic_rev(tp
) == ASIC_REV_5704
||
16227 tg3_flag(tp
, 5780_CLASS
)) {
16228 if (tg3_flag(tp
, PCIX_MODE
)) {
16229 pci_read_config_dword(tp
->pdev
,
16230 tp
->pcix_cap
+ PCI_X_STATUS
,
16232 tp
->pci_fn
= val
& 0x7;
16234 } else if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
16235 tg3_asic_rev(tp
) == ASIC_REV_5719
||
16236 tg3_asic_rev(tp
) == ASIC_REV_5720
) {
16237 tg3_read_mem(tp
, NIC_SRAM_CPMU_STATUS
, &val
);
16238 if ((val
& NIC_SRAM_CPMUSTAT_SIG_MSK
) != NIC_SRAM_CPMUSTAT_SIG
)
16239 val
= tr32(TG3_CPMU_STATUS
);
16241 if (tg3_asic_rev(tp
) == ASIC_REV_5717
)
16242 tp
->pci_fn
= (val
& TG3_CPMU_STATUS_FMSK_5717
) ? 1 : 0;
16244 tp
->pci_fn
= (val
& TG3_CPMU_STATUS_FMSK_5719
) >>
16245 TG3_CPMU_STATUS_FSHFT_5719
;
16248 if (tg3_flag(tp
, FLUSH_POSTED_WRITES
)) {
16249 tp
->write32_tx_mbox
= tg3_write_flush_reg32
;
16250 tp
->write32_rx_mbox
= tg3_write_flush_reg32
;
16253 /* Get eeprom hw config before calling tg3_set_power_state().
16254 * In particular, the TG3_FLAG_IS_NIC flag must be
16255 * determined before calling tg3_set_power_state() so that
16256 * we know whether or not to switch out of Vaux power.
16257 * When the flag is set, it means that GPIO1 is used for eeprom
16258 * write protect and also implies that it is a LOM where GPIOs
16259 * are not used to switch power.
16261 tg3_get_eeprom_hw_cfg(tp
);
16263 if (tg3_flag(tp
, FW_TSO
) && tg3_flag(tp
, ENABLE_ASF
)) {
16264 tg3_flag_clear(tp
, TSO_CAPABLE
);
16265 tg3_flag_clear(tp
, TSO_BUG
);
16266 tp
->fw_needed
= NULL
;
16269 if (tg3_flag(tp
, ENABLE_APE
)) {
16270 /* Allow reads and writes to the
16271 * APE register and memory space.
16273 pci_state_reg
|= PCISTATE_ALLOW_APE_CTLSPC_WR
|
16274 PCISTATE_ALLOW_APE_SHMEM_WR
|
16275 PCISTATE_ALLOW_APE_PSPACE_WR
;
16276 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
16279 tg3_ape_lock_init(tp
);
16282 /* Set up tp->grc_local_ctrl before calling
16283 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16284 * will bring 5700's external PHY out of reset.
16285 * It is also used as eeprom write protect on LOMs.
16287 tp
->grc_local_ctrl
= GRC_LCLCTRL_INT_ON_ATTN
| GRC_LCLCTRL_AUTO_SEEPROM
;
16288 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
16289 tg3_flag(tp
, EEPROM_WRITE_PROT
))
16290 tp
->grc_local_ctrl
|= (GRC_LCLCTRL_GPIO_OE1
|
16291 GRC_LCLCTRL_GPIO_OUTPUT1
);
16292 /* Unused GPIO3 must be driven as output on 5752 because there
16293 * are no pull-up resistors on unused GPIO pins.
16295 else if (tg3_asic_rev(tp
) == ASIC_REV_5752
)
16296 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE3
;
16298 if (tg3_asic_rev(tp
) == ASIC_REV_5755
||
16299 tg3_asic_rev(tp
) == ASIC_REV_57780
||
16300 tg3_flag(tp
, 57765_CLASS
))
16301 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_UART_SEL
;
16303 if (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5761
||
16304 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5761S
) {
16305 /* Turn off the debug UART. */
16306 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_UART_SEL
;
16307 if (tg3_flag(tp
, IS_NIC
))
16308 /* Keep VMain power. */
16309 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE0
|
16310 GRC_LCLCTRL_GPIO_OUTPUT0
;
16313 if (tg3_asic_rev(tp
) == ASIC_REV_5762
)
16314 tp
->grc_local_ctrl
|=
16315 tr32(GRC_LOCAL_CTRL
) & GRC_LCLCTRL_GPIO_UART_SEL
;
16317 /* Switch out of Vaux if it is a NIC */
16318 tg3_pwrsrc_switch_to_vmain(tp
);
16320 /* Derive initial jumbo mode from MTU assigned in
16321 * ether_setup() via the alloc_etherdev() call
16323 if (tp
->dev
->mtu
> ETH_DATA_LEN
&& !tg3_flag(tp
, 5780_CLASS
))
16324 tg3_flag_set(tp
, JUMBO_RING_ENABLE
);
16326 /* Determine WakeOnLan speed to use. */
16327 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
16328 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
||
16329 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B0
||
16330 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B2
) {
16331 tg3_flag_clear(tp
, WOL_SPEED_100MB
);
16333 tg3_flag_set(tp
, WOL_SPEED_100MB
);
16336 if (tg3_asic_rev(tp
) == ASIC_REV_5906
)
16337 tp
->phy_flags
|= TG3_PHYFLG_IS_FET
;
16339 /* A few boards don't want Ethernet@WireSpeed phy feature */
16340 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
16341 (tg3_asic_rev(tp
) == ASIC_REV_5705
&&
16342 (tg3_chip_rev_id(tp
) != CHIPREV_ID_5705_A0
) &&
16343 (tg3_chip_rev_id(tp
) != CHIPREV_ID_5705_A1
)) ||
16344 (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) ||
16345 (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
16346 tp
->phy_flags
|= TG3_PHYFLG_NO_ETH_WIRE_SPEED
;
16348 if (tg3_chip_rev(tp
) == CHIPREV_5703_AX
||
16349 tg3_chip_rev(tp
) == CHIPREV_5704_AX
)
16350 tp
->phy_flags
|= TG3_PHYFLG_ADC_BUG
;
16351 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5704_A0
)
16352 tp
->phy_flags
|= TG3_PHYFLG_5704_A0_BUG
;
16354 if (tg3_flag(tp
, 5705_PLUS
) &&
16355 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
) &&
16356 tg3_asic_rev(tp
) != ASIC_REV_5785
&&
16357 tg3_asic_rev(tp
) != ASIC_REV_57780
&&
16358 !tg3_flag(tp
, 57765_PLUS
)) {
16359 if (tg3_asic_rev(tp
) == ASIC_REV_5755
||
16360 tg3_asic_rev(tp
) == ASIC_REV_5787
||
16361 tg3_asic_rev(tp
) == ASIC_REV_5784
||
16362 tg3_asic_rev(tp
) == ASIC_REV_5761
) {
16363 if (tp
->pdev
->device
!= PCI_DEVICE_ID_TIGON3_5756
&&
16364 tp
->pdev
->device
!= PCI_DEVICE_ID_TIGON3_5722
)
16365 tp
->phy_flags
|= TG3_PHYFLG_JITTER_BUG
;
16366 if (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5755M
)
16367 tp
->phy_flags
|= TG3_PHYFLG_ADJUST_TRIM
;
16369 tp
->phy_flags
|= TG3_PHYFLG_BER_BUG
;
16372 if (tg3_asic_rev(tp
) == ASIC_REV_5784
&&
16373 tg3_chip_rev(tp
) != CHIPREV_5784_AX
) {
16374 tp
->phy_otp
= tg3_read_otp_phycfg(tp
);
16375 if (tp
->phy_otp
== 0)
16376 tp
->phy_otp
= TG3_OTP_DEFAULT
;
16379 if (tg3_flag(tp
, CPMU_PRESENT
))
16380 tp
->mi_mode
= MAC_MI_MODE_500KHZ_CONST
;
16382 tp
->mi_mode
= MAC_MI_MODE_BASE
;
16384 tp
->coalesce_mode
= 0;
16385 if (tg3_chip_rev(tp
) != CHIPREV_5700_AX
&&
16386 tg3_chip_rev(tp
) != CHIPREV_5700_BX
)
16387 tp
->coalesce_mode
|= HOSTCC_MODE_32BYTE
;
16389 /* Set these bits to enable statistics workaround. */
16390 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
16391 tg3_chip_rev_id(tp
) == CHIPREV_ID_5719_A0
||
16392 tg3_chip_rev_id(tp
) == CHIPREV_ID_5720_A0
) {
16393 tp
->coalesce_mode
|= HOSTCC_MODE_ATTN
;
16394 tp
->grc_mode
|= GRC_MODE_IRQ_ON_FLOW_ATTN
;
16397 if (tg3_asic_rev(tp
) == ASIC_REV_5785
||
16398 tg3_asic_rev(tp
) == ASIC_REV_57780
)
16399 tg3_flag_set(tp
, USE_PHYLIB
);
16401 err
= tg3_mdio_init(tp
);
16405 /* Initialize data/descriptor byte/word swapping. */
16406 val
= tr32(GRC_MODE
);
16407 if (tg3_asic_rev(tp
) == ASIC_REV_5720
||
16408 tg3_asic_rev(tp
) == ASIC_REV_5762
)
16409 val
&= (GRC_MODE_BYTE_SWAP_B2HRX_DATA
|
16410 GRC_MODE_WORD_SWAP_B2HRX_DATA
|
16411 GRC_MODE_B2HRX_ENABLE
|
16412 GRC_MODE_HTX2B_ENABLE
|
16413 GRC_MODE_HOST_STACKUP
);
16415 val
&= GRC_MODE_HOST_STACKUP
;
16417 tw32(GRC_MODE
, val
| tp
->grc_mode
);
16419 tg3_switch_clocks(tp
);
16421 /* Clear this out for sanity. */
16422 tw32(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
16424 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
16426 if ((pci_state_reg
& PCISTATE_CONV_PCI_MODE
) == 0 &&
16427 !tg3_flag(tp
, PCIX_TARGET_HWBUG
)) {
16428 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
||
16429 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B0
||
16430 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B2
||
16431 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B5
) {
16432 void __iomem
*sram_base
;
16434 /* Write some dummy words into the SRAM status block
16435 * area, see if it reads back correctly. If the return
16436 * value is bad, force enable the PCIX workaround.
16438 sram_base
= tp
->regs
+ NIC_SRAM_WIN_BASE
+ NIC_SRAM_STATS_BLK
;
16440 writel(0x00000000, sram_base
);
16441 writel(0x00000000, sram_base
+ 4);
16442 writel(0xffffffff, sram_base
+ 4);
16443 if (readl(sram_base
) != 0x00000000)
16444 tg3_flag_set(tp
, PCIX_TARGET_HWBUG
);
16449 tg3_nvram_init(tp
);
16451 /* If the device has an NVRAM, no need to load patch firmware */
16452 if (tg3_asic_rev(tp
) == ASIC_REV_57766
&&
16453 !tg3_flag(tp
, NO_NVRAM
))
16454 tp
->fw_needed
= NULL
;
16456 grc_misc_cfg
= tr32(GRC_MISC_CFG
);
16457 grc_misc_cfg
&= GRC_MISC_CFG_BOARD_ID_MASK
;
16459 if (tg3_asic_rev(tp
) == ASIC_REV_5705
&&
16460 (grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5788
||
16461 grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5788M
))
16462 tg3_flag_set(tp
, IS_5788
);
16464 if (!tg3_flag(tp
, IS_5788
) &&
16465 tg3_asic_rev(tp
) != ASIC_REV_5700
)
16466 tg3_flag_set(tp
, TAGGED_STATUS
);
16467 if (tg3_flag(tp
, TAGGED_STATUS
)) {
16468 tp
->coalesce_mode
|= (HOSTCC_MODE_CLRTICK_RXBD
|
16469 HOSTCC_MODE_CLRTICK_TXBD
);
16471 tp
->misc_host_ctrl
|= MISC_HOST_CTRL_TAGGED_STATUS
;
16472 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
16473 tp
->misc_host_ctrl
);
16476 /* Preserve the APE MAC_MODE bits */
16477 if (tg3_flag(tp
, ENABLE_APE
))
16478 tp
->mac_mode
= MAC_MODE_APE_TX_EN
| MAC_MODE_APE_RX_EN
;
16482 if (tg3_10_100_only_device(tp
, ent
))
16483 tp
->phy_flags
|= TG3_PHYFLG_10_100_ONLY
;
16485 err
= tg3_phy_probe(tp
);
16487 dev_err(&tp
->pdev
->dev
, "phy probe failed, err %d\n", err
);
16488 /* ... but do not return immediately ... */
16493 tg3_read_fw_ver(tp
);
16495 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
16496 tp
->phy_flags
&= ~TG3_PHYFLG_USE_MI_INTERRUPT
;
16498 if (tg3_asic_rev(tp
) == ASIC_REV_5700
)
16499 tp
->phy_flags
|= TG3_PHYFLG_USE_MI_INTERRUPT
;
16501 tp
->phy_flags
&= ~TG3_PHYFLG_USE_MI_INTERRUPT
;
16504 /* 5700 {AX,BX} chips have a broken status block link
16505 * change bit implementation, so we must use the
16506 * status register in those cases.
16508 if (tg3_asic_rev(tp
) == ASIC_REV_5700
)
16509 tg3_flag_set(tp
, USE_LINKCHG_REG
);
16511 tg3_flag_clear(tp
, USE_LINKCHG_REG
);
16513 /* The led_ctrl is set during tg3_phy_probe, here we might
16514 * have to force the link status polling mechanism based
16515 * upon subsystem IDs.
16517 if (tp
->pdev
->subsystem_vendor
== PCI_VENDOR_ID_DELL
&&
16518 tg3_asic_rev(tp
) == ASIC_REV_5701
&&
16519 !(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
16520 tp
->phy_flags
|= TG3_PHYFLG_USE_MI_INTERRUPT
;
16521 tg3_flag_set(tp
, USE_LINKCHG_REG
);
16524 /* For all SERDES we poll the MAC status register. */
16525 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
16526 tg3_flag_set(tp
, POLL_SERDES
);
16528 tg3_flag_clear(tp
, POLL_SERDES
);
16530 tp
->rx_offset
= NET_SKB_PAD
+ NET_IP_ALIGN
;
16531 tp
->rx_copy_thresh
= TG3_RX_COPY_THRESHOLD
;
16532 if (tg3_asic_rev(tp
) == ASIC_REV_5701
&&
16533 tg3_flag(tp
, PCIX_MODE
)) {
16534 tp
->rx_offset
= NET_SKB_PAD
;
16535 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16536 tp
->rx_copy_thresh
= ~(u16
)0;
16540 tp
->rx_std_ring_mask
= TG3_RX_STD_RING_SIZE(tp
) - 1;
16541 tp
->rx_jmb_ring_mask
= TG3_RX_JMB_RING_SIZE(tp
) - 1;
16542 tp
->rx_ret_ring_mask
= tg3_rx_ret_ring_size(tp
) - 1;
16544 tp
->rx_std_max_post
= tp
->rx_std_ring_mask
+ 1;
16546 /* Increment the rx prod index on the rx std ring by at most
16547 * 8 for these chips to workaround hw errata.
16549 if (tg3_asic_rev(tp
) == ASIC_REV_5750
||
16550 tg3_asic_rev(tp
) == ASIC_REV_5752
||
16551 tg3_asic_rev(tp
) == ASIC_REV_5755
)
16552 tp
->rx_std_max_post
= 8;
16554 if (tg3_flag(tp
, ASPM_WORKAROUND
))
16555 tp
->pwrmgmt_thresh
= tr32(PCIE_PWR_MGMT_THRESH
) &
16556 PCIE_PWR_MGMT_L1_THRESH_MSK
;
16561 #ifdef CONFIG_SPARC
16562 static int tg3_get_macaddr_sparc(struct tg3
*tp
)
16564 struct net_device
*dev
= tp
->dev
;
16565 struct pci_dev
*pdev
= tp
->pdev
;
16566 struct device_node
*dp
= pci_device_to_OF_node(pdev
);
16567 const unsigned char *addr
;
16570 addr
= of_get_property(dp
, "local-mac-address", &len
);
16571 if (addr
&& len
== 6) {
16572 memcpy(dev
->dev_addr
, addr
, 6);
16578 static int tg3_get_default_macaddr_sparc(struct tg3
*tp
)
16580 struct net_device
*dev
= tp
->dev
;
16582 memcpy(dev
->dev_addr
, idprom
->id_ethaddr
, 6);
16587 static int tg3_get_device_address(struct tg3
*tp
)
16589 struct net_device
*dev
= tp
->dev
;
16590 u32 hi
, lo
, mac_offset
;
16594 #ifdef CONFIG_SPARC
16595 if (!tg3_get_macaddr_sparc(tp
))
16599 if (tg3_flag(tp
, IS_SSB_CORE
)) {
16600 err
= ssb_gige_get_macaddr(tp
->pdev
, &dev
->dev_addr
[0]);
16601 if (!err
&& is_valid_ether_addr(&dev
->dev_addr
[0]))
16606 if (tg3_asic_rev(tp
) == ASIC_REV_5704
||
16607 tg3_flag(tp
, 5780_CLASS
)) {
16608 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
16610 if (tg3_nvram_lock(tp
))
16611 tw32_f(NVRAM_CMD
, NVRAM_CMD_RESET
);
16613 tg3_nvram_unlock(tp
);
16614 } else if (tg3_flag(tp
, 5717_PLUS
)) {
16615 if (tp
->pci_fn
& 1)
16617 if (tp
->pci_fn
> 1)
16618 mac_offset
+= 0x18c;
16619 } else if (tg3_asic_rev(tp
) == ASIC_REV_5906
)
16622 /* First try to get it from MAC address mailbox. */
16623 tg3_read_mem(tp
, NIC_SRAM_MAC_ADDR_HIGH_MBOX
, &hi
);
16624 if ((hi
>> 16) == 0x484b) {
16625 dev
->dev_addr
[0] = (hi
>> 8) & 0xff;
16626 dev
->dev_addr
[1] = (hi
>> 0) & 0xff;
16628 tg3_read_mem(tp
, NIC_SRAM_MAC_ADDR_LOW_MBOX
, &lo
);
16629 dev
->dev_addr
[2] = (lo
>> 24) & 0xff;
16630 dev
->dev_addr
[3] = (lo
>> 16) & 0xff;
16631 dev
->dev_addr
[4] = (lo
>> 8) & 0xff;
16632 dev
->dev_addr
[5] = (lo
>> 0) & 0xff;
16634 /* Some old bootcode may report a 0 MAC address in SRAM */
16635 addr_ok
= is_valid_ether_addr(&dev
->dev_addr
[0]);
16638 /* Next, try NVRAM. */
16639 if (!tg3_flag(tp
, NO_NVRAM
) &&
16640 !tg3_nvram_read_be32(tp
, mac_offset
+ 0, &hi
) &&
16641 !tg3_nvram_read_be32(tp
, mac_offset
+ 4, &lo
)) {
16642 memcpy(&dev
->dev_addr
[0], ((char *)&hi
) + 2, 2);
16643 memcpy(&dev
->dev_addr
[2], (char *)&lo
, sizeof(lo
));
16645 /* Finally just fetch it out of the MAC control regs. */
16647 hi
= tr32(MAC_ADDR_0_HIGH
);
16648 lo
= tr32(MAC_ADDR_0_LOW
);
16650 dev
->dev_addr
[5] = lo
& 0xff;
16651 dev
->dev_addr
[4] = (lo
>> 8) & 0xff;
16652 dev
->dev_addr
[3] = (lo
>> 16) & 0xff;
16653 dev
->dev_addr
[2] = (lo
>> 24) & 0xff;
16654 dev
->dev_addr
[1] = hi
& 0xff;
16655 dev
->dev_addr
[0] = (hi
>> 8) & 0xff;
16659 if (!is_valid_ether_addr(&dev
->dev_addr
[0])) {
16660 #ifdef CONFIG_SPARC
16661 if (!tg3_get_default_macaddr_sparc(tp
))
16669 #define BOUNDARY_SINGLE_CACHELINE 1
16670 #define BOUNDARY_MULTI_CACHELINE 2
16672 static u32
tg3_calc_dma_bndry(struct tg3
*tp
, u32 val
)
16674 int cacheline_size
;
16678 pci_read_config_byte(tp
->pdev
, PCI_CACHE_LINE_SIZE
, &byte
);
16680 cacheline_size
= 1024;
16682 cacheline_size
= (int) byte
* 4;
16684 /* On 5703 and later chips, the boundary bits have no
16687 if (tg3_asic_rev(tp
) != ASIC_REV_5700
&&
16688 tg3_asic_rev(tp
) != ASIC_REV_5701
&&
16689 !tg3_flag(tp
, PCI_EXPRESS
))
16692 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16693 goal
= BOUNDARY_MULTI_CACHELINE
;
16695 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16696 goal
= BOUNDARY_SINGLE_CACHELINE
;
16702 if (tg3_flag(tp
, 57765_PLUS
)) {
16703 val
= goal
? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT
;
16710 /* PCI controllers on most RISC systems tend to disconnect
16711 * when a device tries to burst across a cache-line boundary.
16712 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16714 * Unfortunately, for PCI-E there are only limited
16715 * write-side controls for this, and thus for reads
16716 * we will still get the disconnects. We'll also waste
16717 * these PCI cycles for both read and write for chips
16718 * other than 5700 and 5701 which do not implement the
16721 if (tg3_flag(tp
, PCIX_MODE
) && !tg3_flag(tp
, PCI_EXPRESS
)) {
16722 switch (cacheline_size
) {
16727 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
16728 val
|= (DMA_RWCTRL_READ_BNDRY_128_PCIX
|
16729 DMA_RWCTRL_WRITE_BNDRY_128_PCIX
);
16731 val
|= (DMA_RWCTRL_READ_BNDRY_384_PCIX
|
16732 DMA_RWCTRL_WRITE_BNDRY_384_PCIX
);
16737 val
|= (DMA_RWCTRL_READ_BNDRY_256_PCIX
|
16738 DMA_RWCTRL_WRITE_BNDRY_256_PCIX
);
16742 val
|= (DMA_RWCTRL_READ_BNDRY_384_PCIX
|
16743 DMA_RWCTRL_WRITE_BNDRY_384_PCIX
);
16746 } else if (tg3_flag(tp
, PCI_EXPRESS
)) {
16747 switch (cacheline_size
) {
16751 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
16752 val
&= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE
;
16753 val
|= DMA_RWCTRL_WRITE_BNDRY_64_PCIE
;
16759 val
&= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE
;
16760 val
|= DMA_RWCTRL_WRITE_BNDRY_128_PCIE
;
16764 switch (cacheline_size
) {
16766 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
16767 val
|= (DMA_RWCTRL_READ_BNDRY_16
|
16768 DMA_RWCTRL_WRITE_BNDRY_16
);
16773 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
16774 val
|= (DMA_RWCTRL_READ_BNDRY_32
|
16775 DMA_RWCTRL_WRITE_BNDRY_32
);
16780 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
16781 val
|= (DMA_RWCTRL_READ_BNDRY_64
|
16782 DMA_RWCTRL_WRITE_BNDRY_64
);
16787 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
16788 val
|= (DMA_RWCTRL_READ_BNDRY_128
|
16789 DMA_RWCTRL_WRITE_BNDRY_128
);
16794 val
|= (DMA_RWCTRL_READ_BNDRY_256
|
16795 DMA_RWCTRL_WRITE_BNDRY_256
);
16798 val
|= (DMA_RWCTRL_READ_BNDRY_512
|
16799 DMA_RWCTRL_WRITE_BNDRY_512
);
16803 val
|= (DMA_RWCTRL_READ_BNDRY_1024
|
16804 DMA_RWCTRL_WRITE_BNDRY_1024
);
16813 static int tg3_do_test_dma(struct tg3
*tp
, u32
*buf
, dma_addr_t buf_dma
,
16814 int size
, bool to_device
)
16816 struct tg3_internal_buffer_desc test_desc
;
16817 u32 sram_dma_descs
;
16820 sram_dma_descs
= NIC_SRAM_DMA_DESC_POOL_BASE
;
16822 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ
, 0);
16823 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ
, 0);
16824 tw32(RDMAC_STATUS
, 0);
16825 tw32(WDMAC_STATUS
, 0);
16827 tw32(BUFMGR_MODE
, 0);
16828 tw32(FTQ_RESET
, 0);
16830 test_desc
.addr_hi
= ((u64
) buf_dma
) >> 32;
16831 test_desc
.addr_lo
= buf_dma
& 0xffffffff;
16832 test_desc
.nic_mbuf
= 0x00002100;
16833 test_desc
.len
= size
;
16836 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
16837 * the *second* time the tg3 driver was getting loaded after an
16840 * Broadcom tells me:
16841 * ...the DMA engine is connected to the GRC block and a DMA
16842 * reset may affect the GRC block in some unpredictable way...
16843 * The behavior of resets to individual blocks has not been tested.
16845 * Broadcom noted the GRC reset will also reset all sub-components.
16848 test_desc
.cqid_sqid
= (13 << 8) | 2;
16850 tw32_f(RDMAC_MODE
, RDMAC_MODE_ENABLE
);
16853 test_desc
.cqid_sqid
= (16 << 8) | 7;
16855 tw32_f(WDMAC_MODE
, WDMAC_MODE_ENABLE
);
16858 test_desc
.flags
= 0x00000005;
16860 for (i
= 0; i
< (sizeof(test_desc
) / sizeof(u32
)); i
++) {
16863 val
= *(((u32
*)&test_desc
) + i
);
16864 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
,
16865 sram_dma_descs
+ (i
* sizeof(u32
)));
16866 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
16868 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
16871 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ
, sram_dma_descs
);
16873 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ
, sram_dma_descs
);
16876 for (i
= 0; i
< 40; i
++) {
16880 val
= tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ
);
16882 val
= tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ
);
16883 if ((val
& 0xffff) == sram_dma_descs
) {
16894 #define TEST_BUFFER_SIZE 0x2000
16896 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets
) = {
16897 { PCI_DEVICE(PCI_VENDOR_ID_APPLE
, PCI_DEVICE_ID_APPLE_UNI_N_PCI15
) },
16901 static int tg3_test_dma(struct tg3
*tp
)
16903 dma_addr_t buf_dma
;
16904 u32
*buf
, saved_dma_rwctrl
;
16907 buf
= dma_alloc_coherent(&tp
->pdev
->dev
, TEST_BUFFER_SIZE
,
16908 &buf_dma
, GFP_KERNEL
);
16914 tp
->dma_rwctrl
= ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT
) |
16915 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT
));
16917 tp
->dma_rwctrl
= tg3_calc_dma_bndry(tp
, tp
->dma_rwctrl
);
16919 if (tg3_flag(tp
, 57765_PLUS
))
16922 if (tg3_flag(tp
, PCI_EXPRESS
)) {
16923 /* DMA read watermark not used on PCIE */
16924 tp
->dma_rwctrl
|= 0x00180000;
16925 } else if (!tg3_flag(tp
, PCIX_MODE
)) {
16926 if (tg3_asic_rev(tp
) == ASIC_REV_5705
||
16927 tg3_asic_rev(tp
) == ASIC_REV_5750
)
16928 tp
->dma_rwctrl
|= 0x003f0000;
16930 tp
->dma_rwctrl
|= 0x003f000f;
16932 if (tg3_asic_rev(tp
) == ASIC_REV_5703
||
16933 tg3_asic_rev(tp
) == ASIC_REV_5704
) {
16934 u32 ccval
= (tr32(TG3PCI_CLOCK_CTRL
) & 0x1f);
16935 u32 read_water
= 0x7;
16937 /* If the 5704 is behind the EPB bridge, we can
16938 * do the less restrictive ONE_DMA workaround for
16939 * better performance.
16941 if (tg3_flag(tp
, 40BIT_DMA_BUG
) &&
16942 tg3_asic_rev(tp
) == ASIC_REV_5704
)
16943 tp
->dma_rwctrl
|= 0x8000;
16944 else if (ccval
== 0x6 || ccval
== 0x7)
16945 tp
->dma_rwctrl
|= DMA_RWCTRL_ONE_DMA
;
16947 if (tg3_asic_rev(tp
) == ASIC_REV_5703
)
16949 /* Set bit 23 to enable PCIX hw bug fix */
16951 (read_water
<< DMA_RWCTRL_READ_WATER_SHIFT
) |
16952 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT
) |
16954 } else if (tg3_asic_rev(tp
) == ASIC_REV_5780
) {
16955 /* 5780 always in PCIX mode */
16956 tp
->dma_rwctrl
|= 0x00144000;
16957 } else if (tg3_asic_rev(tp
) == ASIC_REV_5714
) {
16958 /* 5714 always in PCIX mode */
16959 tp
->dma_rwctrl
|= 0x00148000;
16961 tp
->dma_rwctrl
|= 0x001b000f;
16964 if (tg3_flag(tp
, ONE_DMA_AT_ONCE
))
16965 tp
->dma_rwctrl
|= DMA_RWCTRL_ONE_DMA
;
16967 if (tg3_asic_rev(tp
) == ASIC_REV_5703
||
16968 tg3_asic_rev(tp
) == ASIC_REV_5704
)
16969 tp
->dma_rwctrl
&= 0xfffffff0;
16971 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
16972 tg3_asic_rev(tp
) == ASIC_REV_5701
) {
16973 /* Remove this if it causes problems for some boards. */
16974 tp
->dma_rwctrl
|= DMA_RWCTRL_USE_MEM_READ_MULT
;
16976 /* On 5700/5701 chips, we need to set this bit.
16977 * Otherwise the chip will issue cacheline transactions
16978 * to streamable DMA memory with not all the byte
16979 * enables turned on. This is an error on several
16980 * RISC PCI controllers, in particular sparc64.
16982 * On 5703/5704 chips, this bit has been reassigned
16983 * a different meaning. In particular, it is used
16984 * on those chips to enable a PCI-X workaround.
16986 tp
->dma_rwctrl
|= DMA_RWCTRL_ASSERT_ALL_BE
;
16989 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
16992 /* Unneeded, already done by tg3_get_invariants. */
16993 tg3_switch_clocks(tp
);
16996 if (tg3_asic_rev(tp
) != ASIC_REV_5700
&&
16997 tg3_asic_rev(tp
) != ASIC_REV_5701
)
17000 /* It is best to perform DMA test with maximum write burst size
17001 * to expose the 5700/5701 write DMA bug.
17003 saved_dma_rwctrl
= tp
->dma_rwctrl
;
17004 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
17005 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
17010 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++)
17013 /* Send the buffer to the chip. */
17014 ret
= tg3_do_test_dma(tp
, buf
, buf_dma
, TEST_BUFFER_SIZE
, true);
17016 dev_err(&tp
->pdev
->dev
,
17017 "%s: Buffer write failed. err = %d\n",
17023 /* validate data reached card RAM correctly. */
17024 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++) {
17026 tg3_read_mem(tp
, 0x2100 + (i
*4), &val
);
17027 if (le32_to_cpu(val
) != p
[i
]) {
17028 dev_err(&tp
->pdev
->dev
,
17029 "%s: Buffer corrupted on device! "
17030 "(%d != %d)\n", __func__
, val
, i
);
17031 /* ret = -ENODEV here? */
17036 /* Now read it back. */
17037 ret
= tg3_do_test_dma(tp
, buf
, buf_dma
, TEST_BUFFER_SIZE
, false);
17039 dev_err(&tp
->pdev
->dev
, "%s: Buffer read failed. "
17040 "err = %d\n", __func__
, ret
);
17045 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++) {
17049 if ((tp
->dma_rwctrl
& DMA_RWCTRL_WRITE_BNDRY_MASK
) !=
17050 DMA_RWCTRL_WRITE_BNDRY_16
) {
17051 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
17052 tp
->dma_rwctrl
|= DMA_RWCTRL_WRITE_BNDRY_16
;
17053 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
17056 dev_err(&tp
->pdev
->dev
,
17057 "%s: Buffer corrupted on read back! "
17058 "(%d != %d)\n", __func__
, p
[i
], i
);
17064 if (i
== (TEST_BUFFER_SIZE
/ sizeof(u32
))) {
17070 if ((tp
->dma_rwctrl
& DMA_RWCTRL_WRITE_BNDRY_MASK
) !=
17071 DMA_RWCTRL_WRITE_BNDRY_16
) {
17072 /* DMA test passed without adjusting DMA boundary,
17073 * now look for chipsets that are known to expose the
17074 * DMA bug without failing the test.
17076 if (pci_dev_present(tg3_dma_wait_state_chipsets
)) {
17077 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
17078 tp
->dma_rwctrl
|= DMA_RWCTRL_WRITE_BNDRY_16
;
17080 /* Safe to use the calculated DMA boundary. */
17081 tp
->dma_rwctrl
= saved_dma_rwctrl
;
17084 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
17088 dma_free_coherent(&tp
->pdev
->dev
, TEST_BUFFER_SIZE
, buf
, buf_dma
);
17093 static void tg3_init_bufmgr_config(struct tg3
*tp
)
17095 if (tg3_flag(tp
, 57765_PLUS
)) {
17096 tp
->bufmgr_config
.mbuf_read_dma_low_water
=
17097 DEFAULT_MB_RDMA_LOW_WATER_5705
;
17098 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
17099 DEFAULT_MB_MACRX_LOW_WATER_57765
;
17100 tp
->bufmgr_config
.mbuf_high_water
=
17101 DEFAULT_MB_HIGH_WATER_57765
;
17103 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
=
17104 DEFAULT_MB_RDMA_LOW_WATER_5705
;
17105 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
=
17106 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765
;
17107 tp
->bufmgr_config
.mbuf_high_water_jumbo
=
17108 DEFAULT_MB_HIGH_WATER_JUMBO_57765
;
17109 } else if (tg3_flag(tp
, 5705_PLUS
)) {
17110 tp
->bufmgr_config
.mbuf_read_dma_low_water
=
17111 DEFAULT_MB_RDMA_LOW_WATER_5705
;
17112 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
17113 DEFAULT_MB_MACRX_LOW_WATER_5705
;
17114 tp
->bufmgr_config
.mbuf_high_water
=
17115 DEFAULT_MB_HIGH_WATER_5705
;
17116 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
17117 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
17118 DEFAULT_MB_MACRX_LOW_WATER_5906
;
17119 tp
->bufmgr_config
.mbuf_high_water
=
17120 DEFAULT_MB_HIGH_WATER_5906
;
17123 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
=
17124 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780
;
17125 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
=
17126 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780
;
17127 tp
->bufmgr_config
.mbuf_high_water_jumbo
=
17128 DEFAULT_MB_HIGH_WATER_JUMBO_5780
;
17130 tp
->bufmgr_config
.mbuf_read_dma_low_water
=
17131 DEFAULT_MB_RDMA_LOW_WATER
;
17132 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
17133 DEFAULT_MB_MACRX_LOW_WATER
;
17134 tp
->bufmgr_config
.mbuf_high_water
=
17135 DEFAULT_MB_HIGH_WATER
;
17137 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
=
17138 DEFAULT_MB_RDMA_LOW_WATER_JUMBO
;
17139 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
=
17140 DEFAULT_MB_MACRX_LOW_WATER_JUMBO
;
17141 tp
->bufmgr_config
.mbuf_high_water_jumbo
=
17142 DEFAULT_MB_HIGH_WATER_JUMBO
;
17145 tp
->bufmgr_config
.dma_low_water
= DEFAULT_DMA_LOW_WATER
;
17146 tp
->bufmgr_config
.dma_high_water
= DEFAULT_DMA_HIGH_WATER
;
17149 static char *tg3_phy_string(struct tg3
*tp
)
17151 switch (tp
->phy_id
& TG3_PHY_ID_MASK
) {
17152 case TG3_PHY_ID_BCM5400
: return "5400";
17153 case TG3_PHY_ID_BCM5401
: return "5401";
17154 case TG3_PHY_ID_BCM5411
: return "5411";
17155 case TG3_PHY_ID_BCM5701
: return "5701";
17156 case TG3_PHY_ID_BCM5703
: return "5703";
17157 case TG3_PHY_ID_BCM5704
: return "5704";
17158 case TG3_PHY_ID_BCM5705
: return "5705";
17159 case TG3_PHY_ID_BCM5750
: return "5750";
17160 case TG3_PHY_ID_BCM5752
: return "5752";
17161 case TG3_PHY_ID_BCM5714
: return "5714";
17162 case TG3_PHY_ID_BCM5780
: return "5780";
17163 case TG3_PHY_ID_BCM5755
: return "5755";
17164 case TG3_PHY_ID_BCM5787
: return "5787";
17165 case TG3_PHY_ID_BCM5784
: return "5784";
17166 case TG3_PHY_ID_BCM5756
: return "5722/5756";
17167 case TG3_PHY_ID_BCM5906
: return "5906";
17168 case TG3_PHY_ID_BCM5761
: return "5761";
17169 case TG3_PHY_ID_BCM5718C
: return "5718C";
17170 case TG3_PHY_ID_BCM5718S
: return "5718S";
17171 case TG3_PHY_ID_BCM57765
: return "57765";
17172 case TG3_PHY_ID_BCM5719C
: return "5719C";
17173 case TG3_PHY_ID_BCM5720C
: return "5720C";
17174 case TG3_PHY_ID_BCM5762
: return "5762C";
17175 case TG3_PHY_ID_BCM8002
: return "8002/serdes";
17176 case 0: return "serdes";
17177 default: return "unknown";
17181 static char *tg3_bus_string(struct tg3
*tp
, char *str
)
17183 if (tg3_flag(tp
, PCI_EXPRESS
)) {
17184 strcpy(str
, "PCI Express");
17186 } else if (tg3_flag(tp
, PCIX_MODE
)) {
17187 u32 clock_ctrl
= tr32(TG3PCI_CLOCK_CTRL
) & 0x1f;
17189 strcpy(str
, "PCIX:");
17191 if ((clock_ctrl
== 7) ||
17192 ((tr32(GRC_MISC_CFG
) & GRC_MISC_CFG_BOARD_ID_MASK
) ==
17193 GRC_MISC_CFG_BOARD_ID_5704CIOBE
))
17194 strcat(str
, "133MHz");
17195 else if (clock_ctrl
== 0)
17196 strcat(str
, "33MHz");
17197 else if (clock_ctrl
== 2)
17198 strcat(str
, "50MHz");
17199 else if (clock_ctrl
== 4)
17200 strcat(str
, "66MHz");
17201 else if (clock_ctrl
== 6)
17202 strcat(str
, "100MHz");
17204 strcpy(str
, "PCI:");
17205 if (tg3_flag(tp
, PCI_HIGH_SPEED
))
17206 strcat(str
, "66MHz");
17208 strcat(str
, "33MHz");
17210 if (tg3_flag(tp
, PCI_32BIT
))
17211 strcat(str
, ":32-bit");
17213 strcat(str
, ":64-bit");
17217 static void tg3_init_coal(struct tg3
*tp
)
17219 struct ethtool_coalesce
*ec
= &tp
->coal
;
17221 memset(ec
, 0, sizeof(*ec
));
17222 ec
->cmd
= ETHTOOL_GCOALESCE
;
17223 ec
->rx_coalesce_usecs
= LOW_RXCOL_TICKS
;
17224 ec
->tx_coalesce_usecs
= LOW_TXCOL_TICKS
;
17225 ec
->rx_max_coalesced_frames
= LOW_RXMAX_FRAMES
;
17226 ec
->tx_max_coalesced_frames
= LOW_TXMAX_FRAMES
;
17227 ec
->rx_coalesce_usecs_irq
= DEFAULT_RXCOAL_TICK_INT
;
17228 ec
->tx_coalesce_usecs_irq
= DEFAULT_TXCOAL_TICK_INT
;
17229 ec
->rx_max_coalesced_frames_irq
= DEFAULT_RXCOAL_MAXF_INT
;
17230 ec
->tx_max_coalesced_frames_irq
= DEFAULT_TXCOAL_MAXF_INT
;
17231 ec
->stats_block_coalesce_usecs
= DEFAULT_STAT_COAL_TICKS
;
17233 if (tp
->coalesce_mode
& (HOSTCC_MODE_CLRTICK_RXBD
|
17234 HOSTCC_MODE_CLRTICK_TXBD
)) {
17235 ec
->rx_coalesce_usecs
= LOW_RXCOL_TICKS_CLRTCKS
;
17236 ec
->rx_coalesce_usecs_irq
= DEFAULT_RXCOAL_TICK_INT_CLRTCKS
;
17237 ec
->tx_coalesce_usecs
= LOW_TXCOL_TICKS_CLRTCKS
;
17238 ec
->tx_coalesce_usecs_irq
= DEFAULT_TXCOAL_TICK_INT_CLRTCKS
;
17241 if (tg3_flag(tp
, 5705_PLUS
)) {
17242 ec
->rx_coalesce_usecs_irq
= 0;
17243 ec
->tx_coalesce_usecs_irq
= 0;
17244 ec
->stats_block_coalesce_usecs
= 0;
17248 static int tg3_init_one(struct pci_dev
*pdev
,
17249 const struct pci_device_id
*ent
)
17251 struct net_device
*dev
;
17254 u32 sndmbx
, rcvmbx
, intmbx
;
17256 u64 dma_mask
, persist_dma_mask
;
17257 netdev_features_t features
= 0;
17259 printk_once(KERN_INFO
"%s\n", version
);
17261 err
= pci_enable_device(pdev
);
17263 dev_err(&pdev
->dev
, "Cannot enable PCI device, aborting\n");
17267 err
= pci_request_regions(pdev
, DRV_MODULE_NAME
);
17269 dev_err(&pdev
->dev
, "Cannot obtain PCI resources, aborting\n");
17270 goto err_out_disable_pdev
;
17273 pci_set_master(pdev
);
17275 dev
= alloc_etherdev_mq(sizeof(*tp
), TG3_IRQ_MAX_VECS
);
17278 goto err_out_free_res
;
17281 SET_NETDEV_DEV(dev
, &pdev
->dev
);
17283 tp
= netdev_priv(dev
);
17286 tp
->pm_cap
= pdev
->pm_cap
;
17287 tp
->rx_mode
= TG3_DEF_RX_MODE
;
17288 tp
->tx_mode
= TG3_DEF_TX_MODE
;
17292 tp
->msg_enable
= tg3_debug
;
17294 tp
->msg_enable
= TG3_DEF_MSG_ENABLE
;
17296 if (pdev_is_ssb_gige_core(pdev
)) {
17297 tg3_flag_set(tp
, IS_SSB_CORE
);
17298 if (ssb_gige_must_flush_posted_writes(pdev
))
17299 tg3_flag_set(tp
, FLUSH_POSTED_WRITES
);
17300 if (ssb_gige_one_dma_at_once(pdev
))
17301 tg3_flag_set(tp
, ONE_DMA_AT_ONCE
);
17302 if (ssb_gige_have_roboswitch(pdev
))
17303 tg3_flag_set(tp
, ROBOSWITCH
);
17304 if (ssb_gige_is_rgmii(pdev
))
17305 tg3_flag_set(tp
, RGMII_MODE
);
17308 /* The word/byte swap controls here control register access byte
17309 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17312 tp
->misc_host_ctrl
=
17313 MISC_HOST_CTRL_MASK_PCI_INT
|
17314 MISC_HOST_CTRL_WORD_SWAP
|
17315 MISC_HOST_CTRL_INDIR_ACCESS
|
17316 MISC_HOST_CTRL_PCISTATE_RW
;
17318 /* The NONFRM (non-frame) byte/word swap controls take effect
17319 * on descriptor entries, anything which isn't packet data.
17321 * The StrongARM chips on the board (one for tx, one for rx)
17322 * are running in big-endian mode.
17324 tp
->grc_mode
= (GRC_MODE_WSWAP_DATA
| GRC_MODE_BSWAP_DATA
|
17325 GRC_MODE_WSWAP_NONFRM_DATA
);
17326 #ifdef __BIG_ENDIAN
17327 tp
->grc_mode
|= GRC_MODE_BSWAP_NONFRM_DATA
;
17329 spin_lock_init(&tp
->lock
);
17330 spin_lock_init(&tp
->indirect_lock
);
17331 INIT_WORK(&tp
->reset_task
, tg3_reset_task
);
17333 tp
->regs
= pci_ioremap_bar(pdev
, BAR_0
);
17335 dev_err(&pdev
->dev
, "Cannot map device registers, aborting\n");
17337 goto err_out_free_dev
;
17340 if (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5761
||
17341 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5761E
||
17342 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5761S
||
17343 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5761SE
||
17344 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717
||
17345 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717_C
||
17346 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5718
||
17347 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5719
||
17348 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5720
||
17349 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5762
||
17350 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5725
||
17351 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5727
) {
17352 tg3_flag_set(tp
, ENABLE_APE
);
17353 tp
->aperegs
= pci_ioremap_bar(pdev
, BAR_2
);
17354 if (!tp
->aperegs
) {
17355 dev_err(&pdev
->dev
,
17356 "Cannot map APE registers, aborting\n");
17358 goto err_out_iounmap
;
17362 tp
->rx_pending
= TG3_DEF_RX_RING_PENDING
;
17363 tp
->rx_jumbo_pending
= TG3_DEF_RX_JUMBO_RING_PENDING
;
17365 dev
->ethtool_ops
= &tg3_ethtool_ops
;
17366 dev
->watchdog_timeo
= TG3_TX_TIMEOUT
;
17367 dev
->netdev_ops
= &tg3_netdev_ops
;
17368 dev
->irq
= pdev
->irq
;
17370 err
= tg3_get_invariants(tp
, ent
);
17372 dev_err(&pdev
->dev
,
17373 "Problem fetching invariants of chip, aborting\n");
17374 goto err_out_apeunmap
;
17377 /* The EPB bridge inside 5714, 5715, and 5780 and any
17378 * device behind the EPB cannot support DMA addresses > 40-bit.
17379 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17380 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17381 * do DMA address check in tg3_start_xmit().
17383 if (tg3_flag(tp
, IS_5788
))
17384 persist_dma_mask
= dma_mask
= DMA_BIT_MASK(32);
17385 else if (tg3_flag(tp
, 40BIT_DMA_BUG
)) {
17386 persist_dma_mask
= dma_mask
= DMA_BIT_MASK(40);
17387 #ifdef CONFIG_HIGHMEM
17388 dma_mask
= DMA_BIT_MASK(64);
17391 persist_dma_mask
= dma_mask
= DMA_BIT_MASK(64);
17393 /* Configure DMA attributes. */
17394 if (dma_mask
> DMA_BIT_MASK(32)) {
17395 err
= pci_set_dma_mask(pdev
, dma_mask
);
17397 features
|= NETIF_F_HIGHDMA
;
17398 err
= pci_set_consistent_dma_mask(pdev
,
17401 dev_err(&pdev
->dev
, "Unable to obtain 64 bit "
17402 "DMA for consistent allocations\n");
17403 goto err_out_apeunmap
;
17407 if (err
|| dma_mask
== DMA_BIT_MASK(32)) {
17408 err
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(32));
17410 dev_err(&pdev
->dev
,
17411 "No usable DMA configuration, aborting\n");
17412 goto err_out_apeunmap
;
17416 tg3_init_bufmgr_config(tp
);
17418 features
|= NETIF_F_HW_VLAN_CTAG_TX
| NETIF_F_HW_VLAN_CTAG_RX
;
17420 /* 5700 B0 chips do not support checksumming correctly due
17421 * to hardware bugs.
17423 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5700_B0
) {
17424 features
|= NETIF_F_SG
| NETIF_F_IP_CSUM
| NETIF_F_RXCSUM
;
17426 if (tg3_flag(tp
, 5755_PLUS
))
17427 features
|= NETIF_F_IPV6_CSUM
;
17430 /* TSO is on by default on chips that support hardware TSO.
17431 * Firmware TSO on older chips gives lower performance, so it
17432 * is off by default, but can be enabled using ethtool.
17434 if ((tg3_flag(tp
, HW_TSO_1
) ||
17435 tg3_flag(tp
, HW_TSO_2
) ||
17436 tg3_flag(tp
, HW_TSO_3
)) &&
17437 (features
& NETIF_F_IP_CSUM
))
17438 features
|= NETIF_F_TSO
;
17439 if (tg3_flag(tp
, HW_TSO_2
) || tg3_flag(tp
, HW_TSO_3
)) {
17440 if (features
& NETIF_F_IPV6_CSUM
)
17441 features
|= NETIF_F_TSO6
;
17442 if (tg3_flag(tp
, HW_TSO_3
) ||
17443 tg3_asic_rev(tp
) == ASIC_REV_5761
||
17444 (tg3_asic_rev(tp
) == ASIC_REV_5784
&&
17445 tg3_chip_rev(tp
) != CHIPREV_5784_AX
) ||
17446 tg3_asic_rev(tp
) == ASIC_REV_5785
||
17447 tg3_asic_rev(tp
) == ASIC_REV_57780
)
17448 features
|= NETIF_F_TSO_ECN
;
17451 dev
->features
|= features
;
17452 dev
->vlan_features
|= features
;
17455 * Add loopback capability only for a subset of devices that support
17456 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17457 * loopback for the remaining devices.
17459 if (tg3_asic_rev(tp
) != ASIC_REV_5780
&&
17460 !tg3_flag(tp
, CPMU_PRESENT
))
17461 /* Add the loopback capability */
17462 features
|= NETIF_F_LOOPBACK
;
17464 dev
->hw_features
|= features
;
17466 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5705_A1
&&
17467 !tg3_flag(tp
, TSO_CAPABLE
) &&
17468 !(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
)) {
17469 tg3_flag_set(tp
, MAX_RXPEND_64
);
17470 tp
->rx_pending
= 63;
17473 err
= tg3_get_device_address(tp
);
17475 dev_err(&pdev
->dev
,
17476 "Could not obtain valid ethernet address, aborting\n");
17477 goto err_out_apeunmap
;
17481 * Reset chip in case UNDI or EFI driver did not shutdown
17482 * DMA self test will enable WDMAC and we'll see (spurious)
17483 * pending DMA on the PCI bus at that point.
17485 if ((tr32(HOSTCC_MODE
) & HOSTCC_MODE_ENABLE
) ||
17486 (tr32(WDMAC_MODE
) & WDMAC_MODE_ENABLE
)) {
17487 tw32(MEMARB_MODE
, MEMARB_MODE_ENABLE
);
17488 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
17491 err
= tg3_test_dma(tp
);
17493 dev_err(&pdev
->dev
, "DMA engine test failed, aborting\n");
17494 goto err_out_apeunmap
;
17497 intmbx
= MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
;
17498 rcvmbx
= MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
;
17499 sndmbx
= MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
;
17500 for (i
= 0; i
< tp
->irq_max
; i
++) {
17501 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
17504 tnapi
->tx_pending
= TG3_DEF_TX_RING_PENDING
;
17506 tnapi
->int_mbox
= intmbx
;
17512 tnapi
->consmbox
= rcvmbx
;
17513 tnapi
->prodmbox
= sndmbx
;
17516 tnapi
->coal_now
= HOSTCC_MODE_COAL_VEC1_NOW
<< (i
- 1);
17518 tnapi
->coal_now
= HOSTCC_MODE_NOW
;
17520 if (!tg3_flag(tp
, SUPPORT_MSIX
))
17524 * If we support MSIX, we'll be using RSS. If we're using
17525 * RSS, the first vector only handles link interrupts and the
17526 * remaining vectors handle rx and tx interrupts. Reuse the
17527 * mailbox values for the next iteration. The values we setup
17528 * above are still useful for the single vectored mode.
17543 pci_set_drvdata(pdev
, dev
);
17545 if (tg3_asic_rev(tp
) == ASIC_REV_5719
||
17546 tg3_asic_rev(tp
) == ASIC_REV_5720
||
17547 tg3_asic_rev(tp
) == ASIC_REV_5762
)
17548 tg3_flag_set(tp
, PTP_CAPABLE
);
17550 if (tg3_flag(tp
, 5717_PLUS
)) {
17551 /* Resume a low-power mode */
17552 tg3_frob_aux_power(tp
, false);
17555 tg3_timer_init(tp
);
17557 tg3_carrier_off(tp
);
17559 err
= register_netdev(dev
);
17561 dev_err(&pdev
->dev
, "Cannot register net device, aborting\n");
17562 goto err_out_apeunmap
;
17565 netdev_info(dev
, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17566 tp
->board_part_number
,
17567 tg3_chip_rev_id(tp
),
17568 tg3_bus_string(tp
, str
),
17571 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
17572 struct phy_device
*phydev
;
17573 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
17575 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17576 phydev
->drv
->name
, dev_name(&phydev
->dev
));
17580 if (tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)
17581 ethtype
= "10/100Base-TX";
17582 else if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
17583 ethtype
= "1000Base-SX";
17585 ethtype
= "10/100/1000Base-T";
17587 netdev_info(dev
, "attached PHY is %s (%s Ethernet) "
17588 "(WireSpeed[%d], EEE[%d])\n",
17589 tg3_phy_string(tp
), ethtype
,
17590 (tp
->phy_flags
& TG3_PHYFLG_NO_ETH_WIRE_SPEED
) == 0,
17591 (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
) != 0);
	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
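
/* Undo what tg3_init_one() set up, in reverse order: drop the firmware
 * reference, cancel any pending reset work, unregister the net device,
 * unmap the MMIO windows and release the PCI resources.
 */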
static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}

		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
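
/* System sleep support.  These hooks are built only when CONFIG_PM_SLEEP is
 * enabled and are wired into tg3_pm_ops via SIMPLE_DEV_PM_OPS() below.
 */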
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}
#endif /* CONFIG_PM_SLEEP */
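
/* SIMPLE_DEV_PM_OPS() declares a struct dev_pm_ops (tg3_pm_ops) that uses
 * tg3_suspend/tg3_resume for the suspend/resume, freeze/thaw and
 * poweroff/restore system sleep transitions.
 */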
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	/* We probably don't have netdev yet */
	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		tg3_napi_enable(tp);
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	return err;
}
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
		tg3_napi_enable(tp);

	return rc;
}
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	if (!netif_running(netdev))
		return;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		return;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
};
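
/* module_pci_driver() generates the module init/exit boilerplate.  It is
 * roughly equivalent to:
 *
 *	static int __init tg3_init(void)
 *	{
 *		return pci_register_driver(&tg3_driver);
 *	}
 *	module_init(tg3_init);
 *
 *	static void __exit tg3_exit(void)
 *	{
 *		pci_unregister_driver(&tg3_driver);
 *	}
 *	module_exit(tg3_exit);
 */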
module_pci_driver(tg3_driver);