ethtool: Use full 32 bit speed range in ethtool's set_settings
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag) \
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag) \
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag) \
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		118
#define DRV_MODULE_VERSION \
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"April 22, 2011"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp) \
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
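
/* Advance a tx ring index with a mask instead of a modulo; this is
 * valid because TG3_TX_RING_SIZE is a power of two.
 */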
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "mbuf_lwm_thresh_hit" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "loopback test (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

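/* Indirect register access: post the target register offset to a window
 * register in PCI config space, then move the data through
 * TG3PCI_REG_DATA.  The indirect_lock serializes use of the shared window.
 */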
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

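/* Some chips can drop the first tx mailbox write (TXD_MBOX_HWBUG), so
 * post it twice; the read back flushes the write on systems where the
 * host bridge may reorder mailbox writes (MBOX_WRITE_REORDER).
 */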
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

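/* NIC SRAM is reached through a memory window register pair, either via
 * PCI config space (SRAM_USE_CONFIG) or the regular register space.  The
 * 5906 cannot access the statistics block range, so those accesses are
 * skipped (reads return 0).
 */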
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

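/* Clock a frame out the MAC's MI (MDIO) interface and poll MI_COM_BUSY
 * until the PHY responds or PHY_BUSY_LOOPS expires.  Hardware
 * auto-polling is paused around the transaction and restored afterwards.
 */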
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

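/* Clause 45 access tunneled through the clause 22 MMD access control
 * registers: select the MMD device, latch the register address, then
 * transfer the data with the no-post-increment function code.
 */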
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

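/* DSP registers are reached indirectly through an address/data
 * register pair.
 */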
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}

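/* Translate the driver's flow control settings into the standard MII
 * advertisement bits (symmetric PAUSE and asymmetric ASM_DIR).
 */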
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

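/* Resolve the pause configuration negotiated between the local and the
 * link partner 1000BASE-X advertisements, following the pause priority
 * resolution rules of IEEE 802.3 Annex 28B.
 */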
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

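/* Enable or disable the PHY's auto power-down (APD) feature through the
 * miscellaneous shadow registers.
 */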
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}

static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		switch (val) {
		case TG3_CL45_D7_EEERES_STAT_LP_1000T:
			switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
			case ASIC_REV_5717:
			case ASIC_REV_5719:
			case ASIC_REV_57765:
				if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
					tg3_phydsp_write(tp, MII_TG3_DSP_TAP26,
							 0x0000);
					TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
				}
			}
			/* Fallthrough */
		case TG3_CL45_D7_EEERES_STAT_LP_100TX:
			tp->setlpicnt = 2;
		}
	}

	if (!tp->setlpicnt) {
		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

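/* Write a known test pattern into each of the four DSP channels, read
 * it back, and request a PHY reset if any channel fails to verify.
 */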
1867 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1868 {
1869 static const u32 test_pat[4][6] = {
1870 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1871 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1872 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1873 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1874 };
1875 int chan;
1876
1877 for (chan = 0; chan < 4; chan++) {
1878 int i;
1879
1880 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1881 (chan * 0x2000) | 0x0200);
1882 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1883
1884 for (i = 0; i < 6; i++)
1885 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1886 test_pat[chan][i]);
1887
1888 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1889 if (tg3_wait_macro_done(tp)) {
1890 *resetp = 1;
1891 return -EBUSY;
1892 }
1893
1894 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1895 (chan * 0x2000) | 0x0200);
1896 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1897 if (tg3_wait_macro_done(tp)) {
1898 *resetp = 1;
1899 return -EBUSY;
1900 }
1901
1902 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1903 if (tg3_wait_macro_done(tp)) {
1904 *resetp = 1;
1905 return -EBUSY;
1906 }
1907
1908 for (i = 0; i < 6; i += 2) {
1909 u32 low, high;
1910
1911 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1912 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1913 tg3_wait_macro_done(tp)) {
1914 *resetp = 1;
1915 return -EBUSY;
1916 }
1917 low &= 0x7fff;
1918 high &= 0x000f;
1919 if (low != test_pat[chan][i] ||
1920 high != test_pat[chan][i+1]) {
1921 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1922 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1923 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1924
1925 return -EBUSY;
1926 }
1927 }
1928 }
1929
1930 return 0;
1931 }
1932
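/* Clear the test pattern from all four DSP channels. */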
1933 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1934 {
1935 int chan;
1936
1937 for (chan = 0; chan < 4; chan++) {
1938 int i;
1939
1940 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1941 (chan * 0x2000) | 0x0200);
1942 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1943 for (i = 0; i < 6; i++)
1944 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1945 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1946 if (tg3_wait_macro_done(tp))
1947 return -EBUSY;
1948 }
1949
1950 return 0;
1951 }
1952
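/* PHY errata workaround for the 5703, 5704 and 5705: force
 * 1000BASE-T master mode, verify the DSP test pattern (retrying
 * with a fresh PHY reset up to ten times), then restore the
 * original register state.
 */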
1953 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1954 {
1955 u32 reg32, phy9_orig;
1956 int retries, do_phy_reset, err;
1957
1958 retries = 10;
1959 do_phy_reset = 1;
1960 do {
1961 if (do_phy_reset) {
1962 err = tg3_bmcr_reset(tp);
1963 if (err)
1964 return err;
1965 do_phy_reset = 0;
1966 }
1967
1968 /* Disable transmitter and interrupt. */
1969 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1970 continue;
1971
1972 reg32 |= 0x3000;
1973 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1974
1975 /* Set full-duplex, 1000 Mbps. */
1976 tg3_writephy(tp, MII_BMCR,
1977 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1978
1979 /* Set to master mode. */
1980 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1981 continue;
1982
1983 tg3_writephy(tp, MII_TG3_CTRL,
1984 (MII_TG3_CTRL_AS_MASTER |
1985 MII_TG3_CTRL_ENABLE_AS_MASTER));
1986
1987 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
1988 if (err)
1989 return err;
1990
1991 /* Block the PHY control access. */
1992 tg3_phydsp_write(tp, 0x8005, 0x0800);
1993
1994 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1995 if (!err)
1996 break;
1997 } while (--retries);
1998
1999 err = tg3_phy_reset_chanpat(tp);
2000 if (err)
2001 return err;
2002
2003 tg3_phydsp_write(tp, 0x8005, 0x0000);
2004
2005 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2006 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2007
2008 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2009
2010 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
2011
2012 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2013 reg32 &= ~0x3000;
2014 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2015 } else if (!err)
2016 err = -EBUSY;
2017
2018 return err;
2019 }
2020
2021 /* This resets the tigon3 PHY unconditionally and reapplies the
2022  * chip- and PHY-specific workarounds that must follow a reset.
2023  */
2024 static int tg3_phy_reset(struct tg3 *tp)
2025 {
2026 u32 val, cpmuctrl;
2027 int err;
2028
2029 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2030 val = tr32(GRC_MISC_CFG);
2031 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2032 udelay(40);
2033 }
2034 err = tg3_readphy(tp, MII_BMSR, &val);
2035 err |= tg3_readphy(tp, MII_BMSR, &val);
2036 if (err != 0)
2037 return -EBUSY;
2038
2039 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2040 netif_carrier_off(tp->dev);
2041 tg3_link_report(tp);
2042 }
2043
2044 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2045 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2046 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2047 err = tg3_phy_reset_5703_4_5(tp);
2048 if (err)
2049 return err;
2050 goto out;
2051 }
2052
2053 cpmuctrl = 0;
2054 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2055 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2056 cpmuctrl = tr32(TG3_CPMU_CTRL);
2057 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2058 tw32(TG3_CPMU_CTRL,
2059 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2060 }
2061
2062 err = tg3_bmcr_reset(tp);
2063 if (err)
2064 return err;
2065
2066 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2067 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2068 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2069
2070 tw32(TG3_CPMU_CTRL, cpmuctrl);
2071 }
2072
2073 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2074 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2075 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2076 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2077 CPMU_LSPD_1000MB_MACCLK_12_5) {
2078 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2079 udelay(40);
2080 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2081 }
2082 }
2083
2084 if (tg3_flag(tp, 5717_PLUS) &&
2085 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2086 return 0;
2087
2088 tg3_phy_apply_otp(tp);
2089
2090 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2091 tg3_phy_toggle_apd(tp, true);
2092 else
2093 tg3_phy_toggle_apd(tp, false);
2094
2095 out:
2096 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2097 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2098 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2099 tg3_phydsp_write(tp, 0x000a, 0x0323);
2100 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2101 }
2102
2103 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2104 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2105 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2106 }
2107
2108 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2109 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2110 tg3_phydsp_write(tp, 0x000a, 0x310b);
2111 tg3_phydsp_write(tp, 0x201f, 0x9506);
2112 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2113 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2114 }
2115 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2116 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2117 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2118 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2119 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2120 tg3_writephy(tp, MII_TG3_TEST1,
2121 MII_TG3_TEST1_TRIM_EN | 0x4);
2122 } else
2123 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2124
2125 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2126 }
2127 }
2128
2129 /* Set Extended packet length bit (bit 14) on all chips that
2130  * support jumbo frames. */
2131 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2132 /* Cannot do read-modify-write on 5401 */
2133 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2134 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2135 /* Set bit 14 with read-modify-write to preserve other bits */
2136 err = tg3_phy_auxctl_read(tp,
2137 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2138 if (!err)
2139 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2140 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2141 }
2142
2143 /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2144  * jumbo frame transmission.
2145  */
2146 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2147 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2148 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2149 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2150 }
2151
2152 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2153 /* adjust output voltage */
2154 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2155 }
2156
2157 tg3_phy_toggle_automdix(tp, 1);
2158 tg3_phy_set_wirespeed(tp);
2159 return 0;
2160 }
2161
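/* Switch auxiliary (Vaux) power in via the GRC local control GPIOs
 * when this port or its peer needs power for WOL or ASF, and switch
 * it back out otherwise.
 */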
2162 static void tg3_frob_aux_power(struct tg3 *tp)
2163 {
2164 bool need_vaux = false;
2165
2166 /* The GPIOs do something completely different on the 5719 and 57765. */
2167 if (!tg3_flag(tp, IS_NIC) ||
2168 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2169 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2170 return;
2171
2172 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2174 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2176 tp->pdev_peer != tp->pdev) {
2177 struct net_device *dev_peer;
2178
2179 dev_peer = pci_get_drvdata(tp->pdev_peer);
2180
2181 /* remove_one() may have been run on the peer. */
2182 if (dev_peer) {
2183 struct tg3 *tp_peer = netdev_priv(dev_peer);
2184
2185 if (tg3_flag(tp_peer, INIT_COMPLETE))
2186 return;
2187
2188 if (tg3_flag(tp_peer, WOL_ENABLE) ||
2189 tg3_flag(tp_peer, ENABLE_ASF))
2190 need_vaux = true;
2191 }
2192 }
2193
2194 if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2195 need_vaux = true;
2196
2197 if (need_vaux) {
2198 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2199 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2200 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2201 (GRC_LCLCTRL_GPIO_OE0 |
2202 GRC_LCLCTRL_GPIO_OE1 |
2203 GRC_LCLCTRL_GPIO_OE2 |
2204 GRC_LCLCTRL_GPIO_OUTPUT0 |
2205 GRC_LCLCTRL_GPIO_OUTPUT1),
2206 100);
2207 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2208 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2209 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2210 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2211 GRC_LCLCTRL_GPIO_OE1 |
2212 GRC_LCLCTRL_GPIO_OE2 |
2213 GRC_LCLCTRL_GPIO_OUTPUT0 |
2214 GRC_LCLCTRL_GPIO_OUTPUT1 |
2215 tp->grc_local_ctrl;
2216 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2217
2218 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2219 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2220
2221 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2222 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2223 } else {
2224 u32 no_gpio2;
2225 u32 grc_local_ctrl = 0;
2226
2227 /* Workaround to prevent overdrawing Amps. */
2228 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2229 ASIC_REV_5714) {
2230 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2231 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2232 grc_local_ctrl, 100);
2233 }
2234
2235 /* On 5753 and variants, GPIO2 cannot be used. */
2236 no_gpio2 = tp->nic_sram_data_cfg &
2237 NIC_SRAM_DATA_CFG_NO_GPIO2;
2238
2239 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2240 GRC_LCLCTRL_GPIO_OE1 |
2241 GRC_LCLCTRL_GPIO_OE2 |
2242 GRC_LCLCTRL_GPIO_OUTPUT1 |
2243 GRC_LCLCTRL_GPIO_OUTPUT2;
2244 if (no_gpio2) {
2245 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2246 GRC_LCLCTRL_GPIO_OUTPUT2);
2247 }
2248 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2249 grc_local_ctrl, 100);
2250
2251 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2252
2253 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2254 grc_local_ctrl, 100);
2255
2256 if (!no_gpio2) {
2257 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2258 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2259 grc_local_ctrl, 100);
2260 }
2261 }
2262 } else {
2263 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2264 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2265 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2266 (GRC_LCLCTRL_GPIO_OE1 |
2267 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2268
2269 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2270 GRC_LCLCTRL_GPIO_OE1, 100);
2271
2272 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2273 (GRC_LCLCTRL_GPIO_OE1 |
2274 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2275 }
2276 }
2277 }
2278
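/* Return nonzero if the MAC_MODE link polarity bit must be set for
 * the given link speed on 5700 chips; the BCM5411 PHY uses the
 * opposite polarity sense.
 */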
2279 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2280 {
2281 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2282 return 1;
2283 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2284 if (speed != SPEED_10)
2285 return 1;
2286 } else if (speed == SPEED_10)
2287 return 1;
2288
2289 return 0;
2290 }
2291
2292 static int tg3_setup_phy(struct tg3 *, int);
2293
2294 #define RESET_KIND_SHUTDOWN 0
2295 #define RESET_KIND_INIT 1
2296 #define RESET_KIND_SUSPEND 2
2297
2298 static void tg3_write_sig_post_reset(struct tg3 *, int);
2299 static int tg3_halt_cpu(struct tg3 *, u32);
2300
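/* Quiesce the PHY before entering a low power state, using the
 * method appropriate to the chip: serdes reconfiguration, EPHY
 * IDDQ mode, the FET shadow power-down bit, or a plain BMCR
 * power-down.
 */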
2301 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2302 {
2303 u32 val;
2304
2305 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2306 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2307 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2308 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2309
2310 sg_dig_ctrl |=
2311 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2312 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2313 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2314 }
2315 return;
2316 }
2317
2318 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2319 tg3_bmcr_reset(tp);
2320 val = tr32(GRC_MISC_CFG);
2321 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2322 udelay(40);
2323 return;
2324 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2325 u32 phytest;
2326 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2327 u32 phy;
2328
2329 tg3_writephy(tp, MII_ADVERTISE, 0);
2330 tg3_writephy(tp, MII_BMCR,
2331 BMCR_ANENABLE | BMCR_ANRESTART);
2332
2333 tg3_writephy(tp, MII_TG3_FET_TEST,
2334 phytest | MII_TG3_FET_SHADOW_EN);
2335 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2336 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2337 tg3_writephy(tp,
2338 MII_TG3_FET_SHDW_AUXMODE4,
2339 phy);
2340 }
2341 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2342 }
2343 return;
2344 } else if (do_low_power) {
2345 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2346 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2347
2348 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2349 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2350 MII_TG3_AUXCTL_PCTL_VREG_11V;
2351 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2352 }
2353
2354 /* The PHY should not be powered down on some chips because
2355 * of bugs.
2356 */
2357 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2358 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2359 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2360 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2361 return;
2362
2363 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2364 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2365 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2366 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2367 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2368 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2369 }
2370
2371 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2372 }
2373
2374 /* tp->lock is held. */
2375 static int tg3_nvram_lock(struct tg3 *tp)
2376 {
2377 if (tg3_flag(tp, NVRAM)) {
2378 int i;
2379
2380 if (tp->nvram_lock_cnt == 0) {
2381 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2382 for (i = 0; i < 8000; i++) {
2383 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2384 break;
2385 udelay(20);
2386 }
2387 if (i == 8000) {
2388 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2389 return -ENODEV;
2390 }
2391 }
2392 tp->nvram_lock_cnt++;
2393 }
2394 return 0;
2395 }
2396
2397 /* tp->lock is held. */
2398 static void tg3_nvram_unlock(struct tg3 *tp)
2399 {
2400 if (tg3_flag(tp, NVRAM)) {
2401 if (tp->nvram_lock_cnt > 0)
2402 tp->nvram_lock_cnt--;
2403 if (tp->nvram_lock_cnt == 0)
2404 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2405 }
2406 }
2407
2408 /* tp->lock is held. */
2409 static void tg3_enable_nvram_access(struct tg3 *tp)
2410 {
2411 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2412 u32 nvaccess = tr32(NVRAM_ACCESS);
2413
2414 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2415 }
2416 }
2417
2418 /* tp->lock is held. */
2419 static void tg3_disable_nvram_access(struct tg3 *tp)
2420 {
2421 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2422 u32 nvaccess = tr32(NVRAM_ACCESS);
2423
2424 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2425 }
2426 }
2427
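/* Read one 32-bit word from a legacy SEEPROM part through the GRC
 * EEPROM address/data registers; used when the NVRAM interface is
 * not available.
 */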
2428 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2429 u32 offset, u32 *val)
2430 {
2431 u32 tmp;
2432 int i;
2433
2434 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2435 return -EINVAL;
2436
2437 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2438 EEPROM_ADDR_DEVID_MASK |
2439 EEPROM_ADDR_READ);
2440 tw32(GRC_EEPROM_ADDR,
2441 tmp |
2442 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2443 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2444 EEPROM_ADDR_ADDR_MASK) |
2445 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2446
2447 for (i = 0; i < 1000; i++) {
2448 tmp = tr32(GRC_EEPROM_ADDR);
2449
2450 if (tmp & EEPROM_ADDR_COMPLETE)
2451 break;
2452 msleep(1);
2453 }
2454 if (!(tmp & EEPROM_ADDR_COMPLETE))
2455 return -EBUSY;
2456
2457 tmp = tr32(GRC_EEPROM_DATA);
2458
2459 /*
2460 * The data will always be in the opposite of the native endian
2461 * format. Perform a blind byteswap to compensate.
2462 */
2463 *val = swab32(tmp);
2464
2465 return 0;
2466 }
2467
2468 #define NVRAM_CMD_TIMEOUT 10000
2469
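/* Issue a command to the NVRAM interface and poll for completion,
 * giving up after NVRAM_CMD_TIMEOUT polls of 10 usec each.
 */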
2470 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2471 {
2472 int i;
2473
2474 tw32(NVRAM_CMD, nvram_cmd);
2475 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2476 udelay(10);
2477 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2478 udelay(10);
2479 break;
2480 }
2481 }
2482
2483 if (i == NVRAM_CMD_TIMEOUT)
2484 return -EBUSY;
2485
2486 return 0;
2487 }
2488
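/* Convert a linear NVRAM offset into the page-number/page-offset
 * form used by Atmel AT45DB0X1B flash parts.
 * tg3_nvram_logical_addr() below performs the inverse mapping.
 */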
2489 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2490 {
2491 if (tg3_flag(tp, NVRAM) &&
2492 tg3_flag(tp, NVRAM_BUFFERED) &&
2493 tg3_flag(tp, FLASH) &&
2494 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2495 (tp->nvram_jedecnum == JEDEC_ATMEL))
2496
2497 addr = ((addr / tp->nvram_pagesize) <<
2498 ATMEL_AT45DB0X1B_PAGE_POS) +
2499 (addr % tp->nvram_pagesize);
2500
2501 return addr;
2502 }
2503
2504 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2505 {
2506 if (tg3_flag(tp, NVRAM) &&
2507 tg3_flag(tp, NVRAM_BUFFERED) &&
2508 tg3_flag(tp, FLASH) &&
2509 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2510 (tp->nvram_jedecnum == JEDEC_ATMEL))
2511
2512 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2513 tp->nvram_pagesize) +
2514 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2515
2516 return addr;
2517 }
2518
2519 /* NOTE: Data read in from NVRAM is byteswapped according to
2520 * the byteswapping settings for all other register accesses.
2521 * tg3 devices are BE devices, so on a BE machine, the data
2522 * returned will be exactly as it is seen in NVRAM. On a LE
2523 * machine, the 32-bit value will be byteswapped.
2524 */
2525 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2526 {
2527 int ret;
2528
2529 if (!tg3_flag(tp, NVRAM))
2530 return tg3_nvram_read_using_eeprom(tp, offset, val);
2531
2532 offset = tg3_nvram_phys_addr(tp, offset);
2533
2534 if (offset > NVRAM_ADDR_MSK)
2535 return -EINVAL;
2536
2537 ret = tg3_nvram_lock(tp);
2538 if (ret)
2539 return ret;
2540
2541 tg3_enable_nvram_access(tp);
2542
2543 tw32(NVRAM_ADDR, offset);
2544 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2545 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2546
2547 if (ret == 0)
2548 *val = tr32(NVRAM_RDDATA);
2549
2550 tg3_disable_nvram_access(tp);
2551
2552 tg3_nvram_unlock(tp);
2553
2554 return ret;
2555 }
2556
2557 /* Ensures NVRAM data is in bytestream format. */
2558 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2559 {
2560 u32 v;
2561 int res = tg3_nvram_read(tp, offset, &v);
2562 if (!res)
2563 *val = cpu_to_be32(v);
2564 return res;
2565 }
2566
2567 /* tp->lock is held. */
2568 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2569 {
2570 u32 addr_high, addr_low;
2571 int i;
2572
2573 addr_high = ((tp->dev->dev_addr[0] << 8) |
2574 tp->dev->dev_addr[1]);
2575 addr_low = ((tp->dev->dev_addr[2] << 24) |
2576 (tp->dev->dev_addr[3] << 16) |
2577 (tp->dev->dev_addr[4] << 8) |
2578 (tp->dev->dev_addr[5] << 0));
2579 for (i = 0; i < 4; i++) {
2580 if (i == 1 && skip_mac_1)
2581 continue;
2582 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2583 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2584 }
2585
2586 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2587 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2588 for (i = 0; i < 12; i++) {
2589 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2590 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2591 }
2592 }
2593
2594 addr_high = (tp->dev->dev_addr[0] +
2595 tp->dev->dev_addr[1] +
2596 tp->dev->dev_addr[2] +
2597 tp->dev->dev_addr[3] +
2598 tp->dev->dev_addr[4] +
2599 tp->dev->dev_addr[5]) &
2600 TX_BACKOFF_SEED_MASK;
2601 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2602 }
2603
2604 static void tg3_enable_register_access(struct tg3 *tp)
2605 {
2606 /*
2607 * Make sure register accesses (indirect or otherwise) will function
2608 * correctly.
2609 */
2610 pci_write_config_dword(tp->pdev,
2611 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2612 }
2613
2614 static int tg3_power_up(struct tg3 *tp)
2615 {
2616 tg3_enable_register_access(tp);
2617
2618 pci_set_power_state(tp->pdev, PCI_D0);
2619
2620 /* Switch out of Vaux if it is a NIC */
2621 if (tg3_flag(tp, IS_NIC))
2622 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2623
2624 return 0;
2625 }
2626
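/* Arm WOL, reprogram the PHY and MAC for the wake speed, and gate
 * the chip clocks ahead of a power-down; the caller performs the
 * actual PCI power state transition.
 */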
2627 static int tg3_power_down_prepare(struct tg3 *tp)
2628 {
2629 u32 misc_host_ctrl;
2630 bool device_should_wake, do_low_power;
2631
2632 tg3_enable_register_access(tp);
2633
2634 /* Restore the CLKREQ setting. */
2635 if (tg3_flag(tp, CLKREQ_BUG)) {
2636 u16 lnkctl;
2637
2638 pci_read_config_word(tp->pdev,
2639 tp->pcie_cap + PCI_EXP_LNKCTL,
2640 &lnkctl);
2641 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2642 pci_write_config_word(tp->pdev,
2643 tp->pcie_cap + PCI_EXP_LNKCTL,
2644 lnkctl);
2645 }
2646
2647 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2648 tw32(TG3PCI_MISC_HOST_CTRL,
2649 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2650
2651 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2652 tg3_flag(tp, WOL_ENABLE);
2653
2654 if (tg3_flag(tp, USE_PHYLIB)) {
2655 do_low_power = false;
2656 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2657 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2658 struct phy_device *phydev;
2659 u32 phyid, advertising;
2660
2661 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2662
2663 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2664
2665 tp->link_config.orig_speed = phydev->speed;
2666 tp->link_config.orig_duplex = phydev->duplex;
2667 tp->link_config.orig_autoneg = phydev->autoneg;
2668 tp->link_config.orig_advertising = phydev->advertising;
2669
2670 advertising = ADVERTISED_TP |
2671 ADVERTISED_Pause |
2672 ADVERTISED_Autoneg |
2673 ADVERTISED_10baseT_Half;
2674
2675 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2676 if (tg3_flag(tp, WOL_SPEED_100MB))
2677 advertising |=
2678 ADVERTISED_100baseT_Half |
2679 ADVERTISED_100baseT_Full |
2680 ADVERTISED_10baseT_Full;
2681 else
2682 advertising |= ADVERTISED_10baseT_Full;
2683 }
2684
2685 phydev->advertising = advertising;
2686
2687 phy_start_aneg(phydev);
2688
2689 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2690 if (phyid != PHY_ID_BCMAC131) {
2691 phyid &= PHY_BCM_OUI_MASK;
2692 if (phyid == PHY_BCM_OUI_1 ||
2693 phyid == PHY_BCM_OUI_2 ||
2694 phyid == PHY_BCM_OUI_3)
2695 do_low_power = true;
2696 }
2697 }
2698 } else {
2699 do_low_power = true;
2700
2701 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2702 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2703 tp->link_config.orig_speed = tp->link_config.speed;
2704 tp->link_config.orig_duplex = tp->link_config.duplex;
2705 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2706 }
2707
2708 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2709 tp->link_config.speed = SPEED_10;
2710 tp->link_config.duplex = DUPLEX_HALF;
2711 tp->link_config.autoneg = AUTONEG_ENABLE;
2712 tg3_setup_phy(tp, 0);
2713 }
2714 }
2715
2716 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2717 u32 val;
2718
2719 val = tr32(GRC_VCPU_EXT_CTRL);
2720 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2721 } else if (!tg3_flag(tp, ENABLE_ASF)) {
2722 int i;
2723 u32 val;
2724
2725 for (i = 0; i < 200; i++) {
2726 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2727 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2728 break;
2729 msleep(1);
2730 }
2731 }
2732 if (tg3_flag(tp, WOL_CAP))
2733 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2734 WOL_DRV_STATE_SHUTDOWN |
2735 WOL_DRV_WOL |
2736 WOL_SET_MAGIC_PKT);
2737
2738 if (device_should_wake) {
2739 u32 mac_mode;
2740
2741 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2742 if (do_low_power &&
2743 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2744 tg3_phy_auxctl_write(tp,
2745 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2746 MII_TG3_AUXCTL_PCTL_WOL_EN |
2747 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2748 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2749 udelay(40);
2750 }
2751
2752 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2753 mac_mode = MAC_MODE_PORT_MODE_GMII;
2754 else
2755 mac_mode = MAC_MODE_PORT_MODE_MII;
2756
2757 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2758 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2759 ASIC_REV_5700) {
2760 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2761 SPEED_100 : SPEED_10;
2762 if (tg3_5700_link_polarity(tp, speed))
2763 mac_mode |= MAC_MODE_LINK_POLARITY;
2764 else
2765 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2766 }
2767 } else {
2768 mac_mode = MAC_MODE_PORT_MODE_TBI;
2769 }
2770
2771 if (!tg3_flag(tp, 5750_PLUS))
2772 tw32(MAC_LED_CTRL, tp->led_ctrl);
2773
2774 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2775 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2776 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2777 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2778
2779 if (tg3_flag(tp, ENABLE_APE))
2780 mac_mode |= MAC_MODE_APE_TX_EN |
2781 MAC_MODE_APE_RX_EN |
2782 MAC_MODE_TDE_ENABLE;
2783
2784 tw32_f(MAC_MODE, mac_mode);
2785 udelay(100);
2786
2787 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2788 udelay(10);
2789 }
2790
2791 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2792 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2793 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2794 u32 base_val;
2795
2796 base_val = tp->pci_clock_ctrl;
2797 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2798 CLOCK_CTRL_TXCLK_DISABLE);
2799
2800 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2801 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2802 } else if (tg3_flag(tp, 5780_CLASS) ||
2803 tg3_flag(tp, CPMU_PRESENT) ||
2804 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2805 /* do nothing */
2806 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2807 u32 newbits1, newbits2;
2808
2809 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2810 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2811 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2812 CLOCK_CTRL_TXCLK_DISABLE |
2813 CLOCK_CTRL_ALTCLK);
2814 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2815 } else if (tg3_flag(tp, 5705_PLUS)) {
2816 newbits1 = CLOCK_CTRL_625_CORE;
2817 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2818 } else {
2819 newbits1 = CLOCK_CTRL_ALTCLK;
2820 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2821 }
2822
2823 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2824 40);
2825
2826 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2827 40);
2828
2829 if (!tg3_flag(tp, 5705_PLUS)) {
2830 u32 newbits3;
2831
2832 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2833 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2834 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2835 CLOCK_CTRL_TXCLK_DISABLE |
2836 CLOCK_CTRL_44MHZ_CORE);
2837 } else {
2838 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2839 }
2840
2841 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2842 tp->pci_clock_ctrl | newbits3, 40);
2843 }
2844 }
2845
2846 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
2847 tg3_power_down_phy(tp, do_low_power);
2848
2849 tg3_frob_aux_power(tp);
2850
2851 /* Workaround for unstable PLL clock */
2852 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2853 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2854 u32 val = tr32(0x7d00);
2855
2856 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2857 tw32(0x7d00, val);
2858 if (!tg3_flag(tp, ENABLE_ASF)) {
2859 int err;
2860
2861 err = tg3_nvram_lock(tp);
2862 tg3_halt_cpu(tp, RX_CPU_BASE);
2863 if (!err)
2864 tg3_nvram_unlock(tp);
2865 }
2866 }
2867
2868 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2869
2870 return 0;
2871 }
2872
2873 static void tg3_power_down(struct tg3 *tp)
2874 {
2875 tg3_power_down_prepare(tp);
2876
2877 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2878 pci_set_power_state(tp->pdev, PCI_D3hot);
2879 }
2880
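/* Decode the MII_TG3_AUX_STAT speed/duplex field into SPEED_* and
 * DUPLEX_* values.  FET PHYs use a different encoding, and unknown
 * codes map to SPEED_INVALID/DUPLEX_INVALID.
 */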
2881 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2882 {
2883 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2884 case MII_TG3_AUX_STAT_10HALF:
2885 *speed = SPEED_10;
2886 *duplex = DUPLEX_HALF;
2887 break;
2888
2889 case MII_TG3_AUX_STAT_10FULL:
2890 *speed = SPEED_10;
2891 *duplex = DUPLEX_FULL;
2892 break;
2893
2894 case MII_TG3_AUX_STAT_100HALF:
2895 *speed = SPEED_100;
2896 *duplex = DUPLEX_HALF;
2897 break;
2898
2899 case MII_TG3_AUX_STAT_100FULL:
2900 *speed = SPEED_100;
2901 *duplex = DUPLEX_FULL;
2902 break;
2903
2904 case MII_TG3_AUX_STAT_1000HALF:
2905 *speed = SPEED_1000;
2906 *duplex = DUPLEX_HALF;
2907 break;
2908
2909 case MII_TG3_AUX_STAT_1000FULL:
2910 *speed = SPEED_1000;
2911 *duplex = DUPLEX_FULL;
2912 break;
2913
2914 default:
2915 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2916 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2917 SPEED_10;
2918 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2919 DUPLEX_HALF;
2920 break;
2921 }
2922 *speed = SPEED_INVALID;
2923 *duplex = DUPLEX_INVALID;
2924 break;
2925 }
2926 }
2927
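/* Program the copper PHY advertisement and BMCR registers from
 * tp->link_config: reduced advertisements in low power mode, full
 * autonegotiation, or a single forced speed/duplex.
 */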
2928 static void tg3_phy_copper_begin(struct tg3 *tp)
2929 {
2930 u32 new_adv;
2931 int i;
2932
2933 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2934 /* Entering low power mode. Disable gigabit and
2935 * 100baseT advertisements.
2936 */
2937 tg3_writephy(tp, MII_TG3_CTRL, 0);
2938
2939 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2940 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2941 if (tg3_flag(tp, WOL_SPEED_100MB))
2942 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2943
2944 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2945 } else if (tp->link_config.speed == SPEED_INVALID) {
2946 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2947 tp->link_config.advertising &=
2948 ~(ADVERTISED_1000baseT_Half |
2949 ADVERTISED_1000baseT_Full);
2950
2951 new_adv = ADVERTISE_CSMA;
2952 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2953 new_adv |= ADVERTISE_10HALF;
2954 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2955 new_adv |= ADVERTISE_10FULL;
2956 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2957 new_adv |= ADVERTISE_100HALF;
2958 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2959 new_adv |= ADVERTISE_100FULL;
2960
2961 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2962
2963 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2964
2965 if (tp->link_config.advertising &
2966 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2967 new_adv = 0;
2968 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2969 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2970 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2971 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2972 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) &&
2973 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2974 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2975 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2976 MII_TG3_CTRL_ENABLE_AS_MASTER);
2977 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2978 } else {
2979 tg3_writephy(tp, MII_TG3_CTRL, 0);
2980 }
2981 } else {
2982 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2983 new_adv |= ADVERTISE_CSMA;
2984
2985 /* Asking for a specific link mode. */
2986 if (tp->link_config.speed == SPEED_1000) {
2987 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2988
2989 if (tp->link_config.duplex == DUPLEX_FULL)
2990 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2991 else
2992 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2993 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2994 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2995 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2996 MII_TG3_CTRL_ENABLE_AS_MASTER);
2997 } else {
2998 if (tp->link_config.speed == SPEED_100) {
2999 if (tp->link_config.duplex == DUPLEX_FULL)
3000 new_adv |= ADVERTISE_100FULL;
3001 else
3002 new_adv |= ADVERTISE_100HALF;
3003 } else {
3004 if (tp->link_config.duplex == DUPLEX_FULL)
3005 new_adv |= ADVERTISE_10FULL;
3006 else
3007 new_adv |= ADVERTISE_10HALF;
3008 }
3009 tg3_writephy(tp, MII_ADVERTISE, new_adv);
3010
3011 new_adv = 0;
3012 }
3013
3014 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
3015 }
3016
3017 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
3018 u32 val;
3019
3020 tw32(TG3_CPMU_EEE_MODE,
3021 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3022
3023 TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3024
3025 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3026 case ASIC_REV_5717:
3027 case ASIC_REV_57765:
3028 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3029 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3030 MII_TG3_DSP_CH34TP2_HIBW01);
3031 /* Fall through */
3032 case ASIC_REV_5719:
3033 val = MII_TG3_DSP_TAP26_ALNOKO |
3034 MII_TG3_DSP_TAP26_RMRXSTO |
3035 MII_TG3_DSP_TAP26_OPCSINPT;
3036 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3037 }
3038
3039 val = 0;
3040 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3041 /* Advertise 100-BaseTX EEE ability */
3042 if (tp->link_config.advertising &
3043 ADVERTISED_100baseT_Full)
3044 val |= MDIO_AN_EEE_ADV_100TX;
3045 /* Advertise 1000-BaseT EEE ability */
3046 if (tp->link_config.advertising &
3047 ADVERTISED_1000baseT_Full)
3048 val |= MDIO_AN_EEE_ADV_1000T;
3049 }
3050 tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3051
3052 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3053 }
3054
3055 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3056 tp->link_config.speed != SPEED_INVALID) {
3057 u32 bmcr, orig_bmcr;
3058
3059 tp->link_config.active_speed = tp->link_config.speed;
3060 tp->link_config.active_duplex = tp->link_config.duplex;
3061
3062 bmcr = 0;
3063 switch (tp->link_config.speed) {
3064 default:
3065 case SPEED_10:
3066 break;
3067
3068 case SPEED_100:
3069 bmcr |= BMCR_SPEED100;
3070 break;
3071
3072 case SPEED_1000:
3073 bmcr |= TG3_BMCR_SPEED1000;
3074 break;
3075 }
3076
3077 if (tp->link_config.duplex == DUPLEX_FULL)
3078 bmcr |= BMCR_FULLDPLX;
3079
3080 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3081 (bmcr != orig_bmcr)) {
3082 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3083 for (i = 0; i < 1500; i++) {
3084 u32 tmp;
3085
3086 udelay(10);
3087 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3088 tg3_readphy(tp, MII_BMSR, &tmp))
3089 continue;
3090 if (!(tmp & BMSR_LSTATUS)) {
3091 udelay(40);
3092 break;
3093 }
3094 }
3095 tg3_writephy(tp, MII_BMCR, bmcr);
3096 udelay(40);
3097 }
3098 } else {
3099 tg3_writephy(tp, MII_BMCR,
3100 BMCR_ANENABLE | BMCR_ANRESTART);
3101 }
3102 }
3103
3104 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3105 {
3106 int err;
3107
3108 /* Turn off tap power management. */
3109 /* Set Extended packet length bit */
3110 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3111
3112 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3113 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3114 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3115 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3116 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3117
3118 udelay(40);
3119
3120 return err;
3121 }
3122
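/* Return 1 if every mode in @mask is currently being advertised in
 * both MII_ADVERTISE and MII_TG3_CTRL, 0 otherwise.
 */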
3123 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3124 {
3125 u32 adv_reg, all_mask = 0;
3126
3127 if (mask & ADVERTISED_10baseT_Half)
3128 all_mask |= ADVERTISE_10HALF;
3129 if (mask & ADVERTISED_10baseT_Full)
3130 all_mask |= ADVERTISE_10FULL;
3131 if (mask & ADVERTISED_100baseT_Half)
3132 all_mask |= ADVERTISE_100HALF;
3133 if (mask & ADVERTISED_100baseT_Full)
3134 all_mask |= ADVERTISE_100FULL;
3135
3136 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3137 return 0;
3138
3139 if ((adv_reg & all_mask) != all_mask)
3140 return 0;
3141 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3142 u32 tg3_ctrl;
3143
3144 all_mask = 0;
3145 if (mask & ADVERTISED_1000baseT_Half)
3146 all_mask |= ADVERTISE_1000HALF;
3147 if (mask & ADVERTISED_1000baseT_Full)
3148 all_mask |= ADVERTISE_1000FULL;
3149
3150 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3151 return 0;
3152
3153 if ((tg3_ctrl & all_mask) != all_mask)
3154 return 0;
3155 }
3156 return 1;
3157 }
3158
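/* Check that the advertised flow control matches what the link
 * configuration requests.  On a half-duplex link the advertisement
 * register is rewritten so that a future renegotiation picks up
 * the correct pause bits.
 */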
3159 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3160 {
3161 u32 curadv, reqadv;
3162
3163 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3164 return 1;
3165
3166 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3167 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3168
3169 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3170 if (curadv != reqadv)
3171 return 0;
3172
3173 if (tg3_flag(tp, PAUSE_AUTONEG))
3174 tg3_readphy(tp, MII_LPA, rmtadv);
3175 } else {
3176 /* Reprogram the advertisement register, even if it
3177 * does not affect the current link. If the link
3178 * gets renegotiated in the future, we can save an
3179 * additional renegotiation cycle by advertising
3180 * it correctly in the first place.
3181 */
3182 if (curadv != reqadv) {
3183 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3184 ADVERTISE_PAUSE_ASYM);
3185 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3186 }
3187 }
3188
3189 return 1;
3190 }
3191
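/* Bring up, or re-evaluate, the link on a copper PHY: apply errata
 * resets, poll BMSR and AUX_STAT for speed and duplex, set up flow
 * control and the MAC mode, and report carrier changes.
 */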
3192 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3193 {
3194 int current_link_up;
3195 u32 bmsr, val;
3196 u32 lcl_adv, rmt_adv;
3197 u16 current_speed;
3198 u8 current_duplex;
3199 int i, err;
3200
3201 tw32(MAC_EVENT, 0);
3202
3203 tw32_f(MAC_STATUS,
3204 (MAC_STATUS_SYNC_CHANGED |
3205 MAC_STATUS_CFG_CHANGED |
3206 MAC_STATUS_MI_COMPLETION |
3207 MAC_STATUS_LNKSTATE_CHANGED));
3208 udelay(40);
3209
3210 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3211 tw32_f(MAC_MI_MODE,
3212 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3213 udelay(80);
3214 }
3215
3216 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3217
3218 /* Some third-party PHYs need to be reset on link going
3219 * down.
3220 */
3221 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3222 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3223 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3224 netif_carrier_ok(tp->dev)) {
3225 tg3_readphy(tp, MII_BMSR, &bmsr);
3226 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3227 !(bmsr & BMSR_LSTATUS))
3228 force_reset = 1;
3229 }
3230 if (force_reset)
3231 tg3_phy_reset(tp);
3232
3233 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3234 tg3_readphy(tp, MII_BMSR, &bmsr);
3235 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3236 !tg3_flag(tp, INIT_COMPLETE))
3237 bmsr = 0;
3238
3239 if (!(bmsr & BMSR_LSTATUS)) {
3240 err = tg3_init_5401phy_dsp(tp);
3241 if (err)
3242 return err;
3243
3244 tg3_readphy(tp, MII_BMSR, &bmsr);
3245 for (i = 0; i < 1000; i++) {
3246 udelay(10);
3247 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3248 (bmsr & BMSR_LSTATUS)) {
3249 udelay(40);
3250 break;
3251 }
3252 }
3253
3254 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3255 TG3_PHY_REV_BCM5401_B0 &&
3256 !(bmsr & BMSR_LSTATUS) &&
3257 tp->link_config.active_speed == SPEED_1000) {
3258 err = tg3_phy_reset(tp);
3259 if (!err)
3260 err = tg3_init_5401phy_dsp(tp);
3261 if (err)
3262 return err;
3263 }
3264 }
3265 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3266 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3267 /* 5701 {A0,B0} CRC bug workaround */
3268 tg3_writephy(tp, 0x15, 0x0a75);
3269 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3270 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3271 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3272 }
3273
3274 /* Clear pending interrupts... */
3275 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3276 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3277
3278 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3279 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3280 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3281 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3282
3283 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3284 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3285 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3286 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3287 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3288 else
3289 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3290 }
3291
3292 current_link_up = 0;
3293 current_speed = SPEED_INVALID;
3294 current_duplex = DUPLEX_INVALID;
3295
3296 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3297 err = tg3_phy_auxctl_read(tp,
3298 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3299 &val);
3300 if (!err && !(val & (1 << 10))) {
3301 tg3_phy_auxctl_write(tp,
3302 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3303 val | (1 << 10));
3304 goto relink;
3305 }
3306 }
3307
3308 bmsr = 0;
3309 for (i = 0; i < 100; i++) {
3310 tg3_readphy(tp, MII_BMSR, &bmsr);
3311 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3312 (bmsr & BMSR_LSTATUS))
3313 break;
3314 udelay(40);
3315 }
3316
3317 if (bmsr & BMSR_LSTATUS) {
3318 u32 aux_stat, bmcr;
3319
3320 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3321 for (i = 0; i < 2000; i++) {
3322 udelay(10);
3323 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3324 aux_stat)
3325 break;
3326 }
3327
3328 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3329 &current_speed,
3330 &current_duplex);
3331
3332 bmcr = 0;
3333 for (i = 0; i < 200; i++) {
3334 tg3_readphy(tp, MII_BMCR, &bmcr);
3335 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3336 continue;
3337 if (bmcr && bmcr != 0x7fff)
3338 break;
3339 udelay(10);
3340 }
3341
3342 lcl_adv = 0;
3343 rmt_adv = 0;
3344
3345 tp->link_config.active_speed = current_speed;
3346 tp->link_config.active_duplex = current_duplex;
3347
3348 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3349 if ((bmcr & BMCR_ANENABLE) &&
3350 tg3_copper_is_advertising_all(tp,
3351 tp->link_config.advertising)) {
3352 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3353 &rmt_adv))
3354 current_link_up = 1;
3355 }
3356 } else {
3357 if (!(bmcr & BMCR_ANENABLE) &&
3358 tp->link_config.speed == current_speed &&
3359 tp->link_config.duplex == current_duplex &&
3360 tp->link_config.flowctrl ==
3361 tp->link_config.active_flowctrl) {
3362 current_link_up = 1;
3363 }
3364 }
3365
3366 if (current_link_up == 1 &&
3367 tp->link_config.active_duplex == DUPLEX_FULL)
3368 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3369 }
3370
3371 relink:
3372 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3373 tg3_phy_copper_begin(tp);
3374
3375 tg3_readphy(tp, MII_BMSR, &bmsr);
3376 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3377 (bmsr & BMSR_LSTATUS))
3378 current_link_up = 1;
3379 }
3380
3381 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3382 if (current_link_up == 1) {
3383 if (tp->link_config.active_speed == SPEED_100 ||
3384 tp->link_config.active_speed == SPEED_10)
3385 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3386 else
3387 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3388 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3389 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3390 else
3391 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3392
3393 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3394 if (tp->link_config.active_duplex == DUPLEX_HALF)
3395 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3396
3397 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3398 if (current_link_up == 1 &&
3399 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3400 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3401 else
3402 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3403 }
3404
3405 /* ??? Without this setting Netgear GA302T PHY does not
3406 * ??? send/receive packets...
3407 */
3408 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3409 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3410 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3411 tw32_f(MAC_MI_MODE, tp->mi_mode);
3412 udelay(80);
3413 }
3414
3415 tw32_f(MAC_MODE, tp->mac_mode);
3416 udelay(40);
3417
3418 tg3_phy_eee_adjust(tp, current_link_up);
3419
3420 if (tg3_flag(tp, USE_LINKCHG_REG)) {
3421 /* Polled via timer. */
3422 tw32_f(MAC_EVENT, 0);
3423 } else {
3424 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3425 }
3426 udelay(40);
3427
3428 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3429 current_link_up == 1 &&
3430 tp->link_config.active_speed == SPEED_1000 &&
3431 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3432 udelay(120);
3433 tw32_f(MAC_STATUS,
3434 (MAC_STATUS_SYNC_CHANGED |
3435 MAC_STATUS_CFG_CHANGED));
3436 udelay(40);
3437 tg3_write_mem(tp,
3438 NIC_SRAM_FIRMWARE_MBOX,
3439 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3440 }
3441
3442 /* Prevent send BD corruption. */
3443 if (tg3_flag(tp, CLKREQ_BUG)) {
3444 u16 oldlnkctl, newlnkctl;
3445
3446 pci_read_config_word(tp->pdev,
3447 tp->pcie_cap + PCI_EXP_LNKCTL,
3448 &oldlnkctl);
3449 if (tp->link_config.active_speed == SPEED_100 ||
3450 tp->link_config.active_speed == SPEED_10)
3451 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3452 else
3453 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3454 if (newlnkctl != oldlnkctl)
3455 pci_write_config_word(tp->pdev,
3456 tp->pcie_cap + PCI_EXP_LNKCTL,
3457 newlnkctl);
3458 }
3459
3460 if (current_link_up != netif_carrier_ok(tp->dev)) {
3461 if (current_link_up)
3462 netif_carrier_on(tp->dev);
3463 else
3464 netif_carrier_off(tp->dev);
3465 tg3_link_report(tp);
3466 }
3467
3468 return 0;
3469 }
3470
3471 struct tg3_fiber_aneginfo {
3472 int state;
3473 #define ANEG_STATE_UNKNOWN 0
3474 #define ANEG_STATE_AN_ENABLE 1
3475 #define ANEG_STATE_RESTART_INIT 2
3476 #define ANEG_STATE_RESTART 3
3477 #define ANEG_STATE_DISABLE_LINK_OK 4
3478 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3479 #define ANEG_STATE_ABILITY_DETECT 6
3480 #define ANEG_STATE_ACK_DETECT_INIT 7
3481 #define ANEG_STATE_ACK_DETECT 8
3482 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3483 #define ANEG_STATE_COMPLETE_ACK 10
3484 #define ANEG_STATE_IDLE_DETECT_INIT 11
3485 #define ANEG_STATE_IDLE_DETECT 12
3486 #define ANEG_STATE_LINK_OK 13
3487 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3488 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3489
3490 u32 flags;
3491 #define MR_AN_ENABLE 0x00000001
3492 #define MR_RESTART_AN 0x00000002
3493 #define MR_AN_COMPLETE 0x00000004
3494 #define MR_PAGE_RX 0x00000008
3495 #define MR_NP_LOADED 0x00000010
3496 #define MR_TOGGLE_TX 0x00000020
3497 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3498 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3499 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3500 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3501 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3502 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3503 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3504 #define MR_TOGGLE_RX 0x00002000
3505 #define MR_NP_RX 0x00004000
3506
3507 #define MR_LINK_OK 0x80000000
3508
3509 unsigned long link_time, cur_time;
3510
3511 u32 ability_match_cfg;
3512 int ability_match_count;
3513
3514 char ability_match, idle_match, ack_match;
3515
3516 u32 txconfig, rxconfig;
3517 #define ANEG_CFG_NP 0x00000080
3518 #define ANEG_CFG_ACK 0x00000040
3519 #define ANEG_CFG_RF2 0x00000020
3520 #define ANEG_CFG_RF1 0x00000010
3521 #define ANEG_CFG_PS2 0x00000001
3522 #define ANEG_CFG_PS1 0x00008000
3523 #define ANEG_CFG_HD 0x00004000
3524 #define ANEG_CFG_FD 0x00002000
3525 #define ANEG_CFG_INVAL 0x00001f06
3526
3527 };
3528 #define ANEG_OK 0
3529 #define ANEG_DONE 1
3530 #define ANEG_TIMER_ENAB 2
3531 #define ANEG_FAILED -1
3532
3533 #define ANEG_STATE_SETTLE_TIME 10000
3534
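/* Run one step of the software 1000BASE-X autonegotiation state
 * machine; returns ANEG_OK, ANEG_DONE, ANEG_TIMER_ENAB or
 * ANEG_FAILED.  Called in a tight loop by fiber_autoneg().
 */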
3535 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3536 struct tg3_fiber_aneginfo *ap)
3537 {
3538 u16 flowctrl;
3539 unsigned long delta;
3540 u32 rx_cfg_reg;
3541 int ret;
3542
3543 if (ap->state == ANEG_STATE_UNKNOWN) {
3544 ap->rxconfig = 0;
3545 ap->link_time = 0;
3546 ap->cur_time = 0;
3547 ap->ability_match_cfg = 0;
3548 ap->ability_match_count = 0;
3549 ap->ability_match = 0;
3550 ap->idle_match = 0;
3551 ap->ack_match = 0;
3552 }
3553 ap->cur_time++;
3554
3555 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3556 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3557
3558 if (rx_cfg_reg != ap->ability_match_cfg) {
3559 ap->ability_match_cfg = rx_cfg_reg;
3560 ap->ability_match = 0;
3561 ap->ability_match_count = 0;
3562 } else {
3563 if (++ap->ability_match_count > 1) {
3564 ap->ability_match = 1;
3565 ap->ability_match_cfg = rx_cfg_reg;
3566 }
3567 }
3568 if (rx_cfg_reg & ANEG_CFG_ACK)
3569 ap->ack_match = 1;
3570 else
3571 ap->ack_match = 0;
3572
3573 ap->idle_match = 0;
3574 } else {
3575 ap->idle_match = 1;
3576 ap->ability_match_cfg = 0;
3577 ap->ability_match_count = 0;
3578 ap->ability_match = 0;
3579 ap->ack_match = 0;
3580
3581 rx_cfg_reg = 0;
3582 }
3583
3584 ap->rxconfig = rx_cfg_reg;
3585 ret = ANEG_OK;
3586
3587 switch (ap->state) {
3588 case ANEG_STATE_UNKNOWN:
3589 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3590 ap->state = ANEG_STATE_AN_ENABLE;
3591
3592 /* fallthru */
3593 case ANEG_STATE_AN_ENABLE:
3594 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3595 if (ap->flags & MR_AN_ENABLE) {
3596 ap->link_time = 0;
3597 ap->cur_time = 0;
3598 ap->ability_match_cfg = 0;
3599 ap->ability_match_count = 0;
3600 ap->ability_match = 0;
3601 ap->idle_match = 0;
3602 ap->ack_match = 0;
3603
3604 ap->state = ANEG_STATE_RESTART_INIT;
3605 } else {
3606 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3607 }
3608 break;
3609
3610 case ANEG_STATE_RESTART_INIT:
3611 ap->link_time = ap->cur_time;
3612 ap->flags &= ~(MR_NP_LOADED);
3613 ap->txconfig = 0;
3614 tw32(MAC_TX_AUTO_NEG, 0);
3615 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3616 tw32_f(MAC_MODE, tp->mac_mode);
3617 udelay(40);
3618
3619 ret = ANEG_TIMER_ENAB;
3620 ap->state = ANEG_STATE_RESTART;
3621
3622 /* fallthru */
3623 case ANEG_STATE_RESTART:
3624 delta = ap->cur_time - ap->link_time;
3625 if (delta > ANEG_STATE_SETTLE_TIME)
3626 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3627 else
3628 ret = ANEG_TIMER_ENAB;
3629 break;
3630
3631 case ANEG_STATE_DISABLE_LINK_OK:
3632 ret = ANEG_DONE;
3633 break;
3634
3635 case ANEG_STATE_ABILITY_DETECT_INIT:
3636 ap->flags &= ~(MR_TOGGLE_TX);
3637 ap->txconfig = ANEG_CFG_FD;
3638 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3639 if (flowctrl & ADVERTISE_1000XPAUSE)
3640 ap->txconfig |= ANEG_CFG_PS1;
3641 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3642 ap->txconfig |= ANEG_CFG_PS2;
3643 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3644 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3645 tw32_f(MAC_MODE, tp->mac_mode);
3646 udelay(40);
3647
3648 ap->state = ANEG_STATE_ABILITY_DETECT;
3649 break;
3650
3651 case ANEG_STATE_ABILITY_DETECT:
3652 if (ap->ability_match != 0 && ap->rxconfig != 0)
3653 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3654 break;
3655
3656 case ANEG_STATE_ACK_DETECT_INIT:
3657 ap->txconfig |= ANEG_CFG_ACK;
3658 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3659 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3660 tw32_f(MAC_MODE, tp->mac_mode);
3661 udelay(40);
3662
3663 ap->state = ANEG_STATE_ACK_DETECT;
3664
3665 /* fallthru */
3666 case ANEG_STATE_ACK_DETECT:
3667 if (ap->ack_match != 0) {
3668 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3669 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3670 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3671 } else {
3672 ap->state = ANEG_STATE_AN_ENABLE;
3673 }
3674 } else if (ap->ability_match != 0 &&
3675 ap->rxconfig == 0) {
3676 ap->state = ANEG_STATE_AN_ENABLE;
3677 }
3678 break;
3679
3680 case ANEG_STATE_COMPLETE_ACK_INIT:
3681 if (ap->rxconfig & ANEG_CFG_INVAL) {
3682 ret = ANEG_FAILED;
3683 break;
3684 }
3685 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3686 MR_LP_ADV_HALF_DUPLEX |
3687 MR_LP_ADV_SYM_PAUSE |
3688 MR_LP_ADV_ASYM_PAUSE |
3689 MR_LP_ADV_REMOTE_FAULT1 |
3690 MR_LP_ADV_REMOTE_FAULT2 |
3691 MR_LP_ADV_NEXT_PAGE |
3692 MR_TOGGLE_RX |
3693 MR_NP_RX);
3694 if (ap->rxconfig & ANEG_CFG_FD)
3695 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3696 if (ap->rxconfig & ANEG_CFG_HD)
3697 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3698 if (ap->rxconfig & ANEG_CFG_PS1)
3699 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3700 if (ap->rxconfig & ANEG_CFG_PS2)
3701 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3702 if (ap->rxconfig & ANEG_CFG_RF1)
3703 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3704 if (ap->rxconfig & ANEG_CFG_RF2)
3705 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3706 if (ap->rxconfig & ANEG_CFG_NP)
3707 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3708
3709 ap->link_time = ap->cur_time;
3710
3711 ap->flags ^= (MR_TOGGLE_TX);
3712 if (ap->rxconfig & 0x0008)
3713 ap->flags |= MR_TOGGLE_RX;
3714 if (ap->rxconfig & ANEG_CFG_NP)
3715 ap->flags |= MR_NP_RX;
3716 ap->flags |= MR_PAGE_RX;
3717
3718 ap->state = ANEG_STATE_COMPLETE_ACK;
3719 ret = ANEG_TIMER_ENAB;
3720 break;
3721
3722 case ANEG_STATE_COMPLETE_ACK:
3723 if (ap->ability_match != 0 &&
3724 ap->rxconfig == 0) {
3725 ap->state = ANEG_STATE_AN_ENABLE;
3726 break;
3727 }
3728 delta = ap->cur_time - ap->link_time;
3729 if (delta > ANEG_STATE_SETTLE_TIME) {
3730 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3731 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3732 } else {
3733 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3734 !(ap->flags & MR_NP_RX)) {
3735 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3736 } else {
3737 ret = ANEG_FAILED;
3738 }
3739 }
3740 }
3741 break;
3742
3743 case ANEG_STATE_IDLE_DETECT_INIT:
3744 ap->link_time = ap->cur_time;
3745 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3746 tw32_f(MAC_MODE, tp->mac_mode);
3747 udelay(40);
3748
3749 ap->state = ANEG_STATE_IDLE_DETECT;
3750 ret = ANEG_TIMER_ENAB;
3751 break;
3752
3753 case ANEG_STATE_IDLE_DETECT:
3754 if (ap->ability_match != 0 &&
3755 ap->rxconfig == 0) {
3756 ap->state = ANEG_STATE_AN_ENABLE;
3757 break;
3758 }
3759 delta = ap->cur_time - ap->link_time;
3760 if (delta > ANEG_STATE_SETTLE_TIME) {
3761 /* XXX another gem from the Broadcom driver :( */
3762 ap->state = ANEG_STATE_LINK_OK;
3763 }
3764 break;
3765
3766 case ANEG_STATE_LINK_OK:
3767 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3768 ret = ANEG_DONE;
3769 break;
3770
3771 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3772 /* ??? unimplemented */
3773 break;
3774
3775 case ANEG_STATE_NEXT_PAGE_WAIT:
3776 /* ??? unimplemented */
3777 break;
3778
3779 default:
3780 ret = ANEG_FAILED;
3781 break;
3782 }
3783
3784 return ret;
3785 }
3786
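/* Drive the software autoneg state machine to completion or to a
 * timeout of roughly 195 msec; returns 1 on success along with the
 * transmitted and received config words.
 */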
3787 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3788 {
3789 int res = 0;
3790 struct tg3_fiber_aneginfo aninfo;
3791 int status = ANEG_FAILED;
3792 unsigned int tick;
3793 u32 tmp;
3794
3795 tw32_f(MAC_TX_AUTO_NEG, 0);
3796
3797 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3798 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3799 udelay(40);
3800
3801 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3802 udelay(40);
3803
3804 memset(&aninfo, 0, sizeof(aninfo));
3805 aninfo.flags |= MR_AN_ENABLE;
3806 aninfo.state = ANEG_STATE_UNKNOWN;
3807 aninfo.cur_time = 0;
3808 tick = 0;
3809 while (++tick < 195000) {
3810 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3811 if (status == ANEG_DONE || status == ANEG_FAILED)
3812 break;
3813
3814 udelay(1);
3815 }
3816
3817 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3818 tw32_f(MAC_MODE, tp->mac_mode);
3819 udelay(40);
3820
3821 *txflags = aninfo.txconfig;
3822 *rxflags = aninfo.flags;
3823
3824 if (status == ANEG_DONE &&
3825 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3826 MR_LP_ADV_FULL_DUPLEX)))
3827 res = 1;
3828
3829 return res;
3830 }
3831
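/* Hardware init sequence for the BCM8002 SerDes PHY: set the PLL
 * lock range, soft reset, configure auto-lock and comdet, then
 * pulse POR and wait for the signal to stabilize.
 */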
3832 static void tg3_init_bcm8002(struct tg3 *tp)
3833 {
3834 u32 mac_status = tr32(MAC_STATUS);
3835 int i;
3836
3837 /* Reset when initializing for the first time, or when we have a link. */
3838 if (tg3_flag(tp, INIT_COMPLETE) &&
3839 !(mac_status & MAC_STATUS_PCS_SYNCED))
3840 return;
3841
3842 /* Set PLL lock range. */
3843 tg3_writephy(tp, 0x16, 0x8007);
3844
3845 /* SW reset */
3846 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3847
3848 /* Wait for reset to complete. */
3849 /* XXX schedule_timeout() ... */
3850 for (i = 0; i < 500; i++)
3851 udelay(10);
3852
3853 /* Config mode; select PMA/Ch 1 regs. */
3854 tg3_writephy(tp, 0x10, 0x8411);
3855
3856 /* Enable auto-lock and comdet, select txclk for tx. */
3857 tg3_writephy(tp, 0x11, 0x0a10);
3858
3859 tg3_writephy(tp, 0x18, 0x00a0);
3860 tg3_writephy(tp, 0x16, 0x41ff);
3861
3862 /* Assert and deassert POR. */
3863 tg3_writephy(tp, 0x13, 0x0400);
3864 udelay(40);
3865 tg3_writephy(tp, 0x13, 0x0000);
3866
3867 tg3_writephy(tp, 0x11, 0x0a50);
3868 udelay(40);
3869 tg3_writephy(tp, 0x11, 0x0a10);
3870
3871 /* Wait for signal to stabilize */
3872 /* XXX schedule_timeout() ... */
3873 for (i = 0; i < 15000; i++)
3874 udelay(10);
3875
3876 /* Deselect the channel register so we can read the PHYID
3877 * later.
3878 */
3879 tg3_writephy(tp, 0x10, 0x8011);
3880 }
3881
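/* Bring up the link on fiber parts that use the SG_DIG hardware
 * autoneg block, including the MAC_SERDES_CFG workaround applied
 * on everything except the 5704 A0/A1 revisions.
 */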
3882 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3883 {
3884 u16 flowctrl;
3885 u32 sg_dig_ctrl, sg_dig_status;
3886 u32 serdes_cfg, expected_sg_dig_ctrl;
3887 int workaround, port_a;
3888 int current_link_up;
3889
3890 serdes_cfg = 0;
3891 expected_sg_dig_ctrl = 0;
3892 workaround = 0;
3893 port_a = 1;
3894 current_link_up = 0;
3895
3896 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3897 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3898 workaround = 1;
3899 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3900 port_a = 0;
3901
3902 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3903 /* preserve bits 20-23 for voltage regulator */
3904 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3905 }
3906
3907 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3908
3909 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3910 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3911 if (workaround) {
3912 u32 val = serdes_cfg;
3913
3914 if (port_a)
3915 val |= 0xc010000;
3916 else
3917 val |= 0x4010000;
3918 tw32_f(MAC_SERDES_CFG, val);
3919 }
3920
3921 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3922 }
3923 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3924 tg3_setup_flow_control(tp, 0, 0);
3925 current_link_up = 1;
3926 }
3927 goto out;
3928 }
3929
3930 /* Want auto-negotiation. */
3931 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3932
3933 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3934 if (flowctrl & ADVERTISE_1000XPAUSE)
3935 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3936 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3937 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3938
3939 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3940 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3941 tp->serdes_counter &&
3942 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3943 MAC_STATUS_RCVD_CFG)) ==
3944 MAC_STATUS_PCS_SYNCED)) {
3945 tp->serdes_counter--;
3946 current_link_up = 1;
3947 goto out;
3948 }
3949 restart_autoneg:
3950 if (workaround)
3951 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3952 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3953 udelay(5);
3954 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3955
3956 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3957 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3958 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3959 MAC_STATUS_SIGNAL_DET)) {
3960 sg_dig_status = tr32(SG_DIG_STATUS);
3961 mac_status = tr32(MAC_STATUS);
3962
3963 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3964 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3965 u32 local_adv = 0, remote_adv = 0;
3966
3967 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3968 local_adv |= ADVERTISE_1000XPAUSE;
3969 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3970 local_adv |= ADVERTISE_1000XPSE_ASYM;
3971
3972 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3973 remote_adv |= LPA_1000XPAUSE;
3974 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3975 remote_adv |= LPA_1000XPAUSE_ASYM;
3976
3977 tg3_setup_flow_control(tp, local_adv, remote_adv);
3978 current_link_up = 1;
3979 tp->serdes_counter = 0;
3980 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3981 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3982 if (tp->serdes_counter)
3983 tp->serdes_counter--;
3984 else {
3985 if (workaround) {
3986 u32 val = serdes_cfg;
3987
3988 if (port_a)
3989 val |= 0xc010000;
3990 else
3991 val |= 0x4010000;
3992
3993 tw32_f(MAC_SERDES_CFG, val);
3994 }
3995
3996 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3997 udelay(40);
3998
3999 /* Link parallel detection: link is up only if
4000 * we have PCS_SYNC and are not receiving
4001 * config code words. */
4002 mac_status = tr32(MAC_STATUS);
4003 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4004 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4005 tg3_setup_flow_control(tp, 0, 0);
4006 current_link_up = 1;
4007 tp->phy_flags |=
4008 TG3_PHYFLG_PARALLEL_DETECT;
4009 tp->serdes_counter =
4010 SERDES_PARALLEL_DET_TIMEOUT;
4011 } else
4012 goto restart_autoneg;
4013 }
4014 }
4015 } else {
4016 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4017 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4018 }
4019
4020 out:
4021 return current_link_up;
4022 }
4023
4024 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4025 {
4026 int current_link_up = 0;
4027
4028 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4029 goto out;
4030
4031 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4032 u32 txflags, rxflags;
4033 int i;
4034
4035 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4036 u32 local_adv = 0, remote_adv = 0;
4037
4038 if (txflags & ANEG_CFG_PS1)
4039 local_adv |= ADVERTISE_1000XPAUSE;
4040 if (txflags & ANEG_CFG_PS2)
4041 local_adv |= ADVERTISE_1000XPSE_ASYM;
4042
4043 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4044 remote_adv |= LPA_1000XPAUSE;
4045 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4046 remote_adv |= LPA_1000XPAUSE_ASYM;
4047
4048 tg3_setup_flow_control(tp, local_adv, remote_adv);
4049
4050 current_link_up = 1;
4051 }
4052 for (i = 0; i < 30; i++) {
4053 udelay(20);
4054 tw32_f(MAC_STATUS,
4055 (MAC_STATUS_SYNC_CHANGED |
4056 MAC_STATUS_CFG_CHANGED));
4057 udelay(40);
4058 if ((tr32(MAC_STATUS) &
4059 (MAC_STATUS_SYNC_CHANGED |
4060 MAC_STATUS_CFG_CHANGED)) == 0)
4061 break;
4062 }
4063
4064 mac_status = tr32(MAC_STATUS);
4065 if (current_link_up == 0 &&
4066 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4067 !(mac_status & MAC_STATUS_RCVD_CFG))
4068 current_link_up = 1;
4069 } else {
4070 tg3_setup_flow_control(tp, 0, 0);
4071
4072 /* Forcing 1000FD link up. */
4073 current_link_up = 1;
4074
4075 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4076 udelay(40);
4077
4078 tw32_f(MAC_MODE, tp->mac_mode);
4079 udelay(40);
4080 }
4081
4082 out:
4083 return current_link_up;
4084 }
4085
4086 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4087 {
4088 u32 orig_pause_cfg;
4089 u16 orig_active_speed;
4090 u8 orig_active_duplex;
4091 u32 mac_status;
4092 int current_link_up;
4093 int i;
4094
4095 orig_pause_cfg = tp->link_config.active_flowctrl;
4096 orig_active_speed = tp->link_config.active_speed;
4097 orig_active_duplex = tp->link_config.active_duplex;
4098
4099 if (!tg3_flag(tp, HW_AUTONEG) &&
4100 netif_carrier_ok(tp->dev) &&
4101 tg3_flag(tp, INIT_COMPLETE)) {
4102 mac_status = tr32(MAC_STATUS);
4103 mac_status &= (MAC_STATUS_PCS_SYNCED |
4104 MAC_STATUS_SIGNAL_DET |
4105 MAC_STATUS_CFG_CHANGED |
4106 MAC_STATUS_RCVD_CFG);
4107 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4108 MAC_STATUS_SIGNAL_DET)) {
4109 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4110 MAC_STATUS_CFG_CHANGED));
4111 return 0;
4112 }
4113 }
4114
4115 tw32_f(MAC_TX_AUTO_NEG, 0);
4116
4117 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4118 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4119 tw32_f(MAC_MODE, tp->mac_mode);
4120 udelay(40);
4121
4122 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4123 tg3_init_bcm8002(tp);
4124
4125 /* Enable link change events even when polling the serdes. */
4126 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4127 udelay(40);
4128
4129 current_link_up = 0;
4130 mac_status = tr32(MAC_STATUS);
4131
4132 if (tg3_flag(tp, HW_AUTONEG))
4133 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4134 else
4135 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4136
4137 tp->napi[0].hw_status->status =
4138 (SD_STATUS_UPDATED |
4139 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4140
4141 for (i = 0; i < 100; i++) {
4142 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4143 MAC_STATUS_CFG_CHANGED));
4144 udelay(5);
4145 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4146 MAC_STATUS_CFG_CHANGED |
4147 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4148 break;
4149 }
4150
4151 mac_status = tr32(MAC_STATUS);
4152 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4153 current_link_up = 0;
4154 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4155 tp->serdes_counter == 0) {
4156 tw32_f(MAC_MODE, (tp->mac_mode |
4157 MAC_MODE_SEND_CONFIGS));
4158 udelay(1);
4159 tw32_f(MAC_MODE, tp->mac_mode);
4160 }
4161 }
4162
4163 if (current_link_up == 1) {
4164 tp->link_config.active_speed = SPEED_1000;
4165 tp->link_config.active_duplex = DUPLEX_FULL;
4166 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4167 LED_CTRL_LNKLED_OVERRIDE |
4168 LED_CTRL_1000MBPS_ON));
4169 } else {
4170 tp->link_config.active_speed = SPEED_INVALID;
4171 tp->link_config.active_duplex = DUPLEX_INVALID;
4172 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4173 LED_CTRL_LNKLED_OVERRIDE |
4174 LED_CTRL_TRAFFIC_OVERRIDE));
4175 }
4176
4177 if (current_link_up != netif_carrier_ok(tp->dev)) {
4178 if (current_link_up)
4179 netif_carrier_on(tp->dev);
4180 else
4181 netif_carrier_off(tp->dev);
4182 tg3_link_report(tp);
4183 } else {
4184 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4185 if (orig_pause_cfg != now_pause_cfg ||
4186 orig_active_speed != tp->link_config.active_speed ||
4187 orig_active_duplex != tp->link_config.active_duplex)
4188 tg3_link_report(tp);
4189 }
4190
4191 return 0;
4192 }
4193
4194 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4195 {
4196 int current_link_up, err = 0;
4197 u32 bmsr, bmcr;
4198 u16 current_speed;
4199 u8 current_duplex;
4200 u32 local_adv, remote_adv;
4201
4202 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4203 tw32_f(MAC_MODE, tp->mac_mode);
4204 udelay(40);
4205
4206 tw32(MAC_EVENT, 0);
4207
4208 tw32_f(MAC_STATUS,
4209 (MAC_STATUS_SYNC_CHANGED |
4210 MAC_STATUS_CFG_CHANGED |
4211 MAC_STATUS_MI_COMPLETION |
4212 MAC_STATUS_LNKSTATE_CHANGED));
4213 udelay(40);
4214
4215 if (force_reset)
4216 tg3_phy_reset(tp);
4217
4218 current_link_up = 0;
4219 current_speed = SPEED_INVALID;
4220 current_duplex = DUPLEX_INVALID;
4221
4222 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4223 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4224 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4225 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4226 bmsr |= BMSR_LSTATUS;
4227 else
4228 bmsr &= ~BMSR_LSTATUS;
4229 }
4230
4231 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4232
4233 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4234 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4235 /* do nothing, just check for link up at the end */
4236 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4237 u32 adv, new_adv;
4238
4239 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4240 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4241 ADVERTISE_1000XPAUSE |
4242 ADVERTISE_1000XPSE_ASYM |
4243 ADVERTISE_SLCT);
4244
4245 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4246
4247 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4248 new_adv |= ADVERTISE_1000XHALF;
4249 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4250 new_adv |= ADVERTISE_1000XFULL;
4251
4252 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4253 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4254 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4255 tg3_writephy(tp, MII_BMCR, bmcr);
4256
4257 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4258 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4259 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4260
4261 return err;
4262 }
4263 } else {
4264 u32 new_bmcr;
4265
4266 bmcr &= ~BMCR_SPEED1000;
4267 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4268
4269 if (tp->link_config.duplex == DUPLEX_FULL)
4270 new_bmcr |= BMCR_FULLDPLX;
4271
4272 if (new_bmcr != bmcr) {
4273 /* BMCR_SPEED1000 is a reserved bit that needs
4274 * to be set on write.
4275 */
4276 new_bmcr |= BMCR_SPEED1000;
4277
4278 /* Force a linkdown */
4279 if (netif_carrier_ok(tp->dev)) {
4280 u32 adv;
4281
4282 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4283 adv &= ~(ADVERTISE_1000XFULL |
4284 ADVERTISE_1000XHALF |
4285 ADVERTISE_SLCT);
4286 tg3_writephy(tp, MII_ADVERTISE, adv);
4287 tg3_writephy(tp, MII_BMCR, bmcr |
4288 BMCR_ANRESTART |
4289 BMCR_ANENABLE);
4290 udelay(10);
4291 netif_carrier_off(tp->dev);
4292 }
4293 tg3_writephy(tp, MII_BMCR, new_bmcr);
4294 bmcr = new_bmcr;
4295 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4296 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4297 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4298 ASIC_REV_5714) {
4299 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4300 bmsr |= BMSR_LSTATUS;
4301 else
4302 bmsr &= ~BMSR_LSTATUS;
4303 }
4304 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4305 }
4306 }
4307
4308 if (bmsr & BMSR_LSTATUS) {
4309 current_speed = SPEED_1000;
4310 current_link_up = 1;
4311 if (bmcr & BMCR_FULLDPLX)
4312 current_duplex = DUPLEX_FULL;
4313 else
4314 current_duplex = DUPLEX_HALF;
4315
4316 local_adv = 0;
4317 remote_adv = 0;
4318
4319 if (bmcr & BMCR_ANENABLE) {
4320 u32 common;
4321
4322 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4323 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4324 common = local_adv & remote_adv;
4325 if (common & (ADVERTISE_1000XHALF |
4326 ADVERTISE_1000XFULL)) {
4327 if (common & ADVERTISE_1000XFULL)
4328 current_duplex = DUPLEX_FULL;
4329 else
4330 current_duplex = DUPLEX_HALF;
4331 } else if (!tg3_flag(tp, 5780_CLASS)) {
4332 /* Link is up via parallel detect */
4333 } else {
4334 current_link_up = 0;
4335 }
4336 }
4337 }
4338
4339 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4340 tg3_setup_flow_control(tp, local_adv, remote_adv);
4341
4342 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4343 if (tp->link_config.active_duplex == DUPLEX_HALF)
4344 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4345
4346 tw32_f(MAC_MODE, tp->mac_mode);
4347 udelay(40);
4348
4349 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4350
4351 tp->link_config.active_speed = current_speed;
4352 tp->link_config.active_duplex = current_duplex;
4353
4354 if (current_link_up != netif_carrier_ok(tp->dev)) {
4355 if (current_link_up)
4356 netif_carrier_on(tp->dev);
4357 else {
4358 netif_carrier_off(tp->dev);
4359 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4360 }
4361 tg3_link_report(tp);
4362 }
4363 return err;
4364 }
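/* Editor's sketch (standalone illustration, not driver code): the duplex
 * resolution above follows the usual 1000BASE-X rule -- AND the local and
 * remote advertisements together and prefer full duplex when both ends
 * offer it.
 */
static inline u8 tg3_resolve_duplex_sketch(u32 local_adv, u32 remote_adv)
{
	u32 common = local_adv & remote_adv;	/* modes both ends advertise */

	return (common & ADVERTISE_1000XFULL) ? DUPLEX_FULL : DUPLEX_HALF;
}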
4365
4366 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4367 {
4368 if (tp->serdes_counter) {
4369 /* Give autoneg time to complete. */
4370 tp->serdes_counter--;
4371 return;
4372 }
4373
4374 if (!netif_carrier_ok(tp->dev) &&
4375 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4376 u32 bmcr;
4377
4378 tg3_readphy(tp, MII_BMCR, &bmcr);
4379 if (bmcr & BMCR_ANENABLE) {
4380 u32 phy1, phy2;
4381
4382 /* Select shadow register 0x1f */
4383 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4384 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4385
4386 /* Select expansion interrupt status register */
4387 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4388 MII_TG3_DSP_EXP1_INT_STAT);
4389 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4390 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4391
4392 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4393 /* We have signal detect and are not receiving
4394 * config code words, so the link is up by
4395 * parallel detection.
4396 */
4397
4398 bmcr &= ~BMCR_ANENABLE;
4399 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4400 tg3_writephy(tp, MII_BMCR, bmcr);
4401 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4402 }
4403 }
4404 } else if (netif_carrier_ok(tp->dev) &&
4405 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4406 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4407 u32 phy2;
4408
4409 /* Select expansion interrupt status register */
4410 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4411 MII_TG3_DSP_EXP1_INT_STAT);
4412 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4413 if (phy2 & 0x20) {
4414 u32 bmcr;
4415
4416 /* Config code words received, turn on autoneg. */
4417 tg3_readphy(tp, MII_BMCR, &bmcr);
4418 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4419
4420 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4421
4422 }
4423 }
4424 }
4425
4426 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4427 {
4428 u32 val;
4429 int err;
4430
4431 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4432 err = tg3_setup_fiber_phy(tp, force_reset);
4433 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4434 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4435 else
4436 err = tg3_setup_copper_phy(tp, force_reset);
4437
4438 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4439 u32 scale;
4440
4441 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4442 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4443 scale = 65;
4444 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4445 scale = 6;
4446 else
4447 scale = 12;
4448
4449 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4450 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4451 tw32(GRC_MISC_CFG, val);
4452 }
4453
4454 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4455 (6 << TX_LENGTHS_IPG_SHIFT);
4456 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4457 val |= tr32(MAC_TX_LENGTHS) &
4458 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4459 TX_LENGTHS_CNT_DWN_VAL_MSK);
4460
4461 if (tp->link_config.active_speed == SPEED_1000 &&
4462 tp->link_config.active_duplex == DUPLEX_HALF)
4463 tw32(MAC_TX_LENGTHS, val |
4464 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4465 else
4466 tw32(MAC_TX_LENGTHS, val |
4467 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4468
4469 if (!tg3_flag(tp, 5705_PLUS)) {
4470 if (netif_carrier_ok(tp->dev)) {
4471 tw32(HOSTCC_STAT_COAL_TICKS,
4472 tp->coal.stats_block_coalesce_usecs);
4473 } else {
4474 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4475 }
4476 }
4477
4478 if (tg3_flag(tp, ASPM_WORKAROUND)) {
4479 val = tr32(PCIE_PWR_MGMT_THRESH);
4480 if (!netif_carrier_ok(tp->dev))
4481 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4482 tp->pwrmgmt_thresh;
4483 else
4484 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4485 tw32(PCIE_PWR_MGMT_THRESH, val);
4486 }
4487
4488 return err;
4489 }
4490
4491 static inline int tg3_irq_sync(struct tg3 *tp)
4492 {
4493 return tp->irq_sync;
4494 }
4495
4496 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4497 {
4498 int i;
4499
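	/* Bias dst by the register offset so the dump buffer mirrors the
	 * register address space: the register at (off + i) lands at byte
	 * offset (off + i) within the original buffer.
	 */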
4500 dst = (u32 *)((u8 *)dst + off);
4501 for (i = 0; i < len; i += sizeof(u32))
4502 *dst++ = tr32(off + i);
4503 }
4504
4505 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4506 {
4507 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4508 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4509 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4510 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4511 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4512 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4513 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4514 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4515 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4516 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4517 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4518 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4519 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4520 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4521 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4522 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4523 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4524 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4525 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4526
4527 if (tg3_flag(tp, SUPPORT_MSIX))
4528 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4529
4530 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4531 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4532 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4533 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4534 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4535 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4536 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4537 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4538
4539 if (!tg3_flag(tp, 5705_PLUS)) {
4540 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4541 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4542 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4543 }
4544
4545 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4546 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4547 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4548 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4549 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4550
4551 if (tg3_flag(tp, NVRAM))
4552 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4553 }
4554
4555 static void tg3_dump_state(struct tg3 *tp)
4556 {
4557 int i;
4558 u32 *regs;
4559
4560 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4561 if (!regs) {
4562 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4563 return;
4564 }
4565
4566 if (tg3_flag(tp, PCI_EXPRESS)) {
4567 /* Read up to but not including private PCI registers */
4568 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4569 regs[i / sizeof(u32)] = tr32(i);
4570 } else
4571 tg3_dump_legacy_regs(tp, regs);
4572
4573 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4574 if (!regs[i + 0] && !regs[i + 1] &&
4575 !regs[i + 2] && !regs[i + 3])
4576 continue;
4577
4578 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4579 i * 4,
4580 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4581 }
4582
4583 kfree(regs);
4584
4585 for (i = 0; i < tp->irq_cnt; i++) {
4586 struct tg3_napi *tnapi = &tp->napi[i];
4587
4588 /* SW status block */
4589 netdev_err(tp->dev,
4590 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4591 i,
4592 tnapi->hw_status->status,
4593 tnapi->hw_status->status_tag,
4594 tnapi->hw_status->rx_jumbo_consumer,
4595 tnapi->hw_status->rx_consumer,
4596 tnapi->hw_status->rx_mini_consumer,
4597 tnapi->hw_status->idx[0].rx_producer,
4598 tnapi->hw_status->idx[0].tx_consumer);
4599
4600 netdev_err(tp->dev,
4601 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4602 i,
4603 tnapi->last_tag, tnapi->last_irq_tag,
4604 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4605 tnapi->rx_rcb_ptr,
4606 tnapi->prodring.rx_std_prod_idx,
4607 tnapi->prodring.rx_std_cons_idx,
4608 tnapi->prodring.rx_jmb_prod_idx,
4609 tnapi->prodring.rx_jmb_cons_idx);
4610 }
4611 }
4612
4613 /* This is called whenever we suspect that the system chipset is re-
4614 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4615 * is bogus tx completions. We try to recover by setting the
4616 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4617 * in the workqueue.
4618 */
4619 static void tg3_tx_recover(struct tg3 *tp)
4620 {
4621 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4622 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4623
4624 netdev_warn(tp->dev,
4625 "The system may be re-ordering memory-mapped I/O "
4626 "cycles to the network device, attempting to recover. "
4627 "Please report the problem to the driver maintainer "
4628 "and include system chipset information.\n");
4629
4630 spin_lock(&tp->lock);
4631 tg3_flag_set(tp, TX_RECOVERY_PENDING);
4632 spin_unlock(&tp->lock);
4633 }
4634
4635 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4636 {
4637 /* Tell compiler to fetch tx indices from memory. */
4638 barrier();
4639 return tnapi->tx_pending -
4640 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4641 }
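/* Editor's sketch (not driver code): tg3_tx_avail() relies on
 * TG3_TX_RING_SIZE being a power of two, so the masked difference stays
 * correct even when tx_prod has wrapped past tx_cons in u32 arithmetic.
 * A minimal standalone version with a hypothetical 512-entry ring:
 */
static inline u32 tg3_ring_space_sketch(u32 pending, u32 prod, u32 cons)
{
	const u32 mask = 512 - 1;	/* assumed power-of-two ring size */

	/* e.g. prod = 3, cons = 0xfffffffe: (prod - cons) wraps to 5,
	 * and masking yields the true in-flight count of 5.
	 */
	return pending - ((prod - cons) & mask);
}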
4642
4643 /* Tigon3 never reports partial packet sends. So we do not
4644 * need special logic to handle SKBs that have not had all
4645 * of their frags sent yet, like SunGEM does.
4646 */
4647 static void tg3_tx(struct tg3_napi *tnapi)
4648 {
4649 struct tg3 *tp = tnapi->tp;
4650 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4651 u32 sw_idx = tnapi->tx_cons;
4652 struct netdev_queue *txq;
4653 int index = tnapi - tp->napi;
4654
4655 if (tg3_flag(tp, ENABLE_TSS))
4656 index--;
4657
4658 txq = netdev_get_tx_queue(tp->dev, index);
4659
4660 while (sw_idx != hw_idx) {
4661 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4662 struct sk_buff *skb = ri->skb;
4663 int i, tx_bug = 0;
4664
4665 if (unlikely(skb == NULL)) {
4666 tg3_tx_recover(tp);
4667 return;
4668 }
4669
4670 pci_unmap_single(tp->pdev,
4671 dma_unmap_addr(ri, mapping),
4672 skb_headlen(skb),
4673 PCI_DMA_TODEVICE);
4674
4675 ri->skb = NULL;
4676
4677 sw_idx = NEXT_TX(sw_idx);
4678
4679 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4680 ri = &tnapi->tx_buffers[sw_idx];
4681 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4682 tx_bug = 1;
4683
4684 pci_unmap_page(tp->pdev,
4685 dma_unmap_addr(ri, mapping),
4686 skb_shinfo(skb)->frags[i].size,
4687 PCI_DMA_TODEVICE);
4688 sw_idx = NEXT_TX(sw_idx);
4689 }
4690
4691 dev_kfree_skb(skb);
4692
4693 if (unlikely(tx_bug)) {
4694 tg3_tx_recover(tp);
4695 return;
4696 }
4697 }
4698
4699 tnapi->tx_cons = sw_idx;
4700
4701 /* Need to make the tx_cons update visible to tg3_start_xmit()
4702 * before checking for netif_queue_stopped(). Without the
4703 * memory barrier, there is a small possibility that tg3_start_xmit()
4704 * will miss it and cause the queue to be stopped forever.
4705 */
4706 smp_mb();
4707
4708 if (unlikely(netif_tx_queue_stopped(txq) &&
4709 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4710 __netif_tx_lock(txq, smp_processor_id());
4711 if (netif_tx_queue_stopped(txq) &&
4712 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4713 netif_tx_wake_queue(txq);
4714 __netif_tx_unlock(txq);
4715 }
4716 }
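/* Editor's note: the smp_mb() above pairs with a matching barrier on the
 * transmit side. Each party updates its own state first, then issues a
 * full barrier, then checks the other side's state, so at least one of
 * the two is guaranteed to observe the other's update and the queue
 * cannot stay stopped forever. Schematically (a sketch of the pairing):
 *
 *	xmit path                      completion path (above)
 *	---------                      -----------------------
 *	stop queue                     tnapi->tx_cons = sw_idx;
 *	smp_mb();                      smp_mb();
 *	recheck tg3_tx_avail()         recheck stopped + avail
 *	wake if space freed            wake under the tx queue lock
 */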
4717
4718 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4719 {
4720 if (!ri->skb)
4721 return;
4722
4723 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4724 map_sz, PCI_DMA_FROMDEVICE);
4725 dev_kfree_skb_any(ri->skb);
4726 ri->skb = NULL;
4727 }
4728
4729 /* Returns size of skb allocated or < 0 on error.
4730 *
4731 * We only need to fill in the address because the other members
4732 * of the RX descriptor are invariant, see tg3_init_rings.
4733 *
4734 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4735 * posting buffers we only dirty the first cache line of the RX
4736 * descriptor (containing the address). Whereas for the RX status
4737 * buffers the cpu only reads the last cacheline of the RX descriptor
4738 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4739 */
4740 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4741 u32 opaque_key, u32 dest_idx_unmasked)
4742 {
4743 struct tg3_rx_buffer_desc *desc;
4744 struct ring_info *map;
4745 struct sk_buff *skb;
4746 dma_addr_t mapping;
4747 int skb_size, dest_idx;
4748
4749 switch (opaque_key) {
4750 case RXD_OPAQUE_RING_STD:
4751 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4752 desc = &tpr->rx_std[dest_idx];
4753 map = &tpr->rx_std_buffers[dest_idx];
4754 skb_size = tp->rx_pkt_map_sz;
4755 break;
4756
4757 case RXD_OPAQUE_RING_JUMBO:
4758 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4759 desc = &tpr->rx_jmb[dest_idx].std;
4760 map = &tpr->rx_jmb_buffers[dest_idx];
4761 skb_size = TG3_RX_JMB_MAP_SZ;
4762 break;
4763
4764 default:
4765 return -EINVAL;
4766 }
4767
4768 /* Do not overwrite any of the map or rp information
4769 * until we are sure we can commit to a new buffer.
4770 *
4771 * Callers depend upon this behavior and assume that
4772 * we leave everything unchanged if we fail.
4773 */
4774 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4775 if (skb == NULL)
4776 return -ENOMEM;
4777
4778 skb_reserve(skb, tp->rx_offset);
4779
4780 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4781 PCI_DMA_FROMDEVICE);
4782 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4783 dev_kfree_skb(skb);
4784 return -EIO;
4785 }
4786
4787 map->skb = skb;
4788 dma_unmap_addr_set(map, mapping, mapping);
4789
4790 desc->addr_hi = ((u64)mapping >> 32);
4791 desc->addr_lo = ((u64)mapping & 0xffffffff);
4792
4793 return skb_size;
4794 }
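/* Editor's illustration (not driver code): the BD address writes above
 * split a DMA address into the two 32-bit halves the hardware expects.
 */
static inline void tg3_split_dma_addr_sketch(u64 mapping, u32 *hi, u32 *lo)
{
	*hi = (u32)(mapping >> 32);		/* upper half -> addr_hi */
	*lo = (u32)(mapping & 0xffffffff);	/* lower half -> addr_lo */
}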
4795
4796 /* We only need to move the address over because the other
4797 * members of the RX descriptor are invariant. See notes above
4798 * tg3_alloc_rx_skb for full details.
4799 */
4800 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4801 struct tg3_rx_prodring_set *dpr,
4802 u32 opaque_key, int src_idx,
4803 u32 dest_idx_unmasked)
4804 {
4805 struct tg3 *tp = tnapi->tp;
4806 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4807 struct ring_info *src_map, *dest_map;
4808 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4809 int dest_idx;
4810
4811 switch (opaque_key) {
4812 case RXD_OPAQUE_RING_STD:
4813 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4814 dest_desc = &dpr->rx_std[dest_idx];
4815 dest_map = &dpr->rx_std_buffers[dest_idx];
4816 src_desc = &spr->rx_std[src_idx];
4817 src_map = &spr->rx_std_buffers[src_idx];
4818 break;
4819
4820 case RXD_OPAQUE_RING_JUMBO:
4821 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4822 dest_desc = &dpr->rx_jmb[dest_idx].std;
4823 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4824 src_desc = &spr->rx_jmb[src_idx].std;
4825 src_map = &spr->rx_jmb_buffers[src_idx];
4826 break;
4827
4828 default:
4829 return;
4830 }
4831
4832 dest_map->skb = src_map->skb;
4833 dma_unmap_addr_set(dest_map, mapping,
4834 dma_unmap_addr(src_map, mapping));
4835 dest_desc->addr_hi = src_desc->addr_hi;
4836 dest_desc->addr_lo = src_desc->addr_lo;
4837
4838 /* Ensure that the update to the skb happens after the physical
4839 * addresses have been transferred to the new BD location.
4840 */
4841 smp_wmb();
4842
4843 src_map->skb = NULL;
4844 }
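/* Editor's note: the smp_wmb() in tg3_recycle_rx() is the standard
 * publish pattern -- fill in the new BD's fields first, issue a write
 * barrier, then update the pointer readers test (src_map->skb). Paired
 * with a read barrier on the consumer side, anyone who observes the
 * pointer change is guaranteed to also observe the completed fields.
 */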
4845
4846 /* The RX ring scheme is composed of multiple rings which post fresh
4847 * buffers to the chip, and one special ring the chip uses to report
4848 * status back to the host.
4849 *
4850 * The special ring reports the status of received packets to the
4851 * host. The chip does not write into the original descriptor the
4852 * RX buffer was obtained from. The chip simply takes the original
4853 * descriptor as provided by the host, updates the status and length
4854 * field, then writes this into the next status ring entry.
4855 *
4856 * Each ring the host uses to post buffers to the chip is described
4857 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4858 * it is first placed into the on-chip RAM. Once the packet's length
4859 * is known, the chip walks down the TG3_BDINFO entries to select the
4860 * ring. Each TG3_BDINFO specifies a MAXLEN field, and the first
4861 * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
4862 *
4863 * The "separate ring for rx status" scheme may sound odd, but it makes
4864 * sense from a cache coherency perspective. If only the host writes
4865 * to the buffer post rings, and only the chip writes to the rx status
4866 * rings, then cache lines never move beyond shared-modified state.
4867 * If both the host and chip were to write into the same ring, cache line
4868 * eviction could occur since both entities want it in an exclusive state.
4869 */
4870 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4871 {
4872 struct tg3 *tp = tnapi->tp;
4873 u32 work_mask, rx_std_posted = 0;
4874 u32 std_prod_idx, jmb_prod_idx;
4875 u32 sw_idx = tnapi->rx_rcb_ptr;
4876 u16 hw_idx;
4877 int received;
4878 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4879
4880 hw_idx = *(tnapi->rx_rcb_prod_idx);
4881 /*
4882 * We need to order the read of hw_idx and the read of
4883 * the opaque cookie.
4884 */
4885 rmb();
4886 work_mask = 0;
4887 received = 0;
4888 std_prod_idx = tpr->rx_std_prod_idx;
4889 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4890 while (sw_idx != hw_idx && budget > 0) {
4891 struct ring_info *ri;
4892 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4893 unsigned int len;
4894 struct sk_buff *skb;
4895 dma_addr_t dma_addr;
4896 u32 opaque_key, desc_idx, *post_ptr;
4897
4898 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4899 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4900 if (opaque_key == RXD_OPAQUE_RING_STD) {
4901 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4902 dma_addr = dma_unmap_addr(ri, mapping);
4903 skb = ri->skb;
4904 post_ptr = &std_prod_idx;
4905 rx_std_posted++;
4906 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4907 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4908 dma_addr = dma_unmap_addr(ri, mapping);
4909 skb = ri->skb;
4910 post_ptr = &jmb_prod_idx;
4911 } else
4912 goto next_pkt_nopost;
4913
4914 work_mask |= opaque_key;
4915
4916 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4917 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4918 drop_it:
4919 tg3_recycle_rx(tnapi, tpr, opaque_key,
4920 desc_idx, *post_ptr);
4921 drop_it_no_recycle:
4922 /* Other statistics are tracked by the card. */
4923 tp->rx_dropped++;
4924 goto next_pkt;
4925 }
4926
4927 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4928 ETH_FCS_LEN;
4929
4930 if (len > TG3_RX_COPY_THRESH(tp)) {
4931 int skb_size;
4932
4933 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4934 *post_ptr);
4935 if (skb_size < 0)
4936 goto drop_it;
4937
4938 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4939 PCI_DMA_FROMDEVICE);
4940
4941 /* Ensure that the update to the skb happens
4942 * after the usage of the old DMA mapping.
4943 */
4944 smp_wmb();
4945
4946 ri->skb = NULL;
4947
4948 skb_put(skb, len);
4949 } else {
4950 struct sk_buff *copy_skb;
4951
4952 tg3_recycle_rx(tnapi, tpr, opaque_key,
4953 desc_idx, *post_ptr);
4954
4955 copy_skb = netdev_alloc_skb(tp->dev, len +
4956 TG3_RAW_IP_ALIGN);
4957 if (copy_skb == NULL)
4958 goto drop_it_no_recycle;
4959
4960 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4961 skb_put(copy_skb, len);
4962 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4963 skb_copy_from_linear_data(skb, copy_skb->data, len);
4964 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4965
4966 /* We'll reuse the original ring buffer. */
4967 skb = copy_skb;
4968 }
4969
4970 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4971 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4972 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4973 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4974 skb->ip_summed = CHECKSUM_UNNECESSARY;
4975 else
4976 skb_checksum_none_assert(skb);
4977
4978 skb->protocol = eth_type_trans(skb, tp->dev);
4979
4980 if (len > (tp->dev->mtu + ETH_HLEN) &&
4981 skb->protocol != htons(ETH_P_8021Q)) {
4982 dev_kfree_skb(skb);
4983 goto drop_it_no_recycle;
4984 }
4985
4986 if (desc->type_flags & RXD_FLAG_VLAN &&
4987 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4988 __vlan_hwaccel_put_tag(skb,
4989 desc->err_vlan & RXD_VLAN_MASK);
4990
4991 napi_gro_receive(&tnapi->napi, skb);
4992
4993 received++;
4994 budget--;
4995
4996 next_pkt:
4997 (*post_ptr)++;
4998
4999 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5000 tpr->rx_std_prod_idx = std_prod_idx &
5001 tp->rx_std_ring_mask;
5002 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5003 tpr->rx_std_prod_idx);
5004 work_mask &= ~RXD_OPAQUE_RING_STD;
5005 rx_std_posted = 0;
5006 }
5007 next_pkt_nopost:
5008 sw_idx++;
5009 sw_idx &= tp->rx_ret_ring_mask;
5010
5011 /* Refresh hw_idx to see if there is new work */
5012 if (sw_idx == hw_idx) {
5013 hw_idx = *(tnapi->rx_rcb_prod_idx);
5014 rmb();
5015 }
5016 }
5017
5018 /* ACK the status ring. */
5019 tnapi->rx_rcb_ptr = sw_idx;
5020 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5021
5022 /* Refill RX ring(s). */
5023 if (!tg3_flag(tp, ENABLE_RSS)) {
5024 if (work_mask & RXD_OPAQUE_RING_STD) {
5025 tpr->rx_std_prod_idx = std_prod_idx &
5026 tp->rx_std_ring_mask;
5027 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5028 tpr->rx_std_prod_idx);
5029 }
5030 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5031 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5032 tp->rx_jmb_ring_mask;
5033 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5034 tpr->rx_jmb_prod_idx);
5035 }
5036 mmiowb();
5037 } else if (work_mask) {
5038 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5039 * updated before the producer indices can be updated.
5040 */
5041 smp_wmb();
5042
5043 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5044 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5045
5046 if (tnapi != &tp->napi[1])
5047 napi_schedule(&tp->napi[1].napi);
5048 }
5049
5050 return received;
5051 }
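/* Editor's sketch (hypothetical helper, not driver code) of the
 * producer/consumer handshake tg3_rx() uses on the status ring: take a
 * snapshot of the hardware producer index, then fence that read against
 * the subsequent reads of the ring entries themselves.
 */
static inline u16 tg3_status_idx_sketch(const volatile u16 *hw_prod_idx)
{
	u16 hw_idx = *hw_prod_idx;	/* snapshot the producer index */

	rmb();	/* don't read ring entries ahead of the index snapshot */
	return hw_idx;	/* consume while sw_idx != hw_idx, then re-poll */
}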
5052
5053 static void tg3_poll_link(struct tg3 *tp)
5054 {
5055 /* handle link change and other phy events */
5056 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5057 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5058
5059 if (sblk->status & SD_STATUS_LINK_CHG) {
5060 sblk->status = SD_STATUS_UPDATED |
5061 (sblk->status & ~SD_STATUS_LINK_CHG);
5062 spin_lock(&tp->lock);
5063 if (tg3_flag(tp, USE_PHYLIB)) {
5064 tw32_f(MAC_STATUS,
5065 (MAC_STATUS_SYNC_CHANGED |
5066 MAC_STATUS_CFG_CHANGED |
5067 MAC_STATUS_MI_COMPLETION |
5068 MAC_STATUS_LNKSTATE_CHANGED));
5069 udelay(40);
5070 } else
5071 tg3_setup_phy(tp, 0);
5072 spin_unlock(&tp->lock);
5073 }
5074 }
5075 }
5076
5077 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5078 struct tg3_rx_prodring_set *dpr,
5079 struct tg3_rx_prodring_set *spr)
5080 {
5081 u32 si, di, cpycnt, src_prod_idx;
5082 int i, err = 0;
5083
5084 while (1) {
5085 src_prod_idx = spr->rx_std_prod_idx;
5086
5087 /* Make sure updates to the rx_std_buffers[] entries and the
5088 * standard producer index are seen in the correct order.
5089 */
5090 smp_rmb();
5091
5092 if (spr->rx_std_cons_idx == src_prod_idx)
5093 break;
5094
5095 if (spr->rx_std_cons_idx < src_prod_idx)
5096 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5097 else
5098 cpycnt = tp->rx_std_ring_mask + 1 -
5099 spr->rx_std_cons_idx;
5100
5101 cpycnt = min(cpycnt,
5102 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5103
5104 si = spr->rx_std_cons_idx;
5105 di = dpr->rx_std_prod_idx;
5106
5107 for (i = di; i < di + cpycnt; i++) {
5108 if (dpr->rx_std_buffers[i].skb) {
5109 cpycnt = i - di;
5110 err = -ENOSPC;
5111 break;
5112 }
5113 }
5114
5115 if (!cpycnt)
5116 break;
5117
5118 /* Ensure that updates to the rx_std_buffers ring and the
5119 * shadowed hardware producer ring from tg3_recycle_skb() are
5120 * ordered correctly WRT the skb check above.
5121 */
5122 smp_rmb();
5123
5124 memcpy(&dpr->rx_std_buffers[di],
5125 &spr->rx_std_buffers[si],
5126 cpycnt * sizeof(struct ring_info));
5127
5128 for (i = 0; i < cpycnt; i++, di++, si++) {
5129 struct tg3_rx_buffer_desc *sbd, *dbd;
5130 sbd = &spr->rx_std[si];
5131 dbd = &dpr->rx_std[di];
5132 dbd->addr_hi = sbd->addr_hi;
5133 dbd->addr_lo = sbd->addr_lo;
5134 }
5135
5136 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5137 tp->rx_std_ring_mask;
5138 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5139 tp->rx_std_ring_mask;
5140 }
5141
5142 while (1) {
5143 src_prod_idx = spr->rx_jmb_prod_idx;
5144
5145 /* Make sure updates to the rx_jmb_buffers[] entries and
5146 * the jumbo producer index are seen in the correct order.
5147 */
5148 smp_rmb();
5149
5150 if (spr->rx_jmb_cons_idx == src_prod_idx)
5151 break;
5152
5153 if (spr->rx_jmb_cons_idx < src_prod_idx)
5154 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5155 else
5156 cpycnt = tp->rx_jmb_ring_mask + 1 -
5157 spr->rx_jmb_cons_idx;
5158
5159 cpycnt = min(cpycnt,
5160 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5161
5162 si = spr->rx_jmb_cons_idx;
5163 di = dpr->rx_jmb_prod_idx;
5164
5165 for (i = di; i < di + cpycnt; i++) {
5166 if (dpr->rx_jmb_buffers[i].skb) {
5167 cpycnt = i - di;
5168 err = -ENOSPC;
5169 break;
5170 }
5171 }
5172
5173 if (!cpycnt)
5174 break;
5175
5176 /* Ensure that updates to the rx_jmb_buffers ring and the
5177 * shadowed hardware producer ring from tg3_recycle_skb() are
5178 * ordered correctly WRT the skb check above.
5179 */
5180 smp_rmb();
5181
5182 memcpy(&dpr->rx_jmb_buffers[di],
5183 &spr->rx_jmb_buffers[si],
5184 cpycnt * sizeof(struct ring_info));
5185
5186 for (i = 0; i < cpycnt; i++, di++, si++) {
5187 struct tg3_rx_buffer_desc *sbd, *dbd;
5188 sbd = &spr->rx_jmb[si].std;
5189 dbd = &dpr->rx_jmb[di].std;
5190 dbd->addr_hi = sbd->addr_hi;
5191 dbd->addr_lo = sbd->addr_lo;
5192 }
5193
5194 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5195 tp->rx_jmb_ring_mask;
5196 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5197 tp->rx_jmb_ring_mask;
5198 }
5199
5200 return err;
5201 }
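/* Editor's note (standalone sketch, not driver code): the cpycnt math
 * above drains a circular ring in at most two contiguous runs per pass,
 * since a wrapped region must be copied in two pieces. With mask equal
 * to size - 1 and size a power of two (callers handle the cons == prod
 * empty case before calling this):
 */
static inline u32 tg3_contig_run_sketch(u32 cons, u32 prod, u32 mask)
{
	if (cons < prod)
		return prod - cons;	/* unwrapped: one straight run */
	return mask + 1 - cons;		/* wrapped: run up to ring end */
}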
5202
5203 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5204 {
5205 struct tg3 *tp = tnapi->tp;
5206
5207 /* run TX completion thread */
5208 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5209 tg3_tx(tnapi);
5210 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5211 return work_done;
5212 }
5213
5214 /* run RX thread, within the bounds set by NAPI.
5215 * All RX "locking" is done by ensuring outside
5216 * code synchronizes with tg3->napi.poll()
5217 */
5218 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5219 work_done += tg3_rx(tnapi, budget - work_done);
5220
5221 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5222 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5223 int i, err = 0;
5224 u32 std_prod_idx = dpr->rx_std_prod_idx;
5225 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5226
5227 for (i = 1; i < tp->irq_cnt; i++)
5228 err |= tg3_rx_prodring_xfer(tp, dpr,
5229 &tp->napi[i].prodring);
5230
5231 wmb();
5232
5233 if (std_prod_idx != dpr->rx_std_prod_idx)
5234 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5235 dpr->rx_std_prod_idx);
5236
5237 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5238 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5239 dpr->rx_jmb_prod_idx);
5240
5241 mmiowb();
5242
5243 if (err)
5244 tw32_f(HOSTCC_MODE, tp->coal_now);
5245 }
5246
5247 return work_done;
5248 }
5249
5250 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5251 {
5252 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5253 struct tg3 *tp = tnapi->tp;
5254 int work_done = 0;
5255 struct tg3_hw_status *sblk = tnapi->hw_status;
5256
5257 while (1) {
5258 work_done = tg3_poll_work(tnapi, work_done, budget);
5259
5260 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5261 goto tx_recovery;
5262
5263 if (unlikely(work_done >= budget))
5264 break;
5265
5266 /* tp->last_tag is used in tg3_int_reenable() below
5267 * to tell the hw how much work has been processed,
5268 * so we must read it before checking for more work.
5269 */
5270 tnapi->last_tag = sblk->status_tag;
5271 tnapi->last_irq_tag = tnapi->last_tag;
5272 rmb();
5273
5274 /* check for RX/TX work to do */
5275 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5276 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5277 napi_complete(napi);
5278 /* Reenable interrupts. */
5279 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5280 mmiowb();
5281 break;
5282 }
5283 }
5284
5285 return work_done;
5286
5287 tx_recovery:
5288 /* work_done is guaranteed to be less than budget. */
5289 napi_complete(napi);
5290 schedule_work(&tp->reset_task);
5291 return work_done;
5292 }
5293
5294 static void tg3_process_error(struct tg3 *tp)
5295 {
5296 u32 val;
5297 bool real_error = false;
5298
5299 if (tg3_flag(tp, ERROR_PROCESSED))
5300 return;
5301
5302 /* Check Flow Attention register */
5303 val = tr32(HOSTCC_FLOW_ATTN);
5304 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5305 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5306 real_error = true;
5307 }
5308
5309 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5310 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5311 real_error = true;
5312 }
5313
5314 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5315 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5316 real_error = true;
5317 }
5318
5319 if (!real_error)
5320 return;
5321
5322 tg3_dump_state(tp);
5323
5324 tg3_flag_set(tp, ERROR_PROCESSED);
5325 schedule_work(&tp->reset_task);
5326 }
5327
5328 static int tg3_poll(struct napi_struct *napi, int budget)
5329 {
5330 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5331 struct tg3 *tp = tnapi->tp;
5332 int work_done = 0;
5333 struct tg3_hw_status *sblk = tnapi->hw_status;
5334
5335 while (1) {
5336 if (sblk->status & SD_STATUS_ERROR)
5337 tg3_process_error(tp);
5338
5339 tg3_poll_link(tp);
5340
5341 work_done = tg3_poll_work(tnapi, work_done, budget);
5342
5343 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5344 goto tx_recovery;
5345
5346 if (unlikely(work_done >= budget))
5347 break;
5348
5349 if (tg3_flag(tp, TAGGED_STATUS)) {
5350 /* tp->last_tag is used in tg3_int_reenable() below
5351 * to tell the hw how much work has been processed,
5352 * so we must read it before checking for more work.
5353 */
5354 tnapi->last_tag = sblk->status_tag;
5355 tnapi->last_irq_tag = tnapi->last_tag;
5356 rmb();
5357 } else
5358 sblk->status &= ~SD_STATUS_UPDATED;
5359
5360 if (likely(!tg3_has_work(tnapi))) {
5361 napi_complete(napi);
5362 tg3_int_reenable(tnapi);
5363 break;
5364 }
5365 }
5366
5367 return work_done;
5368
5369 tx_recovery:
5370 /* work_done is guaranteed to be less than budget. */
5371 napi_complete(napi);
5372 schedule_work(&tp->reset_task);
5373 return work_done;
5374 }
5375
5376 static void tg3_napi_disable(struct tg3 *tp)
5377 {
5378 int i;
5379
5380 for (i = tp->irq_cnt - 1; i >= 0; i--)
5381 napi_disable(&tp->napi[i].napi);
5382 }
5383
5384 static void tg3_napi_enable(struct tg3 *tp)
5385 {
5386 int i;
5387
5388 for (i = 0; i < tp->irq_cnt; i++)
5389 napi_enable(&tp->napi[i].napi);
5390 }
5391
5392 static void tg3_napi_init(struct tg3 *tp)
5393 {
5394 int i;
5395
5396 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5397 for (i = 1; i < tp->irq_cnt; i++)
5398 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5399 }
5400
5401 static void tg3_napi_fini(struct tg3 *tp)
5402 {
5403 int i;
5404
5405 for (i = 0; i < tp->irq_cnt; i++)
5406 netif_napi_del(&tp->napi[i].napi);
5407 }
5408
5409 static inline void tg3_netif_stop(struct tg3 *tp)
5410 {
5411 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5412 tg3_napi_disable(tp);
5413 netif_tx_disable(tp->dev);
5414 }
5415
5416 static inline void tg3_netif_start(struct tg3 *tp)
5417 {
5418 /* NOTE: unconditional netif_tx_wake_all_queues is only
5419 * appropriate so long as all callers are assured to
5420 * have free tx slots (such as after tg3_init_hw)
5421 */
5422 netif_tx_wake_all_queues(tp->dev);
5423
5424 tg3_napi_enable(tp);
5425 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5426 tg3_enable_ints(tp);
5427 }
5428
5429 static void tg3_irq_quiesce(struct tg3 *tp)
5430 {
5431 int i;
5432
5433 BUG_ON(tp->irq_sync);
5434
5435 tp->irq_sync = 1;
5436 smp_mb();
5437
5438 for (i = 0; i < tp->irq_cnt; i++)
5439 synchronize_irq(tp->napi[i].irq_vec);
5440 }
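/* Editor's note: tg3_irq_quiesce() is a two-step fence. Setting
 * tp->irq_sync followed by smp_mb() makes newly arriving handlers bail
 * out through tg3_irq_sync(); synchronize_irq() then waits out any
 * handler already running on each vector, so callers may assume no tg3
 * ISR is in flight afterwards.
 */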
5441
5442 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5443 * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
5444 * This is usually only necessary when shutting down the device.
5446 */
5447 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5448 {
5449 spin_lock_bh(&tp->lock);
5450 if (irq_sync)
5451 tg3_irq_quiesce(tp);
5452 }
5453
5454 static inline void tg3_full_unlock(struct tg3 *tp)
5455 {
5456 spin_unlock_bh(&tp->lock);
5457 }
5458
5459 /* One-shot MSI handler - Chip automatically disables interrupt
5460 * after sending MSI so driver doesn't have to do it.
5461 */
5462 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5463 {
5464 struct tg3_napi *tnapi = dev_id;
5465 struct tg3 *tp = tnapi->tp;
5466
5467 prefetch(tnapi->hw_status);
5468 if (tnapi->rx_rcb)
5469 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5470
5471 if (likely(!tg3_irq_sync(tp)))
5472 napi_schedule(&tnapi->napi);
5473
5474 return IRQ_HANDLED;
5475 }
5476
5477 /* MSI ISR - No need to check for interrupt sharing and no need to
5478 * flush status block and interrupt mailbox. PCI ordering rules
5479 * guarantee that MSI will arrive after the status block.
5480 */
5481 static irqreturn_t tg3_msi(int irq, void *dev_id)
5482 {
5483 struct tg3_napi *tnapi = dev_id;
5484 struct tg3 *tp = tnapi->tp;
5485
5486 prefetch(tnapi->hw_status);
5487 if (tnapi->rx_rcb)
5488 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5489 /*
5490 * Writing any value to intr-mbox-0 clears PCI INTA# and
5491 * chip-internal interrupt pending events.
5492 * Writing non-zero to intr-mbox-0 additionally tells the
5493 * NIC to stop sending us irqs, engaging "in-intr-handler"
5494 * event coalescing.
5495 */
5496 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5497 if (likely(!tg3_irq_sync(tp)))
5498 napi_schedule(&tnapi->napi);
5499
5500 return IRQ_RETVAL(1);
5501 }
5502
5503 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5504 {
5505 struct tg3_napi *tnapi = dev_id;
5506 struct tg3 *tp = tnapi->tp;
5507 struct tg3_hw_status *sblk = tnapi->hw_status;
5508 unsigned int handled = 1;
5509
5510 /* In INTx mode, it is possible for the interrupt to arrive at
5511 * the CPU before the status block posted prior to the interrupt
5512 * is visible. Reading the PCI State register will confirm whether
5513 * the interrupt is ours and will flush the status block.
5514 */
5515 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5516 if (tg3_flag(tp, CHIP_RESETTING) ||
5517 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5518 handled = 0;
5519 goto out;
5520 }
5521 }
5522
5523 /*
5524 * Writing any value to intr-mbox-0 clears PCI INTA# and
5525 * chip-internal interrupt pending events.
5526 * Writing non-zero to intr-mbox-0 additionally tells the
5527 * NIC to stop sending us irqs, engaging "in-intr-handler"
5528 * event coalescing.
5529 *
5530 * Flush the mailbox to de-assert the IRQ immediately to prevent
5531 * spurious interrupts. The flush impacts performance but
5532 * excessive spurious interrupts can be worse in some cases.
5533 */
5534 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5535 if (tg3_irq_sync(tp))
5536 goto out;
5537 sblk->status &= ~SD_STATUS_UPDATED;
5538 if (likely(tg3_has_work(tnapi))) {
5539 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5540 napi_schedule(&tnapi->napi);
5541 } else {
5542 /* No work, shared interrupt perhaps? Re-enable
5543 * interrupts, and flush that PCI write
5544 */
5545 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5546 0x00000000);
5547 }
5548 out:
5549 return IRQ_RETVAL(handled);
5550 }
5551
5552 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5553 {
5554 struct tg3_napi *tnapi = dev_id;
5555 struct tg3 *tp = tnapi->tp;
5556 struct tg3_hw_status *sblk = tnapi->hw_status;
5557 unsigned int handled = 1;
5558
5559 /* In INTx mode, it is possible for the interrupt to arrive at
5560 * the CPU before the status block posted prior to the interrupt
5561 * is visible. Reading the PCI State register will confirm whether
5562 * the interrupt is ours and will flush the status block.
5563 */
5564 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5565 if (tg3_flag(tp, CHIP_RESETTING) ||
5566 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5567 handled = 0;
5568 goto out;
5569 }
5570 }
5571
5572 /*
5573 * Writing any value to intr-mbox-0 clears PCI INTA# and
5574 * chip-internal interrupt pending events.
5575 * Writing non-zero to intr-mbox-0 additionally tells the
5576 * NIC to stop sending us irqs, engaging "in-intr-handler"
5577 * event coalescing.
5578 *
5579 * Flush the mailbox to de-assert the IRQ immediately to prevent
5580 * spurious interrupts. The flush impacts performance but
5581 * excessive spurious interrupts can be worse in some cases.
5582 */
5583 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5584
5585 /*
5586 * In a shared interrupt configuration, sometimes other devices'
5587 * interrupts will scream. We record the current status tag here
5588 * so that the above check can report that the screaming interrupts
5589 * are unhandled. Eventually they will be silenced.
5590 */
5591 tnapi->last_irq_tag = sblk->status_tag;
5592
5593 if (tg3_irq_sync(tp))
5594 goto out;
5595
5596 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5597
5598 napi_schedule(&tnapi->napi);
5599
5600 out:
5601 return IRQ_RETVAL(handled);
5602 }
5603
5604 /* ISR for interrupt test */
5605 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5606 {
5607 struct tg3_napi *tnapi = dev_id;
5608 struct tg3 *tp = tnapi->tp;
5609 struct tg3_hw_status *sblk = tnapi->hw_status;
5610
5611 if ((sblk->status & SD_STATUS_UPDATED) ||
5612 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5613 tg3_disable_ints(tp);
5614 return IRQ_RETVAL(1);
5615 }
5616 return IRQ_RETVAL(0);
5617 }
5618
5619 static int tg3_init_hw(struct tg3 *, int);
5620 static int tg3_halt(struct tg3 *, int, int);
5621
5622 /* Restart hardware after configuration changes, self-test, etc.
5623 * Invoked with tp->lock held.
5624 */
5625 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5626 __releases(tp->lock)
5627 __acquires(tp->lock)
5628 {
5629 int err;
5630
5631 err = tg3_init_hw(tp, reset_phy);
5632 if (err) {
5633 netdev_err(tp->dev,
5634 "Failed to re-initialize device, aborting\n");
5635 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5636 tg3_full_unlock(tp);
5637 del_timer_sync(&tp->timer);
5638 tp->irq_sync = 0;
5639 tg3_napi_enable(tp);
5640 dev_close(tp->dev);
5641 tg3_full_lock(tp, 0);
5642 }
5643 return err;
5644 }
5645
5646 #ifdef CONFIG_NET_POLL_CONTROLLER
5647 static void tg3_poll_controller(struct net_device *dev)
5648 {
5649 int i;
5650 struct tg3 *tp = netdev_priv(dev);
5651
5652 for (i = 0; i < tp->irq_cnt; i++)
5653 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5654 }
5655 #endif
5656
5657 static void tg3_reset_task(struct work_struct *work)
5658 {
5659 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5660 int err;
5661 unsigned int restart_timer;
5662
5663 tg3_full_lock(tp, 0);
5664
5665 if (!netif_running(tp->dev)) {
5666 tg3_full_unlock(tp);
5667 return;
5668 }
5669
5670 tg3_full_unlock(tp);
5671
5672 tg3_phy_stop(tp);
5673
5674 tg3_netif_stop(tp);
5675
5676 tg3_full_lock(tp, 1);
5677
5678 restart_timer = tg3_flag(tp, RESTART_TIMER);
5679 tg3_flag_clear(tp, RESTART_TIMER);
5680
5681 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5682 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5683 tp->write32_rx_mbox = tg3_write_flush_reg32;
5684 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5685 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5686 }
5687
5688 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5689 err = tg3_init_hw(tp, 1);
5690 if (err)
5691 goto out;
5692
5693 tg3_netif_start(tp);
5694
5695 if (restart_timer)
5696 mod_timer(&tp->timer, jiffies + 1);
5697
5698 out:
5699 tg3_full_unlock(tp);
5700
5701 if (!err)
5702 tg3_phy_start(tp);
5703 }
5704
5705 static void tg3_tx_timeout(struct net_device *dev)
5706 {
5707 struct tg3 *tp = netdev_priv(dev);
5708
5709 if (netif_msg_tx_err(tp)) {
5710 netdev_err(dev, "transmit timed out, resetting\n");
5711 tg3_dump_state(tp);
5712 }
5713
5714 schedule_work(&tp->reset_task);
5715 }
5716
5717 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5718 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5719 {
5720 u32 base = (u32) mapping & 0xffffffff;
5721
5722 return (base > 0xffffdcc0) && (base + len + 8 < base);
5723 }
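/* Editor's worked example for tg3_4g_overflow_test(): with
 * base = 0xffffe000 and len = 0x3000, base + len + 8 computes to
 * 0x100001008, which truncates to 0x1008 in 32-bit arithmetic -- less
 * than base, so the buffer crosses a 4GB boundary and is flagged. The
 * base > 0xffffdcc0 pre-check cheaply rejects bases that are too far
 * from the boundary for any frame the driver posts (~9 KB jumbo at
 * most) to reach it.
 */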
5724
5725 /* Test for DMA addresses > 40-bit */
5726 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5727 int len)
5728 {
5729 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5730 if (tg3_flag(tp, 40BIT_DMA_BUG))
5731 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5732 return 0;
5733 #else
5734 return 0;
5735 #endif
5736 }
5737
5738 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5739
5740 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5741 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5742 struct sk_buff *skb, u32 last_plus_one,
5743 u32 *start, u32 base_flags, u32 mss)
5744 {
5745 struct tg3 *tp = tnapi->tp;
5746 struct sk_buff *new_skb;
5747 dma_addr_t new_addr = 0;
5748 u32 entry = *start;
5749 int i, ret = 0;
5750
5751 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5752 new_skb = skb_copy(skb, GFP_ATOMIC);
5753 else {
5754 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5755
5756 new_skb = skb_copy_expand(skb,
5757 skb_headroom(skb) + more_headroom,
5758 skb_tailroom(skb), GFP_ATOMIC);
5759 }
5760
5761 if (!new_skb) {
5762 ret = -1;
5763 } else {
5764 /* New SKB is guaranteed to be linear. */
5765 entry = *start;
5766 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5767 PCI_DMA_TODEVICE);
5768 /* Make sure the mapping succeeded */
5769 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5770 ret = -1;
5771 dev_kfree_skb(new_skb);
5772 new_skb = NULL;
5773
5774 /* Make sure new skb does not cross any 4G boundaries.
5775 * Drop the packet if it does.
5776 */
5777 } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
5778 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5779 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5780 PCI_DMA_TODEVICE);
5781 ret = -1;
5782 dev_kfree_skb(new_skb);
5783 new_skb = NULL;
5784 } else {
5785 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5786 base_flags, 1 | (mss << 1));
5787 *start = NEXT_TX(entry);
5788 }
5789 }
5790
5791 /* Now clean up the sw ring entries. */
5792 i = 0;
5793 while (entry != last_plus_one) {
5794 int len;
5795
5796 if (i == 0)
5797 len = skb_headlen(skb);
5798 else
5799 len = skb_shinfo(skb)->frags[i-1].size;
5800
5801 pci_unmap_single(tp->pdev,
5802 dma_unmap_addr(&tnapi->tx_buffers[entry],
5803 mapping),
5804 len, PCI_DMA_TODEVICE);
5805 if (i == 0) {
5806 tnapi->tx_buffers[entry].skb = new_skb;
5807 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5808 new_addr);
5809 } else {
5810 tnapi->tx_buffers[entry].skb = NULL;
5811 }
5812 entry = NEXT_TX(entry);
5813 i++;
5814 }
5815
5816 dev_kfree_skb(skb);
5817
5818 return ret;
5819 }
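/* The caller (tg3_start_xmit_dma_bug() below) invokes this after all
 * fragments are queued and would_hit_hwbug is set: the packet is
 * re-sent from a freshly allocated, linear skb so that no descriptor
 * crosses an offending boundary, and the old ring entries are undone.
 */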
5820
5821 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5822 dma_addr_t mapping, int len, u32 flags,
5823 u32 mss_and_is_end)
5824 {
5825 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5826 int is_end = (mss_and_is_end & 0x1);
5827 u32 mss = (mss_and_is_end >> 1);
5828 u32 vlan_tag = 0;
5829
5830 if (is_end)
5831 flags |= TXD_FLAG_END;
5832 if (flags & TXD_FLAG_VLAN) {
5833 vlan_tag = flags >> 16;
5834 flags &= 0xffff;
5835 }
5836 vlan_tag |= (mss << TXD_MSS_SHIFT);
5837
5838 txd->addr_hi = ((u64) mapping >> 32);
5839 txd->addr_lo = ((u64) mapping & 0xffffffff);
5840 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5841 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5842 }
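/* Callers pack the END marker into bit 0 of mss_and_is_end and the MSS
 * above it, e.g. (i == last) | (mss << 1), so only the final fragment
 * of a chain gets TXD_FLAG_END set on its descriptor.
 */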
5843
5844 /* hard_start_xmit for devices that don't have any bugs and
5845 * support TG3_FLAG_HW_TSO_2 and TG3_FLAG_HW_TSO_3 only.
5846 */
5847 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5848 struct net_device *dev)
5849 {
5850 struct tg3 *tp = netdev_priv(dev);
5851 u32 len, entry, base_flags, mss;
5852 dma_addr_t mapping;
5853 struct tg3_napi *tnapi;
5854 struct netdev_queue *txq;
5855 unsigned int i, last;
5856
5857 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5858 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5859 if (tg3_flag(tp, ENABLE_TSS))
5860 tnapi++;
5861
5862 /* We are running in BH disabled context with netif_tx_lock
5863 * and TX reclaim runs via tp->napi.poll inside of a software
5864 * interrupt. Furthermore, IRQ processing runs lockless so we have
5865 * no IRQ context deadlocks to worry about either. Rejoice!
5866 */
5867 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5868 if (!netif_tx_queue_stopped(txq)) {
5869 netif_tx_stop_queue(txq);
5870
5871 /* This is a hard error, log it. */
5872 netdev_err(dev,
5873 "BUG! Tx Ring full when queue awake!\n");
5874 }
5875 return NETDEV_TX_BUSY;
5876 }
5877
5878 entry = tnapi->tx_prod;
5879 base_flags = 0;
5880 mss = skb_shinfo(skb)->gso_size;
5881 if (mss) {
5882 int tcp_opt_len, ip_tcp_len;
5883 u32 hdrlen;
5884
5885 if (skb_header_cloned(skb) &&
5886 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5887 dev_kfree_skb(skb);
5888 goto out_unlock;
5889 }
5890
5891 if (skb_is_gso_v6(skb)) {
5892 hdrlen = skb_headlen(skb) - ETH_HLEN;
5893 } else {
5894 struct iphdr *iph = ip_hdr(skb);
5895
5896 tcp_opt_len = tcp_optlen(skb);
5897 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5898
5899 iph->check = 0;
5900 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5901 hdrlen = ip_tcp_len + tcp_opt_len;
5902 }
5903
5904 if (tg3_flag(tp, HW_TSO_3)) {
5905 mss |= (hdrlen & 0xc) << 12;
5906 if (hdrlen & 0x10)
5907 base_flags |= 0x00000010;
5908 base_flags |= (hdrlen & 0x3e0) << 5;
5909 } else
5910 mss |= hdrlen << 9;
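		/* On HW_TSO_3 parts the header length is evidently scattered
		 * across spare bits of mss and base_flags as programmed
		 * above; older TSO hardware takes it whole at bit 9 of mss.
		 */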
5911
5912 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5913 TXD_FLAG_CPU_POST_DMA);
5914
5915 tcp_hdr(skb)->check = 0;
5916
5917 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5918 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5919 }
5920
5921 if (vlan_tx_tag_present(skb))
5922 base_flags |= (TXD_FLAG_VLAN |
5923 (vlan_tx_tag_get(skb) << 16));
5924
5925 len = skb_headlen(skb);
5926
5927 /* Queue skb data, a.k.a. the main skb fragment. */
5928 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5929 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5930 dev_kfree_skb(skb);
5931 goto out_unlock;
5932 }
5933
5934 tnapi->tx_buffers[entry].skb = skb;
5935 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5936
5937 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
5938 !mss && skb->len > VLAN_ETH_FRAME_LEN)
5939 base_flags |= TXD_FLAG_JMB_PKT;
5940
5941 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5942 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5943
5944 entry = NEXT_TX(entry);
5945
5946 /* Now loop through additional data fragments, and queue them. */
5947 if (skb_shinfo(skb)->nr_frags > 0) {
5948 last = skb_shinfo(skb)->nr_frags - 1;
5949 for (i = 0; i <= last; i++) {
5950 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5951
5952 len = frag->size;
5953 mapping = pci_map_page(tp->pdev,
5954 frag->page,
5955 frag->page_offset,
5956 len, PCI_DMA_TODEVICE);
5957 if (pci_dma_mapping_error(tp->pdev, mapping))
5958 goto dma_error;
5959
5960 tnapi->tx_buffers[entry].skb = NULL;
5961 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5962 mapping);
5963
5964 tg3_set_txd(tnapi, entry, mapping, len,
5965 base_flags, (i == last) | (mss << 1));
5966
5967 entry = NEXT_TX(entry);
5968 }
5969 }
5970
5971 /* Packets are ready, update Tx producer idx local and on card. */
5972 tw32_tx_mbox(tnapi->prodmbox, entry);
5973
5974 tnapi->tx_prod = entry;
5975 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5976 netif_tx_stop_queue(txq);
5977
5978 		/* netif_tx_stop_queue() must be done before checking
5979 		 * tx index in tg3_tx_avail() below, because in
5980 		 * tg3_tx(), we update tx index before checking for
5981 		 * netif_tx_queue_stopped().
5982 		 */
5983 smp_mb();
5984 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5985 netif_tx_wake_queue(txq);
5986 }
5987
5988 out_unlock:
5989 mmiowb();
5990
5991 return NETDEV_TX_OK;
5992
5993 dma_error:
5994 last = i;
5995 entry = tnapi->tx_prod;
5996 tnapi->tx_buffers[entry].skb = NULL;
5997 pci_unmap_single(tp->pdev,
5998 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5999 skb_headlen(skb),
6000 PCI_DMA_TODEVICE);
6001 for (i = 0; i <= last; i++) {
6002 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6003 entry = NEXT_TX(entry);
6004
6005 pci_unmap_page(tp->pdev,
6006 dma_unmap_addr(&tnapi->tx_buffers[entry],
6007 mapping),
6008 frag->size, PCI_DMA_TODEVICE);
6009 }
6010
6011 dev_kfree_skb(skb);
6012 return NETDEV_TX_OK;
6013 }
6014
6015 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
6016 struct net_device *);
6017
6018 /* Use GSO to work around a rare TSO bug that may be triggered when the
6019 * TSO header is greater than 80 bytes.
6020 */
6021 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6022 {
6023 struct sk_buff *segs, *nskb;
6024 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6025
6026 	/* Estimate the worst-case descriptor count: roughly three per GSO segment */
6027 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6028 netif_stop_queue(tp->dev);
6029
6030 		/* netif_stop_queue() must be done before checking
6031 		 * tx index in tg3_tx_avail() below, because in
6032 		 * tg3_tx(), we update tx index before checking for
6033 		 * netif_tx_queue_stopped().
6034 		 */
6035 smp_mb();
6036 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6037 return NETDEV_TX_BUSY;
6038
6039 netif_wake_queue(tp->dev);
6040 }
6041
6042 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6043 if (IS_ERR(segs))
6044 goto tg3_tso_bug_end;
6045
6046 do {
6047 nskb = segs;
6048 segs = segs->next;
6049 nskb->next = NULL;
6050 tg3_start_xmit_dma_bug(nskb, tp->dev);
6051 } while (segs);
6052
6053 tg3_tso_bug_end:
6054 dev_kfree_skb(skb);
6055
6056 return NETDEV_TX_OK;
6057 }
6058
6059 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6060 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6061 */
6062 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
6063 struct net_device *dev)
6064 {
6065 struct tg3 *tp = netdev_priv(dev);
6066 u32 len, entry, base_flags, mss;
6067 int would_hit_hwbug;
6068 dma_addr_t mapping;
6069 struct tg3_napi *tnapi;
6070 struct netdev_queue *txq;
6071 unsigned int i, last;
6072
6073 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6074 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6075 if (tg3_flag(tp, ENABLE_TSS))
6076 tnapi++;
6077
6078 /* We are running in BH disabled context with netif_tx_lock
6079 * and TX reclaim runs via tp->napi.poll inside of a software
6080 * interrupt. Furthermore, IRQ processing runs lockless so we have
6081 * no IRQ context deadlocks to worry about either. Rejoice!
6082 */
6083 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
6084 if (!netif_tx_queue_stopped(txq)) {
6085 netif_tx_stop_queue(txq);
6086
6087 /* This is a hard error, log it. */
6088 netdev_err(dev,
6089 "BUG! Tx Ring full when queue awake!\n");
6090 }
6091 return NETDEV_TX_BUSY;
6092 }
6093
6094 entry = tnapi->tx_prod;
6095 base_flags = 0;
6096 if (skb->ip_summed == CHECKSUM_PARTIAL)
6097 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6098
6099 mss = skb_shinfo(skb)->gso_size;
6100 if (mss) {
6101 struct iphdr *iph;
6102 u32 tcp_opt_len, hdr_len;
6103
6104 if (skb_header_cloned(skb) &&
6105 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
6106 dev_kfree_skb(skb);
6107 goto out_unlock;
6108 }
6109
6110 iph = ip_hdr(skb);
6111 tcp_opt_len = tcp_optlen(skb);
6112
6113 if (skb_is_gso_v6(skb)) {
6114 hdr_len = skb_headlen(skb) - ETH_HLEN;
6115 } else {
6116 u32 ip_tcp_len;
6117
6118 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6119 hdr_len = ip_tcp_len + tcp_opt_len;
6120
6121 iph->check = 0;
6122 iph->tot_len = htons(mss + hdr_len);
6123 }
6124
6125 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6126 tg3_flag(tp, TSO_BUG))
6127 return tg3_tso_bug(tp, skb);
6128
6129 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6130 TXD_FLAG_CPU_POST_DMA);
6131
6132 if (tg3_flag(tp, HW_TSO_1) ||
6133 tg3_flag(tp, HW_TSO_2) ||
6134 tg3_flag(tp, HW_TSO_3)) {
6135 tcp_hdr(skb)->check = 0;
6136 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6137 } else
6138 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6139 iph->daddr, 0,
6140 IPPROTO_TCP,
6141 0);
6142
6143 if (tg3_flag(tp, HW_TSO_3)) {
6144 mss |= (hdr_len & 0xc) << 12;
6145 if (hdr_len & 0x10)
6146 base_flags |= 0x00000010;
6147 base_flags |= (hdr_len & 0x3e0) << 5;
6148 } else if (tg3_flag(tp, HW_TSO_2))
6149 mss |= hdr_len << 9;
6150 else if (tg3_flag(tp, HW_TSO_1) ||
6151 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6152 if (tcp_opt_len || iph->ihl > 5) {
6153 int tsflags;
6154
6155 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6156 mss |= (tsflags << 11);
6157 }
6158 } else {
6159 if (tcp_opt_len || iph->ihl > 5) {
6160 int tsflags;
6161
6162 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6163 base_flags |= tsflags << 12;
6164 }
6165 }
6166 }
6167
6168 if (vlan_tx_tag_present(skb))
6169 base_flags |= (TXD_FLAG_VLAN |
6170 (vlan_tx_tag_get(skb) << 16));
6171
6172 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6173 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6174 base_flags |= TXD_FLAG_JMB_PKT;
6175
6176 len = skb_headlen(skb);
6177
6178 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6179 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6180 dev_kfree_skb(skb);
6181 goto out_unlock;
6182 }
6183
6184 tnapi->tx_buffers[entry].skb = skb;
6185 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6186
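	/* Check whether this mapping trips any of the DMA errata carried
	 * by this chip; if any test fires, the packet is rebuilt through
	 * tigon3_dma_hwbug_workaround() once all fragments are queued.
	 */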
6187 would_hit_hwbug = 0;
6188
6189 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6190 would_hit_hwbug = 1;
6191
6192 if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6193 tg3_4g_overflow_test(mapping, len))
6194 would_hit_hwbug = 1;
6195
6196 if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6197 tg3_40bit_overflow_test(tp, mapping, len))
6198 would_hit_hwbug = 1;
6199
6200 if (tg3_flag(tp, 5701_DMA_BUG))
6201 would_hit_hwbug = 1;
6202
6203 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6204 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6205
6206 entry = NEXT_TX(entry);
6207
6208 /* Now loop through additional data fragments, and queue them. */
6209 if (skb_shinfo(skb)->nr_frags > 0) {
6210 last = skb_shinfo(skb)->nr_frags - 1;
6211 for (i = 0; i <= last; i++) {
6212 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6213
6214 len = frag->size;
6215 mapping = pci_map_page(tp->pdev,
6216 frag->page,
6217 frag->page_offset,
6218 len, PCI_DMA_TODEVICE);
6219
6220 tnapi->tx_buffers[entry].skb = NULL;
6221 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6222 mapping);
6223 if (pci_dma_mapping_error(tp->pdev, mapping))
6224 goto dma_error;
6225
6226 if (tg3_flag(tp, SHORT_DMA_BUG) &&
6227 len <= 8)
6228 would_hit_hwbug = 1;
6229
6230 if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6231 tg3_4g_overflow_test(mapping, len))
6232 would_hit_hwbug = 1;
6233
6234 if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6235 tg3_40bit_overflow_test(tp, mapping, len))
6236 would_hit_hwbug = 1;
6237
6238 if (tg3_flag(tp, HW_TSO_1) ||
6239 tg3_flag(tp, HW_TSO_2) ||
6240 tg3_flag(tp, HW_TSO_3))
6241 tg3_set_txd(tnapi, entry, mapping, len,
6242 base_flags, (i == last)|(mss << 1));
6243 else
6244 tg3_set_txd(tnapi, entry, mapping, len,
6245 base_flags, (i == last));
6246
6247 entry = NEXT_TX(entry);
6248 }
6249 }
6250
6251 if (would_hit_hwbug) {
6252 u32 last_plus_one = entry;
6253 u32 start;
6254
6255 start = entry - 1 - skb_shinfo(skb)->nr_frags;
6256 start &= (TG3_TX_RING_SIZE - 1);
6257
6258 /* If the workaround fails due to memory/mapping
6259 * failure, silently drop this packet.
6260 */
6261 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
6262 &start, base_flags, mss))
6263 goto out_unlock;
6264
6265 entry = start;
6266 }
6267
6268 /* Packets are ready, update Tx producer idx local and on card. */
6269 tw32_tx_mbox(tnapi->prodmbox, entry);
6270
6271 tnapi->tx_prod = entry;
6272 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6273 netif_tx_stop_queue(txq);
6274
6275 		/* netif_tx_stop_queue() must be done before checking
6276 		 * tx index in tg3_tx_avail() below, because in
6277 		 * tg3_tx(), we update tx index before checking for
6278 		 * netif_tx_queue_stopped().
6279 		 */
6280 smp_mb();
6281 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6282 netif_tx_wake_queue(txq);
6283 }
6284
6285 out_unlock:
6286 mmiowb();
6287
6288 return NETDEV_TX_OK;
6289
6290 dma_error:
6291 last = i;
6292 entry = tnapi->tx_prod;
6293 tnapi->tx_buffers[entry].skb = NULL;
6294 pci_unmap_single(tp->pdev,
6295 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
6296 skb_headlen(skb),
6297 PCI_DMA_TODEVICE);
6298 for (i = 0; i <= last; i++) {
6299 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6300 entry = NEXT_TX(entry);
6301
6302 pci_unmap_page(tp->pdev,
6303 dma_unmap_addr(&tnapi->tx_buffers[entry],
6304 mapping),
6305 frag->size, PCI_DMA_TODEVICE);
6306 }
6307
6308 dev_kfree_skb(skb);
6309 return NETDEV_TX_OK;
6310 }
6311
6312 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6313 {
6314 struct tg3 *tp = netdev_priv(dev);
6315
6316 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6317 features &= ~NETIF_F_ALL_TSO;
6318
6319 return features;
6320 }
6321
6322 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6323 int new_mtu)
6324 {
6325 dev->mtu = new_mtu;
6326
6327 if (new_mtu > ETH_DATA_LEN) {
6328 if (tg3_flag(tp, 5780_CLASS)) {
6329 netdev_update_features(dev);
6330 tg3_flag_clear(tp, TSO_CAPABLE);
6331 } else {
6332 tg3_flag_set(tp, JUMBO_RING_ENABLE);
6333 }
6334 } else {
6335 if (tg3_flag(tp, 5780_CLASS)) {
6336 tg3_flag_set(tp, TSO_CAPABLE);
6337 netdev_update_features(dev);
6338 }
6339 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6340 }
6341 }
6342
6343 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6344 {
6345 struct tg3 *tp = netdev_priv(dev);
6346 int err;
6347
6348 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6349 return -EINVAL;
6350
6351 if (!netif_running(dev)) {
6352 		/* We'll just catch it later when the
6353 		 * device is brought up.
6354 		 */
6355 tg3_set_mtu(dev, tp, new_mtu);
6356 return 0;
6357 }
6358
6359 tg3_phy_stop(tp);
6360
6361 tg3_netif_stop(tp);
6362
6363 tg3_full_lock(tp, 1);
6364
6365 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6366
6367 tg3_set_mtu(dev, tp, new_mtu);
6368
6369 err = tg3_restart_hw(tp, 0);
6370
6371 if (!err)
6372 tg3_netif_start(tp);
6373
6374 tg3_full_unlock(tp);
6375
6376 if (!err)
6377 tg3_phy_start(tp);
6378
6379 return err;
6380 }
6381
6382 static void tg3_rx_prodring_free(struct tg3 *tp,
6383 struct tg3_rx_prodring_set *tpr)
6384 {
6385 int i;
6386
6387 if (tpr != &tp->napi[0].prodring) {
6388 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6389 i = (i + 1) & tp->rx_std_ring_mask)
6390 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6391 tp->rx_pkt_map_sz);
6392
6393 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6394 for (i = tpr->rx_jmb_cons_idx;
6395 i != tpr->rx_jmb_prod_idx;
6396 i = (i + 1) & tp->rx_jmb_ring_mask) {
6397 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6398 TG3_RX_JMB_MAP_SZ);
6399 }
6400 }
6401
6402 return;
6403 }
6404
6405 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6406 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6407 tp->rx_pkt_map_sz);
6408
6409 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6410 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6411 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6412 TG3_RX_JMB_MAP_SZ);
6413 }
6414 }
6415
6416 /* Initialize rx rings for packet processing.
6417 *
6418 * The chip has been shut down and the driver detached from
6419  * the network stack, so no interrupts or new tx packets will
6420 * end up in the driver. tp->{tx,}lock are held and thus
6421 * we may not sleep.
6422 */
6423 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6424 struct tg3_rx_prodring_set *tpr)
6425 {
6426 u32 i, rx_pkt_dma_sz;
6427
6428 tpr->rx_std_cons_idx = 0;
6429 tpr->rx_std_prod_idx = 0;
6430 tpr->rx_jmb_cons_idx = 0;
6431 tpr->rx_jmb_prod_idx = 0;
6432
6433 if (tpr != &tp->napi[0].prodring) {
6434 memset(&tpr->rx_std_buffers[0], 0,
6435 TG3_RX_STD_BUFF_RING_SIZE(tp));
6436 if (tpr->rx_jmb_buffers)
6437 memset(&tpr->rx_jmb_buffers[0], 0,
6438 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6439 goto done;
6440 }
6441
6442 /* Zero out all descriptors. */
6443 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6444
6445 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6446 if (tg3_flag(tp, 5780_CLASS) &&
6447 tp->dev->mtu > ETH_DATA_LEN)
6448 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6449 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6450
6451 /* Initialize invariants of the rings, we only set this
6452 * stuff once. This works because the card does not
6453 * write into the rx buffer posting rings.
6454 */
6455 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6456 struct tg3_rx_buffer_desc *rxd;
6457
6458 rxd = &tpr->rx_std[i];
6459 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6460 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6461 rxd->opaque = (RXD_OPAQUE_RING_STD |
6462 (i << RXD_OPAQUE_INDEX_SHIFT));
6463 }
6464
6465 /* Now allocate fresh SKBs for each rx ring. */
6466 for (i = 0; i < tp->rx_pending; i++) {
6467 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6468 netdev_warn(tp->dev,
6469 "Using a smaller RX standard ring. Only "
6470 "%d out of %d buffers were allocated "
6471 "successfully\n", i, tp->rx_pending);
6472 if (i == 0)
6473 goto initfail;
6474 tp->rx_pending = i;
6475 break;
6476 }
6477 }
6478
6479 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6480 goto done;
6481
6482 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6483
6484 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6485 goto done;
6486
6487 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6488 struct tg3_rx_buffer_desc *rxd;
6489
6490 rxd = &tpr->rx_jmb[i].std;
6491 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6492 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6493 RXD_FLAG_JUMBO;
6494 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6495 (i << RXD_OPAQUE_INDEX_SHIFT));
6496 }
6497
6498 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6499 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6500 netdev_warn(tp->dev,
6501 "Using a smaller RX jumbo ring. Only %d "
6502 "out of %d buffers were allocated "
6503 "successfully\n", i, tp->rx_jumbo_pending);
6504 if (i == 0)
6505 goto initfail;
6506 tp->rx_jumbo_pending = i;
6507 break;
6508 }
6509 }
6510
6511 done:
6512 return 0;
6513
6514 initfail:
6515 tg3_rx_prodring_free(tp, tpr);
6516 return -ENOMEM;
6517 }
6518
6519 static void tg3_rx_prodring_fini(struct tg3 *tp,
6520 struct tg3_rx_prodring_set *tpr)
6521 {
6522 kfree(tpr->rx_std_buffers);
6523 tpr->rx_std_buffers = NULL;
6524 kfree(tpr->rx_jmb_buffers);
6525 tpr->rx_jmb_buffers = NULL;
6526 if (tpr->rx_std) {
6527 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6528 tpr->rx_std, tpr->rx_std_mapping);
6529 tpr->rx_std = NULL;
6530 }
6531 if (tpr->rx_jmb) {
6532 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6533 tpr->rx_jmb, tpr->rx_jmb_mapping);
6534 tpr->rx_jmb = NULL;
6535 }
6536 }
6537
6538 static int tg3_rx_prodring_init(struct tg3 *tp,
6539 struct tg3_rx_prodring_set *tpr)
6540 {
6541 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6542 GFP_KERNEL);
6543 if (!tpr->rx_std_buffers)
6544 return -ENOMEM;
6545
6546 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6547 TG3_RX_STD_RING_BYTES(tp),
6548 &tpr->rx_std_mapping,
6549 GFP_KERNEL);
6550 if (!tpr->rx_std)
6551 goto err_out;
6552
6553 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6554 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6555 GFP_KERNEL);
6556 if (!tpr->rx_jmb_buffers)
6557 goto err_out;
6558
6559 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6560 TG3_RX_JMB_RING_BYTES(tp),
6561 &tpr->rx_jmb_mapping,
6562 GFP_KERNEL);
6563 if (!tpr->rx_jmb)
6564 goto err_out;
6565 }
6566
6567 return 0;
6568
6569 err_out:
6570 tg3_rx_prodring_fini(tp, tpr);
6571 return -ENOMEM;
6572 }
6573
6574 /* Free up pending packets in all rx/tx rings.
6575 *
6576 * The chip has been shut down and the driver detached from
6577  * the network stack, so no interrupts or new tx packets will
6578 * end up in the driver. tp->{tx,}lock is not held and we are not
6579 * in an interrupt context and thus may sleep.
6580 */
6581 static void tg3_free_rings(struct tg3 *tp)
6582 {
6583 int i, j;
6584
6585 for (j = 0; j < tp->irq_cnt; j++) {
6586 struct tg3_napi *tnapi = &tp->napi[j];
6587
6588 tg3_rx_prodring_free(tp, &tnapi->prodring);
6589
6590 if (!tnapi->tx_buffers)
6591 continue;
6592
6593 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6594 struct ring_info *txp;
6595 struct sk_buff *skb;
6596 unsigned int k;
6597
6598 txp = &tnapi->tx_buffers[i];
6599 skb = txp->skb;
6600
6601 if (skb == NULL) {
6602 i++;
6603 continue;
6604 }
6605
6606 pci_unmap_single(tp->pdev,
6607 dma_unmap_addr(txp, mapping),
6608 skb_headlen(skb),
6609 PCI_DMA_TODEVICE);
6610 txp->skb = NULL;
6611
6612 i++;
6613
6614 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6615 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6616 pci_unmap_page(tp->pdev,
6617 dma_unmap_addr(txp, mapping),
6618 skb_shinfo(skb)->frags[k].size,
6619 PCI_DMA_TODEVICE);
6620 i++;
6621 }
6622
6623 dev_kfree_skb_any(skb);
6624 }
6625 }
6626 }
6627
6628 /* Initialize tx/rx rings for packet processing.
6629 *
6630 * The chip has been shut down and the driver detached from
6631  * the network stack, so no interrupts or new tx packets will
6632 * end up in the driver. tp->{tx,}lock are held and thus
6633 * we may not sleep.
6634 */
6635 static int tg3_init_rings(struct tg3 *tp)
6636 {
6637 int i;
6638
6639 /* Free up all the SKBs. */
6640 tg3_free_rings(tp);
6641
6642 for (i = 0; i < tp->irq_cnt; i++) {
6643 struct tg3_napi *tnapi = &tp->napi[i];
6644
6645 tnapi->last_tag = 0;
6646 tnapi->last_irq_tag = 0;
6647 tnapi->hw_status->status = 0;
6648 tnapi->hw_status->status_tag = 0;
6649 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6650
6651 tnapi->tx_prod = 0;
6652 tnapi->tx_cons = 0;
6653 if (tnapi->tx_ring)
6654 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6655
6656 tnapi->rx_rcb_ptr = 0;
6657 if (tnapi->rx_rcb)
6658 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6659
6660 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6661 tg3_free_rings(tp);
6662 return -ENOMEM;
6663 }
6664 }
6665
6666 return 0;
6667 }
6668
6669 /*
6670 * Must not be invoked with interrupt sources disabled and
6671  * the hardware shut down.
6672 */
6673 static void tg3_free_consistent(struct tg3 *tp)
6674 {
6675 int i;
6676
6677 for (i = 0; i < tp->irq_cnt; i++) {
6678 struct tg3_napi *tnapi = &tp->napi[i];
6679
6680 if (tnapi->tx_ring) {
6681 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6682 tnapi->tx_ring, tnapi->tx_desc_mapping);
6683 tnapi->tx_ring = NULL;
6684 }
6685
6686 kfree(tnapi->tx_buffers);
6687 tnapi->tx_buffers = NULL;
6688
6689 if (tnapi->rx_rcb) {
6690 dma_free_coherent(&tp->pdev->dev,
6691 TG3_RX_RCB_RING_BYTES(tp),
6692 tnapi->rx_rcb,
6693 tnapi->rx_rcb_mapping);
6694 tnapi->rx_rcb = NULL;
6695 }
6696
6697 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6698
6699 if (tnapi->hw_status) {
6700 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6701 tnapi->hw_status,
6702 tnapi->status_mapping);
6703 tnapi->hw_status = NULL;
6704 }
6705 }
6706
6707 if (tp->hw_stats) {
6708 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6709 tp->hw_stats, tp->stats_mapping);
6710 tp->hw_stats = NULL;
6711 }
6712 }
6713
6714 /*
6715 * Must not be invoked with interrupt sources disabled and
6716  * the hardware shut down. Can sleep.
6717 */
6718 static int tg3_alloc_consistent(struct tg3 *tp)
6719 {
6720 int i;
6721
6722 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6723 sizeof(struct tg3_hw_stats),
6724 &tp->stats_mapping,
6725 GFP_KERNEL);
6726 if (!tp->hw_stats)
6727 goto err_out;
6728
6729 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6730
6731 for (i = 0; i < tp->irq_cnt; i++) {
6732 struct tg3_napi *tnapi = &tp->napi[i];
6733 struct tg3_hw_status *sblk;
6734
6735 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6736 TG3_HW_STATUS_SIZE,
6737 &tnapi->status_mapping,
6738 GFP_KERNEL);
6739 if (!tnapi->hw_status)
6740 goto err_out;
6741
6742 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6743 sblk = tnapi->hw_status;
6744
6745 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6746 goto err_out;
6747
6748 		/* With TSS, only vectors 1 and up handle tx interrupts;
6749 		 * without it, only vector 0 does. Allocate tx resources
6750 		 * only for the vector(s) involved. */
6751 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6752 (i && tg3_flag(tp, ENABLE_TSS))) {
6753 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6754 TG3_TX_RING_SIZE,
6755 GFP_KERNEL);
6756 if (!tnapi->tx_buffers)
6757 goto err_out;
6758
6759 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6760 TG3_TX_RING_BYTES,
6761 &tnapi->tx_desc_mapping,
6762 GFP_KERNEL);
6763 if (!tnapi->tx_ring)
6764 goto err_out;
6765 }
6766
6767 /*
6768 * When RSS is enabled, the status block format changes
6769 * slightly. The "rx_jumbo_consumer", "reserved",
6770 * and "rx_mini_consumer" members get mapped to the
6771 * other three rx return ring producer indexes.
6772 */
6773 switch (i) {
6774 default:
6775 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6776 break;
6777 case 2:
6778 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6779 break;
6780 case 3:
6781 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6782 break;
6783 case 4:
6784 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6785 break;
6786 }
6787
6788 /*
6789 * If multivector RSS is enabled, vector 0 does not handle
6790 * rx or tx interrupts. Don't allocate any resources for it.
6791 */
6792 if (!i && tg3_flag(tp, ENABLE_RSS))
6793 continue;
6794
6795 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6796 TG3_RX_RCB_RING_BYTES(tp),
6797 &tnapi->rx_rcb_mapping,
6798 GFP_KERNEL);
6799 if (!tnapi->rx_rcb)
6800 goto err_out;
6801
6802 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6803 }
6804
6805 return 0;
6806
6807 err_out:
6808 tg3_free_consistent(tp);
6809 return -ENOMEM;
6810 }
6811
6812 #define MAX_WAIT_CNT 1000
6813
6814 /* To stop a block, clear the enable bit and poll till it
6815 * clears. tp->lock is held.
6816 */
6817 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6818 {
6819 unsigned int i;
6820 u32 val;
6821
6822 if (tg3_flag(tp, 5705_PLUS)) {
6823 switch (ofs) {
6824 case RCVLSC_MODE:
6825 case DMAC_MODE:
6826 case MBFREE_MODE:
6827 case BUFMGR_MODE:
6828 case MEMARB_MODE:
6829 /* We can't enable/disable these bits of the
6830 * 5705/5750, just say success.
6831 */
6832 return 0;
6833
6834 default:
6835 break;
6836 }
6837 }
6838
6839 val = tr32(ofs);
6840 val &= ~enable_bit;
6841 tw32_f(ofs, val);
6842
6843 for (i = 0; i < MAX_WAIT_CNT; i++) {
6844 udelay(100);
6845 val = tr32(ofs);
6846 if ((val & enable_bit) == 0)
6847 break;
6848 }
6849
6850 if (i == MAX_WAIT_CNT && !silent) {
6851 dev_err(&tp->pdev->dev,
6852 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6853 ofs, enable_bit);
6854 return -ENODEV;
6855 }
6856
6857 return 0;
6858 }
6859
6860 /* tp->lock is held. */
6861 static int tg3_abort_hw(struct tg3 *tp, int silent)
6862 {
6863 int i, err;
6864
6865 tg3_disable_ints(tp);
6866
6867 tp->rx_mode &= ~RX_MODE_ENABLE;
6868 tw32_f(MAC_RX_MODE, tp->rx_mode);
6869 udelay(10);
6870
6871 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6872 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6873 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6874 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6875 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6876 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6877
6878 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6879 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6880 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6881 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6882 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6883 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6884 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6885
6886 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6887 tw32_f(MAC_MODE, tp->mac_mode);
6888 udelay(40);
6889
6890 tp->tx_mode &= ~TX_MODE_ENABLE;
6891 tw32_f(MAC_TX_MODE, tp->tx_mode);
6892
6893 for (i = 0; i < MAX_WAIT_CNT; i++) {
6894 udelay(100);
6895 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6896 break;
6897 }
6898 if (i >= MAX_WAIT_CNT) {
6899 dev_err(&tp->pdev->dev,
6900 "%s timed out, TX_MODE_ENABLE will not clear "
6901 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6902 err |= -ENODEV;
6903 }
6904
6905 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6906 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6907 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6908
6909 tw32(FTQ_RESET, 0xffffffff);
6910 tw32(FTQ_RESET, 0x00000000);
6911
6912 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6913 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6914
6915 for (i = 0; i < tp->irq_cnt; i++) {
6916 struct tg3_napi *tnapi = &tp->napi[i];
6917 if (tnapi->hw_status)
6918 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6919 }
6920 if (tp->hw_stats)
6921 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6922
6923 return err;
6924 }
6925
6926 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6927 {
6928 int i;
6929 u32 apedata;
6930
6931 /* NCSI does not support APE events */
6932 if (tg3_flag(tp, APE_HAS_NCSI))
6933 return;
6934
6935 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6936 if (apedata != APE_SEG_SIG_MAGIC)
6937 return;
6938
6939 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6940 if (!(apedata & APE_FW_STATUS_READY))
6941 return;
6942
6943 /* Wait for up to 1 millisecond for APE to service previous event. */
6944 for (i = 0; i < 10; i++) {
6945 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6946 return;
6947
6948 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6949
6950 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6951 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6952 event | APE_EVENT_STATUS_EVENT_PENDING);
6953
6954 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6955
6956 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6957 break;
6958
6959 udelay(100);
6960 }
6961
6962 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6963 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6964 }
6965
6966 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6967 {
6968 u32 event;
6969 u32 apedata;
6970
6971 if (!tg3_flag(tp, ENABLE_APE))
6972 return;
6973
6974 switch (kind) {
6975 case RESET_KIND_INIT:
6976 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6977 APE_HOST_SEG_SIG_MAGIC);
6978 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6979 APE_HOST_SEG_LEN_MAGIC);
6980 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6981 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6982 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6983 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6984 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6985 APE_HOST_BEHAV_NO_PHYLOCK);
6986 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6987 TG3_APE_HOST_DRVR_STATE_START);
6988
6989 event = APE_EVENT_STATUS_STATE_START;
6990 break;
6991 case RESET_KIND_SHUTDOWN:
6992 /* With the interface we are currently using,
6993 * APE does not track driver state. Wiping
6994 * out the HOST SEGMENT SIGNATURE forces
6995 * the APE to assume OS absent status.
6996 */
6997 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6998
6999 if (device_may_wakeup(&tp->pdev->dev) &&
7000 tg3_flag(tp, WOL_ENABLE)) {
7001 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
7002 TG3_APE_HOST_WOL_SPEED_AUTO);
7003 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
7004 } else
7005 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
7006
7007 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
7008
7009 event = APE_EVENT_STATUS_STATE_UNLOAD;
7010 break;
7011 case RESET_KIND_SUSPEND:
7012 event = APE_EVENT_STATUS_STATE_SUSPEND;
7013 break;
7014 default:
7015 return;
7016 }
7017
7018 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
7019
7020 tg3_ape_send_event(tp, event);
7021 }
7022
7023 /* tp->lock is held. */
7024 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
7025 {
7026 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
7027 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
7028
7029 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7030 switch (kind) {
7031 case RESET_KIND_INIT:
7032 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7033 DRV_STATE_START);
7034 break;
7035
7036 case RESET_KIND_SHUTDOWN:
7037 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7038 DRV_STATE_UNLOAD);
7039 break;
7040
7041 case RESET_KIND_SUSPEND:
7042 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7043 DRV_STATE_SUSPEND);
7044 break;
7045
7046 default:
7047 break;
7048 }
7049 }
7050
7051 if (kind == RESET_KIND_INIT ||
7052 kind == RESET_KIND_SUSPEND)
7053 tg3_ape_driver_state_change(tp, kind);
7054 }
7055
7056 /* tp->lock is held. */
7057 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
7058 {
7059 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7060 switch (kind) {
7061 case RESET_KIND_INIT:
7062 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7063 DRV_STATE_START_DONE);
7064 break;
7065
7066 case RESET_KIND_SHUTDOWN:
7067 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7068 DRV_STATE_UNLOAD_DONE);
7069 break;
7070
7071 default:
7072 break;
7073 }
7074 }
7075
7076 if (kind == RESET_KIND_SHUTDOWN)
7077 tg3_ape_driver_state_change(tp, kind);
7078 }
7079
7080 /* tp->lock is held. */
7081 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
7082 {
7083 if (tg3_flag(tp, ENABLE_ASF)) {
7084 switch (kind) {
7085 case RESET_KIND_INIT:
7086 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7087 DRV_STATE_START);
7088 break;
7089
7090 case RESET_KIND_SHUTDOWN:
7091 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7092 DRV_STATE_UNLOAD);
7093 break;
7094
7095 case RESET_KIND_SUSPEND:
7096 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7097 DRV_STATE_SUSPEND);
7098 break;
7099
7100 default:
7101 break;
7102 }
7103 }
7104 }
7105
7106 static int tg3_poll_fw(struct tg3 *tp)
7107 {
7108 int i;
7109 u32 val;
7110
7111 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7112 /* Wait up to 20ms for init done. */
7113 for (i = 0; i < 200; i++) {
7114 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
7115 return 0;
7116 udelay(100);
7117 }
7118 return -ENODEV;
7119 }
7120
7121 	/* Wait up to one second (100000 polls of 10 usec) for firmware init. */
7122 for (i = 0; i < 100000; i++) {
7123 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
7124 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
7125 break;
7126 udelay(10);
7127 }
7128
7129 /* Chip might not be fitted with firmware. Some Sun onboard
7130 * parts are configured like that. So don't signal the timeout
7131 * of the above loop as an error, but do report the lack of
7132 * running firmware once.
7133 */
7134 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
7135 tg3_flag_set(tp, NO_FWARE_REPORTED);
7136
7137 netdev_info(tp->dev, "No firmware running\n");
7138 }
7139
7140 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7141 /* The 57765 A0 needs a little more
7142 * time to do some important work.
7143 */
7144 mdelay(10);
7145 }
7146
7147 return 0;
7148 }
7149
7150 /* Save PCI command register before chip reset */
7151 static void tg3_save_pci_state(struct tg3 *tp)
7152 {
7153 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7154 }
7155
7156 /* Restore PCI state after chip reset */
7157 static void tg3_restore_pci_state(struct tg3 *tp)
7158 {
7159 u32 val;
7160
7161 /* Re-enable indirect register accesses. */
7162 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7163 tp->misc_host_ctrl);
7164
7165 /* Set MAX PCI retry to zero. */
7166 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7167 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7168 tg3_flag(tp, PCIX_MODE))
7169 val |= PCISTATE_RETRY_SAME_DMA;
7170 /* Allow reads and writes to the APE register and memory space. */
7171 if (tg3_flag(tp, ENABLE_APE))
7172 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7173 PCISTATE_ALLOW_APE_SHMEM_WR |
7174 PCISTATE_ALLOW_APE_PSPACE_WR;
7175 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7176
7177 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7178
7179 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7180 if (tg3_flag(tp, PCI_EXPRESS))
7181 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7182 else {
7183 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7184 tp->pci_cacheline_sz);
7185 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7186 tp->pci_lat_timer);
7187 }
7188 }
7189
7190 /* Make sure PCI-X relaxed ordering bit is clear. */
7191 if (tg3_flag(tp, PCIX_MODE)) {
7192 u16 pcix_cmd;
7193
7194 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7195 &pcix_cmd);
7196 pcix_cmd &= ~PCI_X_CMD_ERO;
7197 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7198 pcix_cmd);
7199 }
7200
7201 if (tg3_flag(tp, 5780_CLASS)) {
7202
7203 /* Chip reset on 5780 will reset MSI enable bit,
7204 		 * so we need to restore it.
7205 */
7206 if (tg3_flag(tp, USING_MSI)) {
7207 u16 ctrl;
7208
7209 pci_read_config_word(tp->pdev,
7210 tp->msi_cap + PCI_MSI_FLAGS,
7211 &ctrl);
7212 pci_write_config_word(tp->pdev,
7213 tp->msi_cap + PCI_MSI_FLAGS,
7214 ctrl | PCI_MSI_FLAGS_ENABLE);
7215 val = tr32(MSGINT_MODE);
7216 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7217 }
7218 }
7219 }
7220
7221 static void tg3_stop_fw(struct tg3 *);
7222
7223 /* tp->lock is held. */
7224 static int tg3_chip_reset(struct tg3 *tp)
7225 {
7226 u32 val;
7227 void (*write_op)(struct tg3 *, u32, u32);
7228 int i, err;
7229
7230 tg3_nvram_lock(tp);
7231
7232 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7233
7234 /* No matching tg3_nvram_unlock() after this because
7235 * chip reset below will undo the nvram lock.
7236 */
7237 tp->nvram_lock_cnt = 0;
7238
7239 /* GRC_MISC_CFG core clock reset will clear the memory
7240 * enable bit in PCI register 4 and the MSI enable bit
7241 * on some chips, so we save relevant registers here.
7242 */
7243 tg3_save_pci_state(tp);
7244
7245 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7246 tg3_flag(tp, 5755_PLUS))
7247 tw32(GRC_FASTBOOT_PC, 0);
7248
7249 /*
7250 * We must avoid the readl() that normally takes place.
7251 * It locks machines, causes machine checks, and other
7252 * fun things. So, temporarily disable the 5701
7253 * hardware workaround, while we do the reset.
7254 */
7255 write_op = tp->write32;
7256 if (write_op == tg3_write_flush_reg32)
7257 tp->write32 = tg3_write32;
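	/* tg3_write_flush_reg32() is the write-then-read-back variant (as
	 * the comment above implies); the plain tg3_write32() substituted
	 * here avoids the readl() until write_op is restored below, once
	 * the reset has been issued.
	 */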
7258
7259 /* Prevent the irq handler from reading or writing PCI registers
7260 * during chip reset when the memory enable bit in the PCI command
7261 * register may be cleared. The chip does not generate interrupt
7262 * at this time, but the irq handler may still be called due to irq
7263 * sharing or irqpoll.
7264 */
7265 tg3_flag_set(tp, CHIP_RESETTING);
7266 for (i = 0; i < tp->irq_cnt; i++) {
7267 struct tg3_napi *tnapi = &tp->napi[i];
7268 if (tnapi->hw_status) {
7269 tnapi->hw_status->status = 0;
7270 tnapi->hw_status->status_tag = 0;
7271 }
7272 tnapi->last_tag = 0;
7273 tnapi->last_irq_tag = 0;
7274 }
7275 smp_mb();
7276
7277 for (i = 0; i < tp->irq_cnt; i++)
7278 synchronize_irq(tp->napi[i].irq_vec);
7279
7280 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7281 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7282 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7283 }
7284
7285 /* do the reset */
7286 val = GRC_MISC_CFG_CORECLK_RESET;
7287
7288 if (tg3_flag(tp, PCI_EXPRESS)) {
7289 /* Force PCIe 1.0a mode */
7290 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7291 !tg3_flag(tp, 57765_PLUS) &&
7292 tr32(TG3_PCIE_PHY_TSTCTL) ==
7293 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7294 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7295
7296 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7297 tw32(GRC_MISC_CFG, (1 << 29));
7298 val |= (1 << 29);
7299 }
7300 }
7301
7302 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7303 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7304 tw32(GRC_VCPU_EXT_CTRL,
7305 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7306 }
7307
7308 	/* Manage gphy power for all CPMU-absent PCIe devices. */
7309 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7310 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7311
7312 tw32(GRC_MISC_CFG, val);
7313
7314 /* restore 5701 hardware bug workaround write method */
7315 tp->write32 = write_op;
7316
7317 /* Unfortunately, we have to delay before the PCI read back.
7318 	 * Some 575X chips will not even respond to a PCI cfg access
7319 * when the reset command is given to the chip.
7320 *
7321 * How do these hardware designers expect things to work
7322 * properly if the PCI write is posted for a long period
7323 	 * of time? Some method is always needed to read a register
7324 	 * back and thereby push out the posted write that performs
7325 	 * the reset.
7326 *
7327 * For most tg3 variants the trick below was working.
7328 * Ho hum...
7329 */
7330 udelay(120);
7331
7332 /* Flush PCI posted writes. The normal MMIO registers
7333 * are inaccessible at this time so this is the only
7334 	 * way to do this reliably (actually, this is no longer
7335 * the case, see above). I tried to use indirect
7336 * register read/write but this upset some 5701 variants.
7337 */
7338 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7339
7340 udelay(120);
7341
7342 if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
7343 u16 val16;
7344
7345 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7346 int i;
7347 u32 cfg_val;
7348
7349 /* Wait for link training to complete. */
7350 for (i = 0; i < 5000; i++)
7351 udelay(100);
7352
7353 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7354 pci_write_config_dword(tp->pdev, 0xc4,
7355 cfg_val | (1 << 15));
7356 }
7357
7358 /* Clear the "no snoop" and "relaxed ordering" bits. */
7359 pci_read_config_word(tp->pdev,
7360 tp->pcie_cap + PCI_EXP_DEVCTL,
7361 &val16);
7362 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7363 PCI_EXP_DEVCTL_NOSNOOP_EN);
7364 /*
7365 * Older PCIe devices only support the 128 byte
7366 * MPS setting. Enforce the restriction.
7367 */
7368 if (!tg3_flag(tp, CPMU_PRESENT))
7369 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7370 pci_write_config_word(tp->pdev,
7371 tp->pcie_cap + PCI_EXP_DEVCTL,
7372 val16);
7373
7374 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7375
7376 /* Clear error status */
7377 pci_write_config_word(tp->pdev,
7378 tp->pcie_cap + PCI_EXP_DEVSTA,
7379 PCI_EXP_DEVSTA_CED |
7380 PCI_EXP_DEVSTA_NFED |
7381 PCI_EXP_DEVSTA_FED |
7382 PCI_EXP_DEVSTA_URD);
7383 }
7384
7385 tg3_restore_pci_state(tp);
7386
7387 tg3_flag_clear(tp, CHIP_RESETTING);
7388 tg3_flag_clear(tp, ERROR_PROCESSED);
7389
7390 val = 0;
7391 if (tg3_flag(tp, 5780_CLASS))
7392 val = tr32(MEMARB_MODE);
7393 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7394
7395 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7396 tg3_stop_fw(tp);
7397 tw32(0x5000, 0x400);
7398 }
7399
7400 tw32(GRC_MODE, tp->grc_mode);
7401
7402 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7403 val = tr32(0xc4);
7404
7405 tw32(0xc4, val | (1 << 15));
7406 }
7407
7408 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7409 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7410 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7411 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7412 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7413 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7414 }
7415
7416 if (tg3_flag(tp, ENABLE_APE))
7417 tp->mac_mode = MAC_MODE_APE_TX_EN |
7418 MAC_MODE_APE_RX_EN |
7419 MAC_MODE_TDE_ENABLE;
7420
7421 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7422 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7423 val = tp->mac_mode;
7424 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7425 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7426 val = tp->mac_mode;
7427 } else
7428 val = 0;
7429
7430 tw32_f(MAC_MODE, val);
7431 udelay(40);
7432
7433 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7434
7435 err = tg3_poll_fw(tp);
7436 if (err)
7437 return err;
7438
7439 tg3_mdio_start(tp);
7440
7441 if (tg3_flag(tp, PCI_EXPRESS) &&
7442 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7443 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7444 !tg3_flag(tp, 57765_PLUS)) {
7445 val = tr32(0x7c00);
7446
7447 tw32(0x7c00, val | (1 << 25));
7448 }
7449
7450 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7451 val = tr32(TG3_CPMU_CLCK_ORIDE);
7452 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7453 }
7454
7455 /* Reprobe ASF enable state. */
7456 tg3_flag_clear(tp, ENABLE_ASF);
7457 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7458 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7459 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7460 u32 nic_cfg;
7461
7462 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7463 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7464 tg3_flag_set(tp, ENABLE_ASF);
7465 tp->last_event_jiffies = jiffies;
7466 if (tg3_flag(tp, 5750_PLUS))
7467 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7468 }
7469 }
7470
7471 return 0;
7472 }
7473
7474 /* tp->lock is held. */
7475 static void tg3_stop_fw(struct tg3 *tp)
7476 {
7477 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7478 /* Wait for RX cpu to ACK the previous event. */
7479 tg3_wait_for_event_ack(tp);
7480
7481 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7482
7483 tg3_generate_fw_event(tp);
7484
7485 /* Wait for RX cpu to ACK this event. */
7486 tg3_wait_for_event_ack(tp);
7487 }
7488 }
7489
7490 /* tp->lock is held. */
7491 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7492 {
7493 int err;
7494
7495 tg3_stop_fw(tp);
7496
7497 tg3_write_sig_pre_reset(tp, kind);
7498
7499 tg3_abort_hw(tp, silent);
7500 err = tg3_chip_reset(tp);
7501
7502 __tg3_set_mac_addr(tp, 0);
7503
7504 tg3_write_sig_legacy(tp, kind);
7505 tg3_write_sig_post_reset(tp, kind);
7506
7507 if (err)
7508 return err;
7509
7510 return 0;
7511 }
7512
7513 #define RX_CPU_SCRATCH_BASE 0x30000
7514 #define RX_CPU_SCRATCH_SIZE 0x04000
7515 #define TX_CPU_SCRATCH_BASE 0x34000
7516 #define TX_CPU_SCRATCH_SIZE 0x04000
7517
7518 /* tp->lock is held. */
7519 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7520 {
7521 int i;
7522
7523 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7524
7525 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7526 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7527
7528 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7529 return 0;
7530 }
7531 if (offset == RX_CPU_BASE) {
7532 for (i = 0; i < 10000; i++) {
7533 tw32(offset + CPU_STATE, 0xffffffff);
7534 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7535 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7536 break;
7537 }
7538
7539 tw32(offset + CPU_STATE, 0xffffffff);
7540 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7541 udelay(10);
7542 } else {
7543 for (i = 0; i < 10000; i++) {
7544 tw32(offset + CPU_STATE, 0xffffffff);
7545 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7546 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7547 break;
7548 }
7549 }
7550
7551 if (i >= 10000) {
7552 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7553 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7554 return -ENODEV;
7555 }
7556
7557 /* Clear firmware's nvram arbitration. */
7558 if (tg3_flag(tp, NVRAM))
7559 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7560 return 0;
7561 }
7562
7563 struct fw_info {
7564 unsigned int fw_base;
7565 unsigned int fw_len;
7566 const __be32 *fw_data;
7567 };
7568
7569 /* tp->lock is held. */
7570 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7571 int cpu_scratch_size, struct fw_info *info)
7572 {
7573 int err, lock_err, i;
7574 void (*write_op)(struct tg3 *, u32, u32);
7575
7576 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7577 netdev_err(tp->dev,
7578 			   "%s: Trying to load TX cpu firmware on a 5705-class chip\n",
7579 __func__);
7580 return -EINVAL;
7581 }
7582
7583 if (tg3_flag(tp, 5705_PLUS))
7584 write_op = tg3_write_mem;
7585 else
7586 write_op = tg3_write_indirect_reg32;
7587
7588 /* It is possible that bootcode is still loading at this point.
7589 * Get the nvram lock first before halting the cpu.
7590 */
7591 lock_err = tg3_nvram_lock(tp);
7592 err = tg3_halt_cpu(tp, cpu_base);
7593 if (!lock_err)
7594 tg3_nvram_unlock(tp);
7595 if (err)
7596 goto out;
7597
7598 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7599 write_op(tp, cpu_scratch_base + i, 0);
7600 tw32(cpu_base + CPU_STATE, 0xffffffff);
7601 	tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
7602 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7603 write_op(tp, (cpu_scratch_base +
7604 (info->fw_base & 0xffff) +
7605 (i * sizeof(u32))),
7606 be32_to_cpu(info->fw_data[i]));
7607
7608 err = 0;
7609
7610 out:
7611 return err;
7612 }
7613
7614 /* tp->lock is held. */
7615 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7616 {
7617 struct fw_info info;
7618 const __be32 *fw_data;
7619 int err, i;
7620
7621 fw_data = (void *)tp->fw->data;
7622
7623 	/* Firmware blob starts with version numbers, followed by
7624 	 * start address and length. We are setting complete length:
7625 	 * length = end_address_of_bss - start_address_of_text.
7626 	 * Remainder is the blob to be loaded contiguously
7627 	 * from start address. */
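	/* As consumed below: fw_data[0] holds the version, fw_data[1] the
	 * load address, fw_data[2] presumably the stated length (the code
	 * recomputes it as fw->size - 12), and fw_data[3] onward the blob.
	 */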
7628
7629 info.fw_base = be32_to_cpu(fw_data[1]);
7630 info.fw_len = tp->fw->size - 12;
7631 info.fw_data = &fw_data[3];
7632
7633 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7634 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7635 &info);
7636 if (err)
7637 return err;
7638
7639 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7640 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7641 &info);
7642 if (err)
7643 return err;
7644
7645 /* Now startup only the RX cpu. */
7646 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7647 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7648
7649 for (i = 0; i < 5; i++) {
7650 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7651 break;
7652 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7653 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7654 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7655 udelay(1000);
7656 }
7657 if (i >= 5) {
7658 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7659 "should be %08x\n", __func__,
7660 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7661 return -ENODEV;
7662 }
7663 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7664 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7665
7666 return 0;
7667 }
7668
7669 /* tp->lock is held. */
7670 static int tg3_load_tso_firmware(struct tg3 *tp)
7671 {
7672 struct fw_info info;
7673 const __be32 *fw_data;
7674 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7675 int err, i;
7676
7677 if (tg3_flag(tp, HW_TSO_1) ||
7678 tg3_flag(tp, HW_TSO_2) ||
7679 tg3_flag(tp, HW_TSO_3))
7680 return 0;
7681
7682 fw_data = (void *)tp->fw->data;
7683
7684 	/* Firmware blob starts with version numbers, followed by
7685 	 * start address and length. We are setting complete length:
7686 	 * length = end_address_of_bss - start_address_of_text.
7687 	 * Remainder is the blob to be loaded contiguously
7688 	 * from start address. */
7689
7690 info.fw_base = be32_to_cpu(fw_data[1]);
7691 cpu_scratch_size = tp->fw_len;
7692 info.fw_len = tp->fw->size - 12;
7693 info.fw_data = &fw_data[3];
7694
7695 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7696 cpu_base = RX_CPU_BASE;
7697 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7698 } else {
7699 cpu_base = TX_CPU_BASE;
7700 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7701 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7702 }
7703
7704 err = tg3_load_firmware_cpu(tp, cpu_base,
7705 cpu_scratch_base, cpu_scratch_size,
7706 &info);
7707 if (err)
7708 return err;
7709
7710 /* Now startup the cpu. */
7711 tw32(cpu_base + CPU_STATE, 0xffffffff);
7712 tw32_f(cpu_base + CPU_PC, info.fw_base);
7713
7714 for (i = 0; i < 5; i++) {
7715 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7716 break;
7717 tw32(cpu_base + CPU_STATE, 0xffffffff);
7718 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7719 tw32_f(cpu_base + CPU_PC, info.fw_base);
7720 udelay(1000);
7721 }
7722 if (i >= 5) {
7723 netdev_err(tp->dev,
7724 "%s fails to set CPU PC, is %08x should be %08x\n",
7725 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7726 return -ENODEV;
7727 }
7728 tw32(cpu_base + CPU_STATE, 0xffffffff);
7729 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7730 return 0;
7731 }
7732
7733
7734 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7735 {
7736 struct tg3 *tp = netdev_priv(dev);
7737 struct sockaddr *addr = p;
7738 int err = 0, skip_mac_1 = 0;
7739
7740 if (!is_valid_ether_addr(addr->sa_data))
7741 return -EINVAL;
7742
7743 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7744
7745 if (!netif_running(dev))
7746 return 0;
7747
7748 if (tg3_flag(tp, ENABLE_ASF)) {
7749 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7750
7751 addr0_high = tr32(MAC_ADDR_0_HIGH);
7752 addr0_low = tr32(MAC_ADDR_0_LOW);
7753 addr1_high = tr32(MAC_ADDR_1_HIGH);
7754 addr1_low = tr32(MAC_ADDR_1_LOW);
7755
7756 /* Skip MAC addr 1 if ASF is using it. */
7757 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7758 !(addr1_high == 0 && addr1_low == 0))
7759 skip_mac_1 = 1;
7760 }
7761 spin_lock_bh(&tp->lock);
7762 __tg3_set_mac_addr(tp, skip_mac_1);
7763 spin_unlock_bh(&tp->lock);
7764
7765 return err;
7766 }
7767
7768 /* tp->lock is held. */
7769 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7770 dma_addr_t mapping, u32 maxlen_flags,
7771 u32 nic_addr)
7772 {
7773 tg3_write_mem(tp,
7774 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7775 ((u64) mapping >> 32));
7776 tg3_write_mem(tp,
7777 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7778 ((u64) mapping & 0xffffffff));
7779 tg3_write_mem(tp,
7780 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7781 maxlen_flags);
7782
7783 if (!tg3_flag(tp, 5705_PLUS))
7784 tg3_write_mem(tp,
7785 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7786 nic_addr);
7787 }
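/* A BDINFO block in NIC SRAM is therefore a 64-bit host ring address,
 * a maxlen/flags word and, on pre-5705 parts, a NIC-memory address.
 */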
7788
7789 static void __tg3_set_rx_mode(struct net_device *);
7790 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7791 {
7792 int i;
7793
7794 if (!tg3_flag(tp, ENABLE_TSS)) {
7795 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7796 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7797 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7798 } else {
7799 tw32(HOSTCC_TXCOL_TICKS, 0);
7800 tw32(HOSTCC_TXMAX_FRAMES, 0);
7801 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7802 }
7803
7804 if (!tg3_flag(tp, ENABLE_RSS)) {
7805 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7806 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7807 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7808 } else {
7809 tw32(HOSTCC_RXCOL_TICKS, 0);
7810 tw32(HOSTCC_RXMAX_FRAMES, 0);
7811 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7812 }
7813
7814 if (!tg3_flag(tp, 5705_PLUS)) {
7815 u32 val = ec->stats_block_coalesce_usecs;
7816
7817 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7818 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7819
7820 if (!netif_carrier_ok(tp->dev))
7821 val = 0;
7822
7823 tw32(HOSTCC_STAT_COAL_TICKS, val);
7824 }
7825
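	/* Each additional MSI-X vector owns a block of coalescing
	 * registers spaced 0x18 bytes apart from the _VEC1 offsets;
	 * program the active vectors here and zero the rest below.
	 */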
7826 for (i = 0; i < tp->irq_cnt - 1; i++) {
7827 u32 reg;
7828
7829 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7830 tw32(reg, ec->rx_coalesce_usecs);
7831 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7832 tw32(reg, ec->rx_max_coalesced_frames);
7833 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7834 tw32(reg, ec->rx_max_coalesced_frames_irq);
7835
7836 if (tg3_flag(tp, ENABLE_TSS)) {
7837 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7838 tw32(reg, ec->tx_coalesce_usecs);
7839 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7840 tw32(reg, ec->tx_max_coalesced_frames);
7841 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7842 tw32(reg, ec->tx_max_coalesced_frames_irq);
7843 }
7844 }
7845
7846 for (; i < tp->irq_max - 1; i++) {
7847 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7848 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7849 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7850
7851 if (tg3_flag(tp, ENABLE_TSS)) {
7852 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7853 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7854 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7855 }
7856 }
7857 }
7858
7859 /* tp->lock is held. */
7860 static void tg3_rings_reset(struct tg3 *tp)
7861 {
7862 int i;
7863 u32 stblk, txrcb, rxrcb, limit;
7864 struct tg3_napi *tnapi = &tp->napi[0];
7865
7866 /* Disable all transmit rings but the first. */
7867 if (!tg3_flag(tp, 5705_PLUS))
7868 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7869 else if (tg3_flag(tp, 5717_PLUS))
7870 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7871 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7872 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7873 else
7874 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7875
7876 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7877 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7878 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7879 BDINFO_FLAGS_DISABLED);
7880
7881
7882 /* Disable all receive return rings but the first. */
7883 if (tg3_flag(tp, 5717_PLUS))
7884 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7885 else if (!tg3_flag(tp, 5705_PLUS))
7886 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7887 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7888 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7889 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7890 else
7891 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7892
7893 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7894 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7895 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7896 BDINFO_FLAGS_DISABLED);
7897
7898 /* Disable interrupts */
7899 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7900
7901 /* Zero mailbox registers. */
7902 if (tg3_flag(tp, SUPPORT_MSIX)) {
7903 for (i = 1; i < tp->irq_max; i++) {
7904 tp->napi[i].tx_prod = 0;
7905 tp->napi[i].tx_cons = 0;
7906 if (tg3_flag(tp, ENABLE_TSS))
7907 tw32_mailbox(tp->napi[i].prodmbox, 0);
7908 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7909 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7910 }
7911 if (!tg3_flag(tp, ENABLE_TSS))
7912 tw32_mailbox(tp->napi[0].prodmbox, 0);
7913 } else {
7914 tp->napi[0].tx_prod = 0;
7915 tp->napi[0].tx_cons = 0;
7916 tw32_mailbox(tp->napi[0].prodmbox, 0);
7917 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7918 }
7919
7920 /* Make sure the NIC-based send BD rings are disabled. */
7921 if (!tg3_flag(tp, 5705_PLUS)) {
7922 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7923 for (i = 0; i < 16; i++)
7924 tw32_tx_mbox(mbox + i * 8, 0);
7925 }
7926
7927 txrcb = NIC_SRAM_SEND_RCB;
7928 rxrcb = NIC_SRAM_RCV_RET_RCB;
7929
7930 /* Clear status block in ram. */
7931 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7932
7933 /* Set status block DMA address */
7934 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7935 ((u64) tnapi->status_mapping >> 32));
7936 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7937 ((u64) tnapi->status_mapping & 0xffffffff));
7938
7939 if (tnapi->tx_ring) {
7940 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7941 (TG3_TX_RING_SIZE <<
7942 BDINFO_FLAGS_MAXLEN_SHIFT),
7943 NIC_SRAM_TX_BUFFER_DESC);
7944 txrcb += TG3_BDINFO_SIZE;
7945 }
7946
7947 if (tnapi->rx_rcb) {
7948 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7949 (tp->rx_ret_ring_mask + 1) <<
7950 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7951 rxrcb += TG3_BDINFO_SIZE;
7952 }
7953
7954 stblk = HOSTCC_STATBLCK_RING1;
7955
7956 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7957 u64 mapping = (u64)tnapi->status_mapping;
7958 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7959 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7960
7961 /* Clear status block in ram. */
7962 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7963
7964 if (tnapi->tx_ring) {
7965 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7966 (TG3_TX_RING_SIZE <<
7967 BDINFO_FLAGS_MAXLEN_SHIFT),
7968 NIC_SRAM_TX_BUFFER_DESC);
7969 txrcb += TG3_BDINFO_SIZE;
7970 }
7971
7972 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7973 ((tp->rx_ret_ring_mask + 1) <<
7974 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7975
7976 stblk += 8;
7977 rxrcb += TG3_BDINFO_SIZE;
7978 }
7979 }
7980
7981 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7982 {
7983 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7984
7985 if (!tg3_flag(tp, 5750_PLUS) ||
7986 tg3_flag(tp, 5780_CLASS) ||
7987 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7988 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7989 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7990 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7991 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7992 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7993 else
7994 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
7995
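	/* Replenish threshold: the smaller of half the on-chip BD cache
	 * and one eighth of the configured ring, so refills are requested
	 * well before the cache can run dry.
	 */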
7996 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
7997 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
7998
7999 val = min(nic_rep_thresh, host_rep_thresh);
8000 tw32(RCVBDI_STD_THRESH, val);
8001
8002 if (tg3_flag(tp, 57765_PLUS))
8003 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8004
8005 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8006 return;
8007
8008 if (!tg3_flag(tp, 5705_PLUS))
8009 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8010 else
8011 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8012
8013 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8014
8015 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8016 tw32(RCVBDI_JUMBO_THRESH, val);
8017
8018 if (tg3_flag(tp, 57765_PLUS))
8019 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8020 }
8021
8022 /* tp->lock is held. */
8023 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8024 {
8025 u32 val, rdmac_mode;
8026 int i, err, limit;
8027 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8028
8029 tg3_disable_ints(tp);
8030
8031 tg3_stop_fw(tp);
8032
8033 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8034
8035 if (tg3_flag(tp, INIT_COMPLETE))
8036 tg3_abort_hw(tp, 1);
8037
8038 /* Enable MAC control of LPI */
8039 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8040 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8041 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8042 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8043
8044 tw32_f(TG3_CPMU_EEE_CTRL,
8045 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8046
8047 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8048 TG3_CPMU_EEEMD_LPI_IN_TX |
8049 TG3_CPMU_EEEMD_LPI_IN_RX |
8050 TG3_CPMU_EEEMD_EEE_ENABLE;
8051
8052 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8053 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8054
8055 if (tg3_flag(tp, ENABLE_APE))
8056 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8057
8058 tw32_f(TG3_CPMU_EEE_MODE, val);
8059
8060 tw32_f(TG3_CPMU_EEE_DBTMR1,
8061 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8062 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8063
8064 tw32_f(TG3_CPMU_EEE_DBTMR2,
8065 TG3_CPMU_DBTMR2_APE_TX_2047US |
8066 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8067 }
8068
8069 if (reset_phy)
8070 tg3_phy_reset(tp);
8071
8072 err = tg3_chip_reset(tp);
8073 if (err)
8074 return err;
8075
8076 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8077
8078 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8079 val = tr32(TG3_CPMU_CTRL);
8080 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8081 tw32(TG3_CPMU_CTRL, val);
8082
8083 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8084 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8085 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8086 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8087
8088 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8089 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8090 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8091 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8092
8093 val = tr32(TG3_CPMU_HST_ACC);
8094 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8095 val |= CPMU_HST_ACC_MACCLK_6_25;
8096 tw32(TG3_CPMU_HST_ACC, val);
8097 }
8098
8099 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8100 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8101 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8102 PCIE_PWR_MGMT_L1_THRESH_4MS;
8103 tw32(PCIE_PWR_MGMT_THRESH, val);
8104
8105 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8106 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8107
8108 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8109
8110 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8111 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8112 }
8113
8114 if (tg3_flag(tp, L1PLLPD_EN)) {
8115 u32 grc_mode = tr32(GRC_MODE);
8116
8117 /* Access the lower 1K of PL PCIE block registers. */
8118 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8119 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8120
8121 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8122 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8123 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8124
8125 tw32(GRC_MODE, grc_mode);
8126 }
8127
8128 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8129 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8130 u32 grc_mode = tr32(GRC_MODE);
8131
8132 /* Access the lower 1K of PL PCIE block registers. */
8133 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8134 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8135
8136 val = tr32(TG3_PCIE_TLDLPL_PORT +
8137 TG3_PCIE_PL_LO_PHYCTL5);
8138 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8139 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8140
8141 tw32(GRC_MODE, grc_mode);
8142 }
8143
8144 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8145 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8146 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8147 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8148 }
8149
8150 /* This works around an issue with Athlon chipsets on
8151 * B3 tigon3 silicon. This bit has no effect on any
8152 * other revision. But do not set this on PCI Express
8153 * chips and don't even touch the clocks if the CPMU is present.
8154 */
8155 if (!tg3_flag(tp, CPMU_PRESENT)) {
8156 if (!tg3_flag(tp, PCI_EXPRESS))
8157 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8158 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8159 }
8160
8161 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8162 tg3_flag(tp, PCIX_MODE)) {
8163 val = tr32(TG3PCI_PCISTATE);
8164 val |= PCISTATE_RETRY_SAME_DMA;
8165 tw32(TG3PCI_PCISTATE, val);
8166 }
8167
8168 if (tg3_flag(tp, ENABLE_APE)) {
8169 /* Allow reads and writes to the
8170 * APE register and memory space.
8171 */
8172 val = tr32(TG3PCI_PCISTATE);
8173 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8174 PCISTATE_ALLOW_APE_SHMEM_WR |
8175 PCISTATE_ALLOW_APE_PSPACE_WR;
8176 tw32(TG3PCI_PCISTATE, val);
8177 }
8178
8179 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8180 /* Enable some hw fixes. */
8181 val = tr32(TG3PCI_MSI_DATA);
8182 val |= (1 << 26) | (1 << 28) | (1 << 29);
8183 tw32(TG3PCI_MSI_DATA, val);
8184 }
8185
8186 /* Descriptor ring init may make accesses to the
8187 	 * NIC SRAM area to set up the TX descriptors, so we
8188 * can only do this after the hardware has been
8189 * successfully reset.
8190 */
8191 err = tg3_init_rings(tp);
8192 if (err)
8193 return err;
8194
8195 if (tg3_flag(tp, 57765_PLUS)) {
8196 val = tr32(TG3PCI_DMA_RW_CTRL) &
8197 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8198 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8199 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8200 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8201 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8202 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8203 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8204 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8205 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8206 		/* This value is determined during the probe-time DMA
8207 * engine test, tg3_test_dma.
8208 */
8209 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8210 }
8211
8212 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8213 GRC_MODE_4X_NIC_SEND_RINGS |
8214 GRC_MODE_NO_TX_PHDR_CSUM |
8215 GRC_MODE_NO_RX_PHDR_CSUM);
8216 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8217
8218 /* Pseudo-header checksum is done by hardware logic and not
8219 	 * the offload processors, so make the chip do the pseudo-
8220 * header checksums on receive. For transmit it is more
8221 * convenient to do the pseudo-header checksum in software
8222 * as Linux does that on transmit for us in all cases.
8223 */
8224 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8225
8226 tw32(GRC_MODE,
8227 tp->grc_mode |
8228 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8229
8230 	/* Set up the timer prescaler register. The clock always runs at 66 MHz. */
8231 val = tr32(GRC_MISC_CFG);
8232 val &= ~0xff;
8233 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8234 tw32(GRC_MISC_CFG, val);
8235
8236 /* Initialize MBUF/DESC pool. */
8237 if (tg3_flag(tp, 5750_PLUS)) {
8238 /* Do nothing. */
8239 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8240 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8241 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8242 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8243 else
8244 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8245 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8246 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8247 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8248 int fw_len;
8249
8250 fw_len = tp->fw_len;
8251 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8252 tw32(BUFMGR_MB_POOL_ADDR,
8253 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8254 tw32(BUFMGR_MB_POOL_SIZE,
8255 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8256 }
8257
8258 if (tp->dev->mtu <= ETH_DATA_LEN) {
8259 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8260 tp->bufmgr_config.mbuf_read_dma_low_water);
8261 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8262 tp->bufmgr_config.mbuf_mac_rx_low_water);
8263 tw32(BUFMGR_MB_HIGH_WATER,
8264 tp->bufmgr_config.mbuf_high_water);
8265 } else {
8266 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8267 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8268 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8269 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8270 tw32(BUFMGR_MB_HIGH_WATER,
8271 tp->bufmgr_config.mbuf_high_water_jumbo);
8272 }
8273 tw32(BUFMGR_DMA_LOW_WATER,
8274 tp->bufmgr_config.dma_low_water);
8275 tw32(BUFMGR_DMA_HIGH_WATER,
8276 tp->bufmgr_config.dma_high_water);
8277
8278 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8279 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8280 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8281 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8282 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8283 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8284 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8285 tw32(BUFMGR_MODE, val);
8286 for (i = 0; i < 2000; i++) {
8287 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8288 break;
8289 udelay(10);
8290 }
8291 if (i >= 2000) {
8292 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8293 return -ENODEV;
8294 }
8295
8296 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8297 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8298
8299 tg3_setup_rxbd_thresholds(tp);
8300
8301 /* Initialize TG3_BDINFO's at:
8302 * RCVDBDI_STD_BD: standard eth size rx ring
8303 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8304 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8305 *
8306 * like so:
8307 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8308 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8309 * ring attribute flags
8310 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8311 *
8312 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8313 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8314 *
8315 * The size of each ring is fixed in the firmware, but the location is
8316 * configurable.
8317 */
8318 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8319 ((u64) tpr->rx_std_mapping >> 32));
8320 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8321 ((u64) tpr->rx_std_mapping & 0xffffffff));
8322 if (!tg3_flag(tp, 5717_PLUS))
8323 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8324 NIC_SRAM_RX_BUFFER_DESC);
8325
8326 /* Disable the mini ring */
8327 if (!tg3_flag(tp, 5705_PLUS))
8328 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8329 BDINFO_FLAGS_DISABLED);
8330
8331 /* Program the jumbo buffer descriptor ring control
8332 * blocks on those devices that have them.
8333 */
8334 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8335 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8336
8337 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8338 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8339 ((u64) tpr->rx_jmb_mapping >> 32));
8340 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8341 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8342 val = TG3_RX_JMB_RING_SIZE(tp) <<
8343 BDINFO_FLAGS_MAXLEN_SHIFT;
8344 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8345 val | BDINFO_FLAGS_USE_EXT_RECV);
8346 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8347 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8348 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8349 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8350 } else {
8351 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8352 BDINFO_FLAGS_DISABLED);
8353 }
8354
8355 if (tg3_flag(tp, 57765_PLUS)) {
8356 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8357 val = TG3_RX_STD_MAX_SIZE_5700;
8358 else
8359 val = TG3_RX_STD_MAX_SIZE_5717;
8360 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8361 val |= (TG3_RX_STD_DMA_SZ << 2);
8362 } else
8363 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8364 } else
8365 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8366
8367 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8368
8369 tpr->rx_std_prod_idx = tp->rx_pending;
8370 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8371
8372 tpr->rx_jmb_prod_idx =
8373 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8374 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8375
8376 tg3_rings_reset(tp);
8377
8378 /* Initialize MAC address and backoff seed. */
8379 __tg3_set_mac_addr(tp, 0);
8380
8381 /* MTU + ethernet header + FCS + optional VLAN tag */
8382 tw32(MAC_RX_MTU_SIZE,
8383 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8384
8385 /* The slot time is changed by tg3_setup_phy if we
8386 * run at gigabit with half duplex.
8387 */
8388 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8389 (6 << TX_LENGTHS_IPG_SHIFT) |
8390 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8391
8392 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8393 val |= tr32(MAC_TX_LENGTHS) &
8394 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8395 TX_LENGTHS_CNT_DWN_VAL_MSK);
8396
8397 tw32(MAC_TX_LENGTHS, val);
8398
8399 /* Receive rules. */
8400 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8401 tw32(RCVLPC_CONFIG, 0x0181);
8402
8403 	/* Calculate the RDMAC_MODE setting early; we need it to determine
8404 	 * the RCVLPC_STATS_ENABLE mask.
8405 */
8406 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8407 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8408 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8409 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8410 RDMAC_MODE_LNGREAD_ENAB);
8411
8412 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8413 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8414
8415 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8416 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8417 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8418 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8419 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8420 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8421
8422 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8423 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8424 if (tg3_flag(tp, TSO_CAPABLE) &&
8425 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8426 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8427 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8428 !tg3_flag(tp, IS_5788)) {
8429 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8430 }
8431 }
8432
8433 if (tg3_flag(tp, PCI_EXPRESS))
8434 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8435
8436 if (tg3_flag(tp, HW_TSO_1) ||
8437 tg3_flag(tp, HW_TSO_2) ||
8438 tg3_flag(tp, HW_TSO_3))
8439 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8440
8441 if (tg3_flag(tp, HW_TSO_3) ||
8442 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8443 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8444 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8445
8446 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8447 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8448
8449 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8450 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8451 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8452 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8453 tg3_flag(tp, 57765_PLUS)) {
8454 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8455 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8456 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8457 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8458 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8459 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8460 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8461 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8462 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8463 }
8464 tw32(TG3_RDMA_RSRVCTRL_REG,
8465 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8466 }
8467
8468 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8469 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8470 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8471 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8472 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8473 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8474 }
8475
8476 /* Receive/send statistics. */
8477 if (tg3_flag(tp, 5750_PLUS)) {
8478 val = tr32(RCVLPC_STATS_ENABLE);
8479 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8480 tw32(RCVLPC_STATS_ENABLE, val);
8481 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8482 tg3_flag(tp, TSO_CAPABLE)) {
8483 val = tr32(RCVLPC_STATS_ENABLE);
8484 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8485 tw32(RCVLPC_STATS_ENABLE, val);
8486 } else {
8487 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8488 }
8489 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8490 tw32(SNDDATAI_STATSENAB, 0xffffff);
8491 tw32(SNDDATAI_STATSCTRL,
8492 (SNDDATAI_SCTRL_ENABLE |
8493 SNDDATAI_SCTRL_FASTUPD));
8494
8495 	/* Set up the host coalescing engine. */
8496 tw32(HOSTCC_MODE, 0);
8497 for (i = 0; i < 2000; i++) {
8498 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8499 break;
8500 udelay(10);
8501 }
8502
8503 __tg3_set_coalesce(tp, &tp->coal);
8504
8505 if (!tg3_flag(tp, 5705_PLUS)) {
8506 /* Status/statistics block address. See tg3_timer,
8507 * the tg3_periodic_fetch_stats call there, and
8508 * tg3_get_stats to see how this works for 5705/5750 chips.
8509 */
8510 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8511 ((u64) tp->stats_mapping >> 32));
8512 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8513 ((u64) tp->stats_mapping & 0xffffffff));
8514 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8515
8516 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8517
8518 /* Clear statistics and status block memory areas */
8519 for (i = NIC_SRAM_STATS_BLK;
8520 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8521 i += sizeof(u32)) {
8522 tg3_write_mem(tp, i, 0);
8523 udelay(40);
8524 }
8525 }
8526
8527 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8528
8529 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8530 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8531 if (!tg3_flag(tp, 5705_PLUS))
8532 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8533
8534 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8535 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8536 /* reset to prevent losing 1st rx packet intermittently */
8537 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8538 udelay(10);
8539 }
8540
8541 if (tg3_flag(tp, ENABLE_APE))
8542 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8543 else
8544 tp->mac_mode = 0;
8545 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8546 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8547 if (!tg3_flag(tp, 5705_PLUS) &&
8548 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8549 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8550 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8551 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8552 udelay(40);
8553
8554 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8555 * If TG3_FLAG_IS_NIC is zero, we should read the
8556 * register to preserve the GPIO settings for LOMs. The GPIOs,
8557 * whether used as inputs or outputs, are set by boot code after
8558 * reset.
8559 */
8560 if (!tg3_flag(tp, IS_NIC)) {
8561 u32 gpio_mask;
8562
8563 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8564 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8565 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8566
8567 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8568 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8569 GRC_LCLCTRL_GPIO_OUTPUT3;
8570
8571 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8572 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8573
8574 tp->grc_local_ctrl &= ~gpio_mask;
8575 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8576
8577 /* GPIO1 must be driven high for eeprom write protect */
8578 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8579 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8580 GRC_LCLCTRL_GPIO_OUTPUT1);
8581 }
8582 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8583 udelay(100);
8584
8585 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8586 val = tr32(MSGINT_MODE);
8587 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8588 tw32(MSGINT_MODE, val);
8589 }
8590
8591 if (!tg3_flag(tp, 5705_PLUS)) {
8592 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8593 udelay(40);
8594 }
8595
8596 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8597 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8598 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8599 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8600 WDMAC_MODE_LNGREAD_ENAB);
8601
8602 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8603 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8604 if (tg3_flag(tp, TSO_CAPABLE) &&
8605 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8606 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8607 /* nothing */
8608 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8609 !tg3_flag(tp, IS_5788)) {
8610 val |= WDMAC_MODE_RX_ACCEL;
8611 }
8612 }
8613
8614 /* Enable host coalescing bug fix */
8615 if (tg3_flag(tp, 5755_PLUS))
8616 val |= WDMAC_MODE_STATUS_TAG_FIX;
8617
8618 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8619 val |= WDMAC_MODE_BURST_ALL_DATA;
8620
8621 tw32_f(WDMAC_MODE, val);
8622 udelay(40);
8623
8624 if (tg3_flag(tp, PCIX_MODE)) {
8625 u16 pcix_cmd;
8626
8627 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8628 &pcix_cmd);
8629 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8630 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8631 pcix_cmd |= PCI_X_CMD_READ_2K;
8632 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8633 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8634 pcix_cmd |= PCI_X_CMD_READ_2K;
8635 }
8636 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8637 pcix_cmd);
8638 }
8639
8640 tw32_f(RDMAC_MODE, rdmac_mode);
8641 udelay(40);
8642
8643 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8644 if (!tg3_flag(tp, 5705_PLUS))
8645 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8646
8647 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8648 tw32(SNDDATAC_MODE,
8649 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8650 else
8651 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8652
8653 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8654 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8655 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8656 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8657 val |= RCVDBDI_MODE_LRG_RING_SZ;
8658 tw32(RCVDBDI_MODE, val);
8659 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8660 if (tg3_flag(tp, HW_TSO_1) ||
8661 tg3_flag(tp, HW_TSO_2) ||
8662 tg3_flag(tp, HW_TSO_3))
8663 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8664 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8665 if (tg3_flag(tp, ENABLE_TSS))
8666 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8667 tw32(SNDBDI_MODE, val);
8668 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8669
8670 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8671 err = tg3_load_5701_a0_firmware_fix(tp);
8672 if (err)
8673 return err;
8674 }
8675
8676 if (tg3_flag(tp, TSO_CAPABLE)) {
8677 err = tg3_load_tso_firmware(tp);
8678 if (err)
8679 return err;
8680 }
8681
8682 tp->tx_mode = TX_MODE_ENABLE;
8683
8684 if (tg3_flag(tp, 5755_PLUS) ||
8685 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8686 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8687
8688 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8689 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8690 tp->tx_mode &= ~val;
8691 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8692 }
8693
8694 tw32_f(MAC_TX_MODE, tp->tx_mode);
8695 udelay(100);
8696
8697 if (tg3_flag(tp, ENABLE_RSS)) {
8698 u32 reg = MAC_RSS_INDIR_TBL_0;
8699 u8 *ent = (u8 *)&val;
8700
8701 		/* Set up the indirection table */
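		/* Each table entry is a single byte naming an rx return ring.
		 * Entries are packed four per 32-bit register and flushed to
		 * the hardware each time a register's worth is filled in.
		 */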
8702 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8703 int idx = i % sizeof(val);
8704
8705 ent[idx] = i % (tp->irq_cnt - 1);
8706 if (idx == sizeof(val) - 1) {
8707 tw32(reg, val);
8708 reg += 4;
8709 }
8710 }
8711
8712 		/* Set up the "secret" hash key. */
8713 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8714 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8715 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8716 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8717 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8718 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8719 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8720 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8721 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8722 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8723 }
8724
8725 tp->rx_mode = RX_MODE_ENABLE;
8726 if (tg3_flag(tp, 5755_PLUS))
8727 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8728
8729 if (tg3_flag(tp, ENABLE_RSS))
8730 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8731 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8732 RX_MODE_RSS_IPV6_HASH_EN |
8733 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8734 RX_MODE_RSS_IPV4_HASH_EN |
8735 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8736
8737 tw32_f(MAC_RX_MODE, tp->rx_mode);
8738 udelay(10);
8739
8740 tw32(MAC_LED_CTRL, tp->led_ctrl);
8741
8742 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8743 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8744 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8745 udelay(10);
8746 }
8747 tw32_f(MAC_RX_MODE, tp->rx_mode);
8748 udelay(10);
8749
8750 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8751 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8752 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8753 /* Set drive transmission level to 1.2V */
8754 /* only if the signal pre-emphasis bit is not set */
8755 val = tr32(MAC_SERDES_CFG);
8756 val &= 0xfffff000;
8757 val |= 0x880;
8758 tw32(MAC_SERDES_CFG, val);
8759 }
8760 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8761 tw32(MAC_SERDES_CFG, 0x616000);
8762 }
8763
8764 /* Prevent chip from dropping frames when flow control
8765 * is enabled.
8766 */
8767 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8768 val = 1;
8769 else
8770 val = 2;
8771 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8772
8773 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8774 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8775 /* Use hardware link auto-negotiation */
8776 tg3_flag_set(tp, HW_AUTONEG);
8777 }
8778
8779 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8780 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
8781 u32 tmp;
8782
8783 tmp = tr32(SERDES_RX_CTRL);
8784 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8785 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8786 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8787 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8788 }
8789
8790 if (!tg3_flag(tp, USE_PHYLIB)) {
8791 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8792 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8793 tp->link_config.speed = tp->link_config.orig_speed;
8794 tp->link_config.duplex = tp->link_config.orig_duplex;
8795 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8796 }
8797
8798 err = tg3_setup_phy(tp, 0);
8799 if (err)
8800 return err;
8801
8802 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8803 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8804 u32 tmp;
8805
8806 /* Clear CRC stats. */
8807 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8808 tg3_writephy(tp, MII_TG3_TEST1,
8809 tmp | MII_TG3_TEST1_CRC_EN);
8810 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8811 }
8812 }
8813 }
8814
8815 __tg3_set_rx_mode(tp->dev);
8816
8817 /* Initialize receive rules. */
8818 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8819 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8820 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8821 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8822
8823 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8824 limit = 8;
8825 else
8826 limit = 16;
8827 if (tg3_flag(tp, ENABLE_ASF))
8828 limit -= 4;
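	/* Intentional fall-through: zero every rule/value pair from the
	 * first unused slot downward.  Rules 3 and 2 stay commented out,
	 * and rules 1 and 0 were programmed above.
	 */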
8829 switch (limit) {
8830 case 16:
8831 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8832 case 15:
8833 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8834 case 14:
8835 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8836 case 13:
8837 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8838 case 12:
8839 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8840 case 11:
8841 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8842 case 10:
8843 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8844 case 9:
8845 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8846 case 8:
8847 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8848 case 7:
8849 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8850 case 6:
8851 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8852 case 5:
8853 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8854 case 4:
8855 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8856 case 3:
8857 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8858 case 2:
8859 case 1:
8860
8861 default:
8862 break;
8863 }
8864
8865 if (tg3_flag(tp, ENABLE_APE))
8866 /* Write our heartbeat update interval to APE. */
8867 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8868 APE_HOST_HEARTBEAT_INT_DISABLE);
8869
8870 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8871
8872 return 0;
8873 }
8874
8875 /* Called at device open time to get the chip ready for
8876 * packet processing. Invoked with tp->lock held.
8877 */
8878 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8879 {
8880 tg3_switch_clocks(tp);
8881
8882 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8883
8884 return tg3_reset_hw(tp, reset_phy);
8885 }
8886
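/* Fold a 32-bit hardware statistics register into a 64-bit software
 * counter kept as a high/low pair, carrying into the high word when the
 * low-word addition wraps.  This assumes each read returns a delta
 * (i.e. the registers clear on read).
 */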
8887 #define TG3_STAT_ADD32(PSTAT, REG) \
8888 do { u32 __val = tr32(REG); \
8889 (PSTAT)->low += __val; \
8890 if ((PSTAT)->low < __val) \
8891 (PSTAT)->high += 1; \
8892 } while (0)
8893
8894 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8895 {
8896 struct tg3_hw_stats *sp = tp->hw_stats;
8897
8898 if (!netif_carrier_ok(tp->dev))
8899 return;
8900
8901 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8902 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8903 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8904 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8905 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8906 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8907 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8908 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8909 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8910 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8911 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8912 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8913 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8914
8915 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8916 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8917 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8918 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8919 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8920 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8921 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8922 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8923 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8924 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8925 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8926 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8927 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8928 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8929
8930 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
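	/* 5717-class chips apparently lack the rx discard counter; instead,
	 * count assertions of the mbuf low-watermark attention bit,
	 * clearing the bit after each observed hit.
	 */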
8931 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
8932 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8933 } else {
8934 u32 val = tr32(HOSTCC_FLOW_ATTN);
8935 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8936 if (val) {
8937 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8938 sp->rx_discards.low += val;
8939 if (sp->rx_discards.low < val)
8940 sp->rx_discards.high += 1;
8941 }
8942 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8943 }
8944 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8945 }
8946
8947 static void tg3_timer(unsigned long __opaque)
8948 {
8949 struct tg3 *tp = (struct tg3 *) __opaque;
8950
8951 if (tp->irq_sync)
8952 goto restart_timer;
8953
8954 spin_lock(&tp->lock);
8955
8956 if (!tg3_flag(tp, TAGGED_STATUS)) {
8957 		/* All of this garbage is because, when using non-tagged
8958 		 * IRQ status, the mailbox/status_block protocol the chip
8959 		 * uses with the CPU is race prone.
8960 */
8961 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8962 tw32(GRC_LOCAL_CTRL,
8963 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8964 } else {
8965 tw32(HOSTCC_MODE, tp->coalesce_mode |
8966 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8967 }
8968
8969 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8970 tg3_flag_set(tp, RESTART_TIMER);
8971 spin_unlock(&tp->lock);
8972 schedule_work(&tp->reset_task);
8973 return;
8974 }
8975 }
8976
8977 /* This part only runs once per second. */
8978 if (!--tp->timer_counter) {
8979 if (tg3_flag(tp, 5705_PLUS))
8980 tg3_periodic_fetch_stats(tp);
8981
8982 if (tp->setlpicnt && !--tp->setlpicnt) {
8983 u32 val = tr32(TG3_CPMU_EEE_MODE);
8984 tw32(TG3_CPMU_EEE_MODE,
8985 val | TG3_CPMU_EEEMD_LPI_ENABLE);
8986 }
8987
8988 if (tg3_flag(tp, USE_LINKCHG_REG)) {
8989 u32 mac_stat;
8990 int phy_event;
8991
8992 mac_stat = tr32(MAC_STATUS);
8993
8994 phy_event = 0;
8995 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8996 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8997 phy_event = 1;
8998 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8999 phy_event = 1;
9000
9001 if (phy_event)
9002 tg3_setup_phy(tp, 0);
9003 } else if (tg3_flag(tp, POLL_SERDES)) {
9004 u32 mac_stat = tr32(MAC_STATUS);
9005 int need_setup = 0;
9006
9007 if (netif_carrier_ok(tp->dev) &&
9008 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9009 need_setup = 1;
9010 }
9011 if (!netif_carrier_ok(tp->dev) &&
9012 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9013 MAC_STATUS_SIGNAL_DET))) {
9014 need_setup = 1;
9015 }
9016 if (need_setup) {
9017 if (!tp->serdes_counter) {
9018 tw32_f(MAC_MODE,
9019 (tp->mac_mode &
9020 ~MAC_MODE_PORT_MODE_MASK));
9021 udelay(40);
9022 tw32_f(MAC_MODE, tp->mac_mode);
9023 udelay(40);
9024 }
9025 tg3_setup_phy(tp, 0);
9026 }
9027 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9028 tg3_flag(tp, 5780_CLASS)) {
9029 tg3_serdes_parallel_detect(tp);
9030 }
9031
9032 tp->timer_counter = tp->timer_multiplier;
9033 }
9034
9035 /* Heartbeat is only sent once every 2 seconds.
9036 *
9037 * The heartbeat is to tell the ASF firmware that the host
9038 * driver is still alive. In the event that the OS crashes,
9039 * ASF needs to reset the hardware to free up the FIFO space
9040 * that may be filled with rx packets destined for the host.
9041 * If the FIFO is full, ASF will no longer function properly.
9042 *
9043 	 * Unintended resets have been reported on real-time kernels,
9044 	 * where the timer doesn't run on time. Netpoll will also have
9045 	 * the same problem.
9046 *
9047 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9048 * to check the ring condition when the heartbeat is expiring
9049 * before doing the reset. This will prevent most unintended
9050 * resets.
9051 */
9052 if (!--tp->asf_counter) {
9053 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9054 tg3_wait_for_event_ack(tp);
9055
9056 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9057 FWCMD_NICDRV_ALIVE3);
9058 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9059 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9060 TG3_FW_UPDATE_TIMEOUT_SEC);
9061
9062 tg3_generate_fw_event(tp);
9063 }
9064 tp->asf_counter = tp->asf_multiplier;
9065 }
9066
9067 spin_unlock(&tp->lock);
9068
9069 restart_timer:
9070 tp->timer.expires = jiffies + tp->timer_offset;
9071 add_timer(&tp->timer);
9072 }
9073
9074 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9075 {
9076 irq_handler_t fn;
9077 unsigned long flags;
9078 char *name;
9079 struct tg3_napi *tnapi = &tp->napi[irq_num];
9080
9081 if (tp->irq_cnt == 1)
9082 name = tp->dev->name;
9083 else {
9084 name = &tnapi->irq_lbl[0];
9085 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9086 name[IFNAMSIZ-1] = 0;
9087 }
9088
9089 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9090 fn = tg3_msi;
9091 if (tg3_flag(tp, 1SHOT_MSI))
9092 fn = tg3_msi_1shot;
9093 flags = 0;
9094 } else {
9095 fn = tg3_interrupt;
9096 if (tg3_flag(tp, TAGGED_STATUS))
9097 fn = tg3_interrupt_tagged;
9098 flags = IRQF_SHARED;
9099 }
9100
9101 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9102 }
9103
9104 static int tg3_test_interrupt(struct tg3 *tp)
9105 {
9106 struct tg3_napi *tnapi = &tp->napi[0];
9107 struct net_device *dev = tp->dev;
9108 int err, i, intr_ok = 0;
9109 u32 val;
9110
9111 if (!netif_running(dev))
9112 return -ENODEV;
9113
9114 tg3_disable_ints(tp);
9115
9116 free_irq(tnapi->irq_vec, tnapi);
9117
9118 /*
9119 	 * Turn off MSI one-shot mode. Otherwise this test has no
9120 	 * way to observe whether the interrupt was delivered.
9121 */
9122 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9123 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9124 tw32(MSGINT_MODE, val);
9125 }
9126
9127 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9128 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9129 if (err)
9130 return err;
9131
9132 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9133 tg3_enable_ints(tp);
9134
9135 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9136 tnapi->coal_now);
9137
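	/* Poll for up to ~50 ms for evidence that the interrupt fired: a
	 * nonzero interrupt mailbox, or the PCI interrupt left masked in
	 * MISC_HOST_CTRL, presumably by the test ISR.
	 */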
9138 for (i = 0; i < 5; i++) {
9139 u32 int_mbox, misc_host_ctrl;
9140
9141 int_mbox = tr32_mailbox(tnapi->int_mbox);
9142 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9143
9144 if ((int_mbox != 0) ||
9145 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9146 intr_ok = 1;
9147 break;
9148 }
9149
9150 msleep(10);
9151 }
9152
9153 tg3_disable_ints(tp);
9154
9155 free_irq(tnapi->irq_vec, tnapi);
9156
9157 err = tg3_request_irq(tp, 0);
9158
9159 if (err)
9160 return err;
9161
9162 if (intr_ok) {
9163 /* Reenable MSI one shot mode. */
9164 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9165 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9166 tw32(MSGINT_MODE, val);
9167 }
9168 return 0;
9169 }
9170
9171 return -EIO;
9172 }
9173
9174 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode
9175  * is successfully restored.
9176 */
9177 static int tg3_test_msi(struct tg3 *tp)
9178 {
9179 int err;
9180 u16 pci_cmd;
9181
9182 if (!tg3_flag(tp, USING_MSI))
9183 return 0;
9184
9185 /* Turn off SERR reporting in case MSI terminates with Master
9186 * Abort.
9187 */
9188 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9189 pci_write_config_word(tp->pdev, PCI_COMMAND,
9190 pci_cmd & ~PCI_COMMAND_SERR);
9191
9192 err = tg3_test_interrupt(tp);
9193
9194 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9195
9196 if (!err)
9197 return 0;
9198
9199 /* other failures */
9200 if (err != -EIO)
9201 return err;
9202
9203 /* MSI test failed, go back to INTx mode */
9204 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9205 "to INTx mode. Please report this failure to the PCI "
9206 "maintainer and include system chipset information\n");
9207
9208 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9209
9210 pci_disable_msi(tp->pdev);
9211
9212 tg3_flag_clear(tp, USING_MSI);
9213 tp->napi[0].irq_vec = tp->pdev->irq;
9214
9215 err = tg3_request_irq(tp, 0);
9216 if (err)
9217 return err;
9218
9219 /* Need to reset the chip because the MSI cycle may have terminated
9220 * with Master Abort.
9221 */
9222 tg3_full_lock(tp, 1);
9223
9224 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9225 err = tg3_init_hw(tp, 1);
9226
9227 tg3_full_unlock(tp);
9228
9229 if (err)
9230 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9231
9232 return err;
9233 }
9234
9235 static int tg3_request_firmware(struct tg3 *tp)
9236 {
9237 const __be32 *fw_data;
9238
9239 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9240 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9241 tp->fw_needed);
9242 return -ENOENT;
9243 }
9244
9245 fw_data = (void *)tp->fw->data;
9246
9247 /* Firmware blob starts with version numbers, followed by
9248 * start address and _full_ length including BSS sections
9249 	 * (which must be longer than the actual data, of course).
9250 */
9251
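	/* The 12-byte header is version, start address, and length; a sane
	 * length must cover at least the payload that follows the header.
	 */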
9252 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9253 if (tp->fw_len < (tp->fw->size - 12)) {
9254 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9255 tp->fw_len, tp->fw_needed);
9256 release_firmware(tp->fw);
9257 tp->fw = NULL;
9258 return -EINVAL;
9259 }
9260
9261 /* We no longer need firmware; we have it. */
9262 tp->fw_needed = NULL;
9263 return 0;
9264 }
9265
9266 static bool tg3_enable_msix(struct tg3 *tp)
9267 {
9268 int i, rc, cpus = num_online_cpus();
9269 struct msix_entry msix_ent[tp->irq_max];
9270
9271 if (cpus == 1)
9272 		/* Just fall back to the simpler MSI mode. */
9273 return false;
9274
9275 /*
9276 	 * We want as many rx rings enabled as there are CPUs.
9277 	 * The first MSI-X vector only deals with link interrupts, etc.,
9278 * so we add one to the number of vectors we are requesting.
9279 */
9280 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9281
9282 for (i = 0; i < tp->irq_max; i++) {
9283 msix_ent[i].entry = i;
9284 msix_ent[i].vector = 0;
9285 }
9286
9287 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9288 if (rc < 0) {
9289 return false;
9290 } else if (rc != 0) {
9291 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9292 return false;
9293 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9294 tp->irq_cnt, rc);
9295 tp->irq_cnt = rc;
9296 }
9297
9298 for (i = 0; i < tp->irq_max; i++)
9299 tp->napi[i].irq_vec = msix_ent[i].vector;
9300
9301 netif_set_real_num_tx_queues(tp->dev, 1);
9302 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9303 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9304 pci_disable_msix(tp->pdev);
9305 return false;
9306 }
9307
9308 if (tp->irq_cnt > 1) {
9309 tg3_flag_set(tp, ENABLE_RSS);
9310
9311 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9312 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9313 tg3_flag_set(tp, ENABLE_TSS);
9314 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9315 }
9316 }
9317
9318 return true;
9319 }
9320
9321 static void tg3_ints_init(struct tg3 *tp)
9322 {
9323 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9324 !tg3_flag(tp, TAGGED_STATUS)) {
9325 /* All MSI supporting chips should support tagged
9326 * status. Assert that this is the case.
9327 */
9328 netdev_warn(tp->dev,
9329 "MSI without TAGGED_STATUS? Not using MSI\n");
9330 goto defcfg;
9331 }
9332
9333 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9334 tg3_flag_set(tp, USING_MSIX);
9335 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9336 tg3_flag_set(tp, USING_MSI);
9337
9338 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9339 u32 msi_mode = tr32(MSGINT_MODE);
9340 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9341 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9342 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9343 }
9344 defcfg:
9345 if (!tg3_flag(tp, USING_MSIX)) {
9346 tp->irq_cnt = 1;
9347 tp->napi[0].irq_vec = tp->pdev->irq;
9348 netif_set_real_num_tx_queues(tp->dev, 1);
9349 netif_set_real_num_rx_queues(tp->dev, 1);
9350 }
9351 }
9352
9353 static void tg3_ints_fini(struct tg3 *tp)
9354 {
9355 if (tg3_flag(tp, USING_MSIX))
9356 pci_disable_msix(tp->pdev);
9357 else if (tg3_flag(tp, USING_MSI))
9358 pci_disable_msi(tp->pdev);
9359 tg3_flag_clear(tp, USING_MSI);
9360 tg3_flag_clear(tp, USING_MSIX);
9361 tg3_flag_clear(tp, ENABLE_RSS);
9362 tg3_flag_clear(tp, ENABLE_TSS);
9363 }
9364
9365 static int tg3_open(struct net_device *dev)
9366 {
9367 struct tg3 *tp = netdev_priv(dev);
9368 int i, err;
9369
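	/* Firmware is mandatory on 5701 A0, which needs the firmware-based
	 * fix loaded at init time; on other chips a failed load only costs
	 * us TSO support.
	 */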
9370 if (tp->fw_needed) {
9371 err = tg3_request_firmware(tp);
9372 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9373 if (err)
9374 return err;
9375 } else if (err) {
9376 netdev_warn(tp->dev, "TSO capability disabled\n");
9377 tg3_flag_clear(tp, TSO_CAPABLE);
9378 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9379 netdev_notice(tp->dev, "TSO capability restored\n");
9380 tg3_flag_set(tp, TSO_CAPABLE);
9381 }
9382 }
9383
9384 netif_carrier_off(tp->dev);
9385
9386 err = tg3_power_up(tp);
9387 if (err)
9388 return err;
9389
9390 tg3_full_lock(tp, 0);
9391
9392 tg3_disable_ints(tp);
9393 tg3_flag_clear(tp, INIT_COMPLETE);
9394
9395 tg3_full_unlock(tp);
9396
9397 /*
9398 	 * Set up interrupts first so we know how
9399 	 * many NAPI resources to allocate.
9400 */
9401 tg3_ints_init(tp);
9402
9403 /* The placement of this call is tied
9404 * to the setup and use of Host TX descriptors.
9405 */
9406 err = tg3_alloc_consistent(tp);
9407 if (err)
9408 goto err_out1;
9409
9410 tg3_napi_init(tp);
9411
9412 tg3_napi_enable(tp);
9413
9414 for (i = 0; i < tp->irq_cnt; i++) {
9415 struct tg3_napi *tnapi = &tp->napi[i];
9416 err = tg3_request_irq(tp, i);
9417 if (err) {
9418 			for (i--; i >= 0; i--)	/* unwind vectors already requested */
9419 				free_irq(tp->napi[i].irq_vec, &tp->napi[i]);
9420 break;
9421 }
9422 }
9423
9424 if (err)
9425 goto err_out2;
9426
9427 tg3_full_lock(tp, 0);
9428
9429 err = tg3_init_hw(tp, 1);
9430 if (err) {
9431 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9432 tg3_free_rings(tp);
9433 } else {
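		/* Tagged status makes the status-block race harmless, so a
		 * 1 Hz poll suffices; non-tagged status is polled at 10 Hz.
		 */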
9434 if (tg3_flag(tp, TAGGED_STATUS))
9435 tp->timer_offset = HZ;
9436 else
9437 tp->timer_offset = HZ / 10;
9438
9439 BUG_ON(tp->timer_offset > HZ);
9440 tp->timer_counter = tp->timer_multiplier =
9441 (HZ / tp->timer_offset);
9442 tp->asf_counter = tp->asf_multiplier =
9443 ((HZ / tp->timer_offset) * 2);
9444
9445 init_timer(&tp->timer);
9446 tp->timer.expires = jiffies + tp->timer_offset;
9447 tp->timer.data = (unsigned long) tp;
9448 tp->timer.function = tg3_timer;
9449 }
9450
9451 tg3_full_unlock(tp);
9452
9453 if (err)
9454 goto err_out3;
9455
9456 if (tg3_flag(tp, USING_MSI)) {
9457 err = tg3_test_msi(tp);
9458
9459 if (err) {
9460 tg3_full_lock(tp, 0);
9461 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9462 tg3_free_rings(tp);
9463 tg3_full_unlock(tp);
9464
9465 goto err_out2;
9466 }
9467
9468 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9469 u32 val = tr32(PCIE_TRANSACTION_CFG);
9470
9471 tw32(PCIE_TRANSACTION_CFG,
9472 val | PCIE_TRANS_CFG_1SHOT_MSI);
9473 }
9474 }
9475
9476 tg3_phy_start(tp);
9477
9478 tg3_full_lock(tp, 0);
9479
9480 add_timer(&tp->timer);
9481 tg3_flag_set(tp, INIT_COMPLETE);
9482 tg3_enable_ints(tp);
9483
9484 tg3_full_unlock(tp);
9485
9486 netif_tx_start_all_queues(dev);
9487
9488 return 0;
9489
9490 err_out3:
9491 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9492 struct tg3_napi *tnapi = &tp->napi[i];
9493 free_irq(tnapi->irq_vec, tnapi);
9494 }
9495
9496 err_out2:
9497 tg3_napi_disable(tp);
9498 tg3_napi_fini(tp);
9499 tg3_free_consistent(tp);
9500
9501 err_out1:
9502 tg3_ints_fini(tp);
9503 return err;
9504 }
9505
9506 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9507 struct rtnl_link_stats64 *);
9508 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9509
9510 static int tg3_close(struct net_device *dev)
9511 {
9512 int i;
9513 struct tg3 *tp = netdev_priv(dev);
9514
9515 tg3_napi_disable(tp);
9516 cancel_work_sync(&tp->reset_task);
9517
9518 netif_tx_stop_all_queues(dev);
9519
9520 del_timer_sync(&tp->timer);
9521
9522 tg3_phy_stop(tp);
9523
9524 tg3_full_lock(tp, 1);
9525
9526 tg3_disable_ints(tp);
9527
9528 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9529 tg3_free_rings(tp);
9530 tg3_flag_clear(tp, INIT_COMPLETE);
9531
9532 tg3_full_unlock(tp);
9533
9534 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9535 struct tg3_napi *tnapi = &tp->napi[i];
9536 free_irq(tnapi->irq_vec, tnapi);
9537 }
9538
9539 tg3_ints_fini(tp);
9540
9541 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9542
9543 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9544 sizeof(tp->estats_prev));
9545
9546 tg3_napi_fini(tp);
9547
9548 tg3_free_consistent(tp);
9549
9550 tg3_power_down(tp);
9551
9552 netif_carrier_off(tp->dev);
9553
9554 return 0;
9555 }
9556
9557 static inline u64 get_stat64(tg3_stat64_t *val)
9558 {
9559 return ((u64)val->high << 32) | ((u64)val->low);
9560 }
9561
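/* On 5700/5701 copper devices, CRC errors are read from the PHY:
 * MII_TG3_TEST1_CRC_EN latches the count into MII_TG3_RXR_COUNTERS,
 * which appears to clear on read, hence the software accumulation.
 */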
9562 static u64 calc_crc_errors(struct tg3 *tp)
9563 {
9564 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9565
9566 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9567 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9568 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9569 u32 val;
9570
9571 spin_lock_bh(&tp->lock);
9572 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9573 tg3_writephy(tp, MII_TG3_TEST1,
9574 val | MII_TG3_TEST1_CRC_EN);
9575 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9576 } else
9577 val = 0;
9578 spin_unlock_bh(&tp->lock);
9579
9580 tp->phy_crc_errors += val;
9581
9582 return tp->phy_crc_errors;
9583 }
9584
9585 return get_stat64(&hw_stats->rx_fcs_errors);
9586 }
9587
9588 #define ESTAT_ADD(member) \
9589 estats->member = old_estats->member + \
9590 get_stat64(&hw_stats->member)
9591
9592 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9593 {
9594 struct tg3_ethtool_stats *estats = &tp->estats;
9595 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9596 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9597
9598 if (!hw_stats)
9599 return old_estats;
9600
9601 ESTAT_ADD(rx_octets);
9602 ESTAT_ADD(rx_fragments);
9603 ESTAT_ADD(rx_ucast_packets);
9604 ESTAT_ADD(rx_mcast_packets);
9605 ESTAT_ADD(rx_bcast_packets);
9606 ESTAT_ADD(rx_fcs_errors);
9607 ESTAT_ADD(rx_align_errors);
9608 ESTAT_ADD(rx_xon_pause_rcvd);
9609 ESTAT_ADD(rx_xoff_pause_rcvd);
9610 ESTAT_ADD(rx_mac_ctrl_rcvd);
9611 ESTAT_ADD(rx_xoff_entered);
9612 ESTAT_ADD(rx_frame_too_long_errors);
9613 ESTAT_ADD(rx_jabbers);
9614 ESTAT_ADD(rx_undersize_packets);
9615 ESTAT_ADD(rx_in_length_errors);
9616 ESTAT_ADD(rx_out_length_errors);
9617 ESTAT_ADD(rx_64_or_less_octet_packets);
9618 ESTAT_ADD(rx_65_to_127_octet_packets);
9619 ESTAT_ADD(rx_128_to_255_octet_packets);
9620 ESTAT_ADD(rx_256_to_511_octet_packets);
9621 ESTAT_ADD(rx_512_to_1023_octet_packets);
9622 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9623 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9624 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9625 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9626 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9627
9628 ESTAT_ADD(tx_octets);
9629 ESTAT_ADD(tx_collisions);
9630 ESTAT_ADD(tx_xon_sent);
9631 ESTAT_ADD(tx_xoff_sent);
9632 ESTAT_ADD(tx_flow_control);
9633 ESTAT_ADD(tx_mac_errors);
9634 ESTAT_ADD(tx_single_collisions);
9635 ESTAT_ADD(tx_mult_collisions);
9636 ESTAT_ADD(tx_deferred);
9637 ESTAT_ADD(tx_excessive_collisions);
9638 ESTAT_ADD(tx_late_collisions);
9639 ESTAT_ADD(tx_collide_2times);
9640 ESTAT_ADD(tx_collide_3times);
9641 ESTAT_ADD(tx_collide_4times);
9642 ESTAT_ADD(tx_collide_5times);
9643 ESTAT_ADD(tx_collide_6times);
9644 ESTAT_ADD(tx_collide_7times);
9645 ESTAT_ADD(tx_collide_8times);
9646 ESTAT_ADD(tx_collide_9times);
9647 ESTAT_ADD(tx_collide_10times);
9648 ESTAT_ADD(tx_collide_11times);
9649 ESTAT_ADD(tx_collide_12times);
9650 ESTAT_ADD(tx_collide_13times);
9651 ESTAT_ADD(tx_collide_14times);
9652 ESTAT_ADD(tx_collide_15times);
9653 ESTAT_ADD(tx_ucast_packets);
9654 ESTAT_ADD(tx_mcast_packets);
9655 ESTAT_ADD(tx_bcast_packets);
9656 ESTAT_ADD(tx_carrier_sense_errors);
9657 ESTAT_ADD(tx_discards);
9658 ESTAT_ADD(tx_errors);
9659
9660 ESTAT_ADD(dma_writeq_full);
9661 ESTAT_ADD(dma_write_prioq_full);
9662 ESTAT_ADD(rxbds_empty);
9663 ESTAT_ADD(rx_discards);
9664 ESTAT_ADD(rx_errors);
9665 ESTAT_ADD(rx_threshold_hit);
9666
9667 ESTAT_ADD(dma_readq_full);
9668 ESTAT_ADD(dma_read_prioq_full);
9669 ESTAT_ADD(tx_comp_queue_full);
9670
9671 ESTAT_ADD(ring_set_send_prod_index);
9672 ESTAT_ADD(ring_status_update);
9673 ESTAT_ADD(nic_irqs);
9674 ESTAT_ADD(nic_avoided_irqs);
9675 ESTAT_ADD(nic_tx_threshold_hit);
9676
9677 return estats;
9678 }
9679
9680 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9681 struct rtnl_link_stats64 *stats)
9682 {
9683 struct tg3 *tp = netdev_priv(dev);
9684 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9685 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9686
9687 if (!hw_stats)
9688 return old_stats;
9689
9690 stats->rx_packets = old_stats->rx_packets +
9691 get_stat64(&hw_stats->rx_ucast_packets) +
9692 get_stat64(&hw_stats->rx_mcast_packets) +
9693 get_stat64(&hw_stats->rx_bcast_packets);
9694
9695 stats->tx_packets = old_stats->tx_packets +
9696 get_stat64(&hw_stats->tx_ucast_packets) +
9697 get_stat64(&hw_stats->tx_mcast_packets) +
9698 get_stat64(&hw_stats->tx_bcast_packets);
9699
9700 stats->rx_bytes = old_stats->rx_bytes +
9701 get_stat64(&hw_stats->rx_octets);
9702 stats->tx_bytes = old_stats->tx_bytes +
9703 get_stat64(&hw_stats->tx_octets);
9704
9705 stats->rx_errors = old_stats->rx_errors +
9706 get_stat64(&hw_stats->rx_errors);
9707 stats->tx_errors = old_stats->tx_errors +
9708 get_stat64(&hw_stats->tx_errors) +
9709 get_stat64(&hw_stats->tx_mac_errors) +
9710 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9711 get_stat64(&hw_stats->tx_discards);
9712
9713 stats->multicast = old_stats->multicast +
9714 get_stat64(&hw_stats->rx_mcast_packets);
9715 stats->collisions = old_stats->collisions +
9716 get_stat64(&hw_stats->tx_collisions);
9717
9718 stats->rx_length_errors = old_stats->rx_length_errors +
9719 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9720 get_stat64(&hw_stats->rx_undersize_packets);
9721
9722 stats->rx_over_errors = old_stats->rx_over_errors +
9723 get_stat64(&hw_stats->rxbds_empty);
9724 stats->rx_frame_errors = old_stats->rx_frame_errors +
9725 get_stat64(&hw_stats->rx_align_errors);
9726 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9727 get_stat64(&hw_stats->tx_discards);
9728 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9729 get_stat64(&hw_stats->tx_carrier_sense_errors);
9730
9731 stats->rx_crc_errors = old_stats->rx_crc_errors +
9732 calc_crc_errors(tp);
9733
9734 stats->rx_missed_errors = old_stats->rx_missed_errors +
9735 get_stat64(&hw_stats->rx_discards);
9736
9737 stats->rx_dropped = tp->rx_dropped;
9738
9739 return stats;
9740 }
9741
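/* Bitwise CRC-32 used for the multicast hash filter: LSB-first with the
 * reflected IEEE 802.3 polynomial 0xedb88320, seeded with all ones and
 * inverted at the end.  This should be equivalent to the standard
 * Ethernet CRC, i.e. ~ether_crc_le(len, buf).
 */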
9742 static inline u32 calc_crc(unsigned char *buf, int len)
9743 {
9744 u32 reg;
9745 u32 tmp;
9746 int j, k;
9747
9748 reg = 0xffffffff;
9749
9750 for (j = 0; j < len; j++) {
9751 reg ^= buf[j];
9752
9753 for (k = 0; k < 8; k++) {
9754 tmp = reg & 0x01;
9755
9756 reg >>= 1;
9757
9758 if (tmp)
9759 reg ^= 0xedb88320;
9760 }
9761 }
9762
9763 return ~reg;
9764 }
9765
9766 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9767 {
9768 /* accept or reject all multicast frames */
9769 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9770 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9771 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9772 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9773 }
9774
9775 static void __tg3_set_rx_mode(struct net_device *dev)
9776 {
9777 struct tg3 *tp = netdev_priv(dev);
9778 u32 rx_mode;
9779
9780 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9781 RX_MODE_KEEP_VLAN_TAG);
9782
9783 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9784 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9785 * flag clear.
9786 */
9787 if (!tg3_flag(tp, ENABLE_ASF))
9788 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9789 #endif
9790
9791 if (dev->flags & IFF_PROMISC) {
9792 /* Promiscuous mode. */
9793 rx_mode |= RX_MODE_PROMISC;
9794 } else if (dev->flags & IFF_ALLMULTI) {
9795 /* Accept all multicast. */
9796 tg3_set_multi(tp, 1);
9797 } else if (netdev_mc_empty(dev)) {
9798 /* Reject all multicast. */
9799 tg3_set_multi(tp, 0);
9800 } else {
9801 /* Accept one or more multicast(s). */
9802 struct netdev_hw_addr *ha;
9803 u32 mc_filter[4] = { 0, };
9804 u32 regidx;
9805 u32 bit;
9806 u32 crc;
9807
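/* Hash each address into one of 128 filter bits: the low 7 bits of the
 * inverted CRC pick the bit, with bits 6:5 selecting one of the four
 * 32-bit hash registers and bits 4:0 the bit within it.  For example,
 * a hash value of 0x65 sets bit 5 of MAC_HASH_REG_3.
 */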
9808 netdev_for_each_mc_addr(ha, dev) {
9809 crc = calc_crc(ha->addr, ETH_ALEN);
9810 bit = ~crc & 0x7f;
9811 regidx = (bit & 0x60) >> 5;
9812 bit &= 0x1f;
9813 mc_filter[regidx] |= (1 << bit);
9814 }
9815
9816 tw32(MAC_HASH_REG_0, mc_filter[0]);
9817 tw32(MAC_HASH_REG_1, mc_filter[1]);
9818 tw32(MAC_HASH_REG_2, mc_filter[2]);
9819 tw32(MAC_HASH_REG_3, mc_filter[3]);
9820 }
9821
9822 if (rx_mode != tp->rx_mode) {
9823 tp->rx_mode = rx_mode;
9824 tw32_f(MAC_RX_MODE, rx_mode);
9825 udelay(10);
9826 }
9827 }
9828
9829 static void tg3_set_rx_mode(struct net_device *dev)
9830 {
9831 struct tg3 *tp = netdev_priv(dev);
9832
9833 if (!netif_running(dev))
9834 return;
9835
9836 tg3_full_lock(tp, 0);
9837 __tg3_set_rx_mode(dev);
9838 tg3_full_unlock(tp);
9839 }
9840
9841 static int tg3_get_regs_len(struct net_device *dev)
9842 {
9843 return TG3_REG_BLK_SIZE;
9844 }
9845
9846 static void tg3_get_regs(struct net_device *dev,
9847 struct ethtool_regs *regs, void *_p)
9848 {
9849 struct tg3 *tp = netdev_priv(dev);
9850
9851 regs->version = 0;
9852
9853 memset(_p, 0, TG3_REG_BLK_SIZE);
9854
9855 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9856 return;
9857
9858 tg3_full_lock(tp, 0);
9859
9860 tg3_dump_legacy_regs(tp, (u32 *)_p);
9861
9862 tg3_full_unlock(tp);
9863 }
9864
9865 static int tg3_get_eeprom_len(struct net_device *dev)
9866 {
9867 struct tg3 *tp = netdev_priv(dev);
9868
9869 return tp->nvram_size;
9870 }
9871
9872 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9873 {
9874 struct tg3 *tp = netdev_priv(dev);
9875 int ret;
9876 u8 *pd;
9877 u32 i, offset, len, b_offset, b_count;
9878 __be32 val;
9879
9880 if (tg3_flag(tp, NO_NVRAM))
9881 return -EINVAL;
9882
9883 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9884 return -EAGAIN;
9885
9886 offset = eeprom->offset;
9887 len = eeprom->len;
9888 eeprom->len = 0;
9889
9890 eeprom->magic = TG3_EEPROM_MAGIC;
9891
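/* NVRAM is read in aligned 32-bit words, so an arbitrary request is
 * split into an unaligned head, whole words, and an unaligned tail.
 * E.g. offset=5 len=9: copy 3 bytes from the word at 4, one full word
 * at 8, then 2 bytes from the word at 12.
 */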
9892 if (offset & 3) {
9893 /* adjustments to start on required 4 byte boundary */
9894 b_offset = offset & 3;
9895 b_count = 4 - b_offset;
9896 if (b_count > len) {
9897 /* i.e. offset=1 len=2 */
9898 b_count = len;
9899 }
9900 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9901 if (ret)
9902 return ret;
9903 memcpy(data, ((char *)&val) + b_offset, b_count);
9904 len -= b_count;
9905 offset += b_count;
9906 eeprom->len += b_count;
9907 }
9908
9909 /* read bytes up to the last 4 byte boundary */
9910 pd = &data[eeprom->len];
9911 for (i = 0; i < (len - (len & 3)); i += 4) {
9912 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9913 if (ret) {
9914 eeprom->len += i;
9915 return ret;
9916 }
9917 memcpy(pd + i, &val, 4);
9918 }
9919 eeprom->len += i;
9920
9921 if (len & 3) {
9922 /* read last bytes not ending on 4 byte boundary */
9923 pd = &data[eeprom->len];
9924 b_count = len & 3;
9925 b_offset = offset + len - b_count;
9926 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9927 if (ret)
9928 return ret;
9929 memcpy(pd, &val, b_count);
9930 eeprom->len += b_count;
9931 }
9932 return 0;
9933 }
9934
9935 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9936
9937 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9938 {
9939 struct tg3 *tp = netdev_priv(dev);
9940 int ret;
9941 u32 offset, len, b_offset, odd_len;
9942 u8 *buf;
9943 __be32 start, end;
9944
9945 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9946 return -EAGAIN;
9947
9948 if (tg3_flag(tp, NO_NVRAM) ||
9949 eeprom->magic != TG3_EEPROM_MAGIC)
9950 return -EINVAL;
9951
9952 offset = eeprom->offset;
9953 len = eeprom->len;
9954
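/* NVRAM writes must also be 32-bit aligned: widen the request to word
 * boundaries and read back the words straddling each edge so the
 * surrounding bytes are rewritten unchanged (read-modify-write).
 */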
9955 if ((b_offset = (offset & 3))) {
9956 /* adjustments to start on required 4 byte boundary */
9957 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9958 if (ret)
9959 return ret;
9960 len += b_offset;
9961 offset &= ~3;
9962 if (len < 4)
9963 len = 4;
9964 }
9965
9966 odd_len = 0;
9967 if (len & 3) {
9968 /* adjustments to end on required 4 byte boundary */
9969 odd_len = 1;
9970 len = (len + 3) & ~3;
9971 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9972 if (ret)
9973 return ret;
9974 }
9975
9976 buf = data;
9977 if (b_offset || odd_len) {
9978 buf = kmalloc(len, GFP_KERNEL);
9979 if (!buf)
9980 return -ENOMEM;
9981 if (b_offset)
9982 memcpy(buf, &start, 4);
9983 if (odd_len)
9984 memcpy(buf+len-4, &end, 4);
9985 memcpy(buf + b_offset, data, eeprom->len);
9986 }
9987
9988 ret = tg3_nvram_write_block(tp, offset, len, buf);
9989
9990 if (buf != data)
9991 kfree(buf);
9992
9993 return ret;
9994 }
9995
9996 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9997 {
9998 struct tg3 *tp = netdev_priv(dev);
9999
10000 if (tg3_flag(tp, USE_PHYLIB)) {
10001 struct phy_device *phydev;
10002 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10003 return -EAGAIN;
10004 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10005 return phy_ethtool_gset(phydev, cmd);
10006 }
10007
10008 cmd->supported = (SUPPORTED_Autoneg);
10009
10010 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10011 cmd->supported |= (SUPPORTED_1000baseT_Half |
10012 SUPPORTED_1000baseT_Full);
10013
10014 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10015 cmd->supported |= (SUPPORTED_100baseT_Half |
10016 SUPPORTED_100baseT_Full |
10017 SUPPORTED_10baseT_Half |
10018 SUPPORTED_10baseT_Full |
10019 SUPPORTED_TP);
10020 cmd->port = PORT_TP;
10021 } else {
10022 cmd->supported |= SUPPORTED_FIBRE;
10023 cmd->port = PORT_FIBRE;
10024 }
10025
10026 cmd->advertising = tp->link_config.advertising;
10027 if (netif_running(dev)) {
10028 cmd->speed = tp->link_config.active_speed;
10029 cmd->duplex = tp->link_config.active_duplex;
10030 } else {
10031 cmd->speed = SPEED_INVALID;
10032 cmd->duplex = DUPLEX_INVALID;
10033 }
10034 cmd->phy_address = tp->phy_addr;
10035 cmd->transceiver = XCVR_INTERNAL;
10036 cmd->autoneg = tp->link_config.autoneg;
10037 cmd->maxtxpkt = 0;
10038 cmd->maxrxpkt = 0;
10039 return 0;
10040 }
10041
10042 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10043 {
10044 struct tg3 *tp = netdev_priv(dev);
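/* ethtool_cmd_speed() folds cmd->speed_hi and cmd->speed back into a
 * single 32-bit speed value.
 */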
10045 u32 speed = ethtool_cmd_speed(cmd);
10046
10047 if (tg3_flag(tp, USE_PHYLIB)) {
10048 struct phy_device *phydev;
10049 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10050 return -EAGAIN;
10051 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10052 return phy_ethtool_sset(phydev, cmd);
10053 }
10054
10055 if (cmd->autoneg != AUTONEG_ENABLE &&
10056 cmd->autoneg != AUTONEG_DISABLE)
10057 return -EINVAL;
10058
10059 if (cmd->autoneg == AUTONEG_DISABLE &&
10060 cmd->duplex != DUPLEX_FULL &&
10061 cmd->duplex != DUPLEX_HALF)
10062 return -EINVAL;
10063
10064 if (cmd->autoneg == AUTONEG_ENABLE) {
10065 u32 mask = ADVERTISED_Autoneg |
10066 ADVERTISED_Pause |
10067 ADVERTISED_Asym_Pause;
10068
10069 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10070 mask |= ADVERTISED_1000baseT_Half |
10071 ADVERTISED_1000baseT_Full;
10072
10073 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10074 mask |= ADVERTISED_100baseT_Half |
10075 ADVERTISED_100baseT_Full |
10076 ADVERTISED_10baseT_Half |
10077 ADVERTISED_10baseT_Full |
10078 ADVERTISED_TP;
10079 else
10080 mask |= ADVERTISED_FIBRE;
10081
10082 if (cmd->advertising & ~mask)
10083 return -EINVAL;
10084
10085 mask &= (ADVERTISED_1000baseT_Half |
10086 ADVERTISED_1000baseT_Full |
10087 ADVERTISED_100baseT_Half |
10088 ADVERTISED_100baseT_Full |
10089 ADVERTISED_10baseT_Half |
10090 ADVERTISED_10baseT_Full);
10091
10092 cmd->advertising &= mask;
10093 } else {
10094 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10095 if (speed != SPEED_1000)
10096 return -EINVAL;
10097
10098 if (cmd->duplex != DUPLEX_FULL)
10099 return -EINVAL;
10100 } else {
10101 if (speed != SPEED_100 &&
10102 speed != SPEED_10)
10103 return -EINVAL;
10104 }
10105 }
10106
10107 tg3_full_lock(tp, 0);
10108
10109 tp->link_config.autoneg = cmd->autoneg;
10110 if (cmd->autoneg == AUTONEG_ENABLE) {
10111 tp->link_config.advertising = (cmd->advertising |
10112 ADVERTISED_Autoneg);
10113 tp->link_config.speed = SPEED_INVALID;
10114 tp->link_config.duplex = DUPLEX_INVALID;
10115 } else {
10116 tp->link_config.advertising = 0;
10117 tp->link_config.speed = speed;
10118 tp->link_config.duplex = cmd->duplex;
10119 }
10120
10121 tp->link_config.orig_speed = tp->link_config.speed;
10122 tp->link_config.orig_duplex = tp->link_config.duplex;
10123 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10124
10125 if (netif_running(dev))
10126 tg3_setup_phy(tp, 1);
10127
10128 tg3_full_unlock(tp);
10129
10130 return 0;
10131 }
10132
10133 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10134 {
10135 struct tg3 *tp = netdev_priv(dev);
10136
10137 strcpy(info->driver, DRV_MODULE_NAME);
10138 strcpy(info->version, DRV_MODULE_VERSION);
10139 strcpy(info->fw_version, tp->fw_ver);
10140 strcpy(info->bus_info, pci_name(tp->pdev));
10141 }
10142
10143 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10144 {
10145 struct tg3 *tp = netdev_priv(dev);
10146
10147 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10148 wol->supported = WAKE_MAGIC;
10149 else
10150 wol->supported = 0;
10151 wol->wolopts = 0;
10152 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10153 wol->wolopts = WAKE_MAGIC;
10154 memset(&wol->sopass, 0, sizeof(wol->sopass));
10155 }
10156
10157 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10158 {
10159 struct tg3 *tp = netdev_priv(dev);
10160 struct device *dp = &tp->pdev->dev;
10161
10162 if (wol->wolopts & ~WAKE_MAGIC)
10163 return -EINVAL;
10164 if ((wol->wolopts & WAKE_MAGIC) &&
10165 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10166 return -EINVAL;
10167
10168 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10169
10170 spin_lock_bh(&tp->lock);
10171 if (device_may_wakeup(dp))
10172 tg3_flag_set(tp, WOL_ENABLE);
10173 else
10174 tg3_flag_clear(tp, WOL_ENABLE);
10175 spin_unlock_bh(&tp->lock);
10176
10177 return 0;
10178 }
10179
10180 static u32 tg3_get_msglevel(struct net_device *dev)
10181 {
10182 struct tg3 *tp = netdev_priv(dev);
10183 return tp->msg_enable;
10184 }
10185
10186 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10187 {
10188 struct tg3 *tp = netdev_priv(dev);
10189 tp->msg_enable = value;
10190 }
10191
10192 static int tg3_nway_reset(struct net_device *dev)
10193 {
10194 struct tg3 *tp = netdev_priv(dev);
10195 int r;
10196
10197 if (!netif_running(dev))
10198 return -EAGAIN;
10199
10200 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10201 return -EINVAL;
10202
10203 if (tg3_flag(tp, USE_PHYLIB)) {
10204 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10205 return -EAGAIN;
10206 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10207 } else {
10208 u32 bmcr;
10209
10210 spin_lock_bh(&tp->lock);
10211 r = -EINVAL;
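/* BMCR is read twice and the first result thrown away, presumably to
 * flush a stale value before the autoneg bits are tested.
 */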
10212 tg3_readphy(tp, MII_BMCR, &bmcr);
10213 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10214 ((bmcr & BMCR_ANENABLE) ||
10215 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10216 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10217 BMCR_ANENABLE);
10218 r = 0;
10219 }
10220 spin_unlock_bh(&tp->lock);
10221 }
10222
10223 return r;
10224 }
10225
10226 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10227 {
10228 struct tg3 *tp = netdev_priv(dev);
10229
10230 ering->rx_max_pending = tp->rx_std_ring_mask;
10231 ering->rx_mini_max_pending = 0;
10232 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10233 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10234 else
10235 ering->rx_jumbo_max_pending = 0;
10236
10237 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10238
10239 ering->rx_pending = tp->rx_pending;
10240 ering->rx_mini_pending = 0;
10241 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10242 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10243 else
10244 ering->rx_jumbo_pending = 0;
10245
10246 ering->tx_pending = tp->napi[0].tx_pending;
10247 }
10248
10249 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10250 {
10251 struct tg3 *tp = netdev_priv(dev);
10252 int i, irq_sync = 0, err = 0;
10253
10254 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10255 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10256 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10257 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10258 (tg3_flag(tp, TSO_BUG) &&
10259 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10260 return -EINVAL;
10261
10262 if (netif_running(dev)) {
10263 tg3_phy_stop(tp);
10264 tg3_netif_stop(tp);
10265 irq_sync = 1;
10266 }
10267
10268 tg3_full_lock(tp, irq_sync);
10269
10270 tp->rx_pending = ering->rx_pending;
10271
10272 if (tg3_flag(tp, MAX_RXPEND_64) &&
10273 tp->rx_pending > 63)
10274 tp->rx_pending = 63;
10275 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10276
10277 for (i = 0; i < tp->irq_max; i++)
10278 tp->napi[i].tx_pending = ering->tx_pending;
10279
10280 if (netif_running(dev)) {
10281 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10282 err = tg3_restart_hw(tp, 1);
10283 if (!err)
10284 tg3_netif_start(tp);
10285 }
10286
10287 tg3_full_unlock(tp);
10288
10289 if (irq_sync && !err)
10290 tg3_phy_start(tp);
10291
10292 return err;
10293 }
10294
10295 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10296 {
10297 struct tg3 *tp = netdev_priv(dev);
10298
10299 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10300
10301 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10302 epause->rx_pause = 1;
10303 else
10304 epause->rx_pause = 0;
10305
10306 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10307 epause->tx_pause = 1;
10308 else
10309 epause->tx_pause = 0;
10310 }
10311
10312 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10313 {
10314 struct tg3 *tp = netdev_priv(dev);
10315 int err = 0;
10316
10317 if (tg3_flag(tp, USE_PHYLIB)) {
10318 u32 newadv;
10319 struct phy_device *phydev;
10320
10321 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10322
10323 if (!(phydev->supported & SUPPORTED_Pause) ||
10324 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10325 (epause->rx_pause != epause->tx_pause)))
10326 return -EINVAL;
10327
10328 tp->link_config.flowctrl = 0;
10329 if (epause->rx_pause) {
10330 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10331
10332 if (epause->tx_pause) {
10333 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10334 newadv = ADVERTISED_Pause;
10335 } else
10336 newadv = ADVERTISED_Pause |
10337 ADVERTISED_Asym_Pause;
10338 } else if (epause->tx_pause) {
10339 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10340 newadv = ADVERTISED_Asym_Pause;
10341 } else
10342 newadv = 0;
10343
10344 if (epause->autoneg)
10345 tg3_flag_set(tp, PAUSE_AUTONEG);
10346 else
10347 tg3_flag_clear(tp, PAUSE_AUTONEG);
10348
10349 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10350 u32 oldadv = phydev->advertising &
10351 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10352 if (oldadv != newadv) {
10353 phydev->advertising &=
10354 ~(ADVERTISED_Pause |
10355 ADVERTISED_Asym_Pause);
10356 phydev->advertising |= newadv;
10357 if (phydev->autoneg) {
10358 /*
10359 * Always renegotiate the link to
10360 * inform our link partner of our
10361 * flow control settings, even if the
10362 * flow control is forced. Let
10363 * tg3_adjust_link() do the final
10364 * flow control setup.
10365 */
10366 return phy_start_aneg(phydev);
10367 }
10368 }
10369
10370 if (!epause->autoneg)
10371 tg3_setup_flow_control(tp, 0, 0);
10372 } else {
10373 tp->link_config.orig_advertising &=
10374 ~(ADVERTISED_Pause |
10375 ADVERTISED_Asym_Pause);
10376 tp->link_config.orig_advertising |= newadv;
10377 }
10378 } else {
10379 int irq_sync = 0;
10380
10381 if (netif_running(dev)) {
10382 tg3_netif_stop(tp);
10383 irq_sync = 1;
10384 }
10385
10386 tg3_full_lock(tp, irq_sync);
10387
10388 if (epause->autoneg)
10389 tg3_flag_set(tp, PAUSE_AUTONEG);
10390 else
10391 tg3_flag_clear(tp, PAUSE_AUTONEG);
10392 if (epause->rx_pause)
10393 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10394 else
10395 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10396 if (epause->tx_pause)
10397 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10398 else
10399 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10400
10401 if (netif_running(dev)) {
10402 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10403 err = tg3_restart_hw(tp, 1);
10404 if (!err)
10405 tg3_netif_start(tp);
10406 }
10407
10408 tg3_full_unlock(tp);
10409 }
10410
10411 return err;
10412 }
10413
10414 static int tg3_get_sset_count(struct net_device *dev, int sset)
10415 {
10416 switch (sset) {
10417 case ETH_SS_TEST:
10418 return TG3_NUM_TEST;
10419 case ETH_SS_STATS:
10420 return TG3_NUM_STATS;
10421 default:
10422 return -EOPNOTSUPP;
10423 }
10424 }
10425
10426 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10427 {
10428 switch (stringset) {
10429 case ETH_SS_STATS:
10430 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10431 break;
10432 case ETH_SS_TEST:
10433 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10434 break;
10435 default:
10436 WARN_ON(1); /* we need a WARN() */
10437 break;
10438 }
10439 }
10440
10441 static int tg3_set_phys_id(struct net_device *dev,
10442 enum ethtool_phys_id_state state)
10443 {
10444 struct tg3 *tp = netdev_priv(dev);
10445
10446 if (!netif_running(tp->dev))
10447 return -EAGAIN;
10448
10449 switch (state) {
10450 case ETHTOOL_ID_ACTIVE:
10451 return 1; /* cycle on/off once per second */
10452
10453 case ETHTOOL_ID_ON:
10454 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10455 LED_CTRL_1000MBPS_ON |
10456 LED_CTRL_100MBPS_ON |
10457 LED_CTRL_10MBPS_ON |
10458 LED_CTRL_TRAFFIC_OVERRIDE |
10459 LED_CTRL_TRAFFIC_BLINK |
10460 LED_CTRL_TRAFFIC_LED);
10461 break;
10462
10463 case ETHTOOL_ID_OFF:
10464 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10465 LED_CTRL_TRAFFIC_OVERRIDE);
10466 break;
10467
10468 case ETHTOOL_ID_INACTIVE:
10469 tw32(MAC_LED_CTRL, tp->led_ctrl);
10470 break;
10471 }
10472
10473 return 0;
10474 }
10475
10476 static void tg3_get_ethtool_stats(struct net_device *dev,
10477 struct ethtool_stats *estats, u64 *tmp_stats)
10478 {
10479 struct tg3 *tp = netdev_priv(dev);
10480 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10481 }
10482
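/* Return a kmalloc()ed copy of the VPD block.  EEPROM-format parts may
 * point at an extended VPD area through an NVRAM directory entry and
 * otherwise use the fixed VPD offset; other parts are read through the
 * PCI VPD capability.  The caller must kfree() the buffer.
 */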
10483 static __be32 *tg3_vpd_readblock(struct tg3 *tp)
10484 {
10485 int i;
10486 __be32 *buf;
10487 u32 offset = 0, len = 0;
10488 u32 magic, val;
10489
10490 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10491 return NULL;
10492
10493 if (magic == TG3_EEPROM_MAGIC) {
10494 for (offset = TG3_NVM_DIR_START;
10495 offset < TG3_NVM_DIR_END;
10496 offset += TG3_NVM_DIRENT_SIZE) {
10497 if (tg3_nvram_read(tp, offset, &val))
10498 return NULL;
10499
10500 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10501 TG3_NVM_DIRTYPE_EXTVPD)
10502 break;
10503 }
10504
10505 if (offset != TG3_NVM_DIR_END) {
10506 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10507 if (tg3_nvram_read(tp, offset + 4, &offset))
10508 return NULL;
10509
10510 offset = tg3_nvram_logical_addr(tp, offset);
10511 }
10512 }
10513
10514 if (!offset || !len) {
10515 offset = TG3_NVM_VPD_OFF;
10516 len = TG3_NVM_VPD_LEN;
10517 }
10518
10519 buf = kmalloc(len, GFP_KERNEL);
10520 if (buf == NULL)
10521 return NULL;
10522
10523 if (magic == TG3_EEPROM_MAGIC) {
10524 for (i = 0; i < len; i += 4) {
10525 /* The data is in little-endian format in NVRAM.
10526 * Use the big-endian read routines to preserve
10527 * the byte order as it exists in NVRAM.
10528 */
10529 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10530 goto error;
10531 }
10532 } else {
10533 u8 *ptr;
10534 ssize_t cnt;
10535 unsigned int pos = 0;
10536
10537 ptr = (u8 *)&buf[0];
10538 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10539 cnt = pci_read_vpd(tp->pdev, pos,
10540 len - pos, ptr);
10541 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10542 cnt = 0;
10543 else if (cnt < 0)
10544 goto error;
10545 }
10546 if (pos != len)
10547 goto error;
10548 }
10549
10550 return buf;
10551
10552 error:
10553 kfree(buf);
10554 return NULL;
10555 }
10556
10557 #define NVRAM_TEST_SIZE 0x100
10558 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10559 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10560 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10561 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10562 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10563
10564 static int tg3_test_nvram(struct tg3 *tp)
10565 {
10566 u32 csum, magic;
10567 __be32 *buf;
10568 int i, j, k, err = 0, size;
10569
10570 if (tg3_flag(tp, NO_NVRAM))
10571 return 0;
10572
10573 if (tg3_nvram_read(tp, 0, &magic) != 0)
10574 return -EIO;
10575
10576 if (magic == TG3_EEPROM_MAGIC)
10577 size = NVRAM_TEST_SIZE;
10578 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10579 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10580 TG3_EEPROM_SB_FORMAT_1) {
10581 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10582 case TG3_EEPROM_SB_REVISION_0:
10583 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10584 break;
10585 case TG3_EEPROM_SB_REVISION_2:
10586 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10587 break;
10588 case TG3_EEPROM_SB_REVISION_3:
10589 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10590 break;
10591 default:
10592 return 0;
10593 }
10594 } else
10595 return 0;
10596 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10597 size = NVRAM_SELFBOOT_HW_SIZE;
10598 else
10599 return -EIO;
10600
10601 buf = kmalloc(size, GFP_KERNEL);
10602 if (buf == NULL)
10603 return -ENOMEM;
10604
10605 err = -EIO;
10606 for (i = 0, j = 0; i < size; i += 4, j++) {
10607 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10608 if (err)
10609 break;
10610 }
10611 if (i < size)
10612 goto out;
10613
10614 /* Selfboot format */
10615 magic = be32_to_cpu(buf[0]);
10616 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10617 TG3_EEPROM_MAGIC_FW) {
10618 u8 *buf8 = (u8 *) buf, csum8 = 0;
10619
10620 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10621 TG3_EEPROM_SB_REVISION_2) {
10622 /* For rev 2, the csum doesn't include the MBA. */
10623 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10624 csum8 += buf8[i];
10625 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10626 csum8 += buf8[i];
10627 } else {
10628 for (i = 0; i < size; i++)
10629 csum8 += buf8[i];
10630 }
10631
10632 if (csum8 == 0) {
10633 err = 0;
10634 goto out;
10635 }
10636
10637 err = -EIO;
10638 goto out;
10639 }
10640
10641 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10642 TG3_EEPROM_MAGIC_HW) {
10643 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10644 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10645 u8 *buf8 = (u8 *) buf;
10646
10647 /* Separate the parity bits and the data bytes. */
10648 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10649 if ((i == 0) || (i == 8)) {
10650 int l;
10651 u8 msk;
10652
10653 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10654 parity[k++] = buf8[i] & msk;
10655 i++;
10656 } else if (i == 16) {
10657 int l;
10658 u8 msk;
10659
10660 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10661 parity[k++] = buf8[i] & msk;
10662 i++;
10663
10664 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10665 parity[k++] = buf8[i] & msk;
10666 i++;
10667 }
10668 data[j++] = buf8[i];
10669 }
10670
10671 err = -EIO;
10672 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10673 u8 hw8 = hweight8(data[i]);
10674
10675 if ((hw8 & 0x1) && parity[i])
10676 goto out;
10677 else if (!(hw8 & 0x1) && !parity[i])
10678 goto out;
10679 }
10680 err = 0;
10681 goto out;
10682 }
10683
10684 err = -EIO;
10685
10686 /* Bootstrap checksum at offset 0x10 */
10687 csum = calc_crc((unsigned char *) buf, 0x10);
10688 if (csum != le32_to_cpu(buf[0x10/4]))
10689 goto out;
10690
10691 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10692 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10693 if (csum != le32_to_cpu(buf[0xfc/4]))
10694 goto out;
10695
10696 kfree(buf);
10697
10698 buf = tg3_vpd_readblock(tp);
10699 if (!buf)
10700 return -ENOMEM;
10701
10702 i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10703 PCI_VPD_LRDT_RO_DATA);
10704 if (i > 0) {
10705 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10706 if (j < 0)
10707 goto out;
10708
10709 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10710 goto out;
10711
10712 i += PCI_VPD_LRDT_TAG_SIZE;
10713 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10714 PCI_VPD_RO_KEYWORD_CHKSUM);
10715 if (j > 0) {
10716 u8 csum8 = 0;
10717
10718 j += PCI_VPD_INFO_FLD_HDR_SIZE;
10719
10720 for (i = 0; i <= j; i++)
10721 csum8 += ((u8 *)buf)[i];
10722
10723 if (csum8)
10724 goto out;
10725 }
10726 }
10727
10728 err = 0;
10729
10730 out:
10731 kfree(buf);
10732 return err;
10733 }
10734
10735 #define TG3_SERDES_TIMEOUT_SEC 2
10736 #define TG3_COPPER_TIMEOUT_SEC 6
10737
10738 static int tg3_test_link(struct tg3 *tp)
10739 {
10740 int i, max;
10741
10742 if (!netif_running(tp->dev))
10743 return -ENODEV;
10744
10745 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10746 max = TG3_SERDES_TIMEOUT_SEC;
10747 else
10748 max = TG3_COPPER_TIMEOUT_SEC;
10749
10750 for (i = 0; i < max; i++) {
10751 if (netif_carrier_ok(tp->dev))
10752 return 0;
10753
10754 if (msleep_interruptible(1000))
10755 break;
10756 }
10757
10758 return -EIO;
10759 }
10760
10761 /* Only test the commonly used registers */
10762 static int tg3_test_registers(struct tg3 *tp)
10763 {
10764 int i, is_5705, is_5750;
10765 u32 offset, read_mask, write_mask, val, save_val, read_val;
10766 static struct {
10767 u16 offset;
10768 u16 flags;
10769 #define TG3_FL_5705 0x1
10770 #define TG3_FL_NOT_5705 0x2
10771 #define TG3_FL_NOT_5788 0x4
10772 #define TG3_FL_NOT_5750 0x8
10773 u32 read_mask;
10774 u32 write_mask;
10775 } reg_tbl[] = {
10776 /* MAC Control Registers */
10777 { MAC_MODE, TG3_FL_NOT_5705,
10778 0x00000000, 0x00ef6f8c },
10779 { MAC_MODE, TG3_FL_5705,
10780 0x00000000, 0x01ef6b8c },
10781 { MAC_STATUS, TG3_FL_NOT_5705,
10782 0x03800107, 0x00000000 },
10783 { MAC_STATUS, TG3_FL_5705,
10784 0x03800100, 0x00000000 },
10785 { MAC_ADDR_0_HIGH, 0x0000,
10786 0x00000000, 0x0000ffff },
10787 { MAC_ADDR_0_LOW, 0x0000,
10788 0x00000000, 0xffffffff },
10789 { MAC_RX_MTU_SIZE, 0x0000,
10790 0x00000000, 0x0000ffff },
10791 { MAC_TX_MODE, 0x0000,
10792 0x00000000, 0x00000070 },
10793 { MAC_TX_LENGTHS, 0x0000,
10794 0x00000000, 0x00003fff },
10795 { MAC_RX_MODE, TG3_FL_NOT_5705,
10796 0x00000000, 0x000007fc },
10797 { MAC_RX_MODE, TG3_FL_5705,
10798 0x00000000, 0x000007dc },
10799 { MAC_HASH_REG_0, 0x0000,
10800 0x00000000, 0xffffffff },
10801 { MAC_HASH_REG_1, 0x0000,
10802 0x00000000, 0xffffffff },
10803 { MAC_HASH_REG_2, 0x0000,
10804 0x00000000, 0xffffffff },
10805 { MAC_HASH_REG_3, 0x0000,
10806 0x00000000, 0xffffffff },
10807
10808 /* Receive Data and Receive BD Initiator Control Registers. */
10809 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10810 0x00000000, 0xffffffff },
10811 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10812 0x00000000, 0xffffffff },
10813 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10814 0x00000000, 0x00000003 },
10815 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10816 0x00000000, 0xffffffff },
10817 { RCVDBDI_STD_BD+0, 0x0000,
10818 0x00000000, 0xffffffff },
10819 { RCVDBDI_STD_BD+4, 0x0000,
10820 0x00000000, 0xffffffff },
10821 { RCVDBDI_STD_BD+8, 0x0000,
10822 0x00000000, 0xffff0002 },
10823 { RCVDBDI_STD_BD+0xc, 0x0000,
10824 0x00000000, 0xffffffff },
10825
10826 /* Receive BD Initiator Control Registers. */
10827 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10828 0x00000000, 0xffffffff },
10829 { RCVBDI_STD_THRESH, TG3_FL_5705,
10830 0x00000000, 0x000003ff },
10831 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10832 0x00000000, 0xffffffff },
10833
10834 /* Host Coalescing Control Registers. */
10835 { HOSTCC_MODE, TG3_FL_NOT_5705,
10836 0x00000000, 0x00000004 },
10837 { HOSTCC_MODE, TG3_FL_5705,
10838 0x00000000, 0x000000f6 },
10839 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10840 0x00000000, 0xffffffff },
10841 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10842 0x00000000, 0x000003ff },
10843 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10844 0x00000000, 0xffffffff },
10845 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10846 0x00000000, 0x000003ff },
10847 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10848 0x00000000, 0xffffffff },
10849 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10850 0x00000000, 0x000000ff },
10851 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10852 0x00000000, 0xffffffff },
10853 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10854 0x00000000, 0x000000ff },
10855 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10856 0x00000000, 0xffffffff },
10857 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10858 0x00000000, 0xffffffff },
10859 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10860 0x00000000, 0xffffffff },
10861 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10862 0x00000000, 0x000000ff },
10863 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10864 0x00000000, 0xffffffff },
10865 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10866 0x00000000, 0x000000ff },
10867 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10868 0x00000000, 0xffffffff },
10869 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10870 0x00000000, 0xffffffff },
10871 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10872 0x00000000, 0xffffffff },
10873 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10874 0x00000000, 0xffffffff },
10875 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10876 0x00000000, 0xffffffff },
10877 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10878 0xffffffff, 0x00000000 },
10879 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10880 0xffffffff, 0x00000000 },
10881
10882 /* Buffer Manager Control Registers. */
10883 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10884 0x00000000, 0x007fff80 },
10885 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10886 0x00000000, 0x007fffff },
10887 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10888 0x00000000, 0x0000003f },
10889 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10890 0x00000000, 0x000001ff },
10891 { BUFMGR_MB_HIGH_WATER, 0x0000,
10892 0x00000000, 0x000001ff },
10893 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10894 0xffffffff, 0x00000000 },
10895 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10896 0xffffffff, 0x00000000 },
10897
10898 /* Mailbox Registers */
10899 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10900 0x00000000, 0x000001ff },
10901 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10902 0x00000000, 0x000001ff },
10903 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10904 0x00000000, 0x000007ff },
10905 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10906 0x00000000, 0x000001ff },
10907
10908 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10909 };
10910
10911 is_5705 = is_5750 = 0;
10912 if (tg3_flag(tp, 5705_PLUS)) {
10913 is_5705 = 1;
10914 if (tg3_flag(tp, 5750_PLUS))
10915 is_5750 = 1;
10916 }
10917
10918 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10919 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10920 continue;
10921
10922 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10923 continue;
10924
10925 if (tg3_flag(tp, IS_5788) &&
10926 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10927 continue;
10928
10929 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10930 continue;
10931
10932 offset = (u32) reg_tbl[i].offset;
10933 read_mask = reg_tbl[i].read_mask;
10934 write_mask = reg_tbl[i].write_mask;
10935
10936 /* Save the original register content */
10937 save_val = tr32(offset);
10938
10939 /* Determine the read-only value. */
10940 read_val = save_val & read_mask;
10941
10942 /* Write zero to the register, then make sure the read-only bits
10943 * are not changed and the read/write bits are all zeros.
10944 */
10945 tw32(offset, 0);
10946
10947 val = tr32(offset);
10948
10949 /* Test the read-only and read/write bits. */
10950 if (((val & read_mask) != read_val) || (val & write_mask))
10951 goto out;
10952
10953 /* Write ones to all the bits defined by RdMask and WrMask, then
10954 * make sure the read-only bits are not changed and the
10955 * read/write bits are all ones.
10956 */
10957 tw32(offset, read_mask | write_mask);
10958
10959 val = tr32(offset);
10960
10961 /* Test the read-only bits. */
10962 if ((val & read_mask) != read_val)
10963 goto out;
10964
10965 /* Test the read/write bits. */
10966 if ((val & write_mask) != write_mask)
10967 goto out;
10968
10969 tw32(offset, save_val);
10970 }
10971
10972 return 0;
10973
10974 out:
10975 if (netif_msg_hw(tp))
10976 netdev_err(tp->dev,
10977 "Register test failed at offset %x\n", offset);
10978 tw32(offset, save_val);
10979 return -EIO;
10980 }
10981
10982 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10983 {
10984 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10985 int i;
10986 u32 j;
10987
10988 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10989 for (j = 0; j < len; j += 4) {
10990 u32 val;
10991
10992 tg3_write_mem(tp, offset + j, test_pattern[i]);
10993 tg3_read_mem(tp, offset + j, &val);
10994 if (val != test_pattern[i])
10995 return -EIO;
10996 }
10997 }
10998 return 0;
10999 }
11000
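/* Walk a per-ASIC table of internal SRAM regions ({offset, len} pairs
 * terminated by 0xffffffff) and pattern-test each one: tg3_do_mem_test()
 * writes and reads back all-zeros, all-ones and 0xaa55a55a at every word.
 */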
11001 static int tg3_test_memory(struct tg3 *tp)
11002 {
11003 static struct mem_entry {
11004 u32 offset;
11005 u32 len;
11006 } mem_tbl_570x[] = {
11007 { 0x00000000, 0x00b50},
11008 { 0x00002000, 0x1c000},
11009 { 0xffffffff, 0x00000}
11010 }, mem_tbl_5705[] = {
11011 { 0x00000100, 0x0000c},
11012 { 0x00000200, 0x00008},
11013 { 0x00004000, 0x00800},
11014 { 0x00006000, 0x01000},
11015 { 0x00008000, 0x02000},
11016 { 0x00010000, 0x0e000},
11017 { 0xffffffff, 0x00000}
11018 }, mem_tbl_5755[] = {
11019 { 0x00000200, 0x00008},
11020 { 0x00004000, 0x00800},
11021 { 0x00006000, 0x00800},
11022 { 0x00008000, 0x02000},
11023 { 0x00010000, 0x0c000},
11024 { 0xffffffff, 0x00000}
11025 }, mem_tbl_5906[] = {
11026 { 0x00000200, 0x00008},
11027 { 0x00004000, 0x00400},
11028 { 0x00006000, 0x00400},
11029 { 0x00008000, 0x01000},
11030 { 0x00010000, 0x01000},
11031 { 0xffffffff, 0x00000}
11032 }, mem_tbl_5717[] = {
11033 { 0x00000200, 0x00008},
11034 { 0x00010000, 0x0a000},
11035 { 0x00020000, 0x13c00},
11036 { 0xffffffff, 0x00000}
11037 }, mem_tbl_57765[] = {
11038 { 0x00000200, 0x00008},
11039 { 0x00004000, 0x00800},
11040 { 0x00006000, 0x09800},
11041 { 0x00010000, 0x0a000},
11042 { 0xffffffff, 0x00000}
11043 };
11044 struct mem_entry *mem_tbl;
11045 int err = 0;
11046 int i;
11047
11048 if (tg3_flag(tp, 5717_PLUS))
11049 mem_tbl = mem_tbl_5717;
11050 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11051 mem_tbl = mem_tbl_57765;
11052 else if (tg3_flag(tp, 5755_PLUS))
11053 mem_tbl = mem_tbl_5755;
11054 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11055 mem_tbl = mem_tbl_5906;
11056 else if (tg3_flag(tp, 5705_PLUS))
11057 mem_tbl = mem_tbl_5705;
11058 else
11059 mem_tbl = mem_tbl_570x;
11060
11061 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11062 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11063 if (err)
11064 break;
11065 }
11066
11067 return err;
11068 }
11069
11070 #define TG3_MAC_LOOPBACK 0
11071 #define TG3_PHY_LOOPBACK 1
11072 #define TG3_TSO_LOOPBACK 2
11073
11074 #define TG3_TSO_MSS 500
11075
11076 #define TG3_TSO_IP_HDR_LEN 20
11077 #define TG3_TSO_TCP_HDR_LEN 20
11078 #define TG3_TSO_TCP_OPT_LEN 12
11079
11080 static const u8 tg3_tso_header[] = {
11081 0x08, 0x00,
11082 0x45, 0x00, 0x00, 0x00,
11083 0x00, 0x00, 0x40, 0x00,
11084 0x40, 0x06, 0x00, 0x00,
11085 0x0a, 0x00, 0x00, 0x01,
11086 0x0a, 0x00, 0x00, 0x02,
11087 0x0d, 0x00, 0xe0, 0x00,
11088 0x00, 0x00, 0x01, 0x00,
11089 0x00, 0x00, 0x02, 0x00,
11090 0x80, 0x10, 0x10, 0x00,
11091 0x14, 0x09, 0x00, 0x00,
11092 0x01, 0x01, 0x08, 0x0a,
11093 0x11, 0x11, 0x11, 0x11,
11094 0x11, 0x11, 0x11, 0x11,
11095 };
11096
11097 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11098 {
11099 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
11100 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11101 struct sk_buff *skb, *rx_skb;
11102 u8 *tx_data;
11103 dma_addr_t map;
11104 int num_pkts, tx_len, rx_len, i, err;
11105 struct tg3_rx_buffer_desc *desc;
11106 struct tg3_napi *tnapi, *rnapi;
11107 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11108
11109 tnapi = &tp->napi[0];
11110 rnapi = &tp->napi[0];
11111 if (tp->irq_cnt > 1) {
11112 if (tg3_flag(tp, ENABLE_RSS))
11113 rnapi = &tp->napi[1];
11114 if (tg3_flag(tp, ENABLE_TSS))
11115 tnapi = &tp->napi[1];
11116 }
11117 coal_now = tnapi->coal_now | rnapi->coal_now;
11118
11119 if (loopback_mode == TG3_MAC_LOOPBACK) {
11120 /* HW errata - mac loopback fails in some cases on 5780.
11121 * Normal traffic and PHY loopback are not affected by
11122 * errata. Also, the MAC loopback test is deprecated for
11123 * all newer ASIC revisions.
11124 */
11125 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11126 tg3_flag(tp, CPMU_PRESENT))
11127 return 0;
11128
11129 mac_mode = tp->mac_mode &
11130 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11131 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11132 if (!tg3_flag(tp, 5705_PLUS))
11133 mac_mode |= MAC_MODE_LINK_POLARITY;
11134 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11135 mac_mode |= MAC_MODE_PORT_MODE_MII;
11136 else
11137 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11138 tw32(MAC_MODE, mac_mode);
11139 } else {
11140 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11141 tg3_phy_fet_toggle_apd(tp, false);
11142 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11143 } else
11144 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11145
11146 tg3_phy_toggle_automdix(tp, 0);
11147
11148 tg3_writephy(tp, MII_BMCR, val);
11149 udelay(40);
11150
11151 mac_mode = tp->mac_mode &
11152 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11153 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11154 tg3_writephy(tp, MII_TG3_FET_PTEST,
11155 MII_TG3_FET_PTEST_FRC_TX_LINK |
11156 MII_TG3_FET_PTEST_FRC_TX_LOCK);
11157 /* The write needs to be flushed for the AC131 */
11158 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11159 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11160 mac_mode |= MAC_MODE_PORT_MODE_MII;
11161 } else
11162 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11163
11164 /* reset to prevent losing 1st rx packet intermittently */
11165 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11166 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11167 udelay(10);
11168 tw32_f(MAC_RX_MODE, tp->rx_mode);
11169 }
11170 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11171 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11172 if (masked_phy_id == TG3_PHY_ID_BCM5401)
11173 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11174 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11175 mac_mode |= MAC_MODE_LINK_POLARITY;
11176 tg3_writephy(tp, MII_TG3_EXT_CTRL,
11177 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11178 }
11179 tw32(MAC_MODE, mac_mode);
11180
11181 /* Wait for link */
11182 for (i = 0; i < 100; i++) {
11183 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11184 break;
11185 mdelay(1);
11186 }
11187 }
11188
11189 err = -EIO;
11190
11191 tx_len = pktsz;
11192 skb = netdev_alloc_skb(tp->dev, tx_len);
11193 if (!skb)
11194 return -ENOMEM;
11195
11196 tx_data = skb_put(skb, tx_len);
11197 memcpy(tx_data, tp->dev->dev_addr, 6);
11198 memset(tx_data + 6, 0x0, 8);
11199
11200 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11201
11202 if (loopback_mode == TG3_TSO_LOOPBACK) {
11203 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11204
11205 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11206 TG3_TSO_TCP_OPT_LEN;
11207
11208 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11209 sizeof(tg3_tso_header));
11210 mss = TG3_TSO_MSS;
11211
11212 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11213 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11214
11215 /* Set the total length field in the IP header */
11216 iph->tot_len = htons((u16)(mss + hdr_len));
11217
11218 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11219 TXD_FLAG_CPU_POST_DMA);
11220
11221 if (tg3_flag(tp, HW_TSO_1) ||
11222 tg3_flag(tp, HW_TSO_2) ||
11223 tg3_flag(tp, HW_TSO_3)) {
11224 struct tcphdr *th;
11225 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11226 th = (struct tcphdr *)&tx_data[val];
11227 th->check = 0;
11228 } else
11229 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11230
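/* The header length is encoded differently per TSO generation:
 * HW_TSO_3 spreads it across the mss field and base_flags, HW_TSO_2
 * stores it in mss bits 9 and up, and older parts encode only the
 * TCP option length.
 */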
11231 if (tg3_flag(tp, HW_TSO_3)) {
11232 mss |= (hdr_len & 0xc) << 12;
11233 if (hdr_len & 0x10)
11234 base_flags |= 0x00000010;
11235 base_flags |= (hdr_len & 0x3e0) << 5;
11236 } else if (tg3_flag(tp, HW_TSO_2))
11237 mss |= hdr_len << 9;
11238 else if (tg3_flag(tp, HW_TSO_1) ||
11239 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11240 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11241 } else {
11242 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11243 }
11244
11245 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11246 } else {
11247 num_pkts = 1;
11248 data_off = ETH_HLEN;
11249 }
11250
11251 for (i = data_off; i < tx_len; i++)
11252 tx_data[i] = (u8) (i & 0xff);
11253
11254 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11255 if (pci_dma_mapping_error(tp->pdev, map)) {
11256 dev_kfree_skb(skb);
11257 return -EIO;
11258 }
11259
11260 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11261 rnapi->coal_now);
11262
11263 udelay(10);
11264
11265 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11266
11267 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11268 base_flags, (mss << 1) | 1);
11269
11270 tnapi->tx_prod++;
11271
11272 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11273 tr32_mailbox(tnapi->prodmbox);
11274
11275 udelay(10);
11276
11277 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11278 for (i = 0; i < 35; i++) {
11279 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11280 coal_now);
11281
11282 udelay(10);
11283
11284 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11285 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11286 if ((tx_idx == tnapi->tx_prod) &&
11287 (rx_idx == (rx_start_idx + num_pkts)))
11288 break;
11289 }
11290
11291 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11292 dev_kfree_skb(skb);
11293
11294 if (tx_idx != tnapi->tx_prod)
11295 goto out;
11296
11297 if (rx_idx != rx_start_idx + num_pkts)
11298 goto out;
11299
11300 val = data_off;
11301 while (rx_idx != rx_start_idx) {
11302 desc = &rnapi->rx_rcb[rx_start_idx++];
11303 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11304 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11305
11306 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11307 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11308 goto out;
11309
11310 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11311 - ETH_FCS_LEN;
11312
11313 if (loopback_mode != TG3_TSO_LOOPBACK) {
11314 if (rx_len != tx_len)
11315 goto out;
11316
11317 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11318 if (opaque_key != RXD_OPAQUE_RING_STD)
11319 goto out;
11320 } else {
11321 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11322 goto out;
11323 }
11324 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11325 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11326 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11327 goto out;
11328 }
11329
11330 if (opaque_key == RXD_OPAQUE_RING_STD) {
11331 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11332 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11333 mapping);
11334 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11335 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11336 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11337 mapping);
11338 } else
11339 goto out;
11340
11341 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11342 PCI_DMA_FROMDEVICE);
11343
11344 for (i = data_off; i < rx_len; i++, val++) {
11345 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11346 goto out;
11347 }
11348 }
11349
11350 err = 0;
11351
11352 /* tg3_free_rings will unmap and free the rx_skb */
11353 out:
11354 return err;
11355 }
11356
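/* tg3_test_loopback() returns a bitmask: bits 0-2 report standard,
 * jumbo and TSO failures for MAC loopback, bits 4-6 the same for PHY
 * loopback; 0x77 therefore means every sub-test failed.
 */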
11357 #define TG3_STD_LOOPBACK_FAILED 1
11358 #define TG3_JMB_LOOPBACK_FAILED 2
11359 #define TG3_TSO_LOOPBACK_FAILED 4
11360
11361 #define TG3_MAC_LOOPBACK_SHIFT 0
11362 #define TG3_PHY_LOOPBACK_SHIFT 4
11363 #define TG3_LOOPBACK_FAILED 0x00000077
11364
11365 static int tg3_test_loopback(struct tg3 *tp)
11366 {
11367 int err = 0;
11368 u32 eee_cap, cpmuctrl = 0;
11369
11370 if (!netif_running(tp->dev))
11371 return TG3_LOOPBACK_FAILED;
11372
11373 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11374 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11375
11376 err = tg3_reset_hw(tp, 1);
11377 if (err) {
11378 err = TG3_LOOPBACK_FAILED;
11379 goto done;
11380 }
11381
11382 if (tg3_flag(tp, ENABLE_RSS)) {
11383 int i;
11384
11385 /* Reroute all rx packets to the 1st queue */
11386 for (i = MAC_RSS_INDIR_TBL_0;
11387 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11388 tw32(i, 0x0);
11389 }
11390
11391 /* Turn off gphy autopowerdown. */
11392 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11393 tg3_phy_toggle_apd(tp, false);
11394
11395 if (tg3_flag(tp, CPMU_PRESENT)) {
11396 int i;
11397 u32 status;
11398
11399 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11400
11401 /* Wait for up to 40 microseconds to acquire lock. */
11402 for (i = 0; i < 4; i++) {
11403 status = tr32(TG3_CPMU_MUTEX_GNT);
11404 if (status == CPMU_MUTEX_GNT_DRIVER)
11405 break;
11406 udelay(10);
11407 }
11408
11409 if (status != CPMU_MUTEX_GNT_DRIVER) {
11410 err = TG3_LOOPBACK_FAILED;
11411 goto done;
11412 }
11413
11414 /* Turn off link-based power management. */
11415 cpmuctrl = tr32(TG3_CPMU_CTRL);
11416 tw32(TG3_CPMU_CTRL,
11417 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11418 CPMU_CTRL_LINK_AWARE_MODE));
11419 }
11420
11421 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11422 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11423
11424 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11425 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11426 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11427
11428 if (tg3_flag(tp, CPMU_PRESENT)) {
11429 tw32(TG3_CPMU_CTRL, cpmuctrl);
11430
11431 /* Release the mutex */
11432 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11433 }
11434
11435 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11436 !tg3_flag(tp, USE_PHYLIB)) {
11437 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11438 err |= TG3_STD_LOOPBACK_FAILED <<
11439 TG3_PHY_LOOPBACK_SHIFT;
11440 if (tg3_flag(tp, TSO_CAPABLE) &&
11441 tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11442 err |= TG3_TSO_LOOPBACK_FAILED <<
11443 TG3_PHY_LOOPBACK_SHIFT;
11444 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11445 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11446 err |= TG3_JMB_LOOPBACK_FAILED <<
11447 TG3_PHY_LOOPBACK_SHIFT;
11448 }
11449
11450 /* Re-enable gphy autopowerdown. */
11451 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11452 tg3_phy_toggle_apd(tp, true);
11453
11454 done:
11455 tp->phy_flags |= eee_cap;
11456
11457 return err;
11458 }
11459
11460 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11461 u64 *data)
11462 {
11463 struct tg3 *tp = netdev_priv(dev);
11464
11465 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11466 tg3_power_up(tp);
11467
11468 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11469
11470 if (tg3_test_nvram(tp) != 0) {
11471 etest->flags |= ETH_TEST_FL_FAILED;
11472 data[0] = 1;
11473 }
11474 if (tg3_test_link(tp) != 0) {
11475 etest->flags |= ETH_TEST_FL_FAILED;
11476 data[1] = 1;
11477 }
11478 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11479 int err, err2 = 0, irq_sync = 0;
11480
11481 if (netif_running(dev)) {
11482 tg3_phy_stop(tp);
11483 tg3_netif_stop(tp);
11484 irq_sync = 1;
11485 }
11486
11487 tg3_full_lock(tp, irq_sync);
11488
11489 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11490 err = tg3_nvram_lock(tp);
11491 tg3_halt_cpu(tp, RX_CPU_BASE);
11492 if (!tg3_flag(tp, 5705_PLUS))
11493 tg3_halt_cpu(tp, TX_CPU_BASE);
11494 if (!err)
11495 tg3_nvram_unlock(tp);
11496
11497 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11498 tg3_phy_reset(tp);
11499
11500 if (tg3_test_registers(tp) != 0) {
11501 etest->flags |= ETH_TEST_FL_FAILED;
11502 data[2] = 1;
11503 }
11504 if (tg3_test_memory(tp) != 0) {
11505 etest->flags |= ETH_TEST_FL_FAILED;
11506 data[3] = 1;
11507 }
11508 if ((data[4] = tg3_test_loopback(tp)) != 0)
11509 etest->flags |= ETH_TEST_FL_FAILED;
11510
11511 tg3_full_unlock(tp);
11512
11513 if (tg3_test_interrupt(tp) != 0) {
11514 etest->flags |= ETH_TEST_FL_FAILED;
11515 data[5] = 1;
11516 }
11517
11518 tg3_full_lock(tp, 0);
11519
11520 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11521 if (netif_running(dev)) {
11522 tg3_flag_set(tp, INIT_COMPLETE);
11523 err2 = tg3_restart_hw(tp, 1);
11524 if (!err2)
11525 tg3_netif_start(tp);
11526 }
11527
11528 tg3_full_unlock(tp);
11529
11530 if (irq_sync && !err2)
11531 tg3_phy_start(tp);
11532 }
11533 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11534 tg3_power_down(tp);
11535
11536 }
11537
11538 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11539 {
11540 struct mii_ioctl_data *data = if_mii(ifr);
11541 struct tg3 *tp = netdev_priv(dev);
11542 int err;
11543
11544 if (tg3_flag(tp, USE_PHYLIB)) {
11545 struct phy_device *phydev;
11546 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11547 return -EAGAIN;
11548 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11549 return phy_mii_ioctl(phydev, ifr, cmd);
11550 }
11551
11552 switch (cmd) {
11553 case SIOCGMIIPHY:
11554 data->phy_id = tp->phy_addr;
11555
11556 /* fallthru */
11557 case SIOCGMIIREG: {
11558 u32 mii_regval;
11559
11560 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11561 break; /* We have no PHY */
11562
11563 if (!netif_running(dev))
11564 return -EAGAIN;
11565
11566 spin_lock_bh(&tp->lock);
11567 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11568 spin_unlock_bh(&tp->lock);
11569
11570 data->val_out = mii_regval;
11571
11572 return err;
11573 }
11574
11575 case SIOCSMIIREG:
11576 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11577 break; /* We have no PHY */
11578
11579 if (!netif_running(dev))
11580 return -EAGAIN;
11581
11582 spin_lock_bh(&tp->lock);
11583 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11584 spin_unlock_bh(&tp->lock);
11585
11586 return err;
11587
11588 default:
11589 /* do nothing */
11590 break;
11591 }
11592 return -EOPNOTSUPP;
11593 }
11594
11595 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11596 {
11597 struct tg3 *tp = netdev_priv(dev);
11598
11599 memcpy(ec, &tp->coal, sizeof(*ec));
11600 return 0;
11601 }
11602
11603 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11604 {
11605 struct tg3 *tp = netdev_priv(dev);
11606 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11607 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11608
11609 if (!tg3_flag(tp, 5705_PLUS)) {
11610 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11611 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11612 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11613 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11614 }
11615
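/* On 5705-class and newer chips the limits above stay zero, so any
 * nonzero *_usecs_irq or stats-block request is rejected below.
 */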
11616 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11617 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11618 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11619 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11620 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11621 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11622 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11623 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11624 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11625 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11626 return -EINVAL;
11627
11628 /* No rx interrupts will be generated if both are zero */
11629 if ((ec->rx_coalesce_usecs == 0) &&
11630 (ec->rx_max_coalesced_frames == 0))
11631 return -EINVAL;
11632
11633 /* No tx interrupts will be generated if both are zero */
11634 if ((ec->tx_coalesce_usecs == 0) &&
11635 (ec->tx_max_coalesced_frames == 0))
11636 return -EINVAL;
11637
11638 /* Only copy relevant parameters, ignore all others. */
11639 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11640 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11641 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11642 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11643 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11644 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11645 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11646 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11647 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11648
11649 if (netif_running(dev)) {
11650 tg3_full_lock(tp, 0);
11651 __tg3_set_coalesce(tp, &tp->coal);
11652 tg3_full_unlock(tp);
11653 }
11654 return 0;
11655 }
11656
11657 static const struct ethtool_ops tg3_ethtool_ops = {
11658 .get_settings = tg3_get_settings,
11659 .set_settings = tg3_set_settings,
11660 .get_drvinfo = tg3_get_drvinfo,
11661 .get_regs_len = tg3_get_regs_len,
11662 .get_regs = tg3_get_regs,
11663 .get_wol = tg3_get_wol,
11664 .set_wol = tg3_set_wol,
11665 .get_msglevel = tg3_get_msglevel,
11666 .set_msglevel = tg3_set_msglevel,
11667 .nway_reset = tg3_nway_reset,
11668 .get_link = ethtool_op_get_link,
11669 .get_eeprom_len = tg3_get_eeprom_len,
11670 .get_eeprom = tg3_get_eeprom,
11671 .set_eeprom = tg3_set_eeprom,
11672 .get_ringparam = tg3_get_ringparam,
11673 .set_ringparam = tg3_set_ringparam,
11674 .get_pauseparam = tg3_get_pauseparam,
11675 .set_pauseparam = tg3_set_pauseparam,
11676 .self_test = tg3_self_test,
11677 .get_strings = tg3_get_strings,
11678 .set_phys_id = tg3_set_phys_id,
11679 .get_ethtool_stats = tg3_get_ethtool_stats,
11680 .get_coalesce = tg3_get_coalesce,
11681 .set_coalesce = tg3_set_coalesce,
11682 .get_sset_count = tg3_get_sset_count,
11683 };
11684
11685 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11686 {
11687 u32 cursize, val, magic;
11688
11689 tp->nvram_size = EEPROM_CHIP_SIZE;
11690
11691 if (tg3_nvram_read(tp, 0, &magic) != 0)
11692 return;
11693
11694 if ((magic != TG3_EEPROM_MAGIC) &&
11695 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11696 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11697 return;
11698
11699 /*
11700 * Size the chip by reading offsets at increasing powers of two.
11701 * When we encounter our validation signature, we know the addressing
11702 * has wrapped around, and thus have our chip size.
11703 */
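/* For example, with a 512-byte part the read at offset 0x200 wraps
 * back to offset 0 and returns the magic value, so cursize (and thus
 * nvram_size) ends up as 0x200.
 */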
11704 cursize = 0x10;
11705
11706 while (cursize < tp->nvram_size) {
11707 if (tg3_nvram_read(tp, cursize, &val) != 0)
11708 return;
11709
11710 if (val == magic)
11711 break;
11712
11713 cursize <<= 1;
11714 }
11715
11716 tp->nvram_size = cursize;
11717 }
11718
11719 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11720 {
11721 u32 val;
11722
11723 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11724 return;
11725
11726 /* Selfboot format */
11727 if (val != TG3_EEPROM_MAGIC) {
11728 tg3_get_eeprom_size(tp);
11729 return;
11730 }
11731
11732 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11733 if (val != 0) {
11734 /* This is confusing. We want to operate on the
11735 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11736 * call will read from NVRAM and byteswap the data
11737 * according to the byteswapping settings for all
11738 * other register accesses. This ensures the data we
11739 * want will always reside in the lower 16-bits.
11740 * However, the data in NVRAM is in LE format, which
11741 * means the data from the NVRAM read will always be
11742 * opposite the endianness of the CPU. The 16-bit
11743 * byteswap then brings the data to CPU endianness.
11744 */
11745 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11746 return;
11747 }
11748 }
11749 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11750 }
11751
11752 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11753 {
11754 u32 nvcfg1;
11755
11756 nvcfg1 = tr32(NVRAM_CFG1);
11757 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11758 tg3_flag_set(tp, FLASH);
11759 } else {
11760 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11761 tw32(NVRAM_CFG1, nvcfg1);
11762 }
11763
11764 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
11765 tg3_flag(tp, 5780_CLASS)) {
11766 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11767 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11768 tp->nvram_jedecnum = JEDEC_ATMEL;
11769 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11770 tg3_flag_set(tp, NVRAM_BUFFERED);
11771 break;
11772 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11773 tp->nvram_jedecnum = JEDEC_ATMEL;
11774 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11775 break;
11776 case FLASH_VENDOR_ATMEL_EEPROM:
11777 tp->nvram_jedecnum = JEDEC_ATMEL;
11778 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11779 tg3_flag_set(tp, NVRAM_BUFFERED);
11780 break;
11781 case FLASH_VENDOR_ST:
11782 tp->nvram_jedecnum = JEDEC_ST;
11783 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11784 tg3_flag_set(tp, NVRAM_BUFFERED);
11785 break;
11786 case FLASH_VENDOR_SAIFUN:
11787 tp->nvram_jedecnum = JEDEC_SAIFUN;
11788 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11789 break;
11790 case FLASH_VENDOR_SST_SMALL:
11791 case FLASH_VENDOR_SST_LARGE:
11792 tp->nvram_jedecnum = JEDEC_SST;
11793 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11794 break;
11795 }
11796 } else {
11797 tp->nvram_jedecnum = JEDEC_ATMEL;
11798 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11799 tg3_flag_set(tp, NVRAM_BUFFERED);
11800 }
11801 }
11802
11803 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11804 {
11805 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11806 case FLASH_5752PAGE_SIZE_256:
11807 tp->nvram_pagesize = 256;
11808 break;
11809 case FLASH_5752PAGE_SIZE_512:
11810 tp->nvram_pagesize = 512;
11811 break;
11812 case FLASH_5752PAGE_SIZE_1K:
11813 tp->nvram_pagesize = 1024;
11814 break;
11815 case FLASH_5752PAGE_SIZE_2K:
11816 tp->nvram_pagesize = 2048;
11817 break;
11818 case FLASH_5752PAGE_SIZE_4K:
11819 tp->nvram_pagesize = 4096;
11820 break;
11821 case FLASH_5752PAGE_SIZE_264:
11822 tp->nvram_pagesize = 264;
11823 break;
11824 case FLASH_5752PAGE_SIZE_528:
11825 tp->nvram_pagesize = 528;
11826 break;
11827 }
11828 }
11829
11830 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11831 {
11832 u32 nvcfg1;
11833
11834 nvcfg1 = tr32(NVRAM_CFG1);
11835
11836 /* NVRAM protection for TPM */
11837 if (nvcfg1 & (1 << 27))
11838 tg3_flag_set(tp, PROTECTED_NVRAM);
11839
11840 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11841 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11842 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11843 tp->nvram_jedecnum = JEDEC_ATMEL;
11844 tg3_flag_set(tp, NVRAM_BUFFERED);
11845 break;
11846 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11847 tp->nvram_jedecnum = JEDEC_ATMEL;
11848 tg3_flag_set(tp, NVRAM_BUFFERED);
11849 tg3_flag_set(tp, FLASH);
11850 break;
11851 case FLASH_5752VENDOR_ST_M45PE10:
11852 case FLASH_5752VENDOR_ST_M45PE20:
11853 case FLASH_5752VENDOR_ST_M45PE40:
11854 tp->nvram_jedecnum = JEDEC_ST;
11855 tg3_flag_set(tp, NVRAM_BUFFERED);
11856 tg3_flag_set(tp, FLASH);
11857 break;
11858 }
11859
11860 if (tg3_flag(tp, FLASH)) {
11861 tg3_nvram_get_pagesize(tp, nvcfg1);
11862 } else {
11863 /* For eeprom, set pagesize to maximum eeprom size */
11864 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11865
11866 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11867 tw32(NVRAM_CFG1, nvcfg1);
11868 }
11869 }
11870
11871 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11872 {
11873 u32 nvcfg1, protect = 0;
11874
11875 nvcfg1 = tr32(NVRAM_CFG1);
11876
11877 /* NVRAM protection for TPM */
11878 if (nvcfg1 & (1 << 27)) {
11879 tg3_flag_set(tp, PROTECTED_NVRAM);
11880 protect = 1;
11881 }
11882
11883 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11884 switch (nvcfg1) {
11885 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11886 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11887 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11888 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11889 tp->nvram_jedecnum = JEDEC_ATMEL;
11890 tg3_flag_set(tp, NVRAM_BUFFERED);
11891 tg3_flag_set(tp, FLASH);
11892 tp->nvram_pagesize = 264;
11893 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11894 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11895 tp->nvram_size = (protect ? 0x3e200 :
11896 TG3_NVRAM_SIZE_512KB);
11897 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11898 tp->nvram_size = (protect ? 0x1f200 :
11899 TG3_NVRAM_SIZE_256KB);
11900 else
11901 tp->nvram_size = (protect ? 0x1f200 :
11902 TG3_NVRAM_SIZE_128KB);
11903 break;
11904 case FLASH_5752VENDOR_ST_M45PE10:
11905 case FLASH_5752VENDOR_ST_M45PE20:
11906 case FLASH_5752VENDOR_ST_M45PE40:
11907 tp->nvram_jedecnum = JEDEC_ST;
11908 tg3_flag_set(tp, NVRAM_BUFFERED);
11909 tg3_flag_set(tp, FLASH);
11910 tp->nvram_pagesize = 256;
11911 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11912 tp->nvram_size = (protect ?
11913 TG3_NVRAM_SIZE_64KB :
11914 TG3_NVRAM_SIZE_128KB);
11915 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11916 tp->nvram_size = (protect ?
11917 TG3_NVRAM_SIZE_64KB :
11918 TG3_NVRAM_SIZE_256KB);
11919 else
11920 tp->nvram_size = (protect ?
11921 TG3_NVRAM_SIZE_128KB :
11922 TG3_NVRAM_SIZE_512KB);
11923 break;
11924 }
11925 }
11926
11927 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11928 {
11929 u32 nvcfg1;
11930
11931 nvcfg1 = tr32(NVRAM_CFG1);
11932
11933 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11934 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11935 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11936 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11937 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11938 tp->nvram_jedecnum = JEDEC_ATMEL;
11939 tg3_flag_set(tp, NVRAM_BUFFERED);
11940 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11941
11942 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11943 tw32(NVRAM_CFG1, nvcfg1);
11944 break;
11945 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11946 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11947 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11948 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11949 tp->nvram_jedecnum = JEDEC_ATMEL;
11950 tg3_flag_set(tp, NVRAM_BUFFERED);
11951 tg3_flag_set(tp, FLASH);
11952 tp->nvram_pagesize = 264;
11953 break;
11954 case FLASH_5752VENDOR_ST_M45PE10:
11955 case FLASH_5752VENDOR_ST_M45PE20:
11956 case FLASH_5752VENDOR_ST_M45PE40:
11957 tp->nvram_jedecnum = JEDEC_ST;
11958 tg3_flag_set(tp, NVRAM_BUFFERED);
11959 tg3_flag_set(tp, FLASH);
11960 tp->nvram_pagesize = 256;
11961 break;
11962 }
11963 }
11964
11965 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11966 {
11967 u32 nvcfg1, protect = 0;
11968
11969 nvcfg1 = tr32(NVRAM_CFG1);
11970
11971 /* NVRAM protection for TPM */
11972 if (nvcfg1 & (1 << 27)) {
11973 tg3_flag_set(tp, PROTECTED_NVRAM);
11974 protect = 1;
11975 }
11976
11977 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11978 switch (nvcfg1) {
11979 case FLASH_5761VENDOR_ATMEL_ADB021D:
11980 case FLASH_5761VENDOR_ATMEL_ADB041D:
11981 case FLASH_5761VENDOR_ATMEL_ADB081D:
11982 case FLASH_5761VENDOR_ATMEL_ADB161D:
11983 case FLASH_5761VENDOR_ATMEL_MDB021D:
11984 case FLASH_5761VENDOR_ATMEL_MDB041D:
11985 case FLASH_5761VENDOR_ATMEL_MDB081D:
11986 case FLASH_5761VENDOR_ATMEL_MDB161D:
11987 tp->nvram_jedecnum = JEDEC_ATMEL;
11988 tg3_flag_set(tp, NVRAM_BUFFERED);
11989 tg3_flag_set(tp, FLASH);
11990 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11991 tp->nvram_pagesize = 256;
11992 break;
11993 case FLASH_5761VENDOR_ST_A_M45PE20:
11994 case FLASH_5761VENDOR_ST_A_M45PE40:
11995 case FLASH_5761VENDOR_ST_A_M45PE80:
11996 case FLASH_5761VENDOR_ST_A_M45PE16:
11997 case FLASH_5761VENDOR_ST_M_M45PE20:
11998 case FLASH_5761VENDOR_ST_M_M45PE40:
11999 case FLASH_5761VENDOR_ST_M_M45PE80:
12000 case FLASH_5761VENDOR_ST_M_M45PE16:
12001 tp->nvram_jedecnum = JEDEC_ST;
12002 tg3_flag_set(tp, NVRAM_BUFFERED);
12003 tg3_flag_set(tp, FLASH);
12004 tp->nvram_pagesize = 256;
12005 break;
12006 }
12007
12008 if (protect) {
12009 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12010 } else {
12011 switch (nvcfg1) {
12012 case FLASH_5761VENDOR_ATMEL_ADB161D:
12013 case FLASH_5761VENDOR_ATMEL_MDB161D:
12014 case FLASH_5761VENDOR_ST_A_M45PE16:
12015 case FLASH_5761VENDOR_ST_M_M45PE16:
12016 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12017 break;
12018 case FLASH_5761VENDOR_ATMEL_ADB081D:
12019 case FLASH_5761VENDOR_ATMEL_MDB081D:
12020 case FLASH_5761VENDOR_ST_A_M45PE80:
12021 case FLASH_5761VENDOR_ST_M_M45PE80:
12022 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12023 break;
12024 case FLASH_5761VENDOR_ATMEL_ADB041D:
12025 case FLASH_5761VENDOR_ATMEL_MDB041D:
12026 case FLASH_5761VENDOR_ST_A_M45PE40:
12027 case FLASH_5761VENDOR_ST_M_M45PE40:
12028 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12029 break;
12030 case FLASH_5761VENDOR_ATMEL_ADB021D:
12031 case FLASH_5761VENDOR_ATMEL_MDB021D:
12032 case FLASH_5761VENDOR_ST_A_M45PE20:
12033 case FLASH_5761VENDOR_ST_M_M45PE20:
12034 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12035 break;
12036 }
12037 }
12038 }
12039
12040 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12041 {
12042 tp->nvram_jedecnum = JEDEC_ATMEL;
12043 tg3_flag_set(tp, NVRAM_BUFFERED);
12044 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12045 }
12046
12047 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12048 {
12049 u32 nvcfg1;
12050
12051 nvcfg1 = tr32(NVRAM_CFG1);
12052
12053 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12054 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12055 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12056 tp->nvram_jedecnum = JEDEC_ATMEL;
12057 tg3_flag_set(tp, NVRAM_BUFFERED);
12058 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12059
12060 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12061 tw32(NVRAM_CFG1, nvcfg1);
12062 return;
12063 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12064 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12065 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12066 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12067 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12068 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12069 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12070 tp->nvram_jedecnum = JEDEC_ATMEL;
12071 tg3_flag_set(tp, NVRAM_BUFFERED);
12072 tg3_flag_set(tp, FLASH);
12073
12074 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12075 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12076 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12077 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12078 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12079 break;
12080 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12081 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12082 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12083 break;
12084 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12085 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12086 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12087 break;
12088 }
12089 break;
12090 case FLASH_5752VENDOR_ST_M45PE10:
12091 case FLASH_5752VENDOR_ST_M45PE20:
12092 case FLASH_5752VENDOR_ST_M45PE40:
12093 tp->nvram_jedecnum = JEDEC_ST;
12094 tg3_flag_set(tp, NVRAM_BUFFERED);
12095 tg3_flag_set(tp, FLASH);
12096
12097 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12098 case FLASH_5752VENDOR_ST_M45PE10:
12099 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12100 break;
12101 case FLASH_5752VENDOR_ST_M45PE20:
12102 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12103 break;
12104 case FLASH_5752VENDOR_ST_M45PE40:
12105 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12106 break;
12107 }
12108 break;
12109 default:
12110 tg3_flag_set(tp, NO_NVRAM);
12111 return;
12112 }
12113
12114 tg3_nvram_get_pagesize(tp, nvcfg1);
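/* Only 264- and 528-byte pages (Atmel DataFlash-style parts) need
 * the NVRAM address translation; power-of-two page sizes map
 * linearly.
 */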
12115 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12116 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12117 }
12118
12119
12120 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12121 {
12122 u32 nvcfg1;
12123
12124 nvcfg1 = tr32(NVRAM_CFG1);
12125
12126 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12127 case FLASH_5717VENDOR_ATMEL_EEPROM:
12128 case FLASH_5717VENDOR_MICRO_EEPROM:
12129 tp->nvram_jedecnum = JEDEC_ATMEL;
12130 tg3_flag_set(tp, NVRAM_BUFFERED);
12131 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12132
12133 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12134 tw32(NVRAM_CFG1, nvcfg1);
12135 return;
12136 case FLASH_5717VENDOR_ATMEL_MDB011D:
12137 case FLASH_5717VENDOR_ATMEL_ADB011B:
12138 case FLASH_5717VENDOR_ATMEL_ADB011D:
12139 case FLASH_5717VENDOR_ATMEL_MDB021D:
12140 case FLASH_5717VENDOR_ATMEL_ADB021B:
12141 case FLASH_5717VENDOR_ATMEL_ADB021D:
12142 case FLASH_5717VENDOR_ATMEL_45USPT:
12143 tp->nvram_jedecnum = JEDEC_ATMEL;
12144 tg3_flag_set(tp, NVRAM_BUFFERED);
12145 tg3_flag_set(tp, FLASH);
12146
12147 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12148 case FLASH_5717VENDOR_ATMEL_MDB021D:
12149 /* Detect size with tg3_get_nvram_size() */
12150 break;
12151 case FLASH_5717VENDOR_ATMEL_ADB021B:
12152 case FLASH_5717VENDOR_ATMEL_ADB021D:
12153 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12154 break;
12155 default:
12156 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12157 break;
12158 }
12159 break;
12160 case FLASH_5717VENDOR_ST_M_M25PE10:
12161 case FLASH_5717VENDOR_ST_A_M25PE10:
12162 case FLASH_5717VENDOR_ST_M_M45PE10:
12163 case FLASH_5717VENDOR_ST_A_M45PE10:
12164 case FLASH_5717VENDOR_ST_M_M25PE20:
12165 case FLASH_5717VENDOR_ST_A_M25PE20:
12166 case FLASH_5717VENDOR_ST_M_M45PE20:
12167 case FLASH_5717VENDOR_ST_A_M45PE20:
12168 case FLASH_5717VENDOR_ST_25USPT:
12169 case FLASH_5717VENDOR_ST_45USPT:
12170 tp->nvram_jedecnum = JEDEC_ST;
12171 tg3_flag_set(tp, NVRAM_BUFFERED);
12172 tg3_flag_set(tp, FLASH);
12173
12174 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12175 case FLASH_5717VENDOR_ST_M_M25PE20:
12176 case FLASH_5717VENDOR_ST_M_M45PE20:
12177 /* Detect size with tg3_get_nvram_size() */
12178 break;
12179 case FLASH_5717VENDOR_ST_A_M25PE20:
12180 case FLASH_5717VENDOR_ST_A_M45PE20:
12181 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12182 break;
12183 default:
12184 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12185 break;
12186 }
12187 break;
12188 default:
12189 tg3_flag_set(tp, NO_NVRAM);
12190 return;
12191 }
12192
12193 tg3_nvram_get_pagesize(tp, nvcfg1);
12194 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12195 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12196 }
12197
12198 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12199 {
12200 u32 nvcfg1, nvmpinstrp;
12201
12202 nvcfg1 = tr32(NVRAM_CFG1);
12203 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12204
12205 switch (nvmpinstrp) {
12206 case FLASH_5720_EEPROM_HD:
12207 case FLASH_5720_EEPROM_LD:
12208 tp->nvram_jedecnum = JEDEC_ATMEL;
12209 tg3_flag_set(tp, NVRAM_BUFFERED);
12210
12211 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12212 tw32(NVRAM_CFG1, nvcfg1);
12213 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12214 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12215 else
12216 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12217 return;
12218 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12219 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12220 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12221 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12222 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12223 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12224 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12225 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12226 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12227 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12228 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12229 case FLASH_5720VENDOR_ATMEL_45USPT:
12230 tp->nvram_jedecnum = JEDEC_ATMEL;
12231 tg3_flag_set(tp, NVRAM_BUFFERED);
12232 tg3_flag_set(tp, FLASH);
12233
12234 switch (nvmpinstrp) {
12235 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12236 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12237 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12238 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12239 break;
12240 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12241 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12242 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12243 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12244 break;
12245 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12246 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12247 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12248 break;
12249 default:
12250 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12251 break;
12252 }
12253 break;
12254 case FLASH_5720VENDOR_M_ST_M25PE10:
12255 case FLASH_5720VENDOR_M_ST_M45PE10:
12256 case FLASH_5720VENDOR_A_ST_M25PE10:
12257 case FLASH_5720VENDOR_A_ST_M45PE10:
12258 case FLASH_5720VENDOR_M_ST_M25PE20:
12259 case FLASH_5720VENDOR_M_ST_M45PE20:
12260 case FLASH_5720VENDOR_A_ST_M25PE20:
12261 case FLASH_5720VENDOR_A_ST_M45PE20:
12262 case FLASH_5720VENDOR_M_ST_M25PE40:
12263 case FLASH_5720VENDOR_M_ST_M45PE40:
12264 case FLASH_5720VENDOR_A_ST_M25PE40:
12265 case FLASH_5720VENDOR_A_ST_M45PE40:
12266 case FLASH_5720VENDOR_M_ST_M25PE80:
12267 case FLASH_5720VENDOR_M_ST_M45PE80:
12268 case FLASH_5720VENDOR_A_ST_M25PE80:
12269 case FLASH_5720VENDOR_A_ST_M45PE80:
12270 case FLASH_5720VENDOR_ST_25USPT:
12271 case FLASH_5720VENDOR_ST_45USPT:
12272 tp->nvram_jedecnum = JEDEC_ST;
12273 tg3_flag_set(tp, NVRAM_BUFFERED);
12274 tg3_flag_set(tp, FLASH);
12275
12276 switch (nvmpinstrp) {
12277 case FLASH_5720VENDOR_M_ST_M25PE20:
12278 case FLASH_5720VENDOR_M_ST_M45PE20:
12279 case FLASH_5720VENDOR_A_ST_M25PE20:
12280 case FLASH_5720VENDOR_A_ST_M45PE20:
12281 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12282 break;
12283 case FLASH_5720VENDOR_M_ST_M25PE40:
12284 case FLASH_5720VENDOR_M_ST_M45PE40:
12285 case FLASH_5720VENDOR_A_ST_M25PE40:
12286 case FLASH_5720VENDOR_A_ST_M45PE40:
12287 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12288 break;
12289 case FLASH_5720VENDOR_M_ST_M25PE80:
12290 case FLASH_5720VENDOR_M_ST_M45PE80:
12291 case FLASH_5720VENDOR_A_ST_M25PE80:
12292 case FLASH_5720VENDOR_A_ST_M45PE80:
12293 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12294 break;
12295 default:
12296 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12297 break;
12298 }
12299 break;
12300 default:
12301 tg3_flag_set(tp, NO_NVRAM);
12302 return;
12303 }
12304
12305 tg3_nvram_get_pagesize(tp, nvcfg1);
12306 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12307 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12308 }
12309
12310 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12311 static void __devinit tg3_nvram_init(struct tg3 *tp)
12312 {
12313 tw32_f(GRC_EEPROM_ADDR,
12314 (EEPROM_ADDR_FSM_RESET |
12315 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12316 EEPROM_ADDR_CLKPERD_SHIFT)));
12317
12318 msleep(1);
12319
12320 /* Enable seeprom accesses. */
12321 tw32_f(GRC_LOCAL_CTRL,
12322 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12323 udelay(100);
12324
12325 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12326 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12327 tg3_flag_set(tp, NVRAM);
12328
12329 if (tg3_nvram_lock(tp)) {
12330 netdev_warn(tp->dev,
12331 "Cannot get nvram lock, %s failed\n",
12332 __func__);
12333 return;
12334 }
12335 tg3_enable_nvram_access(tp);
12336
12337 tp->nvram_size = 0;
12338
12339 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12340 tg3_get_5752_nvram_info(tp);
12341 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12342 tg3_get_5755_nvram_info(tp);
12343 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12344 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12345 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12346 tg3_get_5787_nvram_info(tp);
12347 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12348 tg3_get_5761_nvram_info(tp);
12349 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12350 tg3_get_5906_nvram_info(tp);
12351 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12352 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12353 tg3_get_57780_nvram_info(tp);
12354 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12355 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12356 tg3_get_5717_nvram_info(tp);
12357 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12358 tg3_get_5720_nvram_info(tp);
12359 else
12360 tg3_get_nvram_info(tp);
12361
12362 if (tp->nvram_size == 0)
12363 tg3_get_nvram_size(tp);
12364
12365 tg3_disable_nvram_access(tp);
12366 tg3_nvram_unlock(tp);
12367
12368 } else {
12369 tg3_flag_clear(tp, NVRAM);
12370 tg3_flag_clear(tp, NVRAM_BUFFERED);
12371
12372 tg3_get_eeprom_size(tp);
12373 }
12374 }
12375
12376 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12377 u32 offset, u32 len, u8 *buf)
12378 {
12379 int i, j, rc = 0;
12380 u32 val;
12381
12382 for (i = 0; i < len; i += 4) {
12383 u32 addr;
12384 __be32 data;
12385
12386 addr = offset + i;
12387
12388 memcpy(&data, buf + i, 4);
12389
12390 /*
12391 * The SEEPROM interface expects the data to always be opposite
12392 * the native endian format. We accomplish this by reversing
12393 * all the operations that would have been performed on the
12394 * data from a call to tg3_nvram_read_be32().
12395 */
12396 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12397
12398 val = tr32(GRC_EEPROM_ADDR);
12399 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12400
12401 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12402 EEPROM_ADDR_READ);
12403 tw32(GRC_EEPROM_ADDR, val |
12404 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12405 (addr & EEPROM_ADDR_ADDR_MASK) |
12406 EEPROM_ADDR_START |
12407 EEPROM_ADDR_WRITE);
12408
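/* Poll for up to ~1 second for the EEPROM to signal that the
 * write cycle has completed.
 */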
12409 for (j = 0; j < 1000; j++) {
12410 val = tr32(GRC_EEPROM_ADDR);
12411
12412 if (val & EEPROM_ADDR_COMPLETE)
12413 break;
12414 msleep(1);
12415 }
12416 if (!(val & EEPROM_ADDR_COMPLETE)) {
12417 rc = -EBUSY;
12418 break;
12419 }
12420 }
12421
12422 return rc;
12423 }
12424
12425 /* offset and length are dword aligned */
12426 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12427 u8 *buf)
12428 {
12429 int ret = 0;
12430 u32 pagesize = tp->nvram_pagesize;
12431 u32 pagemask = pagesize - 1;
12432 u32 nvram_cmd;
12433 u8 *tmp;
12434
12435 tmp = kmalloc(pagesize, GFP_KERNEL);
12436 if (tmp == NULL)
12437 return -ENOMEM;
12438
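/* Unbuffered flash is erased a full page at a time, so perform a
 * read-modify-write cycle: read back the page, merge in the new
 * data, erase, then reprogram the whole page.
 */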
12439 while (len) {
12440 int j;
12441 u32 phy_addr, page_off, size;
12442
12443 phy_addr = offset & ~pagemask;
12444
12445 for (j = 0; j < pagesize; j += 4) {
12446 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12447 (__be32 *) (tmp + j));
12448 if (ret)
12449 break;
12450 }
12451 if (ret)
12452 break;
12453
12454 page_off = offset & pagemask;
12455 size = pagesize;
12456 if (len < size)
12457 size = len;
12458
12459 len -= size;
12460
12461 memcpy(tmp + page_off, buf, size);
12462
12463 offset = offset + (pagesize - page_off);
12464
12465 tg3_enable_nvram_access(tp);
12466
12467 /*
12468 * Before we can erase the flash page, we need
12469 * to issue a special "write enable" command.
12470 */
12471 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12472
12473 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12474 break;
12475
12476 /* Erase the target page */
12477 tw32(NVRAM_ADDR, phy_addr);
12478
12479 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12480 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12481
12482 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12483 break;
12484
12485 /* Issue another write enable to start the write. */
12486 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12487
12488 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12489 break;
12490
12491 for (j = 0; j < pagesize; j += 4) {
12492 __be32 data;
12493
12494 data = *((__be32 *) (tmp + j));
12495
12496 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12497
12498 tw32(NVRAM_ADDR, phy_addr + j);
12499
12500 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12501 NVRAM_CMD_WR;
12502
12503 if (j == 0)
12504 nvram_cmd |= NVRAM_CMD_FIRST;
12505 else if (j == (pagesize - 4))
12506 nvram_cmd |= NVRAM_CMD_LAST;
12507
12508 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12509 break;
12510 }
12511 if (ret)
12512 break;
12513 }
12514
12515 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12516 tg3_nvram_exec_cmd(tp, nvram_cmd);
12517
12518 kfree(tmp);
12519
12520 return ret;
12521 }
12522
12523 /* offset and length are dword aligned */
12524 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12525 u8 *buf)
12526 {
12527 int i, ret = 0;
12528
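/* NVRAM_CMD_FIRST opens a programming burst and NVRAM_CMD_LAST
 * closes it; a burst begins at a page (or data) start and must
 * end at a page boundary or at the end of the data.
 */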
12529 for (i = 0; i < len; i += 4, offset += 4) {
12530 u32 page_off, phy_addr, nvram_cmd;
12531 __be32 data;
12532
12533 memcpy(&data, buf + i, 4);
12534 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12535
12536 page_off = offset % tp->nvram_pagesize;
12537
12538 phy_addr = tg3_nvram_phys_addr(tp, offset);
12539
12540 tw32(NVRAM_ADDR, phy_addr);
12541
12542 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12543
12544 if (page_off == 0 || i == 0)
12545 nvram_cmd |= NVRAM_CMD_FIRST;
12546 if (page_off == (tp->nvram_pagesize - 4))
12547 nvram_cmd |= NVRAM_CMD_LAST;
12548
12549 if (i == (len - 4))
12550 nvram_cmd |= NVRAM_CMD_LAST;
12551
12552 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12553 !tg3_flag(tp, 5755_PLUS) &&
12554 (tp->nvram_jedecnum == JEDEC_ST) &&
12555 (nvram_cmd & NVRAM_CMD_FIRST)) {
12556
12557 if ((ret = tg3_nvram_exec_cmd(tp,
12558 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12559 NVRAM_CMD_DONE)))
12560
12561 break;
12562 }
12563 if (!tg3_flag(tp, FLASH)) {
12564 /* We always do complete word writes to eeprom. */
12565 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12566 }
12567
12568 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12569 break;
12570 }
12571 return ret;
12572 }
12573
12574 /* offset and length are dword aligned */
12575 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12576 {
12577 int ret;
12578
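/* On boards with EEPROM_WRITE_PROT, GPIO1 gates NVRAM writes;
 * drop the output for the duration of the write and restore
 * grc_local_ctrl afterwards.
 */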
12579 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12580 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12581 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12582 udelay(40);
12583 }
12584
12585 if (!tg3_flag(tp, NVRAM)) {
12586 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12587 } else {
12588 u32 grc_mode;
12589
12590 ret = tg3_nvram_lock(tp);
12591 if (ret)
12592 return ret;
12593
12594 tg3_enable_nvram_access(tp);
12595 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12596 tw32(NVRAM_WRITE1, 0x406);
12597
12598 grc_mode = tr32(GRC_MODE);
12599 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12600
12601 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12602 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12603 buf);
12604 } else {
12605 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12606 buf);
12607 }
12608
12609 grc_mode = tr32(GRC_MODE);
12610 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12611
12612 tg3_disable_nvram_access(tp);
12613 tg3_nvram_unlock(tp);
12614 }
12615
12616 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12617 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12618 udelay(40);
12619 }
12620
12621 return ret;
12622 }
12623
12624 struct subsys_tbl_ent {
12625 u16 subsys_vendor, subsys_devid;
12626 u32 phy_id;
12627 };
12628
12629 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12630 /* Broadcom boards. */
12631 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12632 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12633 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12634 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12635 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12636 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12637 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12638 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12639 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12640 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12641 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12642 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12643 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12644 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12645 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12646 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12647 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12648 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12649 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12650 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12651 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12652 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12653
12654 /* 3com boards. */
12655 { TG3PCI_SUBVENDOR_ID_3COM,
12656 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12657 { TG3PCI_SUBVENDOR_ID_3COM,
12658 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12659 { TG3PCI_SUBVENDOR_ID_3COM,
12660 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12661 { TG3PCI_SUBVENDOR_ID_3COM,
12662 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12663 { TG3PCI_SUBVENDOR_ID_3COM,
12664 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12665
12666 /* DELL boards. */
12667 { TG3PCI_SUBVENDOR_ID_DELL,
12668 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12669 { TG3PCI_SUBVENDOR_ID_DELL,
12670 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12671 { TG3PCI_SUBVENDOR_ID_DELL,
12672 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12673 { TG3PCI_SUBVENDOR_ID_DELL,
12674 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12675
12676 /* Compaq boards. */
12677 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12678 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12679 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12680 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12681 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12682 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12683 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12684 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12685 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12686 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12687
12688 /* IBM boards. */
12689 { TG3PCI_SUBVENDOR_ID_IBM,
12690 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12691 };
12692
12693 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12694 {
12695 int i;
12696
12697 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12698 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12699 tp->pdev->subsystem_vendor) &&
12700 (subsys_id_to_phy_id[i].subsys_devid ==
12701 tp->pdev->subsystem_device))
12702 return &subsys_id_to_phy_id[i];
12703 }
12704 return NULL;
12705 }
12706
12707 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12708 {
12709 u32 val;
12710 u16 pmcsr;
12711
12712 /* On some early chips the SRAM cannot be accessed in D3hot state,
12713 * so we need to make sure we're in D0.
12714 */
12715 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12716 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12717 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12718 msleep(1);
12719
12720 /* Make sure register accesses (indirect or otherwise)
12721 * will function correctly.
12722 */
12723 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12724 tp->misc_host_ctrl);
12725
12726 /* The memory arbiter has to be enabled in order for SRAM accesses
12727 * to succeed. Normally on powerup the tg3 chip firmware will make
12728 * sure it is enabled, but other entities such as system netboot
12729 * code might disable it.
12730 */
12731 val = tr32(MEMARB_MODE);
12732 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12733
12734 tp->phy_id = TG3_PHY_ID_INVALID;
12735 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12736
12737 /* Assume an onboard device and WOL capable by default. */
12738 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12739 tg3_flag_set(tp, WOL_CAP);
12740
12741 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12742 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12743 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12744 tg3_flag_set(tp, IS_NIC);
12745 }
12746 val = tr32(VCPU_CFGSHDW);
12747 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12748 tg3_flag_set(tp, ASPM_WORKAROUND);
12749 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12750 (val & VCPU_CFGSHDW_WOL_MAGPKT))
12751 tg3_flag_set(tp, WOL_ENABLE);
12752 goto done;
12753 }
12754
12755 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12756 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12757 u32 nic_cfg, led_cfg;
12758 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12759 int eeprom_phy_serdes = 0;
12760
12761 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12762 tp->nic_sram_data_cfg = nic_cfg;
12763
12764 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12765 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12766 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
12767 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
12768 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
12769 (ver > 0) && (ver < 0x100))
12770 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12771
12772 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12773 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12774
12775 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12776 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12777 eeprom_phy_serdes = 1;
12778
12779 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12780 if (nic_phy_id != 0) {
12781 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12782 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12783
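/* Repack the two ID words into the driver's internal
 * TG3_PHY_ID_* layout (this mirrors the MII_PHYSID
 * repacking done in tg3_phy_probe()).
 */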
12784 eeprom_phy_id = (id1 >> 16) << 10;
12785 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12786 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12787 } else
12788 eeprom_phy_id = 0;
12789
12790 tp->phy_id = eeprom_phy_id;
12791 if (eeprom_phy_serdes) {
12792 if (!tg3_flag(tp, 5705_PLUS))
12793 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12794 else
12795 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12796 }
12797
12798 if (tg3_flag(tp, 5750_PLUS))
12799 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12800 SHASTA_EXT_LED_MODE_MASK);
12801 else
12802 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12803
12804 switch (led_cfg) {
12805 default:
12806 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12807 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12808 break;
12809
12810 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12811 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12812 break;
12813
12814 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12815 tp->led_ctrl = LED_CTRL_MODE_MAC;
12816
12817 /* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
12818 * as happens with some older 5700/5701 bootcode.
12819 */
12820 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12821 ASIC_REV_5700 ||
12822 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12823 ASIC_REV_5701)
12824 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12825
12826 break;
12827
12828 case SHASTA_EXT_LED_SHARED:
12829 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12830 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12831 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12832 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12833 LED_CTRL_MODE_PHY_2);
12834 break;
12835
12836 case SHASTA_EXT_LED_MAC:
12837 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12838 break;
12839
12840 case SHASTA_EXT_LED_COMBO:
12841 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12842 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12843 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12844 LED_CTRL_MODE_PHY_2);
12845 break;
12846
12847 }
12848
12849 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12850 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12851 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12852 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12853
12854 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12855 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12856
12857 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12858 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12859 if ((tp->pdev->subsystem_vendor ==
12860 PCI_VENDOR_ID_ARIMA) &&
12861 (tp->pdev->subsystem_device == 0x205a ||
12862 tp->pdev->subsystem_device == 0x2063))
12863 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12864 } else {
12865 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12866 tg3_flag_set(tp, IS_NIC);
12867 }
12868
12869 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12870 tg3_flag_set(tp, ENABLE_ASF);
12871 if (tg3_flag(tp, 5750_PLUS))
12872 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12873 }
12874
12875 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12876 tg3_flag(tp, 5750_PLUS))
12877 tg3_flag_set(tp, ENABLE_APE);
12878
12879 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12880 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12881 tg3_flag_clear(tp, WOL_CAP);
12882
12883 if (tg3_flag(tp, WOL_CAP) &&
12884 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
12885 tg3_flag_set(tp, WOL_ENABLE);
12886
12887 if (cfg2 & (1 << 17))
12888 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12889
12890 /* SerDes signal pre-emphasis in register 0x590 is set by the
12891 * bootcode if bit 18 is set. */
12892 if (cfg2 & (1 << 18))
12893 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12894
12895 if ((tg3_flag(tp, 57765_PLUS) ||
12896 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12897 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12898 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12899 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12900
12901 if (tg3_flag(tp, PCI_EXPRESS) &&
12902 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12903 !tg3_flag(tp, 57765_PLUS)) {
12904 u32 cfg3;
12905
12906 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12907 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12908 tg3_flag_set(tp, ASPM_WORKAROUND);
12909 }
12910
12911 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12912 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12913 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12914 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
12915 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12916 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
12917 }
12918 done:
12919 if (tg3_flag(tp, WOL_CAP))
12920 device_set_wakeup_enable(&tp->pdev->dev,
12921 tg3_flag(tp, WOL_ENABLE));
12922 else
12923 device_set_wakeup_capable(&tp->pdev->dev, false);
12924 }
12925
12926 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12927 {
12928 int i;
12929 u32 val;
12930
12931 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12932 tw32(OTP_CTRL, cmd);
12933
12934 /* Wait for up to 1 ms for command to execute. */
12935 for (i = 0; i < 100; i++) {
12936 val = tr32(OTP_STATUS);
12937 if (val & OTP_STATUS_CMD_DONE)
12938 break;
12939 udelay(10);
12940 }
12941
12942 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12943 }
12944
12945 /* Read the gphy configuration from the OTP region of the chip. The gphy
12946 * configuration is a 32-bit value that straddles the alignment boundary.
12947 * We do two 32-bit reads and then shift and merge the results.
12948 */
12949 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12950 {
12951 u32 bhalf_otp, thalf_otp;
12952
12953 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12954
12955 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12956 return 0;
12957
12958 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12959
12960 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12961 return 0;
12962
12963 thalf_otp = tr32(OTP_READ_DATA);
12964
12965 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12966
12967 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12968 return 0;
12969
12970 bhalf_otp = tr32(OTP_READ_DATA);
12971
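/* Bits 15:0 of the top word supply bits 31:16 of the result;
 * bits 31:16 of the bottom word supply bits 15:0.
 */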
12972 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
12973 }
12974
12975 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12976 {
12977 u32 adv = ADVERTISED_Autoneg |
12978 ADVERTISED_Pause;
12979
12980 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12981 adv |= ADVERTISED_1000baseT_Half |
12982 ADVERTISED_1000baseT_Full;
12983
12984 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12985 adv |= ADVERTISED_100baseT_Half |
12986 ADVERTISED_100baseT_Full |
12987 ADVERTISED_10baseT_Half |
12988 ADVERTISED_10baseT_Full |
12989 ADVERTISED_TP;
12990 else
12991 adv |= ADVERTISED_FIBRE;
12992
12993 tp->link_config.advertising = adv;
12994 tp->link_config.speed = SPEED_INVALID;
12995 tp->link_config.duplex = DUPLEX_INVALID;
12996 tp->link_config.autoneg = AUTONEG_ENABLE;
12997 tp->link_config.active_speed = SPEED_INVALID;
12998 tp->link_config.active_duplex = DUPLEX_INVALID;
12999 tp->link_config.orig_speed = SPEED_INVALID;
13000 tp->link_config.orig_duplex = DUPLEX_INVALID;
13001 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13002 }
13003
13004 static int __devinit tg3_phy_probe(struct tg3 *tp)
13005 {
13006 u32 hw_phy_id_1, hw_phy_id_2;
13007 u32 hw_phy_id, hw_phy_id_masked;
13008 int err;
13009
13010 /* flow control autonegotiation is default behavior */
13011 tg3_flag_set(tp, PAUSE_AUTONEG);
13012 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13013
13014 if (tg3_flag(tp, USE_PHYLIB))
13015 return tg3_phy_init(tp);
13016
13017 /* Reading the PHY ID register can conflict with ASF
13018 * firmware access to the PHY hardware.
13019 */
13020 err = 0;
13021 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13022 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13023 } else {
13024 /* Now read the physical PHY_ID from the chip and verify
13025 * that it is sane. If it doesn't look good, we fall back
13026 * to the PHY_ID found in the eeprom area and, failing
13027 * that, the hard-coded subsystem device table.
13028 */
13029 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13030 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13031
13032 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13033 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13034 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13035
13036 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13037 }
13038
13039 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13040 tp->phy_id = hw_phy_id;
13041 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13042 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13043 else
13044 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13045 } else {
13046 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13047 /* Do nothing, phy ID already set up in
13048 * tg3_get_eeprom_hw_cfg().
13049 */
13050 } else {
13051 struct subsys_tbl_ent *p;
13052
13053 /* No eeprom signature? Try the hardcoded
13054 * subsys device table.
13055 */
13056 p = tg3_lookup_by_subsys(tp);
13057 if (!p)
13058 return -ENODEV;
13059
13060 tp->phy_id = p->phy_id;
13061 if (!tp->phy_id ||
13062 tp->phy_id == TG3_PHY_ID_BCM8002)
13063 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13064 }
13065 }
13066
13067 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13068 ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13069 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13070 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13071 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13072 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13073
13074 tg3_phy_init_link_config(tp);
13075
13076 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13077 !tg3_flag(tp, ENABLE_APE) &&
13078 !tg3_flag(tp, ENABLE_ASF)) {
13079 u32 bmsr, adv_reg, tg3_ctrl, mask;
13080
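/* The BMSR latches link-down events, so read it twice to get
 * the current link state.
 */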
13081 tg3_readphy(tp, MII_BMSR, &bmsr);
13082 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13083 (bmsr & BMSR_LSTATUS))
13084 goto skip_phy_reset;
13085
13086 err = tg3_phy_reset(tp);
13087 if (err)
13088 return err;
13089
13090 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
13091 ADVERTISE_100HALF | ADVERTISE_100FULL |
13092 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
13093 tg3_ctrl = 0;
13094 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
13095 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
13096 MII_TG3_CTRL_ADV_1000_FULL);
13097 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13098 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
13099 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
13100 MII_TG3_CTRL_ENABLE_AS_MASTER);
13101 }
13102
13103 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13104 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13105 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13106 if (!tg3_copper_is_advertising_all(tp, mask)) {
13107 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
13108
13109 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13110 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
13111
13112 tg3_writephy(tp, MII_BMCR,
13113 BMCR_ANENABLE | BMCR_ANRESTART);
13114 }
13115 tg3_phy_set_wirespeed(tp);
13116
13117 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
13118 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13119 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
13120 }
13121
13122 skip_phy_reset:
13123 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13124 err = tg3_init_5401phy_dsp(tp);
13125 if (err)
13126 return err;
13127
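/* The 5401 DSP setup is deliberately run a second time here,
 * presumably to work around a quirk in the PHY.
 */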
13128 err = tg3_init_5401phy_dsp(tp);
13129 }
13130
13131 return err;
13132 }
13133
13134 static void __devinit tg3_read_vpd(struct tg3 *tp)
13135 {
13136 u8 *vpd_data;
13137 unsigned int block_end, rosize, len;
13138 int j, i = 0;
13139
13140 vpd_data = (u8 *)tg3_vpd_readblock(tp);
13141 if (!vpd_data)
13142 goto out_no_vpd;
13143
13144 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13145 PCI_VPD_LRDT_RO_DATA);
13146 if (i < 0)
13147 goto out_not_found;
13148
13149 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13150 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13151 i += PCI_VPD_LRDT_TAG_SIZE;
13152
13153 if (block_end > TG3_NVM_VPD_LEN)
13154 goto out_not_found;
13155
13156 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13157 PCI_VPD_RO_KEYWORD_MFR_ID);
13158 if (j > 0) {
13159 len = pci_vpd_info_field_size(&vpd_data[j]);
13160
13161 j += PCI_VPD_INFO_FLD_HDR_SIZE;
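/* "1028" is PCI vendor ID 0x1028 (Dell) rendered as ASCII; the
 * VENDOR0 bootcode version below is only consulted on such boards.
 */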
13162 if (j + len > block_end || len != 4 ||
13163 memcmp(&vpd_data[j], "1028", 4))
13164 goto partno;
13165
13166 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13167 PCI_VPD_RO_KEYWORD_VENDOR0);
13168 if (j < 0)
13169 goto partno;
13170
13171 len = pci_vpd_info_field_size(&vpd_data[j]);
13172
13173 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13174 if (j + len > block_end)
13175 goto partno;
13176
13177 memcpy(tp->fw_ver, &vpd_data[j], len);
13178 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
13179 }
13180
13181 partno:
13182 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13183 PCI_VPD_RO_KEYWORD_PARTNO);
13184 if (i < 0)
13185 goto out_not_found;
13186
13187 len = pci_vpd_info_field_size(&vpd_data[i]);
13188
13189 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13190 if (len > TG3_BPN_SIZE ||
13191 (len + i) > TG3_NVM_VPD_LEN)
13192 goto out_not_found;
13193
13194 memcpy(tp->board_part_number, &vpd_data[i], len);
13195
13196 out_not_found:
13197 kfree(vpd_data);
13198 if (tp->board_part_number[0])
13199 return;
13200
13201 out_no_vpd:
13202 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13203 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13204 strcpy(tp->board_part_number, "BCM5717");
13205 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13206 strcpy(tp->board_part_number, "BCM5718");
13207 else
13208 goto nomatch;
13209 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13210 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13211 strcpy(tp->board_part_number, "BCM57780");
13212 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13213 strcpy(tp->board_part_number, "BCM57760");
13214 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13215 strcpy(tp->board_part_number, "BCM57790");
13216 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13217 strcpy(tp->board_part_number, "BCM57788");
13218 else
13219 goto nomatch;
13220 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13221 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13222 strcpy(tp->board_part_number, "BCM57761");
13223 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13224 strcpy(tp->board_part_number, "BCM57765");
13225 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13226 strcpy(tp->board_part_number, "BCM57781");
13227 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13228 strcpy(tp->board_part_number, "BCM57785");
13229 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13230 strcpy(tp->board_part_number, "BCM57791");
13231 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13232 strcpy(tp->board_part_number, "BCM57795");
13233 else
13234 goto nomatch;
13235 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13236 strcpy(tp->board_part_number, "BCM95906");
13237 } else {
13238 nomatch:
13239 strcpy(tp->board_part_number, "none");
13240 }
13241 }
13242
13243 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13244 {
13245 u32 val;
13246
13247 if (tg3_nvram_read(tp, offset, &val) ||
13248 (val & 0xfc000000) != 0x0c000000 ||
13249 tg3_nvram_read(tp, offset + 4, &val) ||
13250 val != 0)
13251 return 0;
13252
13253 return 1;
13254 }
13255
13256 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13257 {
13258 u32 val, offset, start, ver_offset;
13259 int i, dst_off;
13260 bool newver = false;
13261
13262 if (tg3_nvram_read(tp, 0xc, &offset) ||
13263 tg3_nvram_read(tp, 0x4, &start))
13264 return;
13265
13266 offset = tg3_nvram_logical_addr(tp, offset);
13267
13268 if (tg3_nvram_read(tp, offset, &val))
13269 return;
13270
13271 if ((val & 0xfc000000) == 0x0c000000) {
13272 if (tg3_nvram_read(tp, offset + 4, &val))
13273 return;
13274
13275 if (val == 0)
13276 newver = true;
13277 }
13278
13279 dst_off = strlen(tp->fw_ver);
13280
13281 if (newver) {
13282 if (TG3_VER_SIZE - dst_off < 16 ||
13283 tg3_nvram_read(tp, offset + 8, &ver_offset))
13284 return;
13285
13286 offset = offset + ver_offset - start;
13287 for (i = 0; i < 16; i += 4) {
13288 __be32 v;
13289 if (tg3_nvram_read_be32(tp, offset + i, &v))
13290 return;
13291
13292 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13293 }
13294 } else {
13295 u32 major, minor;
13296
13297 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13298 return;
13299
13300 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13301 TG3_NVM_BCVER_MAJSFT;
13302 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13303 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13304 "v%d.%02d", major, minor);
13305 }
13306 }
13307
13308 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13309 {
13310 u32 val, major, minor;
13311
13312 /* Use native endian representation */
13313 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13314 return;
13315
13316 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13317 TG3_NVM_HWSB_CFG1_MAJSFT;
13318 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13319 TG3_NVM_HWSB_CFG1_MINSFT;
13320
13321 snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13322 }
13323
13324 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13325 {
13326 u32 offset, major, minor, build;
13327
13328 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13329
13330 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13331 return;
13332
13333 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13334 case TG3_EEPROM_SB_REVISION_0:
13335 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13336 break;
13337 case TG3_EEPROM_SB_REVISION_2:
13338 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13339 break;
13340 case TG3_EEPROM_SB_REVISION_3:
13341 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13342 break;
13343 case TG3_EEPROM_SB_REVISION_4:
13344 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13345 break;
13346 case TG3_EEPROM_SB_REVISION_5:
13347 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13348 break;
13349 case TG3_EEPROM_SB_REVISION_6:
13350 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13351 break;
13352 default:
13353 return;
13354 }
13355
13356 if (tg3_nvram_read(tp, offset, &val))
13357 return;
13358
13359 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13360 TG3_EEPROM_SB_EDH_BLD_SHFT;
13361 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13362 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13363 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13364
13365 if (minor > 99 || build > 26)
13366 return;
13367
13368 offset = strlen(tp->fw_ver);
13369 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13370 " v%d.%02d", major, minor);
13371
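/* Non-zero build numbers become a letter suffix: build 1 is 'a',
 * build 2 is 'b', and so on (capped at 26 by the check above).
 */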
13372 if (build > 0) {
13373 offset = strlen(tp->fw_ver);
13374 if (offset < TG3_VER_SIZE - 1)
13375 tp->fw_ver[offset] = 'a' + build - 1;
13376 }
13377 }
13378
13379 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13380 {
13381 u32 val, offset, start;
13382 int i, vlen;
13383
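/* Walk the NVRAM directory for the ASF initialization entry, then
 * pull the version string out of the firmware image it points to.
 */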
13384 for (offset = TG3_NVM_DIR_START;
13385 offset < TG3_NVM_DIR_END;
13386 offset += TG3_NVM_DIRENT_SIZE) {
13387 if (tg3_nvram_read(tp, offset, &val))
13388 return;
13389
13390 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13391 break;
13392 }
13393
13394 if (offset == TG3_NVM_DIR_END)
13395 return;
13396
13397 if (!tg3_flag(tp, 5705_PLUS))
13398 start = 0x08000000;
13399 else if (tg3_nvram_read(tp, offset - 4, &start))
13400 return;
13401
13402 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13403 !tg3_fw_img_is_valid(tp, offset) ||
13404 tg3_nvram_read(tp, offset + 8, &val))
13405 return;
13406
13407 offset += val - start;
13408
13409 vlen = strlen(tp->fw_ver);
13410
13411 tp->fw_ver[vlen++] = ',';
13412 tp->fw_ver[vlen++] = ' ';
13413
13414 for (i = 0; i < 4; i++) {
13415 __be32 v;
13416 if (tg3_nvram_read_be32(tp, offset, &v))
13417 return;
13418
13419 offset += sizeof(v);
13420
13421 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13422 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13423 break;
13424 }
13425
13426 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13427 vlen += sizeof(v);
13428 }
13429 }
13430
13431 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13432 {
13433 int vlen;
13434 u32 apedata;
13435 char *fwtype;
13436
13437 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13438 return;
13439
13440 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13441 if (apedata != APE_SEG_SIG_MAGIC)
13442 return;
13443
13444 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13445 if (!(apedata & APE_FW_STATUS_READY))
13446 return;
13447
13448 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13449
13450 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13451 tg3_flag_set(tp, APE_HAS_NCSI);
13452 fwtype = "NCSI";
13453 } else {
13454 fwtype = "DASH";
13455 }
13456
13457 vlen = strlen(tp->fw_ver);
13458
13459 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13460 fwtype,
13461 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13462 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13463 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13464 (apedata & APE_FW_VERSION_BLDMSK));
13465 }
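/* Appends e.g. " NCSI v1.2.28.0" or " DASH v1.0.5.0" (illustrative
 * numbers); all four fields are unpacked from the single
 * TG3_APE_FW_VERSION word read above.
 */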
13466
13467 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13468 {
13469 u32 val;
13470 bool vpd_vers = false;
13471
13472 if (tp->fw_ver[0] != 0)
13473 vpd_vers = true;
13474
13475 if (tg3_flag(tp, NO_NVRAM)) {
13476 strcat(tp->fw_ver, "sb");
13477 return;
13478 }
13479
13480 if (tg3_nvram_read(tp, 0, &val))
13481 return;
13482
13483 if (val == TG3_EEPROM_MAGIC)
13484 tg3_read_bc_ver(tp);
13485 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13486 tg3_read_sb_ver(tp, val);
13487 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13488 tg3_read_hwsb_ver(tp);
13489 else
13490 return;
13491
13492 if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
13493 goto done;
13494
13495 tg3_read_mgmtfw_ver(tp);
13496
13497 done:
13498 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13499 }
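/* tp->fw_ver ends up as a concatenation of whichever sources were
 * found, always NUL terminated at TG3_VER_SIZE - 1.  Illustrative
 * shapes (actual contents depend on the NVRAM and APE images):
 *
 *	"v3.52"			bootcode only
 *	"sb v1.05b"		self-boot image
 *	"v3.52, ASF 1.03"	bootcode plus management firmware
 */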
13500
13501 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13502
13503 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13504 {
13505 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13506 return TG3_RX_RET_MAX_SIZE_5717;
13507 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13508 return TG3_RX_RET_MAX_SIZE_5700;
13509 else
13510 return TG3_RX_RET_MAX_SIZE_5705;
13511 }
13512
13513 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13514 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13515 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13516 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13517 { },
13518 };
13519
13520 static int __devinit tg3_get_invariants(struct tg3 *tp)
13521 {
13522 u32 misc_ctrl_reg;
13523 u32 pci_state_reg, grc_misc_cfg;
13524 u32 val;
13525 u16 pci_cmd;
13526 int err;
13527
13528 /* Force memory write invalidate off. If we leave it on,
13529 * then on 5700_BX chips we have to enable a workaround.
13530 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13531 * to match the cacheline size. The Broadcom driver has this
13532 * workaround but turns MWI off all the time and so never uses
13533 * it. This suggests that the workaround is insufficient.
13534 */
13535 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13536 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13537 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13538
13539 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13540 * has the register indirect write enable bit set before
13541 * we try to access any of the MMIO registers. It is also
13542 * critical that the PCI-X hw workaround situation is decided
13543 * before that as well.
13544 */
13545 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13546 &misc_ctrl_reg);
13547
13548 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13549 MISC_HOST_CTRL_CHIPREV_SHIFT);
13550 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13551 u32 prod_id_asic_rev;
13552
13553 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13554 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13555 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13556 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13557 pci_read_config_dword(tp->pdev,
13558 TG3PCI_GEN2_PRODID_ASICREV,
13559 &prod_id_asic_rev);
13560 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13561 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13562 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13563 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13564 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13565 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13566 pci_read_config_dword(tp->pdev,
13567 TG3PCI_GEN15_PRODID_ASICREV,
13568 &prod_id_asic_rev);
13569 else
13570 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13571 &prod_id_asic_rev);
13572
13573 tp->pci_chip_rev_id = prod_id_asic_rev;
13574 }
13575
13576 /* Wrong chip ID in 5752 A0. This code can be removed later
13577 * as A0 is not in production.
13578 */
13579 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13580 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13581
13582 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13583 * we need to disable memory and use config. cycles
13584 * only to access all registers. The 5702/03 chips
13585 * can mistakenly decode the special cycles from the
13586 * ICH chipsets as memory write cycles, causing corruption
13587 * of register and memory space. Only certain ICH bridges
13588 * will drive special cycles with non-zero data during the
13589 * address phase which can fall within the 5703's address
13590 * range. This is not an ICH bug as the PCI spec allows
13591 * non-zero address during special cycles. However, only
13592 * these ICH bridges are known to drive non-zero addresses
13593 * during special cycles.
13594 *
13595 * Since special cycles do not cross PCI bridges, we only
13596 * enable this workaround if the 5703 is on the secondary
13597 * bus of these ICH bridges.
13598 */
13599 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13600 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13601 static struct tg3_dev_id {
13602 u32 vendor;
13603 u32 device;
13604 u32 rev;
13605 } ich_chipsets[] = {
13606 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13607 PCI_ANY_ID },
13608 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13609 PCI_ANY_ID },
13610 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13611 0xa },
13612 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13613 PCI_ANY_ID },
13614 { },
13615 };
13616 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13617 struct pci_dev *bridge = NULL;
13618
13619 while (pci_id->vendor != 0) {
13620 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13621 bridge);
13622 if (!bridge) {
13623 pci_id++;
13624 continue;
13625 }
13626 if (pci_id->rev != PCI_ANY_ID) {
13627 if (bridge->revision > pci_id->rev)
13628 continue;
13629 }
13630 if (bridge->subordinate &&
13631 (bridge->subordinate->number ==
13632 tp->pdev->bus->number)) {
13633 tg3_flag_set(tp, ICH_WORKAROUND);
13634 pci_dev_put(bridge);
13635 break;
13636 }
13637 }
13638 }
13639
13640 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
13641 static struct tg3_dev_id {
13642 u32 vendor;
13643 u32 device;
13644 } bridge_chipsets[] = {
13645 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13646 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13647 { },
13648 };
13649 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13650 struct pci_dev *bridge = NULL;
13651
13652 while (pci_id->vendor != 0) {
13653 bridge = pci_get_device(pci_id->vendor,
13654 pci_id->device,
13655 bridge);
13656 if (!bridge) {
13657 pci_id++;
13658 continue;
13659 }
13660 if (bridge->subordinate &&
13661 (bridge->subordinate->number <=
13662 tp->pdev->bus->number) &&
13663 (bridge->subordinate->subordinate >=
13664 tp->pdev->bus->number)) {
13665 tg3_flag_set(tp, 5701_DMA_BUG);
13666 pci_dev_put(bridge);
13667 break;
13668 }
13669 }
13670 }
13671
13672 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13673 * DMA addresses > 40-bit. This bridge may have additional
13674 * 57xx devices behind it in some 4-port NIC designs, for example.
13675 * Any tg3 device found behind the bridge will also need the 40-bit
13676 * DMA workaround.
13677 */
13678 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13679 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13680 tg3_flag_set(tp, 5780_CLASS);
13681 tg3_flag_set(tp, 40BIT_DMA_BUG);
13682 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13683 } else {
13684 struct pci_dev *bridge = NULL;
13685
13686 do {
13687 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13688 PCI_DEVICE_ID_SERVERWORKS_EPB,
13689 bridge);
13690 if (bridge && bridge->subordinate &&
13691 (bridge->subordinate->number <=
13692 tp->pdev->bus->number) &&
13693 (bridge->subordinate->subordinate >=
13694 tp->pdev->bus->number)) {
13695 tg3_flag_set(tp, 40BIT_DMA_BUG);
13696 pci_dev_put(bridge);
13697 break;
13698 }
13699 } while (bridge);
13700 }
13701
13702 /* Initialize misc host control in PCI block. */
13703 tp->misc_host_ctrl |= (misc_ctrl_reg &
13704 MISC_HOST_CTRL_CHIPREV);
13705 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13706 tp->misc_host_ctrl);
13707
13708 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13709 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13710 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13711 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13712 tp->pdev_peer = tg3_find_peer(tp);
13713
13714 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13715 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13716 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13717 tg3_flag_set(tp, 5717_PLUS);
13718
13719 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13720 tg3_flag(tp, 5717_PLUS))
13721 tg3_flag_set(tp, 57765_PLUS);
13722
13723 /* Intentionally exclude ASIC_REV_5906 */
13724 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13725 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13726 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13727 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13728 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13729 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13730 tg3_flag(tp, 57765_PLUS))
13731 tg3_flag_set(tp, 5755_PLUS);
13732
13733 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13734 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13735 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13736 tg3_flag(tp, 5755_PLUS) ||
13737 tg3_flag(tp, 5780_CLASS))
13738 tg3_flag_set(tp, 5750_PLUS);
13739
13740 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
13741 tg3_flag(tp, 5750_PLUS))
13742 tg3_flag_set(tp, 5705_PLUS);
13743
13744 /* 5700 B0 chips do not support checksumming correctly due
13745 * to hardware bugs.
13746 */
13747 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
13748 u32 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
13749
13750 if (tg3_flag(tp, 5755_PLUS))
13751 features |= NETIF_F_IPV6_CSUM;
13752 tp->dev->features |= features;
13753 tp->dev->hw_features |= features;
13754 tp->dev->vlan_features |= features;
13755 }
13756
13757 /* Determine TSO capabilities */
13758 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13759 ; /* Do nothing. HW bug. */
13760 else if (tg3_flag(tp, 57765_PLUS))
13761 tg3_flag_set(tp, HW_TSO_3);
13762 else if (tg3_flag(tp, 5755_PLUS) ||
13763 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13764 tg3_flag_set(tp, HW_TSO_2);
13765 else if (tg3_flag(tp, 5750_PLUS)) {
13766 tg3_flag_set(tp, HW_TSO_1);
13767 tg3_flag_set(tp, TSO_BUG);
13768 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13769 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13770 tg3_flag_clear(tp, TSO_BUG);
13771 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13772 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13773 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13774 tg3_flag_set(tp, TSO_BUG);
13775 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13776 tp->fw_needed = FIRMWARE_TG3TSO5;
13777 else
13778 tp->fw_needed = FIRMWARE_TG3TSO;
13779 }
13780
13781 tp->irq_max = 1;
13782
13783 if (tg3_flag(tp, 5750_PLUS)) {
13784 tg3_flag_set(tp, SUPPORT_MSI);
13785 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13786 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13787 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13788 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13789 tp->pdev_peer == tp->pdev))
13790 tg3_flag_clear(tp, SUPPORT_MSI);
13791
13792 if (tg3_flag(tp, 5755_PLUS) ||
13793 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13794 tg3_flag_set(tp, 1SHOT_MSI);
13795 }
13796
13797 if (tg3_flag(tp, 57765_PLUS)) {
13798 tg3_flag_set(tp, SUPPORT_MSIX);
13799 tp->irq_max = TG3_IRQ_MAX_VECS;
13800 }
13801 }
13802
13803 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13804 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13805 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13806 tg3_flag_set(tp, SHORT_DMA_BUG);
13807 else if (!tg3_flag(tp, 5755_PLUS)) {
13808 tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
13809 tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
13810 }
13811
13812 if (tg3_flag(tp, 5717_PLUS))
13813 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13814
13815 if (tg3_flag(tp, 57765_PLUS) &&
13816 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13817 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13818
13819 if (!tg3_flag(tp, 5705_PLUS) ||
13820 tg3_flag(tp, 5780_CLASS) ||
13821 tg3_flag(tp, USE_JUMBO_BDFLAG))
13822 tg3_flag_set(tp, JUMBO_CAPABLE);
13823
13824 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13825 &pci_state_reg);
13826
13827 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13828 if (tp->pcie_cap != 0) {
13829 u16 lnkctl;
13830
13831 tg3_flag_set(tp, PCI_EXPRESS);
13832
13833 tp->pcie_readrq = 4096;
13834 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13835 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13836 tp->pcie_readrq = 2048;
13837
13838 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13839
13840 pci_read_config_word(tp->pdev,
13841 tp->pcie_cap + PCI_EXP_LNKCTL,
13842 &lnkctl);
13843 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13844 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13845 tg3_flag_clear(tp, HW_TSO_2);
13846 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13847 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13848 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13849 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13850 tg3_flag_set(tp, CLKREQ_BUG);
13851 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13852 tg3_flag_set(tp, L1PLLPD_EN);
13853 }
13854 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13855 tg3_flag_set(tp, PCI_EXPRESS);
13856 } else if (!tg3_flag(tp, 5705_PLUS) ||
13857 tg3_flag(tp, 5780_CLASS)) {
13858 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13859 if (!tp->pcix_cap) {
13860 dev_err(&tp->pdev->dev,
13861 "Cannot find PCI-X capability, aborting\n");
13862 return -EIO;
13863 }
13864
13865 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13866 tg3_flag_set(tp, PCIX_MODE);
13867 }
13868
13869 /* If we have an AMD 762 or VIA K8T800 chipset, write
13870 * reordering to the mailbox registers done by the host
13871 * controller can cause major troubles. We read back from
13872 * every mailbox register write to force the writes to be
13873 * posted to the chip in order.
13874 */
13875 if (pci_dev_present(tg3_write_reorder_chipsets) &&
13876 !tg3_flag(tp, PCI_EXPRESS))
13877 tg3_flag_set(tp, MBOX_WRITE_REORDER);
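	/* A minimal sketch of the mailbox write pattern this flag
	 * selects, assuming the flush-style accessors installed further
	 * below; the read back forces the posted write to reach the
	 * chip in order on these host bridges:
	 */
#if 0
	writel(val, tp->regs + off);	/* post the mailbox write */
	readl(tp->regs + off);		/* read back to serialize it */
#endif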
13878
13879 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13880 &tp->pci_cacheline_sz);
13881 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13882 &tp->pci_lat_timer);
13883 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13884 tp->pci_lat_timer < 64) {
13885 tp->pci_lat_timer = 64;
13886 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13887 tp->pci_lat_timer);
13888 }
13889
13890 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13891 /* 5700 BX chips need to have their TX producer index
13892 * mailboxes written twice to work around a bug.
13893 */
13894 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13895
13896 /* If we are in PCI-X mode, enable register write workaround.
13897 *
13898 * The workaround is to use indirect register accesses
13899 * for all chip writes except those to mailbox registers.
13900 */
13901 if (tg3_flag(tp, PCIX_MODE)) {
13902 u32 pm_reg;
13903
13904 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13905
13906 /* The chip can have its power management PCI config
13907 * space registers clobbered due to this bug.
13908 * So explicitly force the chip into D0 here.
13909 */
13910 pci_read_config_dword(tp->pdev,
13911 tp->pm_cap + PCI_PM_CTRL,
13912 &pm_reg);
13913 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13914 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13915 pci_write_config_dword(tp->pdev,
13916 tp->pm_cap + PCI_PM_CTRL,
13917 pm_reg);
13918
13919 /* Also, force SERR#/PERR# in PCI command. */
13920 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13921 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13922 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13923 }
13924 }
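	/* Hedged sketch of the double-write TX mailbox accessor that
	 * TXD_MBOX_HWBUG selects (see tg3_write32_tx_mbox earlier in
	 * this file); names here are illustrative:
	 */
#if 0
	writel(val, mbox);
	writel(val, mbox);	/* second write works around the 5700 BX bug */
#endif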
13925
13926 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13927 tg3_flag_set(tp, PCI_HIGH_SPEED);
13928 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13929 tg3_flag_set(tp, PCI_32BIT);
13930
13931 /* Chip-specific fixup from Broadcom driver */
13932 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13933 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13934 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13935 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13936 }
13937
13938 /* Default fast path register access methods */
13939 tp->read32 = tg3_read32;
13940 tp->write32 = tg3_write32;
13941 tp->read32_mbox = tg3_read32;
13942 tp->write32_mbox = tg3_write32;
13943 tp->write32_tx_mbox = tg3_write32;
13944 tp->write32_rx_mbox = tg3_write32;
13945
13946 /* Various workaround register access methods */
13947 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
13948 tp->write32 = tg3_write_indirect_reg32;
13949 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13950 (tg3_flag(tp, PCI_EXPRESS) &&
13951 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13952 /*
13953 * Back-to-back register writes can cause problems on these
13954 * chips; the workaround is to read back all reg writes
13955 * except those to mailbox regs.
13956 *
13957 * See tg3_write_indirect_reg32().
13958 */
13959 tp->write32 = tg3_write_flush_reg32;
13960 }
13961
13962 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
13963 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13964 if (tg3_flag(tp, MBOX_WRITE_REORDER))
13965 tp->write32_rx_mbox = tg3_write_flush_reg32;
13966 }
13967
13968 if (tg3_flag(tp, ICH_WORKAROUND)) {
13969 tp->read32 = tg3_read_indirect_reg32;
13970 tp->write32 = tg3_write_indirect_reg32;
13971 tp->read32_mbox = tg3_read_indirect_mbox;
13972 tp->write32_mbox = tg3_write_indirect_mbox;
13973 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13974 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13975
13976 iounmap(tp->regs);
13977 tp->regs = NULL;
13978
13979 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13980 pci_cmd &= ~PCI_COMMAND_MEMORY;
13981 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13982 }
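	/* With the ICH workaround active, all register and mailbox
	 * traffic goes through PCI config space rather than MMIO.
	 * Hedged sketch of the indirect write path (the real helpers
	 * defined earlier in this file also take tp->indirect_lock):
	 */
#if 0
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
#endif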
13983 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13984 tp->read32_mbox = tg3_read32_mbox_5906;
13985 tp->write32_mbox = tg3_write32_mbox_5906;
13986 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13987 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13988 }
13989
13990 if (tp->write32 == tg3_write_indirect_reg32 ||
13991 (tg3_flag(tp, PCIX_MODE) &&
13992 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13993 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13994 tg3_flag_set(tp, SRAM_USE_CONFIG);
13995
13996 /* Get eeprom hw config before calling tg3_set_power_state().
13997 * In particular, the TG3_FLAG_IS_NIC flag must be
13998 * determined before calling tg3_set_power_state() so that
13999 * we know whether or not to switch out of Vaux power.
14000 * When the flag is set, it means that GPIO1 is used for eeprom
14001 * write protect and also implies that it is a LOM where GPIOs
14002 * are not used to switch power.
14003 */
14004 tg3_get_eeprom_hw_cfg(tp);
14005
14006 if (tg3_flag(tp, ENABLE_APE)) {
14007 /* Allow reads and writes to the
14008 * APE register and memory space.
14009 */
14010 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14011 PCISTATE_ALLOW_APE_SHMEM_WR |
14012 PCISTATE_ALLOW_APE_PSPACE_WR;
14013 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14014 pci_state_reg);
14015 }
14016
14017 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14018 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14019 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14020 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14021 tg3_flag(tp, 57765_PLUS))
14022 tg3_flag_set(tp, CPMU_PRESENT);
14023
14024 /* Set up tp->grc_local_ctrl before calling tg3_power_up().
14025 * GPIO1 driven high will bring 5700's external PHY out of reset.
14026 * It is also used as eeprom write protect on LOMs.
14027 */
14028 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14029 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
14030 tg3_flag(tp, EEPROM_WRITE_PROT))
14031 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14032 GRC_LCLCTRL_GPIO_OUTPUT1);
14033 /* Unused GPIO3 must be driven as output on 5752 because there
14034 * are no pull-up resistors on unused GPIO pins.
14035 */
14036 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14037 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14038
14039 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14040 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14041 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14042 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14043
14044 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14045 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14046 /* Turn off the debug UART. */
14047 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14048 if (tg3_flag(tp, IS_NIC))
14049 /* Keep VMain power. */
14050 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14051 GRC_LCLCTRL_GPIO_OUTPUT0;
14052 }
14053
14054 /* Force the chip into D0. */
14055 err = tg3_power_up(tp);
14056 if (err) {
14057 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
14058 return err;
14059 }
14060
14061 /* Derive initial jumbo mode from MTU assigned in
14062 * ether_setup() via the alloc_etherdev() call
14063 */
14064 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14065 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14066
14067 /* Determine WakeOnLan speed to use. */
14068 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14069 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14070 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14071 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14072 tg3_flag_clear(tp, WOL_SPEED_100MB);
14073 } else {
14074 tg3_flag_set(tp, WOL_SPEED_100MB);
14075 }
14076
14077 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14078 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14079
14080 /* A few boards don't want the Ethernet@WireSpeed phy feature */
14081 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
14082 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
14083 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14084 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14085 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14086 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14087 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14088
14089 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14090 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14091 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14092 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14093 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14094
14095 if (tg3_flag(tp, 5705_PLUS) &&
14096 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14097 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14098 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14099 !tg3_flag(tp, 57765_PLUS)) {
14100 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14101 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14102 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14103 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14104 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14105 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14106 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14107 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14108 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14109 } else
14110 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14111 }
14112
14113 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14114 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14115 tp->phy_otp = tg3_read_otp_phycfg(tp);
14116 if (tp->phy_otp == 0)
14117 tp->phy_otp = TG3_OTP_DEFAULT;
14118 }
14119
14120 if (tg3_flag(tp, CPMU_PRESENT))
14121 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14122 else
14123 tp->mi_mode = MAC_MI_MODE_BASE;
14124
14125 tp->coalesce_mode = 0;
14126 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14127 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14128 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14129
14130 /* Set these bits to enable statistics workaround. */
14131 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14132 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14133 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14134 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14135 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14136 }
14137
14138 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14139 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14140 tg3_flag_set(tp, USE_PHYLIB);
14141
14142 err = tg3_mdio_init(tp);
14143 if (err)
14144 return err;
14145
14146 /* Initialize data/descriptor byte/word swapping. */
14147 val = tr32(GRC_MODE);
14148 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14149 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14150 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14151 GRC_MODE_B2HRX_ENABLE |
14152 GRC_MODE_HTX2B_ENABLE |
14153 GRC_MODE_HOST_STACKUP);
14154 else
14155 val &= GRC_MODE_HOST_STACKUP;
14156
14157 tw32(GRC_MODE, val | tp->grc_mode);
14158
14159 tg3_switch_clocks(tp);
14160
14161 /* Clear this out for sanity. */
14162 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14163
14164 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14165 &pci_state_reg);
14166 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14167 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14168 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14169
14170 if (chiprevid == CHIPREV_ID_5701_A0 ||
14171 chiprevid == CHIPREV_ID_5701_B0 ||
14172 chiprevid == CHIPREV_ID_5701_B2 ||
14173 chiprevid == CHIPREV_ID_5701_B5) {
14174 void __iomem *sram_base;
14175
14176 /* Write some dummy words into the SRAM status block
14177 * area and see if they read back correctly. If the
14178 * readback is bad, force-enable the PCIX workaround.
14179 */
14180 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14181
14182 writel(0x00000000, sram_base);
14183 writel(0x00000000, sram_base + 4);
14184 writel(0xffffffff, sram_base + 4);
14185 if (readl(sram_base) != 0x00000000)
14186 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14187 }
14188 }
14189
14190 udelay(50);
14191 tg3_nvram_init(tp);
14192
14193 grc_misc_cfg = tr32(GRC_MISC_CFG);
14194 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14195
14196 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14197 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14198 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14199 tg3_flag_set(tp, IS_5788);
14200
14201 if (!tg3_flag(tp, IS_5788) &&
14202 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
14203 tg3_flag_set(tp, TAGGED_STATUS);
14204 if (tg3_flag(tp, TAGGED_STATUS)) {
14205 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14206 HOSTCC_MODE_CLRTICK_TXBD);
14207
14208 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14209 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14210 tp->misc_host_ctrl);
14211 }
14212
14213 /* Preserve the APE MAC_MODE bits */
14214 if (tg3_flag(tp, ENABLE_APE))
14215 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14216 else
14217 tp->mac_mode = TG3_DEF_MAC_MODE;
14218
14219 /* these are limited to 10/100 only */
14220 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14221 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14222 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14223 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14224 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14225 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14226 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14227 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14228 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14229 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14230 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14231 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14232 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14233 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14234 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14235 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14236
14237 err = tg3_phy_probe(tp);
14238 if (err) {
14239 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14240 /* ... but do not return immediately ... */
14241 tg3_mdio_fini(tp);
14242 }
14243
14244 tg3_read_vpd(tp);
14245 tg3_read_fw_ver(tp);
14246
14247 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14248 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14249 } else {
14250 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14251 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14252 else
14253 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14254 }
14255
14256 /* 5700 {AX,BX} chips have a broken status block link
14257 * change bit implementation, so we must use the
14258 * status register in those cases.
14259 */
14260 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14261 tg3_flag_set(tp, USE_LINKCHG_REG);
14262 else
14263 tg3_flag_clear(tp, USE_LINKCHG_REG);
14264
14265 /* The led_ctrl is set during tg3_phy_probe; here we might
14266 * have to force the link status polling mechanism based
14267 * upon subsystem IDs.
14268 */
14269 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14270 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14271 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14272 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14273 tg3_flag_set(tp, USE_LINKCHG_REG);
14274 }
14275
14276 /* For all SERDES we poll the MAC status register. */
14277 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14278 tg3_flag_set(tp, POLL_SERDES);
14279 else
14280 tg3_flag_clear(tp, POLL_SERDES);
14281
14282 tp->rx_offset = NET_IP_ALIGN;
14283 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14284 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14285 tg3_flag(tp, PCIX_MODE)) {
14286 tp->rx_offset = 0;
14287 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14288 tp->rx_copy_thresh = ~(u16)0;
14289 #endif
14290 }
14291
14292 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14293 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14294 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14295
14296 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14297
14298 /* Increment the rx prod index on the rx std ring by at most
14299 * 8 for these chips to workaround hw errata.
14300 */
14301 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14302 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14303 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14304 tp->rx_std_max_post = 8;
14305
14306 if (tg3_flag(tp, ASPM_WORKAROUND))
14307 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14308 PCIE_PWR_MGMT_L1_THRESH_MSK;
14309
14310 return err;
14311 }
14312
14313 #ifdef CONFIG_SPARC
14314 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14315 {
14316 struct net_device *dev = tp->dev;
14317 struct pci_dev *pdev = tp->pdev;
14318 struct device_node *dp = pci_device_to_OF_node(pdev);
14319 const unsigned char *addr;
14320 int len;
14321
14322 addr = of_get_property(dp, "local-mac-address", &len);
14323 if (addr && len == 6) {
14324 memcpy(dev->dev_addr, addr, 6);
14325 memcpy(dev->perm_addr, dev->dev_addr, 6);
14326 return 0;
14327 }
14328 return -ENODEV;
14329 }
14330
14331 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14332 {
14333 struct net_device *dev = tp->dev;
14334
14335 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14336 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14337 return 0;
14338 }
14339 #endif
14340
14341 static int __devinit tg3_get_device_address(struct tg3 *tp)
14342 {
14343 struct net_device *dev = tp->dev;
14344 u32 hi, lo, mac_offset;
14345 int addr_ok = 0;
14346
14347 #ifdef CONFIG_SPARC
14348 if (!tg3_get_macaddr_sparc(tp))
14349 return 0;
14350 #endif
14351
14352 mac_offset = 0x7c;
14353 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
14354 tg3_flag(tp, 5780_CLASS)) {
14355 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14356 mac_offset = 0xcc;
14357 if (tg3_nvram_lock(tp))
14358 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14359 else
14360 tg3_nvram_unlock(tp);
14361 } else if (tg3_flag(tp, 5717_PLUS)) {
14362 if (PCI_FUNC(tp->pdev->devfn) & 1)
14363 mac_offset = 0xcc;
14364 if (PCI_FUNC(tp->pdev->devfn) > 1)
14365 mac_offset += 0x18c;
14366 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14367 mac_offset = 0x10;
14368
14369 /* First try to get it from MAC address mailbox. */
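	/* 0x484b below is the ASCII pair "HK" that bootcode appears to
	 * stamp ahead of a valid address; e.g. hi = 0x484b0a1b and
	 * lo = 0x2c3d4e5f (illustrative) decode to 0a:1b:2c:3d:4e:5f.
	 */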
14370 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14371 if ((hi >> 16) == 0x484b) {
14372 dev->dev_addr[0] = (hi >> 8) & 0xff;
14373 dev->dev_addr[1] = (hi >> 0) & 0xff;
14374
14375 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14376 dev->dev_addr[2] = (lo >> 24) & 0xff;
14377 dev->dev_addr[3] = (lo >> 16) & 0xff;
14378 dev->dev_addr[4] = (lo >> 8) & 0xff;
14379 dev->dev_addr[5] = (lo >> 0) & 0xff;
14380
14381 /* Some old bootcode may report a 0 MAC address in SRAM */
14382 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14383 }
14384 if (!addr_ok) {
14385 /* Next, try NVRAM. */
14386 if (!tg3_flag(tp, NO_NVRAM) &&
14387 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14388 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14389 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14390 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14391 }
14392 /* Finally just fetch it out of the MAC control regs. */
14393 else {
14394 hi = tr32(MAC_ADDR_0_HIGH);
14395 lo = tr32(MAC_ADDR_0_LOW);
14396
14397 dev->dev_addr[5] = lo & 0xff;
14398 dev->dev_addr[4] = (lo >> 8) & 0xff;
14399 dev->dev_addr[3] = (lo >> 16) & 0xff;
14400 dev->dev_addr[2] = (lo >> 24) & 0xff;
14401 dev->dev_addr[1] = hi & 0xff;
14402 dev->dev_addr[0] = (hi >> 8) & 0xff;
14403 }
14404 }
14405
14406 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14407 #ifdef CONFIG_SPARC
14408 if (!tg3_get_default_macaddr_sparc(tp))
14409 return 0;
14410 #endif
14411 return -EINVAL;
14412 }
14413 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14414 return 0;
14415 }
14416
14417 #define BOUNDARY_SINGLE_CACHELINE 1
14418 #define BOUNDARY_MULTI_CACHELINE 2
14419
14420 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14421 {
14422 int cacheline_size;
14423 u8 byte;
14424 int goal;
14425
14426 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14427 if (byte == 0)
14428 cacheline_size = 1024;
14429 else
14430 cacheline_size = (int) byte * 4;
14431
14432 /* On 5703 and later chips, the boundary bits have no
14433 * effect.
14434 */
14435 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14436 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14437 !tg3_flag(tp, PCI_EXPRESS))
14438 goto out;
14439
14440 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14441 goal = BOUNDARY_MULTI_CACHELINE;
14442 #else
14443 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14444 goal = BOUNDARY_SINGLE_CACHELINE;
14445 #else
14446 goal = 0;
14447 #endif
14448 #endif
14449
14450 if (tg3_flag(tp, 57765_PLUS)) {
14451 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14452 goto out;
14453 }
14454
14455 if (!goal)
14456 goto out;
14457
14458 /* PCI controllers on most RISC systems tend to disconnect
14459 * when a device tries to burst across a cache-line boundary.
14460 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14461 *
14462 * Unfortunately, for PCI-E there are only limited
14463 * write-side controls for this, and thus for reads
14464 * we will still get the disconnects. We'll also waste
14465 * these PCI cycles for both read and write for chips
14466 * other than 5700 and 5701, which do not implement the
14467 * boundary bits.
14468 */
14469 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14470 switch (cacheline_size) {
14471 case 16:
14472 case 32:
14473 case 64:
14474 case 128:
14475 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14476 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14477 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14478 } else {
14479 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14480 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14481 }
14482 break;
14483
14484 case 256:
14485 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14486 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14487 break;
14488
14489 default:
14490 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14491 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14492 break;
14493 }
14494 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14495 switch (cacheline_size) {
14496 case 16:
14497 case 32:
14498 case 64:
14499 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14500 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14501 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14502 break;
14503 }
14504 /* fallthrough */
14505 case 128:
14506 default:
14507 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14508 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14509 break;
14510 }
14511 } else {
14512 switch (cacheline_size) {
14513 case 16:
14514 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14515 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14516 DMA_RWCTRL_WRITE_BNDRY_16);
14517 break;
14518 }
14519 /* fallthrough */
14520 case 32:
14521 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14522 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14523 DMA_RWCTRL_WRITE_BNDRY_32);
14524 break;
14525 }
14526 /* fallthrough */
14527 case 64:
14528 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14529 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14530 DMA_RWCTRL_WRITE_BNDRY_64);
14531 break;
14532 }
14533 /* fallthrough */
14534 case 128:
14535 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14536 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14537 DMA_RWCTRL_WRITE_BNDRY_128);
14538 break;
14539 }
14540 /* fallthrough */
14541 case 256:
14542 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14543 DMA_RWCTRL_WRITE_BNDRY_256);
14544 break;
14545 case 512:
14546 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14547 DMA_RWCTRL_WRITE_BNDRY_512);
14548 break;
14549 case 1024:
14550 default:
14551 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14552 DMA_RWCTRL_WRITE_BNDRY_1024);
14553 break;
14554 }
14555 }
14556
14557 out:
14558 return val;
14559 }
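/* Worked example: PCI_CACHE_LINE_SIZE is in 4-byte units, so a raw
 * value of 0x10 means a 64-byte cacheline.  On a plain PCI 5700 with
 * goal == BOUNDARY_SINGLE_CACHELINE, the switch above then ORs in
 * DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64.
 */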
14560
14561 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14562 {
14563 struct tg3_internal_buffer_desc test_desc;
14564 u32 sram_dma_descs;
14565 int i, ret;
14566
14567 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14568
14569 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14570 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14571 tw32(RDMAC_STATUS, 0);
14572 tw32(WDMAC_STATUS, 0);
14573
14574 tw32(BUFMGR_MODE, 0);
14575 tw32(FTQ_RESET, 0);
14576
14577 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14578 test_desc.addr_lo = buf_dma & 0xffffffff;
14579 test_desc.nic_mbuf = 0x00002100;
14580 test_desc.len = size;
14581
14582 /*
14583 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14584 * the *second* time the tg3 driver was getting loaded after an
14585 * initial scan.
14586 *
14587 * Broadcom tells me:
14588 * ...the DMA engine is connected to the GRC block and a DMA
14589 * reset may affect the GRC block in some unpredictable way...
14590 * The behavior of resets to individual blocks has not been tested.
14591 *
14592 * Broadcom noted the GRC reset will also reset all sub-components.
14593 */
14594 if (to_device) {
14595 test_desc.cqid_sqid = (13 << 8) | 2;
14596
14597 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14598 udelay(40);
14599 } else {
14600 test_desc.cqid_sqid = (16 << 8) | 7;
14601
14602 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14603 udelay(40);
14604 }
14605 test_desc.flags = 0x00000005;
14606
14607 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14608 u32 val;
14609
14610 val = *(((u32 *)&test_desc) + i);
14611 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14612 sram_dma_descs + (i * sizeof(u32)));
14613 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14614 }
14615 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14616
14617 if (to_device)
14618 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14619 else
14620 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14621
14622 ret = -ENODEV;
14623 for (i = 0; i < 40; i++) {
14624 u32 val;
14625
14626 if (to_device)
14627 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14628 else
14629 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14630 if ((val & 0xffff) == sram_dma_descs) {
14631 ret = 0;
14632 break;
14633 }
14634
14635 udelay(100);
14636 }
14637
14638 return ret;
14639 }
14640
14641 #define TEST_BUFFER_SIZE 0x2000
14642
14643 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14644 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14645 { },
14646 };
14647
14648 static int __devinit tg3_test_dma(struct tg3 *tp)
14649 {
14650 dma_addr_t buf_dma;
14651 u32 *buf, saved_dma_rwctrl;
14652 int ret = 0;
14653
14654 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14655 &buf_dma, GFP_KERNEL);
14656 if (!buf) {
14657 ret = -ENOMEM;
14658 goto out_nofree;
14659 }
14660
14661 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14662 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14663
14664 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14665
14666 if (tg3_flag(tp, 57765_PLUS))
14667 goto out;
14668
14669 if (tg3_flag(tp, PCI_EXPRESS)) {
14670 /* DMA read watermark not used on PCIE */
14671 tp->dma_rwctrl |= 0x00180000;
14672 } else if (!tg3_flag(tp, PCIX_MODE)) {
14673 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14674 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14675 tp->dma_rwctrl |= 0x003f0000;
14676 else
14677 tp->dma_rwctrl |= 0x003f000f;
14678 } else {
14679 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14680 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14681 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14682 u32 read_water = 0x7;
14683
14684 /* If the 5704 is behind the EPB bridge, we can
14685 * do the less restrictive ONE_DMA workaround for
14686 * better performance.
14687 */
14688 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14689 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14690 tp->dma_rwctrl |= 0x8000;
14691 else if (ccval == 0x6 || ccval == 0x7)
14692 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14693
14694 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14695 read_water = 4;
14696 /* Set bit 23 to enable PCIX hw bug fix */
14697 tp->dma_rwctrl |=
14698 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14699 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14700 (1 << 23);
14701 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14702 /* 5780 always in PCIX mode */
14703 tp->dma_rwctrl |= 0x00144000;
14704 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14705 /* 5714 always in PCIX mode */
14706 tp->dma_rwctrl |= 0x00148000;
14707 } else {
14708 tp->dma_rwctrl |= 0x001b000f;
14709 }
14710 }
14711
14712 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14713 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14714 tp->dma_rwctrl &= 0xfffffff0;
14715
14716 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14717 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14718 /* Remove this if it causes problems for some boards. */
14719 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14720
14721 /* On 5700/5701 chips, we need to set this bit.
14722 * Otherwise the chip will issue cacheline transactions
14723 * to streamable DMA memory without all of the byte
14724 * enables turned on. This is an error on several
14725 * RISC PCI controllers, in particular sparc64.
14726 *
14727 * On 5703/5704 chips, this bit has been reassigned
14728 * a different meaning. In particular, it is used
14729 * on those chips to enable a PCI-X workaround.
14730 */
14731 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14732 }
14733
14734 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14735
14736 #if 0
14737 /* Unneeded, already done by tg3_get_invariants. */
14738 tg3_switch_clocks(tp);
14739 #endif
14740
14741 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14742 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14743 goto out;
14744
14745 /* It is best to perform the DMA test with the maximum write burst size
14746 * to expose the 5700/5701 write DMA bug.
14747 */
14748 saved_dma_rwctrl = tp->dma_rwctrl;
14749 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14750 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14751
14752 while (1) {
14753 u32 *p = buf, i;
14754
14755 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14756 p[i] = i;
14757
14758 /* Send the buffer to the chip. */
14759 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14760 if (ret) {
14761 dev_err(&tp->pdev->dev,
14762 "%s: Buffer write failed. err = %d\n",
14763 __func__, ret);
14764 break;
14765 }
14766
14767 #if 0
14768 /* validate data reached card RAM correctly. */
14769 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14770 u32 val;
14771 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14772 if (le32_to_cpu(val) != p[i]) {
14773 dev_err(&tp->pdev->dev,
14774 "%s: Buffer corrupted on device! "
14775 "(%d != %d)\n", __func__, val, i);
14776 /* ret = -ENODEV here? */
14777 }
14778 p[i] = 0;
14779 }
14780 #endif
14781 /* Now read it back. */
14782 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14783 if (ret) {
14784 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14785 "err = %d\n", __func__, ret);
14786 break;
14787 }
14788
14789 /* Verify it. */
14790 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14791 if (p[i] == i)
14792 continue;
14793
14794 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14795 DMA_RWCTRL_WRITE_BNDRY_16) {
14796 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14797 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14798 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14799 break;
14800 } else {
14801 dev_err(&tp->pdev->dev,
14802 "%s: Buffer corrupted on read back! "
14803 "(%d != %d)\n", __func__, p[i], i);
14804 ret = -ENODEV;
14805 goto out;
14806 }
14807 }
14808
14809 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14810 /* Success. */
14811 ret = 0;
14812 break;
14813 }
14814 }
14815 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14816 DMA_RWCTRL_WRITE_BNDRY_16) {
14817 /* DMA test passed without adjusting the DMA boundary;
14818 * now look for chipsets that are known to expose the
14819 * DMA bug without failing the test.
14820 */
14821 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14822 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14823 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14824 } else {
14825 /* Safe to use the calculated DMA boundary. */
14826 tp->dma_rwctrl = saved_dma_rwctrl;
14827 }
14828
14829 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14830 }
14831
14832 out:
14833 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14834 out_nofree:
14835 return ret;
14836 }
14837
14838 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14839 {
14840 if (tg3_flag(tp, 57765_PLUS)) {
14841 tp->bufmgr_config.mbuf_read_dma_low_water =
14842 DEFAULT_MB_RDMA_LOW_WATER_5705;
14843 tp->bufmgr_config.mbuf_mac_rx_low_water =
14844 DEFAULT_MB_MACRX_LOW_WATER_57765;
14845 tp->bufmgr_config.mbuf_high_water =
14846 DEFAULT_MB_HIGH_WATER_57765;
14847
14848 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14849 DEFAULT_MB_RDMA_LOW_WATER_5705;
14850 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14851 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14852 tp->bufmgr_config.mbuf_high_water_jumbo =
14853 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14854 } else if (tg3_flag(tp, 5705_PLUS)) {
14855 tp->bufmgr_config.mbuf_read_dma_low_water =
14856 DEFAULT_MB_RDMA_LOW_WATER_5705;
14857 tp->bufmgr_config.mbuf_mac_rx_low_water =
14858 DEFAULT_MB_MACRX_LOW_WATER_5705;
14859 tp->bufmgr_config.mbuf_high_water =
14860 DEFAULT_MB_HIGH_WATER_5705;
14861 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14862 tp->bufmgr_config.mbuf_mac_rx_low_water =
14863 DEFAULT_MB_MACRX_LOW_WATER_5906;
14864 tp->bufmgr_config.mbuf_high_water =
14865 DEFAULT_MB_HIGH_WATER_5906;
14866 }
14867
14868 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14869 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14870 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14871 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14872 tp->bufmgr_config.mbuf_high_water_jumbo =
14873 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14874 } else {
14875 tp->bufmgr_config.mbuf_read_dma_low_water =
14876 DEFAULT_MB_RDMA_LOW_WATER;
14877 tp->bufmgr_config.mbuf_mac_rx_low_water =
14878 DEFAULT_MB_MACRX_LOW_WATER;
14879 tp->bufmgr_config.mbuf_high_water =
14880 DEFAULT_MB_HIGH_WATER;
14881
14882 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14883 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14884 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14885 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14886 tp->bufmgr_config.mbuf_high_water_jumbo =
14887 DEFAULT_MB_HIGH_WATER_JUMBO;
14888 }
14889
14890 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14891 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14892 }
14893
14894 static char * __devinit tg3_phy_string(struct tg3 *tp)
14895 {
14896 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14897 case TG3_PHY_ID_BCM5400: return "5400";
14898 case TG3_PHY_ID_BCM5401: return "5401";
14899 case TG3_PHY_ID_BCM5411: return "5411";
14900 case TG3_PHY_ID_BCM5701: return "5701";
14901 case TG3_PHY_ID_BCM5703: return "5703";
14902 case TG3_PHY_ID_BCM5704: return "5704";
14903 case TG3_PHY_ID_BCM5705: return "5705";
14904 case TG3_PHY_ID_BCM5750: return "5750";
14905 case TG3_PHY_ID_BCM5752: return "5752";
14906 case TG3_PHY_ID_BCM5714: return "5714";
14907 case TG3_PHY_ID_BCM5780: return "5780";
14908 case TG3_PHY_ID_BCM5755: return "5755";
14909 case TG3_PHY_ID_BCM5787: return "5787";
14910 case TG3_PHY_ID_BCM5784: return "5784";
14911 case TG3_PHY_ID_BCM5756: return "5722/5756";
14912 case TG3_PHY_ID_BCM5906: return "5906";
14913 case TG3_PHY_ID_BCM5761: return "5761";
14914 case TG3_PHY_ID_BCM5718C: return "5718C";
14915 case TG3_PHY_ID_BCM5718S: return "5718S";
14916 case TG3_PHY_ID_BCM57765: return "57765";
14917 case TG3_PHY_ID_BCM5719C: return "5719C";
14918 case TG3_PHY_ID_BCM5720C: return "5720C";
14919 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14920 case 0: return "serdes";
14921 default: return "unknown";
14922 }
14923 }
14924
14925 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14926 {
14927 if (tg3_flag(tp, PCI_EXPRESS)) {
14928 strcpy(str, "PCI Express");
14929 return str;
14930 } else if (tg3_flag(tp, PCIX_MODE)) {
14931 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14932
14933 strcpy(str, "PCIX:");
14934
14935 if ((clock_ctrl == 7) ||
14936 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14937 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14938 strcat(str, "133MHz");
14939 else if (clock_ctrl == 0)
14940 strcat(str, "33MHz");
14941 else if (clock_ctrl == 2)
14942 strcat(str, "50MHz");
14943 else if (clock_ctrl == 4)
14944 strcat(str, "66MHz");
14945 else if (clock_ctrl == 6)
14946 strcat(str, "100MHz");
14947 } else {
14948 strcpy(str, "PCI:");
14949 if (tg3_flag(tp, PCI_HIGH_SPEED))
14950 strcat(str, "66MHz");
14951 else
14952 strcat(str, "33MHz");
14953 }
14954 if (tg3_flag(tp, PCI_32BIT))
14955 strcat(str, ":32-bit");
14956 else
14957 strcat(str, ":64-bit");
14958 return str;
14959 }
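/* Example strings produced above: "PCI Express" (returned before the
 * width suffix is appended), "PCIX:133MHz:64-bit", "PCI:33MHz:32-bit".
 */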
14960
14961 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14962 {
14963 struct pci_dev *peer;
14964 unsigned int func, devnr = tp->pdev->devfn & ~7;
14965
14966 for (func = 0; func < 8; func++) {
14967 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14968 if (peer && peer != tp->pdev)
14969 break;
14970 pci_dev_put(peer);
14971 }
14972 /* 5704 can be configured in single-port mode; set peer to
14973 * tp->pdev in that case.
14974 */
14975 if (!peer) {
14976 peer = tp->pdev;
14977 return peer;
14978 }
14979
14980 /*
14981 * We don't need to keep the refcount elevated; there's no way
14982 * to remove one half of this device without removing the other.
14983 */
14984 pci_dev_put(peer);
14985
14986 return peer;
14987 }
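/* Example: a dual-port 5704 at devfn 0x10 (slot 2, function 0) masks
 * to devnr 0x10 and scans functions 0-7 of that slot; the first
 * pci_dev found that is not tp->pdev (typically function 1) is the
 * peer, while a single-port board falls back to tp->pdev itself.
 */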
14988
14989 static void __devinit tg3_init_coal(struct tg3 *tp)
14990 {
14991 struct ethtool_coalesce *ec = &tp->coal;
14992
14993 memset(ec, 0, sizeof(*ec));
14994 ec->cmd = ETHTOOL_GCOALESCE;
14995 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14996 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14997 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14998 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14999 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15000 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15001 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15002 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15003 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15004
15005 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15006 HOSTCC_MODE_CLRTICK_TXBD)) {
15007 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15008 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15009 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15010 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15011 }
15012
15013 if (tg3_flag(tp, 5705_PLUS)) {
15014 ec->rx_coalesce_usecs_irq = 0;
15015 ec->tx_coalesce_usecs_irq = 0;
15016 ec->stats_block_coalesce_usecs = 0;
15017 }
15018 }
15019
15020 static const struct net_device_ops tg3_netdev_ops = {
15021 .ndo_open = tg3_open,
15022 .ndo_stop = tg3_close,
15023 .ndo_start_xmit = tg3_start_xmit,
15024 .ndo_get_stats64 = tg3_get_stats64,
15025 .ndo_validate_addr = eth_validate_addr,
15026 .ndo_set_multicast_list = tg3_set_rx_mode,
15027 .ndo_set_mac_address = tg3_set_mac_addr,
15028 .ndo_do_ioctl = tg3_ioctl,
15029 .ndo_tx_timeout = tg3_tx_timeout,
15030 .ndo_change_mtu = tg3_change_mtu,
15031 .ndo_fix_features = tg3_fix_features,
15032 #ifdef CONFIG_NET_POLL_CONTROLLER
15033 .ndo_poll_controller = tg3_poll_controller,
15034 #endif
15035 };
15036
15037 static const struct net_device_ops tg3_netdev_ops_dma_bug = {
15038 .ndo_open = tg3_open,
15039 .ndo_stop = tg3_close,
15040 .ndo_start_xmit = tg3_start_xmit_dma_bug,
15041 .ndo_get_stats64 = tg3_get_stats64,
15042 .ndo_validate_addr = eth_validate_addr,
15043 .ndo_set_multicast_list = tg3_set_rx_mode,
15044 .ndo_set_mac_address = tg3_set_mac_addr,
15045 .ndo_do_ioctl = tg3_ioctl,
15046 .ndo_tx_timeout = tg3_tx_timeout,
15047 .ndo_change_mtu = tg3_change_mtu,
15048 #ifdef CONFIG_NET_POLL_CONTROLLER
15049 .ndo_poll_controller = tg3_poll_controller,
15050 #endif
15051 };
15052
15053 static int __devinit tg3_init_one(struct pci_dev *pdev,
15054 const struct pci_device_id *ent)
15055 {
15056 struct net_device *dev;
15057 struct tg3 *tp;
15058 int i, err, pm_cap;
15059 u32 sndmbx, rcvmbx, intmbx;
15060 char str[40];
15061 u64 dma_mask, persist_dma_mask;
15062 u32 hw_features = 0;
15063
15064 printk_once(KERN_INFO "%s\n", version);
15065
15066 err = pci_enable_device(pdev);
15067 if (err) {
15068 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15069 return err;
15070 }
15071
15072 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15073 if (err) {
15074 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15075 goto err_out_disable_pdev;
15076 }
15077
15078 pci_set_master(pdev);
15079
15080 /* Find power-management capability. */
15081 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15082 if (pm_cap == 0) {
15083 dev_err(&pdev->dev,
15084 "Cannot find Power Management capability, aborting\n");
15085 err = -EIO;
15086 goto err_out_free_res;
15087 }
15088
15089 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15090 if (!dev) {
15091 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15092 err = -ENOMEM;
15093 goto err_out_free_res;
15094 }
15095
15096 SET_NETDEV_DEV(dev, &pdev->dev);
15097
15098 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15099
15100 tp = netdev_priv(dev);
15101 tp->pdev = pdev;
15102 tp->dev = dev;
15103 tp->pm_cap = pm_cap;
15104 tp->rx_mode = TG3_DEF_RX_MODE;
15105 tp->tx_mode = TG3_DEF_TX_MODE;
15106
15107 if (tg3_debug > 0)
15108 tp->msg_enable = tg3_debug;
15109 else
15110 tp->msg_enable = TG3_DEF_MSG_ENABLE;
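
/*
 * tg3_debug is the module parameter declared earlier in this file, so
 * a bitmask of NETIF_MSG_* values can be passed at load time, e.g.
 * (illustrative value):
 *
 *	modprobe tg3 tg3_debug=0x00ff
 */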
15111
15112 /* The word/byte swap controls here control register access byte
15113 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15114 * setting below.
15115 */
15116 tp->misc_host_ctrl =
15117 MISC_HOST_CTRL_MASK_PCI_INT |
15118 MISC_HOST_CTRL_WORD_SWAP |
15119 MISC_HOST_CTRL_INDIR_ACCESS |
15120 MISC_HOST_CTRL_PCISTATE_RW;
15121
15122 /* The NONFRM (non-frame) byte/word swap controls take effect
15123 * on descriptor entries, anything which isn't packet data.
15124 *
15125 * The StrongARM chips on the board (one for tx, one for rx)
15126 * are running in big-endian mode.
15127 */
15128 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15129 GRC_MODE_WSWAP_NONFRM_DATA);
15130 #ifdef __BIG_ENDIAN
15131 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15132 #endif
15133 spin_lock_init(&tp->lock);
15134 spin_lock_init(&tp->indirect_lock);
15135 INIT_WORK(&tp->reset_task, tg3_reset_task);
15136
15137 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15138 if (!tp->regs) {
15139 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15140 err = -ENOMEM;
15141 goto err_out_free_dev;
15142 }
15143
15144 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15145 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15146
15147 dev->ethtool_ops = &tg3_ethtool_ops;
15148 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15149 dev->irq = pdev->irq;
15150
15151 err = tg3_get_invariants(tp);
15152 if (err) {
15153 dev_err(&pdev->dev,
15154 "Problem fetching invariants of chip, aborting\n");
15155 goto err_out_iounmap;
15156 }
15157
15158 if (tg3_flag(tp, 5755_PLUS) && !tg3_flag(tp, 5717_PLUS))
15159 dev->netdev_ops = &tg3_netdev_ops;
15160 else
15161 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
15162
15164 /* The EPB bridge inside 5714, 5715, and 5780 and any
15165 * device behind the EPB cannot support DMA addresses > 40-bit.
15166 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15167 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15168 * do DMA address check in tg3_start_xmit().
15169 */
15170 if (tg3_flag(tp, IS_5788))
15171 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15172 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15173 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15174 #ifdef CONFIG_HIGHMEM
15175 dma_mask = DMA_BIT_MASK(64);
15176 #endif
15177 } else
15178 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15179
15180 /* Configure DMA attributes. */
15181 if (dma_mask > DMA_BIT_MASK(32)) {
15182 err = pci_set_dma_mask(pdev, dma_mask);
15183 if (!err) {
15184 dev->features |= NETIF_F_HIGHDMA;
15185 err = pci_set_consistent_dma_mask(pdev,
15186 persist_dma_mask);
15187 if (err < 0) {
15188 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15189 "DMA for consistent allocations\n");
15190 goto err_out_iounmap;
15191 }
15192 }
15193 }
15194 if (err || dma_mask == DMA_BIT_MASK(32)) {
15195 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15196 if (err) {
15197 dev_err(&pdev->dev,
15198 "No usable DMA configuration, aborting\n");
15199 goto err_out_iounmap;
15200 }
15201 }
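
/*
 * Illustrative sketch (not the driver's exact helper) of the kind of
 * address check tg3_start_xmit() performs when the 40BIT_DMA_BUG
 * workaround keeps a 64-bit mask on a HIGHMEM system, per the comment
 * above the mask selection: mappings reaching past the 40-bit limit
 * must be worked around rather than handed to the hardware:
 *
 *	static bool tg3_dma_above_40bit(struct tg3 *tp,
 *					dma_addr_t mapping,
 *					unsigned int len)
 *	{
 *		if (!tg3_flag(tp, 40BIT_DMA_BUG))
 *			return false;
 *		return ((u64)mapping + len) > DMA_BIT_MASK(40);
 *	}
 */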
15202
15203 tg3_init_bufmgr_config(tp);
15204
15205 /* Selectively allow TSO based on operating conditions */
15206 if (tg3_flag(tp, HW_TSO_1) ||
15207 tg3_flag(tp, HW_TSO_2) ||
15208 tg3_flag(tp, HW_TSO_3) ||
15209 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF))) {
15210 tg3_flag_set(tp, TSO_CAPABLE);
15211 } else {
15212 tg3_flag_clear(tp, TSO_CAPABLE);
15213 tg3_flag_clear(tp, TSO_BUG);
15214 tp->fw_needed = NULL;
15215 }
15216
15217 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
15218 tp->fw_needed = FIRMWARE_TG3;
15219
15220 /* TSO is on by default on chips that support hardware TSO.
15221 * Firmware TSO on older chips gives lower performance, so it
15222 * is off by default, but can be enabled using ethtool.
15223 */
15224 if ((tg3_flag(tp, HW_TSO_1) ||
15225 tg3_flag(tp, HW_TSO_2) ||
15226 tg3_flag(tp, HW_TSO_3)) &&
15227 (dev->features & NETIF_F_IP_CSUM))
15228 hw_features |= NETIF_F_TSO;
15229 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15230 if (dev->features & NETIF_F_IPV6_CSUM)
15231 hw_features |= NETIF_F_TSO6;
15232 if (tg3_flag(tp, HW_TSO_3) ||
15233 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15234 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15235 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15236 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15237 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15238 hw_features |= NETIF_F_TSO_ECN;
15239 }
15240
15241 dev->hw_features |= hw_features;
15242 dev->features |= hw_features;
15243 dev->vlan_features |= hw_features;
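
/*
 * From userspace the policy above looks like this (interface name
 * assumed; firmware TSO on older chips stays off until requested):
 *
 *	ethtool -k eth0 | grep segmentation	# show tso/tso6 state
 *	ethtool -K eth0 tso on			# opt in on older chips
 *
 * Such requests travel through the netdev feature framework, where
 * tg3_fix_features() (wired up in tg3_netdev_ops above) can mask out
 * combinations the chip cannot support.
 */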
15244
15245 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15246 !tg3_flag(tp, TSO_CAPABLE) &&
15247 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15248 tg3_flag_set(tp, MAX_RXPEND_64);
15249 tp->rx_pending = 63;
15250 }
15251
15252 err = tg3_get_device_address(tp);
15253 if (err) {
15254 dev_err(&pdev->dev,
15255 "Could not obtain valid ethernet address, aborting\n");
15256 goto err_out_iounmap;
15257 }
15258
15259 if (tg3_flag(tp, ENABLE_APE)) {
15260 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15261 if (!tp->aperegs) {
15262 dev_err(&pdev->dev,
15263 "Cannot map APE registers, aborting\n");
15264 err = -ENOMEM;
15265 goto err_out_iounmap;
15266 }
15267
15268 tg3_ape_lock_init(tp);
15269
15270 if (tg3_flag(tp, ENABLE_ASF))
15271 tg3_read_dash_ver(tp);
15272 }
15273
15274 /*
15275 * Reset the chip in case a UNDI or EFI driver did not shut it down
15276 * cleanly; otherwise the DMA self test below will enable the WDMAC
15277 * and we'll see (spurious) pending DMA on the PCI bus at that point.
15278 */
15279 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15280 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15281 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15282 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15283 }
15284
15285 err = tg3_test_dma(tp);
15286 if (err) {
15287 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15288 goto err_out_apeunmap;
15289 }
15290
15291 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15292 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15293 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15294 for (i = 0; i < tp->irq_max; i++) {
15295 struct tg3_napi *tnapi = &tp->napi[i];
15296
15297 tnapi->tp = tp;
15298 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15299
15300 tnapi->int_mbox = intmbx;
15301 if (i < 4)
15302 intmbx += 0x8;
15303 else
15304 intmbx += 0x4;
15305
15306 tnapi->consmbox = rcvmbx;
15307 tnapi->prodmbox = sndmbx;
15308
15309 if (i)
15310 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15311 else
15312 tnapi->coal_now = HOSTCC_MODE_NOW;
15313
15314 if (!tg3_flag(tp, SUPPORT_MSIX))
15315 break;
15316
15317 /*
15318 * If we support MSIX, we'll be using RSS. If we're using
15319 * RSS, the first vector only handles link interrupts and the
15320 * remaining vectors handle rx and tx interrupts. Reuse the
15321 * mailbox values for the next iteration; the values we set up above
15322 * are still useful for single-vectored mode. (Worked example below.)
15323 */
15324 if (!i)
15325 continue;
15326
15327 rcvmbx += 0x8;
15328
15329 if (sndmbx & 0x4)
15330 sndmbx -= 0x4;
15331 else
15332 sndmbx += 0xc;
15333 }
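
/*
 * Worked example of the loop above, derived from its arithmetic
 * (offsets relative to the initial intmbx/rcvmbx/sndmbx values; the
 * sndmbx signs assume TG3_64BIT_REG_LOW == 0x4, so the toggle
 * subtracts first):
 *
 *	vec 0: intmbx		rcvmbx		sndmbx
 *	vec 1: intmbx + 0x08	rcvmbx		sndmbx
 *	vec 2: intmbx + 0x10	rcvmbx + 0x08	sndmbx - 0x04
 *	vec 3: intmbx + 0x18	rcvmbx + 0x10	sndmbx + 0x08
 *
 * Vector 1 reuses vector 0's rx/tx mailboxes because under RSS vector
 * 0 services only link interrupts.  The interrupt mailbox stride drops
 * from 0x8 to 0x4 after the first four vectors, and the +0xc/-0x4
 * toggle on sndmbx nets +0x8 every two vectors.
 */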
15334
15335 tg3_init_coal(tp);
15336
15337 pci_set_drvdata(pdev, dev);
15338
15339 err = register_netdev(dev);
15340 if (err) {
15341 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15342 goto err_out_apeunmap;
15343 }
15344
15345 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15346 tp->board_part_number,
15347 tp->pci_chip_rev_id,
15348 tg3_bus_string(tp, str),
15349 dev->dev_addr);
15350
15351 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15352 struct phy_device *phydev;
15353 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15354 netdev_info(dev,
15355 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15356 phydev->drv->name, dev_name(&phydev->dev));
15357 } else {
15358 char *ethtype;
15359
15360 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15361 ethtype = "10/100Base-TX";
15362 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15363 ethtype = "1000Base-SX";
15364 else
15365 ethtype = "10/100/1000Base-T";
15366
15367 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15368 "(WireSpeed[%d], EEE[%d])\n",
15369 tg3_phy_string(tp), ethtype,
15370 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15371 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15372 }
15373
15374 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15375 (dev->features & NETIF_F_RXCSUM) != 0,
15376 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15377 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15378 tg3_flag(tp, ENABLE_ASF) != 0,
15379 tg3_flag(tp, TSO_CAPABLE) != 0);
15380 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15381 tp->dma_rwctrl,
15382 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15383 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15384
15385 pci_save_state(pdev);
15386
15387 return 0;
15388
15389 err_out_apeunmap:
15390 if (tp->aperegs) {
15391 iounmap(tp->aperegs);
15392 tp->aperegs = NULL;
15393 }
15394
15395 err_out_iounmap:
15396 if (tp->regs) {
15397 iounmap(tp->regs);
15398 tp->regs = NULL;
15399 }
15400
15401 err_out_free_dev:
15402 free_netdev(dev);
15403
15404 err_out_free_res:
15405 pci_release_regions(pdev);
15406
15407 err_out_disable_pdev:
15408 pci_disable_device(pdev);
15409 pci_set_drvdata(pdev, NULL);
15410 return err;
15411 }
15412
15413 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15414 {
15415 struct net_device *dev = pci_get_drvdata(pdev);
15416
15417 if (dev) {
15418 struct tg3 *tp = netdev_priv(dev);
15419
15420 if (tp->fw)
15421 release_firmware(tp->fw);
15422
15423 cancel_work_sync(&tp->reset_task);
15424
15425 if (!tg3_flag(tp, USE_PHYLIB)) {
15426 tg3_phy_fini(tp);
15427 tg3_mdio_fini(tp);
15428 }
15429
15430 unregister_netdev(dev);
15431 if (tp->aperegs) {
15432 iounmap(tp->aperegs);
15433 tp->aperegs = NULL;
15434 }
15435 if (tp->regs) {
15436 iounmap(tp->regs);
15437 tp->regs = NULL;
15438 }
15439 free_netdev(dev);
15440 pci_release_regions(pdev);
15441 pci_disable_device(pdev);
15442 pci_set_drvdata(pdev, NULL);
15443 }
15444 }
15445
15446 #ifdef CONFIG_PM_SLEEP
15447 static int tg3_suspend(struct device *device)
15448 {
15449 struct pci_dev *pdev = to_pci_dev(device);
15450 struct net_device *dev = pci_get_drvdata(pdev);
15451 struct tg3 *tp = netdev_priv(dev);
15452 int err;
15453
15454 if (!netif_running(dev))
15455 return 0;
15456
15457 flush_work_sync(&tp->reset_task);
15458 tg3_phy_stop(tp);
15459 tg3_netif_stop(tp);
15460
15461 del_timer_sync(&tp->timer);
15462
15463 tg3_full_lock(tp, 1);
15464 tg3_disable_ints(tp);
15465 tg3_full_unlock(tp);
15466
15467 netif_device_detach(dev);
15468
15469 tg3_full_lock(tp, 0);
15470 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15471 tg3_flag_clear(tp, INIT_COMPLETE);
15472 tg3_full_unlock(tp);
15473
15474 err = tg3_power_down_prepare(tp);
15475 if (err) {
15476 int err2;
15477
15478 tg3_full_lock(tp, 0);
15479
15480 tg3_flag_set(tp, INIT_COMPLETE);
15481 err2 = tg3_restart_hw(tp, 1);
15482 if (err2)
15483 goto out;
15484
15485 tp->timer.expires = jiffies + tp->timer_offset;
15486 add_timer(&tp->timer);
15487
15488 netif_device_attach(dev);
15489 tg3_netif_start(tp);
15490
15491 out:
15492 tg3_full_unlock(tp);
15493
15494 if (!err2)
15495 tg3_phy_start(tp);
15496 }
15497
15498 return err;
15499 }
15500
15501 static int tg3_resume(struct device *device)
15502 {
15503 struct pci_dev *pdev = to_pci_dev(device);
15504 struct net_device *dev = pci_get_drvdata(pdev);
15505 struct tg3 *tp = netdev_priv(dev);
15506 int err;
15507
15508 if (!netif_running(dev))
15509 return 0;
15510
15511 netif_device_attach(dev);
15512
15513 tg3_full_lock(tp, 0);
15514
15515 tg3_flag_set(tp, INIT_COMPLETE);
15516 err = tg3_restart_hw(tp, 1);
15517 if (err)
15518 goto out;
15519
15520 tp->timer.expires = jiffies + tp->timer_offset;
15521 add_timer(&tp->timer);
15522
15523 tg3_netif_start(tp);
15524
15525 out:
15526 tg3_full_unlock(tp);
15527
15528 if (!err)
15529 tg3_phy_start(tp);
15530
15531 return err;
15532 }
15533
15534 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15535 #define TG3_PM_OPS (&tg3_pm_ops)
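
/*
 * SIMPLE_DEV_PM_OPS expands to a struct dev_pm_ops definition that
 * points the suspend/resume, freeze/thaw and poweroff/restore
 * system-sleep hooks at tg3_suspend() and tg3_resume(), so the same
 * pair serves both suspend-to-RAM and hibernation.
 */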
15536
15537 #else
15538
15539 #define TG3_PM_OPS NULL
15540
15541 #endif /* CONFIG_PM_SLEEP */
15542
15543 /**
15544 * tg3_io_error_detected - called when PCI error is detected
15545 * @pdev: Pointer to PCI device
15546 * @state: The current PCI connection state
15547 *
15548 * This function is called after a PCI bus error affecting
15549 * this device has been detected.
15550 */
15551 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15552 pci_channel_state_t state)
15553 {
15554 struct net_device *netdev = pci_get_drvdata(pdev);
15555 struct tg3 *tp = netdev_priv(netdev);
15556 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15557
15558 netdev_info(netdev, "PCI I/O error detected\n");
15559
15560 rtnl_lock();
15561
15562 if (!netif_running(netdev))
15563 goto done;
15564
15565 tg3_phy_stop(tp);
15566
15567 tg3_netif_stop(tp);
15568
15569 del_timer_sync(&tp->timer);
15570 tg3_flag_clear(tp, RESTART_TIMER);
15571
15572 /* Want to make sure that the reset task doesn't run */
15573 cancel_work_sync(&tp->reset_task);
15574 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15575 tg3_flag_clear(tp, RESTART_TIMER);
15576
15577 netif_device_detach(netdev);
15578
15579 /* Clean up software state, even if MMIO is blocked */
15580 tg3_full_lock(tp, 0);
15581 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15582 tg3_full_unlock(tp);
15583
15584 done:
15585 if (state == pci_channel_io_perm_failure)
15586 err = PCI_ERS_RESULT_DISCONNECT;
15587 else
15588 pci_disable_device(pdev);
15589
15590 rtnl_unlock();
15591
15592 return err;
15593 }
15594
15595 /**
15596 * tg3_io_slot_reset - called after the PCI bus has been reset.
15597 * @pdev: Pointer to PCI device
15598 *
15599 * Restart the card from scratch, as if from a cold boot.
15600 * At this point, the card has experienced a hard reset,
15601 * followed by fixups by the BIOS, and has its config space
15602 * set up identically to what it was at cold boot.
15603 */
15604 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15605 {
15606 struct net_device *netdev = pci_get_drvdata(pdev);
15607 struct tg3 *tp = netdev_priv(netdev);
15608 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15609 int err;
15610
15611 rtnl_lock();
15612
15613 if (pci_enable_device(pdev)) {
15614 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15615 goto done;
15616 }
15617
15618 pci_set_master(pdev);
15619 pci_restore_state(pdev);
15620 pci_save_state(pdev);
15621
15622 if (!netif_running(netdev)) {
15623 rc = PCI_ERS_RESULT_RECOVERED;
15624 goto done;
15625 }
15626
15627 err = tg3_power_up(tp);
15628 if (err) {
15629 netdev_err(netdev, "Failed to restore register access.\n");
15630 goto done;
15631 }
15632
15633 rc = PCI_ERS_RESULT_RECOVERED;
15634
15635 done:
15636 rtnl_unlock();
15637
15638 return rc;
15639 }
15640
15641 /**
15642 * tg3_io_resume - called when traffic can start flowing again.
15643 * @pdev: Pointer to PCI device
15644 *
15645 * This callback is called when the error recovery driver tells
15646 * us that it's OK to resume normal operation.
15647 */
15648 static void tg3_io_resume(struct pci_dev *pdev)
15649 {
15650 struct net_device *netdev = pci_get_drvdata(pdev);
15651 struct tg3 *tp = netdev_priv(netdev);
15652 int err;
15653
15654 rtnl_lock();
15655
15656 if (!netif_running(netdev))
15657 goto done;
15658
15659 tg3_full_lock(tp, 0);
15660 tg3_flag_set(tp, INIT_COMPLETE);
15661 err = tg3_restart_hw(tp, 1);
15662 tg3_full_unlock(tp);
15663 if (err) {
15664 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15665 goto done;
15666 }
15667
15668 netif_device_attach(netdev);
15669
15670 tp->timer.expires = jiffies + tp->timer_offset;
15671 add_timer(&tp->timer);
15672
15673 tg3_netif_start(tp);
15674
15675 tg3_phy_start(tp);
15676
15677 done:
15678 rtnl_unlock();
15679 }
15680
15681 static struct pci_error_handlers tg3_err_handler = {
15682 .error_detected = tg3_io_error_detected,
15683 .slot_reset = tg3_io_slot_reset,
15684 .resume = tg3_io_resume
15685 };
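
/*
 * On an uncorrectable PCI error the AER core invokes these hooks in
 * order: .error_detected quiesces the device, .slot_reset runs once
 * the slot has been reset and config space restored, and .resume
 * restarts traffic after successful recovery.
 */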
15686
15687 static struct pci_driver tg3_driver = {
15688 .name = DRV_MODULE_NAME,
15689 .id_table = tg3_pci_tbl,
15690 .probe = tg3_init_one,
15691 .remove = __devexit_p(tg3_remove_one),
15692 .err_handler = &tg3_err_handler,
15693 .driver.pm = TG3_PM_OPS,
15694 };
15695
15696 static int __init tg3_init(void)
15697 {
15698 return pci_register_driver(&tg3_driver);
15699 }
15700
15701 static void __exit tg3_cleanup(void)
15702 {
15703 pci_unregister_driver(&tg3_driver);
15704 }
15705
15706 module_init(tg3_init);
15707 module_exit(tg3_cleanup);