/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2007 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define TG3_TSO_SUPPORT	1

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.94"
#define DRV_MODULE_RELDATE	"August 14, 2008"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)	((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "loopback test (offline)" },
	{ "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

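/* Indirect register access: point the config-space register window
 * (TG3PCI_REG_BASE_ADDR) at @off and move data through TG3PCI_REG_DATA.
 * indirect_lock serializes users of the shared window.
 */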
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

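/* Write a register, then read it back immediately so the posted PCI write
 * reaches the chip before the caller proceeds.
 */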
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

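/* TX mailbox doorbell: chips with TG3_FLAG_TXD_MBOX_HWBUG need the value
 * written twice, and chips that may reorder mailbox writes need a read back
 * to force the write out to the hardware.
 */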
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg, val)		tp->write32(tp, reg, val)
#define tw32_f(reg, val)	_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)		tp->read32(tp, reg)

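/* NIC SRAM is reached through a second window (TG3PCI_MEM_WIN_BASE_ADDR /
 * TG3PCI_MEM_WIN_DATA), driven via PCI config space or MMIO depending on
 * TG3_FLAG_SRAM_USE_CONFIG.  The window base is parked at zero afterwards.
 */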
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}

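/* Acquire a hardware semaphore shared with the APE firmware: post a request
 * in TG3_APE_LOCK_REQ, poll TG3_APE_LOCK_GRANT for up to 1 ms, and withdraw
 * the request on timeout.
 */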
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}

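/* Inspect the status block for pending events: a link change (unless link
 * state is tracked by register polling) or new RX/TX completion indices.
 */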
static inline unsigned int tg3_has_work(struct tg3 *tp)
{
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_restart_ints
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

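/* MII management access: build an MI_COM frame (PHY address, register,
 * command), start it, then poll MI_COM_BUSY until the transaction completes.
 * Autopolling is suspended for the duration so the MAC does not compete for
 * the MDIO bus.
 */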
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = (struct tg3 *)bp->priv;
	u32 val;

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
		return -EAGAIN;

	if (tg3_readphy(tp, reg, &val))
		return -EIO;

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = (struct tg3 *)bp->priv;

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
		return -EAGAIN;

	if (tg3_writephy(tp, reg, val))
		return -EIO;

	return 0;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config(struct tg3 *tp)
{
	u32 val;

	if (tp->mdio_bus->phy_map[PHY_ADDR]->interface !=
	    PHY_INTERFACE_MODE_RGMII)
		return;

	val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
				    MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);

	val = tr32(MAC_PHYCFG2) & ~(MAC_PHYCFG2_INBAND_ENABLE);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
		val |= MAC_PHYCFG2_INBAND_ENABLE;
	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}

	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)
		tg3_mdio_config(tp);
}

static void tg3_mdio_stop(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers. A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
		       tp->dev->name, i);
		return i;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	switch (phydev->phy_id) {
	case TG3_PHY_ID_BCM50610:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		break;
	case TG3_PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		break;
	}

	tg3_mdio_config(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
		       "on" : "off");
		tg3_ump_link_report(tp);
	}
}

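/* Translate the driver's TX/RX flow-control flags into the MII pause
 * advertisement bits (IEEE 802.3 Annex 28B): symmetric pause maps to the
 * pause-capable bit, TX-only to the asymmetric bit, RX-only to both.
 */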
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & TG3_FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & TG3_FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & TG3_FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & TG3_FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

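/* Resolve the negotiated pause configuration from the local and link-partner
 * advertisements, implementing the standard pause resolution truth table.
 */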
static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_PAUSE_CAP) {
		if (lcladv & ADVERTISE_PAUSE_ASYM) {
			if (rmtadv & LPA_PAUSE_CAP)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
			else if (rmtadv & LPA_PAUSE_ASYM)
				cap = TG3_FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_PAUSE_CAP)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_PAUSE_ASYM) {
		if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
			cap = TG3_FLOW_CTRL_TX;
	}

	return cap;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = TG3_FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = TG3_FLOW_CTRL_TX;
	}

	return cap;
}

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & TG3_FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & TG3_FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	spin_lock(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
		return PTR_ERR(phydev);
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	/* Mask with MAC supported features. */
	phydev->supported &= (PHY_GBIT_FEATURES |
			      SUPPORTED_Pause |
			      SUPPORTED_Asym_Pause);

	phydev->advertising = phydev->supported;

	printk(KERN_INFO
	       "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
	       tp->dev->name, phydev->drv->name, phydev->dev.bus_id);

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
	}
}

static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}

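/* Enable the PHY's "ethernet@wirespeed" feature through AUX control shadow
 * register 0x7007, letting the link fall back to a lower speed over marginal
 * cabling instead of failing outright.
 */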
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, 0x16, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

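/* Write a known test pattern into each of the four DSP channels, read it
 * back through the macro interface, and flag for a fresh PHY reset if any
 * channel fails to verify.
 */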
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB. */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	} else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		u32 val;

		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}

		/* Disable GPHY autopowerdown. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW,
			     MII_TG3_MISC_SHDW_WREN |
			     MII_TG3_MISC_SHDW_APD_SEL |
			     MII_TG3_MISC_SHDW_APD_WKTM_84MS);
	}

	tg3_phy_apply_otp(tp);

out:
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	} else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}

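/* Switch the auxiliary (Vaux) power GPIOs on or off.  On dual-port devices
 * the GPIOs are shared with the peer function, so the peer's WOL/ASF state
 * is consulted before anything is powered down.
 */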
1835 static void tg3_frob_aux_power(struct tg3 *tp)
1836 {
1837 struct tg3 *tp_peer = tp;
1838
1839 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1840 return;
1841
1842 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1843 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1844 struct net_device *dev_peer;
1845
1846 dev_peer = pci_get_drvdata(tp->pdev_peer);
1847 /* remove_one() may have been run on the peer. */
1848 if (!dev_peer)
1849 tp_peer = tp;
1850 else
1851 tp_peer = netdev_priv(dev_peer);
1852 }
1853
1854 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1855 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1856 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1857 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1858 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1859 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1860 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1861 (GRC_LCLCTRL_GPIO_OE0 |
1862 GRC_LCLCTRL_GPIO_OE1 |
1863 GRC_LCLCTRL_GPIO_OE2 |
1864 GRC_LCLCTRL_GPIO_OUTPUT0 |
1865 GRC_LCLCTRL_GPIO_OUTPUT1),
1866 100);
1867 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
1868 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1869 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1870 GRC_LCLCTRL_GPIO_OE1 |
1871 GRC_LCLCTRL_GPIO_OE2 |
1872 GRC_LCLCTRL_GPIO_OUTPUT0 |
1873 GRC_LCLCTRL_GPIO_OUTPUT1 |
1874 tp->grc_local_ctrl;
1875 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1876
1877 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
1878 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1879
1880 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
1881 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1882 } else {
1883 u32 no_gpio2;
1884 u32 grc_local_ctrl = 0;
1885
1886 if (tp_peer != tp &&
1887 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1888 return;
1889
1890 			/* Workaround to prevent drawing excess current. */
1891 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1892 ASIC_REV_5714) {
1893 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1894 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1895 grc_local_ctrl, 100);
1896 }
1897
1898 /* On 5753 and variants, GPIO2 cannot be used. */
1899 no_gpio2 = tp->nic_sram_data_cfg &
1900 NIC_SRAM_DATA_CFG_NO_GPIO2;
1901
1902 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1903 GRC_LCLCTRL_GPIO_OE1 |
1904 GRC_LCLCTRL_GPIO_OE2 |
1905 GRC_LCLCTRL_GPIO_OUTPUT1 |
1906 GRC_LCLCTRL_GPIO_OUTPUT2;
1907 if (no_gpio2) {
1908 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1909 GRC_LCLCTRL_GPIO_OUTPUT2);
1910 }
1911 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1912 grc_local_ctrl, 100);
1913
1914 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1915
1916 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1917 grc_local_ctrl, 100);
1918
1919 if (!no_gpio2) {
1920 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1921 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1922 grc_local_ctrl, 100);
1923 }
1924 }
1925 } else {
1926 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1927 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1928 if (tp_peer != tp &&
1929 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1930 return;
1931
1932 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1933 (GRC_LCLCTRL_GPIO_OE1 |
1934 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1935
1936 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1937 GRC_LCLCTRL_GPIO_OE1, 100);
1938
1939 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1940 (GRC_LCLCTRL_GPIO_OE1 |
1941 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1942 }
1943 }
1944 }
1945
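/* Decide whether MAC_MODE_LINK_POLARITY should be set at the given
 * speed on 5700 chips; the BCM5411 PHY wants the opposite polarity
 * of the other PHYs.
 */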
1946 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1947 {
1948 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1949 return 1;
1950 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1951 if (speed != SPEED_10)
1952 return 1;
1953 } else if (speed == SPEED_10)
1954 return 1;
1955
1956 return 0;
1957 }
1958
1959 static int tg3_setup_phy(struct tg3 *, int);
1960
1961 #define RESET_KIND_SHUTDOWN 0
1962 #define RESET_KIND_INIT 1
1963 #define RESET_KIND_SUSPEND 2
1964
1965 static void tg3_write_sig_post_reset(struct tg3 *, int);
1966 static int tg3_halt_cpu(struct tg3 *, u32);
1967 static int tg3_nvram_lock(struct tg3 *);
1968 static void tg3_nvram_unlock(struct tg3 *);
1969
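/* Quiesce the PHY (or serdes) ahead of a power-down, skipping the
 * actual BMCR power-down on chips where it is known to be unsafe.
 */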
1970 static void tg3_power_down_phy(struct tg3 *tp)
1971 {
1972 u32 val;
1973
1974 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1975 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1976 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1977 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1978
1979 sg_dig_ctrl |=
1980 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1981 tw32(SG_DIG_CTRL, sg_dig_ctrl);
1982 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1983 }
1984 return;
1985 }
1986
1987 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1988 tg3_bmcr_reset(tp);
1989 val = tr32(GRC_MISC_CFG);
1990 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1991 udelay(40);
1992 return;
1993 } else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
1994 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1995 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1996 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1997 }
1998
1999 /* The PHY should not be powered down on some chips because
2000 * of bugs.
2001 */
2002 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2003 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2004 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2005 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2006 return;
2007
2008 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2009 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2010 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2011 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2012 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2013 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2014 }
2015
2016 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2017 }
2018
2019 /* tp->lock is held. */
2020 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2021 {
2022 u32 addr_high, addr_low;
2023 int i;
2024
2025 addr_high = ((tp->dev->dev_addr[0] << 8) |
2026 tp->dev->dev_addr[1]);
2027 addr_low = ((tp->dev->dev_addr[2] << 24) |
2028 (tp->dev->dev_addr[3] << 16) |
2029 (tp->dev->dev_addr[4] << 8) |
2030 (tp->dev->dev_addr[5] << 0));
2031 for (i = 0; i < 4; i++) {
2032 if (i == 1 && skip_mac_1)
2033 continue;
2034 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2035 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2036 }
2037
2038 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2039 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2040 for (i = 0; i < 12; i++) {
2041 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2042 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2043 }
2044 }
2045
2046 addr_high = (tp->dev->dev_addr[0] +
2047 tp->dev->dev_addr[1] +
2048 tp->dev->dev_addr[2] +
2049 tp->dev->dev_addr[3] +
2050 tp->dev->dev_addr[4] +
2051 tp->dev->dev_addr[5]) &
2052 TX_BACKOFF_SEED_MASK;
2053 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2054 }
2055
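/* Put the device into the requested PCI power state, programming the
 * WOL, MAC and clock registers on the way down; PCI_D0 simply restores
 * full power and local GPIO control.
 */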
2056 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2057 {
2058 u32 misc_host_ctrl;
2059
2060 /* Make sure register accesses (indirect or otherwise)
2061 * will function correctly.
2062 */
2063 pci_write_config_dword(tp->pdev,
2064 TG3PCI_MISC_HOST_CTRL,
2065 tp->misc_host_ctrl);
2066
2067 switch (state) {
2068 case PCI_D0:
2069 pci_enable_wake(tp->pdev, state, false);
2070 pci_set_power_state(tp->pdev, PCI_D0);
2071
2072 /* Switch out of Vaux if it is a NIC */
2073 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2074 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2075
2076 return 0;
2077
2078 case PCI_D1:
2079 case PCI_D2:
2080 case PCI_D3hot:
2081 break;
2082
2083 default:
2084 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2085 tp->dev->name, state);
2086 return -EINVAL;
2087 }
2088 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2089 tw32(TG3PCI_MISC_HOST_CTRL,
2090 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2091
2092 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2093 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2094 !tp->link_config.phy_is_low_power) {
2095 struct phy_device *phydev;
2096 u32 advertising;
2097
2098 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
2099
2100 tp->link_config.phy_is_low_power = 1;
2101
2102 tp->link_config.orig_speed = phydev->speed;
2103 tp->link_config.orig_duplex = phydev->duplex;
2104 tp->link_config.orig_autoneg = phydev->autoneg;
2105 tp->link_config.orig_advertising = phydev->advertising;
2106
2107 advertising = ADVERTISED_TP |
2108 ADVERTISED_Pause |
2109 ADVERTISED_Autoneg |
2110 ADVERTISED_10baseT_Half;
2111
2112 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2113 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
2114 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2115 advertising |=
2116 ADVERTISED_100baseT_Half |
2117 ADVERTISED_100baseT_Full |
2118 ADVERTISED_10baseT_Full;
2119 else
2120 advertising |= ADVERTISED_10baseT_Full;
2121 }
2122
2123 phydev->advertising = advertising;
2124
2125 phy_start_aneg(phydev);
2126 }
2127 } else {
2128 if (tp->link_config.phy_is_low_power == 0) {
2129 tp->link_config.phy_is_low_power = 1;
2130 tp->link_config.orig_speed = tp->link_config.speed;
2131 tp->link_config.orig_duplex = tp->link_config.duplex;
2132 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2133 }
2134
2135 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2136 tp->link_config.speed = SPEED_10;
2137 tp->link_config.duplex = DUPLEX_HALF;
2138 tp->link_config.autoneg = AUTONEG_ENABLE;
2139 tg3_setup_phy(tp, 0);
2140 }
2141 }
2142
2143 __tg3_set_mac_addr(tp, 0);
2144
2145 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2146 u32 val;
2147
2148 val = tr32(GRC_VCPU_EXT_CTRL);
2149 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2150 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2151 int i;
2152 u32 val;
2153
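		/* Give the on-chip firmware up to 200 ms to post its
		 * status magic before continuing the shutdown.
		 */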
2154 for (i = 0; i < 200; i++) {
2155 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2156 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2157 break;
2158 msleep(1);
2159 }
2160 }
2161 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2162 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2163 WOL_DRV_STATE_SHUTDOWN |
2164 WOL_DRV_WOL |
2165 WOL_SET_MAGIC_PKT);
2166
2167 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
2168 u32 mac_mode;
2169
2170 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2171 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
2172 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2173 udelay(40);
2174 }
2175
2176 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2177 mac_mode = MAC_MODE_PORT_MODE_GMII;
2178 else
2179 mac_mode = MAC_MODE_PORT_MODE_MII;
2180
2181 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2182 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2183 ASIC_REV_5700) {
2184 u32 speed = (tp->tg3_flags &
2185 TG3_FLAG_WOL_SPEED_100MB) ?
2186 SPEED_100 : SPEED_10;
2187 if (tg3_5700_link_polarity(tp, speed))
2188 mac_mode |= MAC_MODE_LINK_POLARITY;
2189 else
2190 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2191 }
2192 } else {
2193 mac_mode = MAC_MODE_PORT_MODE_TBI;
2194 }
2195
2196 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2197 tw32(MAC_LED_CTRL, tp->led_ctrl);
2198
2199 if (pci_pme_capable(tp->pdev, state) &&
2200 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
2201 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2202 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2203 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2204 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2205 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2206 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2207 }
2208
2209 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2210 mac_mode |= tp->mac_mode &
2211 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2212 if (mac_mode & MAC_MODE_APE_TX_EN)
2213 mac_mode |= MAC_MODE_TDE_ENABLE;
2214 }
2215
2216 tw32_f(MAC_MODE, mac_mode);
2217 udelay(100);
2218
2219 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2220 udelay(10);
2221 }
2222
2223 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2224 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2225 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2226 u32 base_val;
2227
2228 base_val = tp->pci_clock_ctrl;
2229 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2230 CLOCK_CTRL_TXCLK_DISABLE);
2231
2232 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2233 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2234 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2235 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2236 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2237 /* do nothing */
2238 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2239 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2240 u32 newbits1, newbits2;
2241
2242 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2243 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2244 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2245 CLOCK_CTRL_TXCLK_DISABLE |
2246 CLOCK_CTRL_ALTCLK);
2247 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2248 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2249 newbits1 = CLOCK_CTRL_625_CORE;
2250 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2251 } else {
2252 newbits1 = CLOCK_CTRL_ALTCLK;
2253 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2254 }
2255
2256 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2257 40);
2258
2259 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2260 40);
2261
2262 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2263 u32 newbits3;
2264
2265 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2266 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2267 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2268 CLOCK_CTRL_TXCLK_DISABLE |
2269 CLOCK_CTRL_44MHZ_CORE);
2270 } else {
2271 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2272 }
2273
2274 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2275 tp->pci_clock_ctrl | newbits3, 40);
2276 }
2277 }
2278
2279 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
2280 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
2281 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
2282 tg3_power_down_phy(tp);
2283
2284 tg3_frob_aux_power(tp);
2285
2286 /* Workaround for unstable PLL clock */
2287 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2288 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2289 u32 val = tr32(0x7d00);
2290
2291 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2292 tw32(0x7d00, val);
2293 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2294 int err;
2295
2296 err = tg3_nvram_lock(tp);
2297 tg3_halt_cpu(tp, RX_CPU_BASE);
2298 if (!err)
2299 tg3_nvram_unlock(tp);
2300 }
2301 }
2302
2303 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2304
2305 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
2306 pci_enable_wake(tp->pdev, state, true);
2307
2308 /* Finally, set the new power state. */
2309 pci_set_power_state(tp->pdev, state);
2310
2311 return 0;
2312 }
2313
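/* Decode the speed/duplex fields of the MII_TG3_AUX_STAT register. */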
2314 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2315 {
2316 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2317 case MII_TG3_AUX_STAT_10HALF:
2318 *speed = SPEED_10;
2319 *duplex = DUPLEX_HALF;
2320 break;
2321
2322 case MII_TG3_AUX_STAT_10FULL:
2323 *speed = SPEED_10;
2324 *duplex = DUPLEX_FULL;
2325 break;
2326
2327 case MII_TG3_AUX_STAT_100HALF:
2328 *speed = SPEED_100;
2329 *duplex = DUPLEX_HALF;
2330 break;
2331
2332 case MII_TG3_AUX_STAT_100FULL:
2333 *speed = SPEED_100;
2334 *duplex = DUPLEX_FULL;
2335 break;
2336
2337 case MII_TG3_AUX_STAT_1000HALF:
2338 *speed = SPEED_1000;
2339 *duplex = DUPLEX_HALF;
2340 break;
2341
2342 case MII_TG3_AUX_STAT_1000FULL:
2343 *speed = SPEED_1000;
2344 *duplex = DUPLEX_FULL;
2345 break;
2346
2347 default:
2348 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2349 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2350 SPEED_10;
2351 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2352 DUPLEX_HALF;
2353 break;
2354 }
2355 *speed = SPEED_INVALID;
2356 *duplex = DUPLEX_INVALID;
2357 break;
2358 }
2359 }
2360
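/* Program the advertisement registers and either restart autoneg or
 * force the link to the administratively configured speed/duplex.
 */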
2361 static void tg3_phy_copper_begin(struct tg3 *tp)
2362 {
2363 u32 new_adv;
2364 int i;
2365
2366 if (tp->link_config.phy_is_low_power) {
2367 /* Entering low power mode. Disable gigabit and
2368 * 100baseT advertisements.
2369 */
2370 tg3_writephy(tp, MII_TG3_CTRL, 0);
2371
2372 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2373 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2374 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2375 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2376
2377 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2378 } else if (tp->link_config.speed == SPEED_INVALID) {
2379 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2380 tp->link_config.advertising &=
2381 ~(ADVERTISED_1000baseT_Half |
2382 ADVERTISED_1000baseT_Full);
2383
2384 new_adv = ADVERTISE_CSMA;
2385 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2386 new_adv |= ADVERTISE_10HALF;
2387 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2388 new_adv |= ADVERTISE_10FULL;
2389 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2390 new_adv |= ADVERTISE_100HALF;
2391 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2392 new_adv |= ADVERTISE_100FULL;
2393
2394 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2395
2396 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2397
2398 if (tp->link_config.advertising &
2399 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2400 new_adv = 0;
2401 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2402 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2403 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2404 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2405 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2406 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2407 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2408 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2409 MII_TG3_CTRL_ENABLE_AS_MASTER);
2410 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2411 } else {
2412 tg3_writephy(tp, MII_TG3_CTRL, 0);
2413 }
2414 } else {
2415 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2416 new_adv |= ADVERTISE_CSMA;
2417
2418 /* Asking for a specific link mode. */
2419 if (tp->link_config.speed == SPEED_1000) {
2420 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2421
2422 if (tp->link_config.duplex == DUPLEX_FULL)
2423 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2424 else
2425 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2426 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2427 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2428 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2429 MII_TG3_CTRL_ENABLE_AS_MASTER);
2430 } else {
2431 if (tp->link_config.speed == SPEED_100) {
2432 if (tp->link_config.duplex == DUPLEX_FULL)
2433 new_adv |= ADVERTISE_100FULL;
2434 else
2435 new_adv |= ADVERTISE_100HALF;
2436 } else {
2437 if (tp->link_config.duplex == DUPLEX_FULL)
2438 new_adv |= ADVERTISE_10FULL;
2439 else
2440 new_adv |= ADVERTISE_10HALF;
2441 }
2442 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2443
2444 new_adv = 0;
2445 }
2446
2447 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2448 }
2449
2450 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2451 tp->link_config.speed != SPEED_INVALID) {
2452 u32 bmcr, orig_bmcr;
2453
2454 tp->link_config.active_speed = tp->link_config.speed;
2455 tp->link_config.active_duplex = tp->link_config.duplex;
2456
2457 bmcr = 0;
2458 switch (tp->link_config.speed) {
2459 default:
2460 case SPEED_10:
2461 break;
2462
2463 case SPEED_100:
2464 bmcr |= BMCR_SPEED100;
2465 break;
2466
2467 case SPEED_1000:
2468 bmcr |= TG3_BMCR_SPEED1000;
2469 break;
2470 }
2471
2472 if (tp->link_config.duplex == DUPLEX_FULL)
2473 bmcr |= BMCR_FULLDPLX;
2474
2475 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2476 (bmcr != orig_bmcr)) {
2477 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2478 for (i = 0; i < 1500; i++) {
2479 u32 tmp;
2480
2481 udelay(10);
2482 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2483 tg3_readphy(tp, MII_BMSR, &tmp))
2484 continue;
2485 if (!(tmp & BMSR_LSTATUS)) {
2486 udelay(40);
2487 break;
2488 }
2489 }
2490 tg3_writephy(tp, MII_BMCR, bmcr);
2491 udelay(40);
2492 }
2493 } else {
2494 tg3_writephy(tp, MII_BMCR,
2495 BMCR_ANENABLE | BMCR_ANRESTART);
2496 }
2497 }
2498
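/* Magic DSP register writes needed to bring the BCM5401 PHY into a
 * usable state after a reset.
 */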
2499 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2500 {
2501 int err;
2502
2503 	/* Turn off tap power management and set the
2504 	 * Extended packet length bit. */
2505 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2506
2507 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2508 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2509
2510 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2511 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2512
2513 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2514 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2515
2516 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2517 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2518
2519 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2520 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2521
2522 udelay(40);
2523
2524 return err;
2525 }
2526
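/* Return 1 if the PHY advertisement registers already cover every mode
 * requested in @mask, 0 otherwise (or if the registers cannot be read).
 */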
2527 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2528 {
2529 u32 adv_reg, all_mask = 0;
2530
2531 if (mask & ADVERTISED_10baseT_Half)
2532 all_mask |= ADVERTISE_10HALF;
2533 if (mask & ADVERTISED_10baseT_Full)
2534 all_mask |= ADVERTISE_10FULL;
2535 if (mask & ADVERTISED_100baseT_Half)
2536 all_mask |= ADVERTISE_100HALF;
2537 if (mask & ADVERTISED_100baseT_Full)
2538 all_mask |= ADVERTISE_100FULL;
2539
2540 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2541 return 0;
2542
2543 if ((adv_reg & all_mask) != all_mask)
2544 return 0;
2545 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2546 u32 tg3_ctrl;
2547
2548 all_mask = 0;
2549 if (mask & ADVERTISED_1000baseT_Half)
2550 all_mask |= ADVERTISE_1000HALF;
2551 if (mask & ADVERTISED_1000baseT_Full)
2552 all_mask |= ADVERTISE_1000FULL;
2553
2554 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2555 return 0;
2556
2557 if ((tg3_ctrl & all_mask) != all_mask)
2558 return 0;
2559 }
2560 return 1;
2561 }
2562
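/* Verify that the advertised flow-control bits match what was
 * requested, rewriting the advertisement for the next negotiation
 * when they do not.
 */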
2563 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2564 {
2565 u32 curadv, reqadv;
2566
2567 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2568 return 1;
2569
2570 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2571 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2572
2573 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2574 if (curadv != reqadv)
2575 return 0;
2576
2577 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2578 tg3_readphy(tp, MII_LPA, rmtadv);
2579 } else {
2580 /* Reprogram the advertisement register, even if it
2581 * does not affect the current link. If the link
2582 * gets renegotiated in the future, we can save an
2583 * additional renegotiation cycle by advertising
2584 * it correctly in the first place.
2585 */
2586 if (curadv != reqadv) {
2587 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2588 ADVERTISE_PAUSE_ASYM);
2589 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2590 }
2591 }
2592
2593 return 1;
2594 }
2595
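/* Bring up (or re-check) the link on a copper PHY: apply per-chip
 * workarounds, wait for autoneg, and reconcile the MAC mode with the
 * speed/duplex the PHY actually negotiated.
 */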
2596 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2597 {
2598 int current_link_up;
2599 u32 bmsr, dummy;
2600 u32 lcl_adv, rmt_adv;
2601 u16 current_speed;
2602 u8 current_duplex;
2603 int i, err;
2604
2605 tw32(MAC_EVENT, 0);
2606
2607 tw32_f(MAC_STATUS,
2608 (MAC_STATUS_SYNC_CHANGED |
2609 MAC_STATUS_CFG_CHANGED |
2610 MAC_STATUS_MI_COMPLETION |
2611 MAC_STATUS_LNKSTATE_CHANGED));
2612 udelay(40);
2613
2614 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2615 tw32_f(MAC_MI_MODE,
2616 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2617 udelay(80);
2618 }
2619
2620 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2621
2622 /* Some third-party PHYs need to be reset on link going
2623 * down.
2624 */
2625 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2626 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2627 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2628 netif_carrier_ok(tp->dev)) {
2629 tg3_readphy(tp, MII_BMSR, &bmsr);
2630 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2631 !(bmsr & BMSR_LSTATUS))
2632 force_reset = 1;
2633 }
2634 if (force_reset)
2635 tg3_phy_reset(tp);
2636
2637 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2638 tg3_readphy(tp, MII_BMSR, &bmsr);
2639 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2640 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2641 bmsr = 0;
2642
2643 if (!(bmsr & BMSR_LSTATUS)) {
2644 err = tg3_init_5401phy_dsp(tp);
2645 if (err)
2646 return err;
2647
2648 tg3_readphy(tp, MII_BMSR, &bmsr);
2649 for (i = 0; i < 1000; i++) {
2650 udelay(10);
2651 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2652 (bmsr & BMSR_LSTATUS)) {
2653 udelay(40);
2654 break;
2655 }
2656 }
2657
2658 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2659 !(bmsr & BMSR_LSTATUS) &&
2660 tp->link_config.active_speed == SPEED_1000) {
2661 err = tg3_phy_reset(tp);
2662 if (!err)
2663 err = tg3_init_5401phy_dsp(tp);
2664 if (err)
2665 return err;
2666 }
2667 }
2668 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2669 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2670 /* 5701 {A0,B0} CRC bug workaround */
2671 tg3_writephy(tp, 0x15, 0x0a75);
2672 tg3_writephy(tp, 0x1c, 0x8c68);
2673 tg3_writephy(tp, 0x1c, 0x8d68);
2674 tg3_writephy(tp, 0x1c, 0x8c68);
2675 }
2676
2677 /* Clear pending interrupts... */
2678 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2679 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2680
2681 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2682 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2683 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2684 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2685
2686 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2687 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2688 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2689 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2690 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2691 else
2692 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2693 }
2694
2695 current_link_up = 0;
2696 current_speed = SPEED_INVALID;
2697 current_duplex = DUPLEX_INVALID;
2698
2699 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2700 u32 val;
2701
2702 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2703 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2704 if (!(val & (1 << 10))) {
2705 val |= (1 << 10);
2706 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2707 goto relink;
2708 }
2709 }
2710
2711 bmsr = 0;
2712 for (i = 0; i < 100; i++) {
2713 tg3_readphy(tp, MII_BMSR, &bmsr);
2714 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2715 (bmsr & BMSR_LSTATUS))
2716 break;
2717 udelay(40);
2718 }
2719
2720 if (bmsr & BMSR_LSTATUS) {
2721 u32 aux_stat, bmcr;
2722
2723 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2724 for (i = 0; i < 2000; i++) {
2725 udelay(10);
2726 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2727 aux_stat)
2728 break;
2729 }
2730
2731 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2732 &current_speed,
2733 &current_duplex);
2734
2735 bmcr = 0;
2736 for (i = 0; i < 200; i++) {
2737 tg3_readphy(tp, MII_BMCR, &bmcr);
2738 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2739 continue;
2740 if (bmcr && bmcr != 0x7fff)
2741 break;
2742 udelay(10);
2743 }
2744
2745 lcl_adv = 0;
2746 rmt_adv = 0;
2747
2748 tp->link_config.active_speed = current_speed;
2749 tp->link_config.active_duplex = current_duplex;
2750
2751 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2752 if ((bmcr & BMCR_ANENABLE) &&
2753 tg3_copper_is_advertising_all(tp,
2754 tp->link_config.advertising)) {
2755 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2756 &rmt_adv))
2757 current_link_up = 1;
2758 }
2759 } else {
2760 if (!(bmcr & BMCR_ANENABLE) &&
2761 tp->link_config.speed == current_speed &&
2762 tp->link_config.duplex == current_duplex &&
2763 tp->link_config.flowctrl ==
2764 tp->link_config.active_flowctrl) {
2765 current_link_up = 1;
2766 }
2767 }
2768
2769 if (current_link_up == 1 &&
2770 tp->link_config.active_duplex == DUPLEX_FULL)
2771 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2772 }
2773
2774 relink:
2775 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2776 u32 tmp;
2777
2778 tg3_phy_copper_begin(tp);
2779
2780 tg3_readphy(tp, MII_BMSR, &tmp);
2781 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2782 (tmp & BMSR_LSTATUS))
2783 current_link_up = 1;
2784 }
2785
2786 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2787 if (current_link_up == 1) {
2788 if (tp->link_config.active_speed == SPEED_100 ||
2789 tp->link_config.active_speed == SPEED_10)
2790 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2791 else
2792 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2793 } else
2794 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2795
2796 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2797 if (tp->link_config.active_duplex == DUPLEX_HALF)
2798 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2799
2800 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2801 if (current_link_up == 1 &&
2802 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2803 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2804 else
2805 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2806 }
2807
2808 /* ??? Without this setting Netgear GA302T PHY does not
2809 * ??? send/receive packets...
2810 */
2811 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2812 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2813 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2814 tw32_f(MAC_MI_MODE, tp->mi_mode);
2815 udelay(80);
2816 }
2817
2818 tw32_f(MAC_MODE, tp->mac_mode);
2819 udelay(40);
2820
2821 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2822 /* Polled via timer. */
2823 tw32_f(MAC_EVENT, 0);
2824 } else {
2825 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2826 }
2827 udelay(40);
2828
2829 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2830 current_link_up == 1 &&
2831 tp->link_config.active_speed == SPEED_1000 &&
2832 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2833 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2834 udelay(120);
2835 tw32_f(MAC_STATUS,
2836 (MAC_STATUS_SYNC_CHANGED |
2837 MAC_STATUS_CFG_CHANGED));
2838 udelay(40);
2839 tg3_write_mem(tp,
2840 NIC_SRAM_FIRMWARE_MBOX,
2841 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2842 }
2843
2844 if (current_link_up != netif_carrier_ok(tp->dev)) {
2845 if (current_link_up)
2846 netif_carrier_on(tp->dev);
2847 else
2848 netif_carrier_off(tp->dev);
2849 tg3_link_report(tp);
2850 }
2851
2852 return 0;
2853 }
2854
2855 struct tg3_fiber_aneginfo {
2856 int state;
2857 #define ANEG_STATE_UNKNOWN 0
2858 #define ANEG_STATE_AN_ENABLE 1
2859 #define ANEG_STATE_RESTART_INIT 2
2860 #define ANEG_STATE_RESTART 3
2861 #define ANEG_STATE_DISABLE_LINK_OK 4
2862 #define ANEG_STATE_ABILITY_DETECT_INIT 5
2863 #define ANEG_STATE_ABILITY_DETECT 6
2864 #define ANEG_STATE_ACK_DETECT_INIT 7
2865 #define ANEG_STATE_ACK_DETECT 8
2866 #define ANEG_STATE_COMPLETE_ACK_INIT 9
2867 #define ANEG_STATE_COMPLETE_ACK 10
2868 #define ANEG_STATE_IDLE_DETECT_INIT 11
2869 #define ANEG_STATE_IDLE_DETECT 12
2870 #define ANEG_STATE_LINK_OK 13
2871 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2872 #define ANEG_STATE_NEXT_PAGE_WAIT 15
2873
2874 u32 flags;
2875 #define MR_AN_ENABLE 0x00000001
2876 #define MR_RESTART_AN 0x00000002
2877 #define MR_AN_COMPLETE 0x00000004
2878 #define MR_PAGE_RX 0x00000008
2879 #define MR_NP_LOADED 0x00000010
2880 #define MR_TOGGLE_TX 0x00000020
2881 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
2882 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
2883 #define MR_LP_ADV_SYM_PAUSE 0x00000100
2884 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
2885 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2886 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2887 #define MR_LP_ADV_NEXT_PAGE 0x00001000
2888 #define MR_TOGGLE_RX 0x00002000
2889 #define MR_NP_RX 0x00004000
2890
2891 #define MR_LINK_OK 0x80000000
2892
2893 unsigned long link_time, cur_time;
2894
2895 u32 ability_match_cfg;
2896 int ability_match_count;
2897
2898 char ability_match, idle_match, ack_match;
2899
2900 u32 txconfig, rxconfig;
2901 #define ANEG_CFG_NP 0x00000080
2902 #define ANEG_CFG_ACK 0x00000040
2903 #define ANEG_CFG_RF2 0x00000020
2904 #define ANEG_CFG_RF1 0x00000010
2905 #define ANEG_CFG_PS2 0x00000001
2906 #define ANEG_CFG_PS1 0x00008000
2907 #define ANEG_CFG_HD 0x00004000
2908 #define ANEG_CFG_FD 0x00002000
2909 #define ANEG_CFG_INVAL 0x00001f06
2910
2911 };
2912 #define ANEG_OK 0
2913 #define ANEG_DONE 1
2914 #define ANEG_TIMER_ENAB 2
2915 #define ANEG_FAILED -1
2916
2917 #define ANEG_STATE_SETTLE_TIME 10000
2918
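/* Advance the software 1000BASE-X autoneg state machine by one step;
 * the caller invokes this roughly once per microsecond until it
 * returns ANEG_DONE or ANEG_FAILED.
 */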
2919 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2920 struct tg3_fiber_aneginfo *ap)
2921 {
2922 u16 flowctrl;
2923 unsigned long delta;
2924 u32 rx_cfg_reg;
2925 int ret;
2926
2927 if (ap->state == ANEG_STATE_UNKNOWN) {
2928 ap->rxconfig = 0;
2929 ap->link_time = 0;
2930 ap->cur_time = 0;
2931 ap->ability_match_cfg = 0;
2932 ap->ability_match_count = 0;
2933 ap->ability_match = 0;
2934 ap->idle_match = 0;
2935 ap->ack_match = 0;
2936 }
2937 ap->cur_time++;
2938
2939 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2940 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2941
2942 if (rx_cfg_reg != ap->ability_match_cfg) {
2943 ap->ability_match_cfg = rx_cfg_reg;
2944 ap->ability_match = 0;
2945 ap->ability_match_count = 0;
2946 } else {
2947 if (++ap->ability_match_count > 1) {
2948 ap->ability_match = 1;
2949 ap->ability_match_cfg = rx_cfg_reg;
2950 }
2951 }
2952 if (rx_cfg_reg & ANEG_CFG_ACK)
2953 ap->ack_match = 1;
2954 else
2955 ap->ack_match = 0;
2956
2957 ap->idle_match = 0;
2958 } else {
2959 ap->idle_match = 1;
2960 ap->ability_match_cfg = 0;
2961 ap->ability_match_count = 0;
2962 ap->ability_match = 0;
2963 ap->ack_match = 0;
2964
2965 rx_cfg_reg = 0;
2966 }
2967
2968 ap->rxconfig = rx_cfg_reg;
2969 ret = ANEG_OK;
2970
2971 	switch (ap->state) {
2972 case ANEG_STATE_UNKNOWN:
2973 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2974 ap->state = ANEG_STATE_AN_ENABLE;
2975
2976 /* fallthru */
2977 case ANEG_STATE_AN_ENABLE:
2978 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2979 if (ap->flags & MR_AN_ENABLE) {
2980 ap->link_time = 0;
2981 ap->cur_time = 0;
2982 ap->ability_match_cfg = 0;
2983 ap->ability_match_count = 0;
2984 ap->ability_match = 0;
2985 ap->idle_match = 0;
2986 ap->ack_match = 0;
2987
2988 ap->state = ANEG_STATE_RESTART_INIT;
2989 } else {
2990 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2991 }
2992 break;
2993
2994 case ANEG_STATE_RESTART_INIT:
2995 ap->link_time = ap->cur_time;
2996 ap->flags &= ~(MR_NP_LOADED);
2997 ap->txconfig = 0;
2998 tw32(MAC_TX_AUTO_NEG, 0);
2999 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3000 tw32_f(MAC_MODE, tp->mac_mode);
3001 udelay(40);
3002
3003 ret = ANEG_TIMER_ENAB;
3004 ap->state = ANEG_STATE_RESTART;
3005
3006 /* fallthru */
3007 case ANEG_STATE_RESTART:
3008 delta = ap->cur_time - ap->link_time;
3009 if (delta > ANEG_STATE_SETTLE_TIME) {
3010 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3011 } else {
3012 ret = ANEG_TIMER_ENAB;
3013 }
3014 break;
3015
3016 case ANEG_STATE_DISABLE_LINK_OK:
3017 ret = ANEG_DONE;
3018 break;
3019
3020 case ANEG_STATE_ABILITY_DETECT_INIT:
3021 ap->flags &= ~(MR_TOGGLE_TX);
3022 ap->txconfig = ANEG_CFG_FD;
3023 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3024 if (flowctrl & ADVERTISE_1000XPAUSE)
3025 ap->txconfig |= ANEG_CFG_PS1;
3026 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3027 ap->txconfig |= ANEG_CFG_PS2;
3028 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3029 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3030 tw32_f(MAC_MODE, tp->mac_mode);
3031 udelay(40);
3032
3033 ap->state = ANEG_STATE_ABILITY_DETECT;
3034 break;
3035
3036 case ANEG_STATE_ABILITY_DETECT:
3037 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3038 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3039 }
3040 break;
3041
3042 case ANEG_STATE_ACK_DETECT_INIT:
3043 ap->txconfig |= ANEG_CFG_ACK;
3044 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3045 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3046 tw32_f(MAC_MODE, tp->mac_mode);
3047 udelay(40);
3048
3049 ap->state = ANEG_STATE_ACK_DETECT;
3050
3051 /* fallthru */
3052 case ANEG_STATE_ACK_DETECT:
3053 if (ap->ack_match != 0) {
3054 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3055 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3056 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3057 } else {
3058 ap->state = ANEG_STATE_AN_ENABLE;
3059 }
3060 } else if (ap->ability_match != 0 &&
3061 ap->rxconfig == 0) {
3062 ap->state = ANEG_STATE_AN_ENABLE;
3063 }
3064 break;
3065
3066 case ANEG_STATE_COMPLETE_ACK_INIT:
3067 if (ap->rxconfig & ANEG_CFG_INVAL) {
3068 ret = ANEG_FAILED;
3069 break;
3070 }
3071 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3072 MR_LP_ADV_HALF_DUPLEX |
3073 MR_LP_ADV_SYM_PAUSE |
3074 MR_LP_ADV_ASYM_PAUSE |
3075 MR_LP_ADV_REMOTE_FAULT1 |
3076 MR_LP_ADV_REMOTE_FAULT2 |
3077 MR_LP_ADV_NEXT_PAGE |
3078 MR_TOGGLE_RX |
3079 MR_NP_RX);
3080 if (ap->rxconfig & ANEG_CFG_FD)
3081 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3082 if (ap->rxconfig & ANEG_CFG_HD)
3083 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3084 if (ap->rxconfig & ANEG_CFG_PS1)
3085 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3086 if (ap->rxconfig & ANEG_CFG_PS2)
3087 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3088 if (ap->rxconfig & ANEG_CFG_RF1)
3089 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3090 if (ap->rxconfig & ANEG_CFG_RF2)
3091 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3092 if (ap->rxconfig & ANEG_CFG_NP)
3093 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3094
3095 ap->link_time = ap->cur_time;
3096
3097 ap->flags ^= (MR_TOGGLE_TX);
3098 if (ap->rxconfig & 0x0008)
3099 ap->flags |= MR_TOGGLE_RX;
3100 if (ap->rxconfig & ANEG_CFG_NP)
3101 ap->flags |= MR_NP_RX;
3102 ap->flags |= MR_PAGE_RX;
3103
3104 ap->state = ANEG_STATE_COMPLETE_ACK;
3105 ret = ANEG_TIMER_ENAB;
3106 break;
3107
3108 case ANEG_STATE_COMPLETE_ACK:
3109 if (ap->ability_match != 0 &&
3110 ap->rxconfig == 0) {
3111 ap->state = ANEG_STATE_AN_ENABLE;
3112 break;
3113 }
3114 delta = ap->cur_time - ap->link_time;
3115 if (delta > ANEG_STATE_SETTLE_TIME) {
3116 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3117 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3118 } else {
3119 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3120 !(ap->flags & MR_NP_RX)) {
3121 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3122 } else {
3123 ret = ANEG_FAILED;
3124 }
3125 }
3126 }
3127 break;
3128
3129 case ANEG_STATE_IDLE_DETECT_INIT:
3130 ap->link_time = ap->cur_time;
3131 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3132 tw32_f(MAC_MODE, tp->mac_mode);
3133 udelay(40);
3134
3135 ap->state = ANEG_STATE_IDLE_DETECT;
3136 ret = ANEG_TIMER_ENAB;
3137 break;
3138
3139 case ANEG_STATE_IDLE_DETECT:
3140 if (ap->ability_match != 0 &&
3141 ap->rxconfig == 0) {
3142 ap->state = ANEG_STATE_AN_ENABLE;
3143 break;
3144 }
3145 delta = ap->cur_time - ap->link_time;
3146 if (delta > ANEG_STATE_SETTLE_TIME) {
3147 /* XXX another gem from the Broadcom driver :( */
3148 ap->state = ANEG_STATE_LINK_OK;
3149 }
3150 break;
3151
3152 case ANEG_STATE_LINK_OK:
3153 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3154 ret = ANEG_DONE;
3155 break;
3156
3157 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3158 /* ??? unimplemented */
3159 break;
3160
3161 case ANEG_STATE_NEXT_PAGE_WAIT:
3162 /* ??? unimplemented */
3163 break;
3164
3165 default:
3166 ret = ANEG_FAILED;
3167 break;
3168 }
3169
3170 return ret;
3171 }
3172
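/* Run the software autoneg state machine to completion (at most
 * ~195 ms) and report the local tx config word and the negotiated
 * flags through @txflags and @rxflags.
 */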
3173 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3174 {
3175 int res = 0;
3176 struct tg3_fiber_aneginfo aninfo;
3177 int status = ANEG_FAILED;
3178 unsigned int tick;
3179 u32 tmp;
3180
3181 tw32_f(MAC_TX_AUTO_NEG, 0);
3182
3183 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3184 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3185 udelay(40);
3186
3187 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3188 udelay(40);
3189
3190 memset(&aninfo, 0, sizeof(aninfo));
3191 aninfo.flags |= MR_AN_ENABLE;
3192 aninfo.state = ANEG_STATE_UNKNOWN;
3193 aninfo.cur_time = 0;
3194 tick = 0;
3195 while (++tick < 195000) {
3196 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3197 if (status == ANEG_DONE || status == ANEG_FAILED)
3198 break;
3199
3200 udelay(1);
3201 }
3202
3203 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3204 tw32_f(MAC_MODE, tp->mac_mode);
3205 udelay(40);
3206
3207 *txflags = aninfo.txconfig;
3208 *rxflags = aninfo.flags;
3209
3210 if (status == ANEG_DONE &&
3211 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3212 MR_LP_ADV_FULL_DUPLEX)))
3213 res = 1;
3214
3215 return res;
3216 }
3217
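/* Reset and configure the BCM8002 fiber PHY. */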
3218 static void tg3_init_bcm8002(struct tg3 *tp)
3219 {
3220 u32 mac_status = tr32(MAC_STATUS);
3221 int i;
3222
3223 	/* Reset when initializing the first time or when we have a link. */
3224 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3225 !(mac_status & MAC_STATUS_PCS_SYNCED))
3226 return;
3227
3228 /* Set PLL lock range. */
3229 tg3_writephy(tp, 0x16, 0x8007);
3230
3231 /* SW reset */
3232 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3233
3234 /* Wait for reset to complete. */
3235 /* XXX schedule_timeout() ... */
3236 for (i = 0; i < 500; i++)
3237 udelay(10);
3238
3239 /* Config mode; select PMA/Ch 1 regs. */
3240 tg3_writephy(tp, 0x10, 0x8411);
3241
3242 /* Enable auto-lock and comdet, select txclk for tx. */
3243 tg3_writephy(tp, 0x11, 0x0a10);
3244
3245 tg3_writephy(tp, 0x18, 0x00a0);
3246 tg3_writephy(tp, 0x16, 0x41ff);
3247
3248 /* Assert and deassert POR. */
3249 tg3_writephy(tp, 0x13, 0x0400);
3250 udelay(40);
3251 tg3_writephy(tp, 0x13, 0x0000);
3252
3253 tg3_writephy(tp, 0x11, 0x0a50);
3254 udelay(40);
3255 tg3_writephy(tp, 0x11, 0x0a10);
3256
3257 /* Wait for signal to stabilize */
3258 /* XXX schedule_timeout() ... */
3259 for (i = 0; i < 15000; i++)
3260 udelay(10);
3261
3262 /* Deselect the channel register so we can read the PHYID
3263 * later.
3264 */
3265 tg3_writephy(tp, 0x10, 0x8011);
3266 }
3267
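/* Fiber link setup using the hardware SG_DIG autoneg engine, falling
 * back to parallel detection when no config words are received.
 */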
3268 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3269 {
3270 u16 flowctrl;
3271 u32 sg_dig_ctrl, sg_dig_status;
3272 u32 serdes_cfg, expected_sg_dig_ctrl;
3273 int workaround, port_a;
3274 int current_link_up;
3275
3276 serdes_cfg = 0;
3277 expected_sg_dig_ctrl = 0;
3278 workaround = 0;
3279 port_a = 1;
3280 current_link_up = 0;
3281
3282 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3283 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3284 workaround = 1;
3285 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3286 port_a = 0;
3287
3288 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3289 /* preserve bits 20-23 for voltage regulator */
3290 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3291 }
3292
3293 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3294
3295 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3296 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3297 if (workaround) {
3298 u32 val = serdes_cfg;
3299
3300 if (port_a)
3301 val |= 0xc010000;
3302 else
3303 val |= 0x4010000;
3304 tw32_f(MAC_SERDES_CFG, val);
3305 }
3306
3307 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3308 }
3309 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3310 tg3_setup_flow_control(tp, 0, 0);
3311 current_link_up = 1;
3312 }
3313 goto out;
3314 }
3315
3316 /* Want auto-negotiation. */
3317 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3318
3319 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3320 if (flowctrl & ADVERTISE_1000XPAUSE)
3321 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3322 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3323 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3324
3325 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3326 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3327 tp->serdes_counter &&
3328 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3329 MAC_STATUS_RCVD_CFG)) ==
3330 MAC_STATUS_PCS_SYNCED)) {
3331 tp->serdes_counter--;
3332 current_link_up = 1;
3333 goto out;
3334 }
3335 restart_autoneg:
3336 if (workaround)
3337 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3338 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3339 udelay(5);
3340 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3341
3342 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3343 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3344 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3345 MAC_STATUS_SIGNAL_DET)) {
3346 sg_dig_status = tr32(SG_DIG_STATUS);
3347 mac_status = tr32(MAC_STATUS);
3348
3349 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3350 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3351 u32 local_adv = 0, remote_adv = 0;
3352
3353 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3354 local_adv |= ADVERTISE_1000XPAUSE;
3355 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3356 local_adv |= ADVERTISE_1000XPSE_ASYM;
3357
3358 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3359 remote_adv |= LPA_1000XPAUSE;
3360 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3361 remote_adv |= LPA_1000XPAUSE_ASYM;
3362
3363 tg3_setup_flow_control(tp, local_adv, remote_adv);
3364 current_link_up = 1;
3365 tp->serdes_counter = 0;
3366 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3367 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3368 if (tp->serdes_counter)
3369 tp->serdes_counter--;
3370 else {
3371 if (workaround) {
3372 u32 val = serdes_cfg;
3373
3374 if (port_a)
3375 val |= 0xc010000;
3376 else
3377 val |= 0x4010000;
3378
3379 tw32_f(MAC_SERDES_CFG, val);
3380 }
3381
3382 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3383 udelay(40);
3384
3385 				/* Link parallel detection - link is up
3386 				 * only if we have PCS_SYNC and are not
3387 				 * receiving config code words. */
3388 mac_status = tr32(MAC_STATUS);
3389 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3390 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3391 tg3_setup_flow_control(tp, 0, 0);
3392 current_link_up = 1;
3393 tp->tg3_flags2 |=
3394 TG3_FLG2_PARALLEL_DETECT;
3395 tp->serdes_counter =
3396 SERDES_PARALLEL_DET_TIMEOUT;
3397 } else
3398 goto restart_autoneg;
3399 }
3400 }
3401 } else {
3402 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3403 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3404 }
3405
3406 out:
3407 return current_link_up;
3408 }
3409
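/* Fiber link setup driven entirely in software, for chips without a
 * usable hardware autoneg engine.
 */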
3410 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3411 {
3412 int current_link_up = 0;
3413
3414 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3415 goto out;
3416
3417 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3418 u32 txflags, rxflags;
3419 int i;
3420
3421 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3422 u32 local_adv = 0, remote_adv = 0;
3423
3424 if (txflags & ANEG_CFG_PS1)
3425 local_adv |= ADVERTISE_1000XPAUSE;
3426 if (txflags & ANEG_CFG_PS2)
3427 local_adv |= ADVERTISE_1000XPSE_ASYM;
3428
3429 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3430 remote_adv |= LPA_1000XPAUSE;
3431 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3432 remote_adv |= LPA_1000XPAUSE_ASYM;
3433
3434 tg3_setup_flow_control(tp, local_adv, remote_adv);
3435
3436 current_link_up = 1;
3437 }
3438 for (i = 0; i < 30; i++) {
3439 udelay(20);
3440 tw32_f(MAC_STATUS,
3441 (MAC_STATUS_SYNC_CHANGED |
3442 MAC_STATUS_CFG_CHANGED));
3443 udelay(40);
3444 if ((tr32(MAC_STATUS) &
3445 (MAC_STATUS_SYNC_CHANGED |
3446 MAC_STATUS_CFG_CHANGED)) == 0)
3447 break;
3448 }
3449
3450 mac_status = tr32(MAC_STATUS);
3451 if (current_link_up == 0 &&
3452 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3453 !(mac_status & MAC_STATUS_RCVD_CFG))
3454 current_link_up = 1;
3455 } else {
3456 tg3_setup_flow_control(tp, 0, 0);
3457
3458 /* Forcing 1000FD link up. */
3459 current_link_up = 1;
3460
3461 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3462 udelay(40);
3463
3464 tw32_f(MAC_MODE, tp->mac_mode);
3465 udelay(40);
3466 }
3467
3468 out:
3469 return current_link_up;
3470 }
3471
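/* Top-level link setup for TBI (fiber) ports. */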
3472 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3473 {
3474 u32 orig_pause_cfg;
3475 u16 orig_active_speed;
3476 u8 orig_active_duplex;
3477 u32 mac_status;
3478 int current_link_up;
3479 int i;
3480
3481 orig_pause_cfg = tp->link_config.active_flowctrl;
3482 orig_active_speed = tp->link_config.active_speed;
3483 orig_active_duplex = tp->link_config.active_duplex;
3484
3485 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3486 netif_carrier_ok(tp->dev) &&
3487 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3488 mac_status = tr32(MAC_STATUS);
3489 mac_status &= (MAC_STATUS_PCS_SYNCED |
3490 MAC_STATUS_SIGNAL_DET |
3491 MAC_STATUS_CFG_CHANGED |
3492 MAC_STATUS_RCVD_CFG);
3493 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3494 MAC_STATUS_SIGNAL_DET)) {
3495 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3496 MAC_STATUS_CFG_CHANGED));
3497 return 0;
3498 }
3499 }
3500
3501 tw32_f(MAC_TX_AUTO_NEG, 0);
3502
3503 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3504 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3505 tw32_f(MAC_MODE, tp->mac_mode);
3506 udelay(40);
3507
3508 if (tp->phy_id == PHY_ID_BCM8002)
3509 tg3_init_bcm8002(tp);
3510
3511 /* Enable link change event even when serdes polling. */
3512 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3513 udelay(40);
3514
3515 current_link_up = 0;
3516 mac_status = tr32(MAC_STATUS);
3517
3518 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3519 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3520 else
3521 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3522
3523 tp->hw_status->status =
3524 (SD_STATUS_UPDATED |
3525 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3526
3527 for (i = 0; i < 100; i++) {
3528 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3529 MAC_STATUS_CFG_CHANGED));
3530 udelay(5);
3531 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3532 MAC_STATUS_CFG_CHANGED |
3533 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3534 break;
3535 }
3536
3537 mac_status = tr32(MAC_STATUS);
3538 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3539 current_link_up = 0;
3540 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3541 tp->serdes_counter == 0) {
3542 tw32_f(MAC_MODE, (tp->mac_mode |
3543 MAC_MODE_SEND_CONFIGS));
3544 udelay(1);
3545 tw32_f(MAC_MODE, tp->mac_mode);
3546 }
3547 }
3548
3549 if (current_link_up == 1) {
3550 tp->link_config.active_speed = SPEED_1000;
3551 tp->link_config.active_duplex = DUPLEX_FULL;
3552 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3553 LED_CTRL_LNKLED_OVERRIDE |
3554 LED_CTRL_1000MBPS_ON));
3555 } else {
3556 tp->link_config.active_speed = SPEED_INVALID;
3557 tp->link_config.active_duplex = DUPLEX_INVALID;
3558 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3559 LED_CTRL_LNKLED_OVERRIDE |
3560 LED_CTRL_TRAFFIC_OVERRIDE));
3561 }
3562
3563 if (current_link_up != netif_carrier_ok(tp->dev)) {
3564 if (current_link_up)
3565 netif_carrier_on(tp->dev);
3566 else
3567 netif_carrier_off(tp->dev);
3568 tg3_link_report(tp);
3569 } else {
3570 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3571 if (orig_pause_cfg != now_pause_cfg ||
3572 orig_active_speed != tp->link_config.active_speed ||
3573 orig_active_duplex != tp->link_config.active_duplex)
3574 tg3_link_report(tp);
3575 }
3576
3577 return 0;
3578 }
3579
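/* Link setup for fiber ports driven through an MII-style serdes
 * (5714S and similar).
 */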
3580 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3581 {
3582 int current_link_up, err = 0;
3583 u32 bmsr, bmcr;
3584 u16 current_speed;
3585 u8 current_duplex;
3586 u32 local_adv, remote_adv;
3587
3588 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3589 tw32_f(MAC_MODE, tp->mac_mode);
3590 udelay(40);
3591
3592 tw32(MAC_EVENT, 0);
3593
3594 tw32_f(MAC_STATUS,
3595 (MAC_STATUS_SYNC_CHANGED |
3596 MAC_STATUS_CFG_CHANGED |
3597 MAC_STATUS_MI_COMPLETION |
3598 MAC_STATUS_LNKSTATE_CHANGED));
3599 udelay(40);
3600
3601 if (force_reset)
3602 tg3_phy_reset(tp);
3603
3604 current_link_up = 0;
3605 current_speed = SPEED_INVALID;
3606 current_duplex = DUPLEX_INVALID;
3607
3608 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3609 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3610 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3611 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3612 bmsr |= BMSR_LSTATUS;
3613 else
3614 bmsr &= ~BMSR_LSTATUS;
3615 }
3616
3617 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3618
3619 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3620 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3621 /* do nothing, just check for link up at the end */
3622 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3623 u32 adv, new_adv;
3624
3625 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3626 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3627 ADVERTISE_1000XPAUSE |
3628 ADVERTISE_1000XPSE_ASYM |
3629 ADVERTISE_SLCT);
3630
3631 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3632
3633 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3634 new_adv |= ADVERTISE_1000XHALF;
3635 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3636 new_adv |= ADVERTISE_1000XFULL;
3637
3638 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3639 tg3_writephy(tp, MII_ADVERTISE, new_adv);
3640 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3641 tg3_writephy(tp, MII_BMCR, bmcr);
3642
3643 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3644 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3645 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3646
3647 return err;
3648 }
3649 } else {
3650 u32 new_bmcr;
3651
3652 bmcr &= ~BMCR_SPEED1000;
3653 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3654
3655 if (tp->link_config.duplex == DUPLEX_FULL)
3656 new_bmcr |= BMCR_FULLDPLX;
3657
3658 if (new_bmcr != bmcr) {
3659 /* BMCR_SPEED1000 is a reserved bit that needs
3660 * to be set on write.
3661 */
3662 new_bmcr |= BMCR_SPEED1000;
3663
3664 			/* Force a link down. */
3665 if (netif_carrier_ok(tp->dev)) {
3666 u32 adv;
3667
3668 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3669 adv &= ~(ADVERTISE_1000XFULL |
3670 ADVERTISE_1000XHALF |
3671 ADVERTISE_SLCT);
3672 tg3_writephy(tp, MII_ADVERTISE, adv);
3673 tg3_writephy(tp, MII_BMCR, bmcr |
3674 BMCR_ANRESTART |
3675 BMCR_ANENABLE);
3676 udelay(10);
3677 netif_carrier_off(tp->dev);
3678 }
3679 tg3_writephy(tp, MII_BMCR, new_bmcr);
3680 bmcr = new_bmcr;
3681 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3682 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3683 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3684 ASIC_REV_5714) {
3685 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3686 bmsr |= BMSR_LSTATUS;
3687 else
3688 bmsr &= ~BMSR_LSTATUS;
3689 }
3690 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3691 }
3692 }
3693
3694 if (bmsr & BMSR_LSTATUS) {
3695 current_speed = SPEED_1000;
3696 current_link_up = 1;
3697 if (bmcr & BMCR_FULLDPLX)
3698 current_duplex = DUPLEX_FULL;
3699 else
3700 current_duplex = DUPLEX_HALF;
3701
3702 local_adv = 0;
3703 remote_adv = 0;
3704
3705 if (bmcr & BMCR_ANENABLE) {
3706 u32 common;
3707
3708 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3709 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3710 common = local_adv & remote_adv;
3711 if (common & (ADVERTISE_1000XHALF |
3712 ADVERTISE_1000XFULL)) {
3713 if (common & ADVERTISE_1000XFULL)
3714 current_duplex = DUPLEX_FULL;
3715 else
3716 current_duplex = DUPLEX_HALF;
3717 			} else
3719 current_link_up = 0;
3720 }
3721 }
3722
3723 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3724 tg3_setup_flow_control(tp, local_adv, remote_adv);
3725
3726 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3727 if (tp->link_config.active_duplex == DUPLEX_HALF)
3728 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3729
3730 tw32_f(MAC_MODE, tp->mac_mode);
3731 udelay(40);
3732
3733 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3734
3735 tp->link_config.active_speed = current_speed;
3736 tp->link_config.active_duplex = current_duplex;
3737
3738 if (current_link_up != netif_carrier_ok(tp->dev)) {
3739 if (current_link_up)
3740 netif_carrier_on(tp->dev);
3741 else {
3742 netif_carrier_off(tp->dev);
3743 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3744 }
3745 tg3_link_report(tp);
3746 }
3747 return err;
3748 }
3749
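/* Periodically poke the serdes so that a link partner that does not
 * autonegotiate can still be brought up by parallel detection, and
 * drop back to autoneg once config code words reappear.
 */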
3750 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3751 {
3752 if (tp->serdes_counter) {
3753 /* Give autoneg time to complete. */
3754 tp->serdes_counter--;
3755 return;
3756 }
3757 if (!netif_carrier_ok(tp->dev) &&
3758 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3759 u32 bmcr;
3760
3761 tg3_readphy(tp, MII_BMCR, &bmcr);
3762 if (bmcr & BMCR_ANENABLE) {
3763 u32 phy1, phy2;
3764
3765 /* Select shadow register 0x1f */
3766 tg3_writephy(tp, 0x1c, 0x7c00);
3767 tg3_readphy(tp, 0x1c, &phy1);
3768
3769 /* Select expansion interrupt status register */
3770 tg3_writephy(tp, 0x17, 0x0f01);
3771 tg3_readphy(tp, 0x15, &phy2);
3772 tg3_readphy(tp, 0x15, &phy2);
3773
3774 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3775 				/* We have signal detect and are not receiving
3776 				 * config code words, so the link is up by
3777 				 * parallel detection.
3778 */
3779
3780 bmcr &= ~BMCR_ANENABLE;
3781 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3782 tg3_writephy(tp, MII_BMCR, bmcr);
3783 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3784 }
3785 }
3786 	} else if (netif_carrier_ok(tp->dev) &&
3788 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3789 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3790 u32 phy2;
3791
3792 /* Select expansion interrupt status register */
3793 tg3_writephy(tp, 0x17, 0x0f01);
3794 tg3_readphy(tp, 0x15, &phy2);
3795 if (phy2 & 0x20) {
3796 u32 bmcr;
3797
3798 /* Config code words received, turn on autoneg. */
3799 tg3_readphy(tp, MII_BMCR, &bmcr);
3800 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3801
3802 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3804 }
3805 }
3806 }
3807
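/* Dispatch link setup to the copper, fiber or MII-serdes handler and
 * apply the MAC-side settings that depend on the resulting link state.
 */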
3808 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3809 {
3810 int err;
3811
3812 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3813 err = tg3_setup_fiber_phy(tp, force_reset);
3814 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3815 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3816 } else {
3817 err = tg3_setup_copper_phy(tp, force_reset);
3818 }
3819
3820 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
3821 u32 val, scale;
3822
3823 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3824 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3825 scale = 65;
3826 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3827 scale = 6;
3828 else
3829 scale = 12;
3830
3831 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3832 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3833 tw32(GRC_MISC_CFG, val);
3834 }
3835
3836 if (tp->link_config.active_speed == SPEED_1000 &&
3837 tp->link_config.active_duplex == DUPLEX_HALF)
3838 tw32(MAC_TX_LENGTHS,
3839 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3840 (6 << TX_LENGTHS_IPG_SHIFT) |
3841 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3842 else
3843 tw32(MAC_TX_LENGTHS,
3844 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3845 (6 << TX_LENGTHS_IPG_SHIFT) |
3846 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3847
3848 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3849 if (netif_carrier_ok(tp->dev)) {
3850 tw32(HOSTCC_STAT_COAL_TICKS,
3851 tp->coal.stats_block_coalesce_usecs);
3852 } else {
3853 tw32(HOSTCC_STAT_COAL_TICKS, 0);
3854 }
3855 }
3856
3857 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3858 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3859 if (!netif_carrier_ok(tp->dev))
3860 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3861 tp->pwrmgmt_thresh;
3862 else
3863 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3864 tw32(PCIE_PWR_MGMT_THRESH, val);
3865 }
3866
3867 return err;
3868 }
3869
3870 /* This is called whenever we suspect that the system chipset is re-
3871 * ordering the sequence of MMIO to the tx send mailbox. The symptom
3872 * is bogus tx completions. We try to recover by setting the
3873 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3874 * in the workqueue.
3875 */
3876 static void tg3_tx_recover(struct tg3 *tp)
3877 {
3878 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3879 tp->write32_tx_mbox == tg3_write_indirect_mbox);
3880
3881 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3882 "mapped I/O cycles to the network device, attempting to "
3883 "recover. Please report the problem to the driver maintainer "
3884 "and include system chipset information.\n", tp->dev->name);
3885
3886 spin_lock(&tp->lock);
3887 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3888 spin_unlock(&tp->lock);
3889 }
3890
3891 static inline u32 tg3_tx_avail(struct tg3 *tp)
3892 {
3893 smp_mb();
3894 return (tp->tx_pending -
3895 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3896 }
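/* Illustrative sketch, not part of the driver: with a power-of-two ring
 * size, the masked difference (prod - cons) & (size - 1) counts in-flight
 * descriptors correctly even after the u32 indices wrap. The helper name
 * below is hypothetical.
 */
#if 0
static u32 ring_entries_in_flight(u32 prod, u32 cons, u32 ring_size)
{
	/* ring_size must be a power of two for the mask to work */
	return (prod - cons) & (ring_size - 1);
}
/* e.g. prod = 0x00000002, cons = 0xfffffffe, ring_size = 512 -> 4 */
#endif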
3897
3898 /* Tigon3 never reports partial packet sends. So we do not
3899 * need special logic to handle SKBs that have not had all
3900 * of their frags sent yet, like SunGEM does.
3901 */
3902 static void tg3_tx(struct tg3 *tp)
3903 {
3904 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3905 u32 sw_idx = tp->tx_cons;
3906
3907 while (sw_idx != hw_idx) {
3908 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3909 struct sk_buff *skb = ri->skb;
3910 int i, tx_bug = 0;
3911
3912 if (unlikely(skb == NULL)) {
3913 tg3_tx_recover(tp);
3914 return;
3915 }
3916
3917 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
3918
3919 ri->skb = NULL;
3920
3921 sw_idx = NEXT_TX(sw_idx);
3922
3923 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3924 ri = &tp->tx_buffers[sw_idx];
3925 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3926 tx_bug = 1;
3927 sw_idx = NEXT_TX(sw_idx);
3928 }
3929
3930 dev_kfree_skb(skb);
3931
3932 if (unlikely(tx_bug)) {
3933 tg3_tx_recover(tp);
3934 return;
3935 }
3936 }
3937
3938 tp->tx_cons = sw_idx;
3939
3940 /* Need to make the tx_cons update visible to tg3_start_xmit()
3941 * before checking for netif_queue_stopped(). Without the
3942 * memory barrier, there is a small possibility that tg3_start_xmit()
3943 * will miss it and cause the queue to be stopped forever.
3944 */
3945 smp_mb();
3946
3947 if (unlikely(netif_queue_stopped(tp->dev) &&
3948 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
3949 netif_tx_lock(tp->dev);
3950 if (netif_queue_stopped(tp->dev) &&
3951 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
3952 netif_wake_queue(tp->dev);
3953 netif_tx_unlock(tp->dev);
3954 }
3955 }
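/* Condensed sketch of the barrier pairing relied on above (this mirrors
 * the real code in tg3_tx() and tg3_start_xmit(); it is not a separate
 * mechanism): each side publishes its update before re-checking the
 * other side's state, so the queue cannot stall stopped forever.
 */
#if 0
/* consumer side (tg3_tx) */
tp->tx_cons = sw_idx;
smp_mb();	/* pairs with the barrier inside tg3_tx_avail() */
if (netif_queue_stopped(tp->dev) &&
    tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
	netif_wake_queue(tp->dev);

/* producer side (tg3_start_xmit) */
netif_stop_queue(dev);
if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))	/* re-check after smp_mb() */
	netif_wake_queue(tp->dev);
#endif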
3956
3957 /* Returns size of skb allocated or < 0 on error.
3958 *
3959 * We only need to fill in the address because the other members
3960 * of the RX descriptor are invariant, see tg3_init_rings.
3961 *
3962 * Note the purposeful asymmetry of cpu vs. chip accesses. For
3963 * posting buffers we only dirty the first cache line of the RX
3964 * descriptor (containing the address). Whereas for the RX status
3965 * buffers the cpu only reads the last cacheline of the RX descriptor
3966 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3967 */
3968 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3969 int src_idx, u32 dest_idx_unmasked)
3970 {
3971 struct tg3_rx_buffer_desc *desc;
3972 struct ring_info *map, *src_map;
3973 struct sk_buff *skb;
3974 dma_addr_t mapping;
3975 int skb_size, dest_idx;
3976
3977 src_map = NULL;
3978 switch (opaque_key) {
3979 case RXD_OPAQUE_RING_STD:
3980 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3981 desc = &tp->rx_std[dest_idx];
3982 map = &tp->rx_std_buffers[dest_idx];
3983 if (src_idx >= 0)
3984 src_map = &tp->rx_std_buffers[src_idx];
3985 skb_size = tp->rx_pkt_buf_sz;
3986 break;
3987
3988 case RXD_OPAQUE_RING_JUMBO:
3989 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3990 desc = &tp->rx_jumbo[dest_idx];
3991 map = &tp->rx_jumbo_buffers[dest_idx];
3992 if (src_idx >= 0)
3993 src_map = &tp->rx_jumbo_buffers[src_idx];
3994 skb_size = RX_JUMBO_PKT_BUF_SZ;
3995 break;
3996
3997 default:
3998 return -EINVAL;
3999 }
4000
4001 /* Do not overwrite any of the map or rp information
4002 * until we are sure we can commit to a new buffer.
4003 *
4004 * Callers depend upon this behavior and assume that
4005 * we leave everything unchanged if we fail.
4006 */
4007 skb = netdev_alloc_skb(tp->dev, skb_size);
4008 if (skb == NULL)
4009 return -ENOMEM;
4010
4011 skb_reserve(skb, tp->rx_offset);
4012
4013 mapping = pci_map_single(tp->pdev, skb->data,
4014 skb_size - tp->rx_offset,
4015 PCI_DMA_FROMDEVICE);
4016
4017 map->skb = skb;
4018 pci_unmap_addr_set(map, mapping, mapping);
4019
4020 if (src_map != NULL)
4021 src_map->skb = NULL;
4022
4023 desc->addr_hi = ((u64)mapping >> 32);
4024 desc->addr_lo = ((u64)mapping & 0xffffffff);
4025
4026 return skb_size;
4027 }
4028
4029 /* We only need to move over in the address because the other
4030 * members of the RX descriptor are invariant. See notes above
4031 * tg3_alloc_rx_skb for full details.
4032 */
4033 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
4034 int src_idx, u32 dest_idx_unmasked)
4035 {
4036 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4037 struct ring_info *src_map, *dest_map;
4038 int dest_idx;
4039
4040 switch (opaque_key) {
4041 case RXD_OPAQUE_RING_STD:
4042 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4043 dest_desc = &tp->rx_std[dest_idx];
4044 dest_map = &tp->rx_std_buffers[dest_idx];
4045 src_desc = &tp->rx_std[src_idx];
4046 src_map = &tp->rx_std_buffers[src_idx];
4047 break;
4048
4049 case RXD_OPAQUE_RING_JUMBO:
4050 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4051 dest_desc = &tp->rx_jumbo[dest_idx];
4052 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4053 src_desc = &tp->rx_jumbo[src_idx];
4054 src_map = &tp->rx_jumbo_buffers[src_idx];
4055 break;
4056
4057 default:
4058 return;
4059 }
4060
4061 dest_map->skb = src_map->skb;
4062 pci_unmap_addr_set(dest_map, mapping,
4063 pci_unmap_addr(src_map, mapping));
4064 dest_desc->addr_hi = src_desc->addr_hi;
4065 dest_desc->addr_lo = src_desc->addr_lo;
4066
4067 src_map->skb = NULL;
4068 }
4069
4070 #if TG3_VLAN_TAG_USED
4071 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
4072 {
4073 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
4074 }
4075 #endif
4076
4077 /* The RX ring scheme is composed of multiple rings which post fresh
4078 * buffers to the chip, and one special ring the chip uses to report
4079 * status back to the host.
4080 *
4081 * The special ring reports the status of received packets to the
4082 * host. The chip does not write into the original descriptor the
4083 * RX buffer was obtained from. The chip simply takes the original
4084 * descriptor as provided by the host, updates the status and length
4085 * field, then writes this into the next status ring entry.
4086 *
4087 * Each ring the host uses to post buffers to the chip is described
4088 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4089 * it is first placed into the on-chip RAM. When the packet's length
4090 * is known, the chip walks down the TG3_BDINFO entries to select the
4091 * ring: each TG3_BDINFO specifies a MAXLEN field, and the first
4092 * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
4093 *
4094 * The "separate ring for rx status" scheme may sound odd, but it makes
4095 * sense from a cache coherency perspective. If only the host writes
4096 * to the buffer post rings, and only the chip writes to the rx status
4097 * rings, then cache lines never move beyond shared-modified state.
4098 * If both the host and chip were to write into the same ring, cache line
4099 * eviction could occur since both entities want it in an exclusive state.
4100 */
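/* Minimal sketch of the opaque-cookie round trip described above, using
 * the driver's own constants (condensed; the status-ring structure is
 * simplified): the host encodes ring id and index into each posted
 * descriptor, and the chip echoes that cookie in the status entry, which
 * is all tg3_rx() below needs to locate the original buffer.
 */
#if 0
/* host, posting buffer i to the standard ring: */
desc->opaque = RXD_OPAQUE_RING_STD | (i << RXD_OPAQUE_INDEX_SHIFT);

/* host, reaping a status ring entry: */
desc_idx   = desc->opaque & RXD_OPAQUE_INDEX_MASK;
opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
#endif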
4101 static int tg3_rx(struct tg3 *tp, int budget)
4102 {
4103 u32 work_mask, rx_std_posted = 0;
4104 u32 sw_idx = tp->rx_rcb_ptr;
4105 u16 hw_idx;
4106 int received;
4107
4108 hw_idx = tp->hw_status->idx[0].rx_producer;
4109 /*
4110 * We need to order the read of hw_idx and the read of
4111 * the opaque cookie.
4112 */
4113 rmb();
4114 work_mask = 0;
4115 received = 0;
4116 while (sw_idx != hw_idx && budget > 0) {
4117 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
4118 unsigned int len;
4119 struct sk_buff *skb;
4120 dma_addr_t dma_addr;
4121 u32 opaque_key, desc_idx, *post_ptr;
4122
4123 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4124 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4125 if (opaque_key == RXD_OPAQUE_RING_STD) {
4126 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
4127 mapping);
4128 skb = tp->rx_std_buffers[desc_idx].skb;
4129 post_ptr = &tp->rx_std_ptr;
4130 rx_std_posted++;
4131 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4132 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
4133 mapping);
4134 skb = tp->rx_jumbo_buffers[desc_idx].skb;
4135 post_ptr = &tp->rx_jumbo_ptr;
4136 } else {
4138 goto next_pkt_nopost;
4139 }
4140
4141 work_mask |= opaque_key;
4142
4143 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4144 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4145 drop_it:
4146 tg3_recycle_rx(tp, opaque_key,
4147 desc_idx, *post_ptr);
4148 drop_it_no_recycle:
4149 /* Other statistics kept track of by card. */
4150 tp->net_stats.rx_dropped++;
4151 goto next_pkt;
4152 }
4153
4154 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
4155
4156 if (len > RX_COPY_THRESHOLD &&
4157 tp->rx_offset == 2) {
4158 /* rx_offset != 2 iff this is a 5701 card
4159 * running in PCI-X mode
4160 * [see tg3_get_invariants()] */
4161 int skb_size;
4162
4163 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
4164 desc_idx, *post_ptr);
4165 if (skb_size < 0)
4166 goto drop_it;
4167
4168 pci_unmap_single(tp->pdev, dma_addr,
4169 skb_size - tp->rx_offset,
4170 PCI_DMA_FROMDEVICE);
4171
4172 skb_put(skb, len);
4173 } else {
4174 struct sk_buff *copy_skb;
4175
4176 tg3_recycle_rx(tp, opaque_key,
4177 desc_idx, *post_ptr);
4178
4179 copy_skb = netdev_alloc_skb(tp->dev, len + 2);
4180 if (copy_skb == NULL)
4181 goto drop_it_no_recycle;
4182
4183 skb_reserve(copy_skb, 2);
4184 skb_put(copy_skb, len);
4185 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4186 skb_copy_from_linear_data(skb, copy_skb->data, len);
4187 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4188
4189 /* We'll reuse the original ring buffer. */
4190 skb = copy_skb;
4191 }
4192
4193 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4194 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4195 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4196 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4197 skb->ip_summed = CHECKSUM_UNNECESSARY;
4198 else
4199 skb->ip_summed = CHECKSUM_NONE;
4200
4201 skb->protocol = eth_type_trans(skb, tp->dev);
4202 #if TG3_VLAN_TAG_USED
4203 if (tp->vlgrp != NULL &&
4204 desc->type_flags & RXD_FLAG_VLAN) {
4205 tg3_vlan_rx(tp, skb,
4206 desc->err_vlan & RXD_VLAN_MASK);
4207 } else
4208 #endif
4209 netif_receive_skb(skb);
4210
4211 tp->dev->last_rx = jiffies;
4212 received++;
4213 budget--;
4214
4215 next_pkt:
4216 (*post_ptr)++;
4217
4218 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4219 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4220
4221 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4222 TG3_64BIT_REG_LOW, idx);
4223 work_mask &= ~RXD_OPAQUE_RING_STD;
4224 rx_std_posted = 0;
4225 }
4226 next_pkt_nopost:
4227 sw_idx++;
4228 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4229
4230 /* Refresh hw_idx to see if there is new work */
4231 if (sw_idx == hw_idx) {
4232 hw_idx = tp->hw_status->idx[0].rx_producer;
4233 rmb();
4234 }
4235 }
4236
4237 /* ACK the status ring. */
4238 tp->rx_rcb_ptr = sw_idx;
4239 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
4240
4241 /* Refill RX ring(s). */
4242 if (work_mask & RXD_OPAQUE_RING_STD) {
4243 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
4244 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4245 sw_idx);
4246 }
4247 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4248 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
4249 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4250 sw_idx);
4251 }
4252 mmiowb();
4253
4254 return received;
4255 }
4256
4257 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
4258 {
4259 struct tg3_hw_status *sblk = tp->hw_status;
4260
4261 /* handle link change and other phy events */
4262 if (!(tp->tg3_flags &
4263 (TG3_FLAG_USE_LINKCHG_REG |
4264 TG3_FLAG_POLL_SERDES))) {
4265 if (sblk->status & SD_STATUS_LINK_CHG) {
4266 sblk->status = SD_STATUS_UPDATED |
4267 (sblk->status & ~SD_STATUS_LINK_CHG);
4268 spin_lock(&tp->lock);
4269 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4270 tw32_f(MAC_STATUS,
4271 (MAC_STATUS_SYNC_CHANGED |
4272 MAC_STATUS_CFG_CHANGED |
4273 MAC_STATUS_MI_COMPLETION |
4274 MAC_STATUS_LNKSTATE_CHANGED));
4275 udelay(40);
4276 } else
4277 tg3_setup_phy(tp, 0);
4278 spin_unlock(&tp->lock);
4279 }
4280 }
4281
4282 /* run TX completion thread */
4283 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
4284 tg3_tx(tp);
4285 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4286 return work_done;
4287 }
4288
4289 /* run RX thread, within the bounds set by NAPI.
4290 * All RX "locking" is done by ensuring outside
4291 * code synchronizes with tg3->napi.poll()
4292 */
4293 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
4294 work_done += tg3_rx(tp, budget - work_done);
4295
4296 return work_done;
4297 }
4298
4299 static int tg3_poll(struct napi_struct *napi, int budget)
4300 {
4301 struct tg3 *tp = container_of(napi, struct tg3, napi);
4302 int work_done = 0;
4303 struct tg3_hw_status *sblk = tp->hw_status;
4304
4305 while (1) {
4306 work_done = tg3_poll_work(tp, work_done, budget);
4307
4308 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4309 goto tx_recovery;
4310
4311 if (unlikely(work_done >= budget))
4312 break;
4313
4314 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4315 /* tp->last_tag is used in tg3_restart_ints() below
4316 * to tell the hw how much work has been processed,
4317 * so we must read it before checking for more work.
4318 */
4319 tp->last_tag = sblk->status_tag;
4320 rmb();
4321 } else
4322 sblk->status &= ~SD_STATUS_UPDATED;
4323
4324 if (likely(!tg3_has_work(tp))) {
4325 netif_rx_complete(tp->dev, napi);
4326 tg3_restart_ints(tp);
4327 break;
4328 }
4329 }
4330
4331 return work_done;
4332
4333 tx_recovery:
4334 /* work_done is guaranteed to be less than budget. */
4335 netif_rx_complete(tp->dev, napi);
4336 schedule_work(&tp->reset_task);
4337 return work_done;
4338 }
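/* Skeleton of the NAPI contract implemented above (sketch only; the
 * helpers marked hypothetical are placeholders, the rest is the 2008-era
 * API used in this file): consume at most `budget` packets, stay
 * scheduled if the budget was exhausted, otherwise complete and let
 * interrupts be re-enabled.
 */
#if 0
static int napi_poll_skeleton(struct napi_struct *napi, int budget)
{
	int work_done = do_rx_work(budget);		/* hypothetical */

	if (work_done < budget) {
		netif_rx_complete(dev, napi);
		reenable_device_interrupts();		/* hypothetical */
	}
	return work_done;
}
#endif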
4339
4340 static void tg3_irq_quiesce(struct tg3 *tp)
4341 {
4342 BUG_ON(tp->irq_sync);
4343
4344 tp->irq_sync = 1;
4345 smp_mb();
4346
4347 synchronize_irq(tp->pdev->irq);
4348 }
4349
4350 static inline int tg3_irq_sync(struct tg3 *tp)
4351 {
4352 return tp->irq_sync;
4353 }
4354
4355 /* Fully shut down all tg3 driver activity elsewhere in the system.
4356 * If irq_sync is non-zero, the IRQ handler must be synchronized with
4357 * as well. Most of the time this is only necessary when shutting
4358 * down the device.
4359 */
4360 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4361 {
4362 spin_lock_bh(&tp->lock);
4363 if (irq_sync)
4364 tg3_irq_quiesce(tp);
4365 }
4366
4367 static inline void tg3_full_unlock(struct tg3 *tp)
4368 {
4369 spin_unlock_bh(&tp->lock);
4370 }
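/* Typical usage of the two helpers above (sketch; mirrors paths such as
 * tg3_change_mtu() later in this file): pass irq_sync = 1 when the ISR
 * must be fenced out as well. irq_sync is cleared again when interrupts
 * are re-enabled elsewhere in the driver.
 */
#if 0
tg3_full_lock(tp, 1);	/* take tp->lock and quiesce the IRQ handler */
/* ... reconfigure or halt the hardware ... */
tg3_full_unlock(tp);
#endif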
4371
4372 /* One-shot MSI handler - the chip automatically disables the
4373 * interrupt after sending the MSI, so the driver doesn't have to.
4374 */
4375 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4376 {
4377 struct net_device *dev = dev_id;
4378 struct tg3 *tp = netdev_priv(dev);
4379
4380 prefetch(tp->hw_status);
4381 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4382
4383 if (likely(!tg3_irq_sync(tp)))
4384 netif_rx_schedule(dev, &tp->napi);
4385
4386 return IRQ_HANDLED;
4387 }
4388
4389 /* MSI ISR - No need to check for interrupt sharing and no need to
4390 * flush status block and interrupt mailbox. PCI ordering rules
4391 * guarantee that MSI will arrive after the status block.
4392 */
4393 static irqreturn_t tg3_msi(int irq, void *dev_id)
4394 {
4395 struct net_device *dev = dev_id;
4396 struct tg3 *tp = netdev_priv(dev);
4397
4398 prefetch(tp->hw_status);
4399 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4400 /*
4401 * Writing any value to intr-mbox-0 clears PCI INTA# and
4402 * chip-internal interrupt pending events.
4403 * Writing non-zero to intr-mbox-0 additionally tells the
4404 * NIC to stop sending us irqs, engaging "in-intr-handler"
4405 * event coalescing.
4406 */
4407 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4408 if (likely(!tg3_irq_sync(tp)))
4409 netif_rx_schedule(dev, &tp->napi);
4410
4411 return IRQ_RETVAL(1);
4412 }
4413
4414 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4415 {
4416 struct net_device *dev = dev_id;
4417 struct tg3 *tp = netdev_priv(dev);
4418 struct tg3_hw_status *sblk = tp->hw_status;
4419 unsigned int handled = 1;
4420
4421 /* In INTx mode, it is possible for the interrupt to arrive at
4422 * the CPU before the status block posted prior to it is visible.
4423 * Reading the PCI State register will confirm whether the
4424 * interrupt is ours and will flush the status block.
4425 */
4426 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4427 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4428 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4429 handled = 0;
4430 goto out;
4431 }
4432 }
4433
4434 /*
4435 * Writing any value to intr-mbox-0 clears PCI INTA# and
4436 * chip-internal interrupt pending events.
4437 * Writing non-zero to intr-mbox-0 additionally tells the
4438 * NIC to stop sending us irqs, engaging "in-intr-handler"
4439 * event coalescing.
4440 *
4441 * Flush the mailbox to de-assert the IRQ immediately to prevent
4442 * spurious interrupts. The flush impacts performance but
4443 * excessive spurious interrupts can be worse in some cases.
4444 */
4445 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4446 if (tg3_irq_sync(tp))
4447 goto out;
4448 sblk->status &= ~SD_STATUS_UPDATED;
4449 if (likely(tg3_has_work(tp))) {
4450 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4451 netif_rx_schedule(dev, &tp->napi);
4452 } else {
4453 /* No work, shared interrupt perhaps? Re-enable
4454 * interrupts, and flush that PCI write
4455 */
4456 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4457 0x00000000);
4458 }
4459 out:
4460 return IRQ_RETVAL(handled);
4461 }
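/* The interrupt mailbox convention used above, condensed (sketch): any
 * write acks and de-asserts INTA#; a non-zero value additionally masks
 * further interrupts until the mailbox is re-armed with zero.
 */
#if 0
tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); /* ack+mask */
/* ... NAPI poll drains the status block ... */
tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000); /* unmask */
#endif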
4462
4463 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4464 {
4465 struct net_device *dev = dev_id;
4466 struct tg3 *tp = netdev_priv(dev);
4467 struct tg3_hw_status *sblk = tp->hw_status;
4468 unsigned int handled = 1;
4469
4470 /* In INTx mode, it is possible for the interrupt to arrive at
4471 * the CPU before the status block posted prior to it is visible.
4472 * Reading the PCI State register will confirm whether the
4473 * interrupt is ours and will flush the status block.
4474 */
4475 if (unlikely(sblk->status_tag == tp->last_tag)) {
4476 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4477 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4478 handled = 0;
4479 goto out;
4480 }
4481 }
4482
4483 /*
4484 * Writing any value to intr-mbox-0 clears PCI INTA# and
4485 * chip-internal interrupt pending events.
4486 * Writing non-zero to intr-mbox-0 additionally tells the
4487 * NIC to stop sending us irqs, engaging "in-intr-handler"
4488 * event coalescing.
4489 *
4490 * Flush the mailbox to de-assert the IRQ immediately to prevent
4491 * spurious interrupts. The flush impacts performance but
4492 * excessive spurious interrupts can be worse in some cases.
4493 */
4494 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4495 if (tg3_irq_sync(tp))
4496 goto out;
4497 if (netif_rx_schedule_prep(dev, &tp->napi)) {
4498 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4499 /* Update last_tag to mark that this status has been
4500 * seen. Because interrupt may be shared, we may be
4501 * racing with tg3_poll(), so only update last_tag
4502 * if tg3_poll() is not scheduled.
4503 */
4504 tp->last_tag = sblk->status_tag;
4505 __netif_rx_schedule(dev, &tp->napi);
4506 }
4507 out:
4508 return IRQ_RETVAL(handled);
4509 }
4510
4511 /* ISR for interrupt test */
4512 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4513 {
4514 struct net_device *dev = dev_id;
4515 struct tg3 *tp = netdev_priv(dev);
4516 struct tg3_hw_status *sblk = tp->hw_status;
4517
4518 if ((sblk->status & SD_STATUS_UPDATED) ||
4519 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4520 tg3_disable_ints(tp);
4521 return IRQ_RETVAL(1);
4522 }
4523 return IRQ_RETVAL(0);
4524 }
4525
4526 static int tg3_init_hw(struct tg3 *, int);
4527 static int tg3_halt(struct tg3 *, int, int);
4528
4529 /* Restart hardware after configuration changes, self-test, etc.
4530 * Invoked with tp->lock held.
4531 */
4532 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4533 __releases(tp->lock)
4534 __acquires(tp->lock)
4535 {
4536 int err;
4537
4538 err = tg3_init_hw(tp, reset_phy);
4539 if (err) {
4540 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4541 "aborting.\n", tp->dev->name);
4542 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4543 tg3_full_unlock(tp);
4544 del_timer_sync(&tp->timer);
4545 tp->irq_sync = 0;
4546 napi_enable(&tp->napi);
4547 dev_close(tp->dev);
4548 tg3_full_lock(tp, 0);
4549 }
4550 return err;
4551 }
4552
4553 #ifdef CONFIG_NET_POLL_CONTROLLER
4554 static void tg3_poll_controller(struct net_device *dev)
4555 {
4556 struct tg3 *tp = netdev_priv(dev);
4557
4558 tg3_interrupt(tp->pdev->irq, dev);
4559 }
4560 #endif
4561
4562 static void tg3_reset_task(struct work_struct *work)
4563 {
4564 struct tg3 *tp = container_of(work, struct tg3, reset_task);
4565 int err;
4566 unsigned int restart_timer;
4567
4568 tg3_full_lock(tp, 0);
4569
4570 if (!netif_running(tp->dev)) {
4571 tg3_full_unlock(tp);
4572 return;
4573 }
4574
4575 tg3_full_unlock(tp);
4576
4577 tg3_phy_stop(tp);
4578
4579 tg3_netif_stop(tp);
4580
4581 tg3_full_lock(tp, 1);
4582
4583 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4584 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4585
4586 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4587 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4588 tp->write32_rx_mbox = tg3_write_flush_reg32;
4589 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4590 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4591 }
4592
4593 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4594 err = tg3_init_hw(tp, 1);
4595 if (err)
4596 goto out;
4597
4598 tg3_netif_start(tp);
4599
4600 if (restart_timer)
4601 mod_timer(&tp->timer, jiffies + 1);
4602
4603 out:
4604 tg3_full_unlock(tp);
4605
4606 if (!err)
4607 tg3_phy_start(tp);
4608 }
4609
4610 static void tg3_dump_short_state(struct tg3 *tp)
4611 {
4612 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4613 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4614 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4615 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4616 }
4617
4618 static void tg3_tx_timeout(struct net_device *dev)
4619 {
4620 struct tg3 *tp = netdev_priv(dev);
4621
4622 if (netif_msg_tx_err(tp)) {
4623 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4624 dev->name);
4625 tg3_dump_short_state(tp);
4626 }
4627
4628 schedule_work(&tp->reset_task);
4629 }
4630
4631 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4632 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4633 {
4634 u32 base = (u32) mapping & 0xffffffff;
4635
4636 return ((base > 0xffffdcc0) &&
4637 (base + len + 8 < base));
4638 }
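/* Worked example for the test above (sketch, not driver code): a buffer
 * crosses a 4GB boundary exactly when the upper 32 address bits of its
 * first and last bytes differ; the driver's form checks the equivalent
 * wrap on the low 32 bits with an extra 8-byte pad. The helper name is
 * hypothetical.
 */
#if 0
static int crosses_4g_boundary(u64 addr, int len)
{
	return (addr >> 32) != ((addr + len - 1) >> 32);
}
/* e.g. addr = 0x00000000fffffff0, len = 32 -> crosses (wraps past 4GB) */
#endif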
4639
4640 /* Test for DMA addresses > 40-bit */
4641 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4642 int len)
4643 {
4644 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4645 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4646 return (((u64) mapping + len) > DMA_40BIT_MASK);
4647 return 0;
4648 #else
4649 return 0;
4650 #endif
4651 }
4652
4653 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4654
4655 /* Work around 4GB and 40-bit hardware DMA bugs. */
4656 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4657 u32 last_plus_one, u32 *start,
4658 u32 base_flags, u32 mss)
4659 {
4660 struct sk_buff *new_skb;
4661 dma_addr_t new_addr = 0;
4662 u32 entry = *start;
4663 int i, ret = 0;
4664
4665 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4666 new_skb = skb_copy(skb, GFP_ATOMIC);
4667 else {
4668 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4669
4670 new_skb = skb_copy_expand(skb,
4671 skb_headroom(skb) + more_headroom,
4672 skb_tailroom(skb), GFP_ATOMIC);
4673 }
4674
4675 if (!new_skb) {
4676 ret = -1;
4677 } else {
4678 /* New SKB is guaranteed to be linear. */
4679 entry = *start;
4680 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
4681 new_addr = skb_shinfo(new_skb)->dma_maps[0];
4682
4683 /* Make sure new skb does not cross any 4G boundaries.
4684 * Drop the packet if it does.
4685 */
4686 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
4687 if (!ret)
4688 skb_dma_unmap(&tp->pdev->dev, new_skb,
4689 DMA_TO_DEVICE);
4690 ret = -1;
4691 dev_kfree_skb(new_skb);
4692 new_skb = NULL;
4693 } else {
4694 tg3_set_txd(tp, entry, new_addr, new_skb->len,
4695 base_flags, 1 | (mss << 1));
4696 *start = NEXT_TX(entry);
4697 }
4698 }
4699
4700 /* Now clean up the sw ring entries. */
4701 i = 0;
4702 while (entry != last_plus_one) {
4703 if (i == 0) {
4704 tp->tx_buffers[entry].skb = new_skb;
4705 } else {
4706 tp->tx_buffers[entry].skb = NULL;
4707 }
4708 entry = NEXT_TX(entry);
4709 i++;
4710 }
4711
4712 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4713 dev_kfree_skb(skb);
4714
4715 return ret;
4716 }
4717
4718 static void tg3_set_txd(struct tg3 *tp, int entry,
4719 dma_addr_t mapping, int len, u32 flags,
4720 u32 mss_and_is_end)
4721 {
4722 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4723 int is_end = (mss_and_is_end & 0x1);
4724 u32 mss = (mss_and_is_end >> 1);
4725 u32 vlan_tag = 0;
4726
4727 if (is_end)
4728 flags |= TXD_FLAG_END;
4729 if (flags & TXD_FLAG_VLAN) {
4730 vlan_tag = flags >> 16;
4731 flags &= 0xffff;
4732 }
4733 vlan_tag |= (mss << TXD_MSS_SHIFT);
4734
4735 txd->addr_hi = ((u64) mapping >> 32);
4736 txd->addr_lo = ((u64) mapping & 0xffffffff);
4737 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4738 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4739 }
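/* How callers pack mss_and_is_end for the function above (sketch): bit 0
 * flags the final fragment and the MSS occupies the bits above it, so a
 * single u32 carries both values.
 */
#if 0
tg3_set_txd(tp, entry, mapping, len, base_flags,
	    (i == last) | (mss << 1));
/* and inside tg3_set_txd(): is_end = v & 0x1; mss = v >> 1; */
#endif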
4740
4741 /* hard_start_xmit for devices that don't have any bugs and
4742 * support TG3_FLG2_HW_TSO_2 only.
4743 */
4744 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4745 {
4746 struct tg3 *tp = netdev_priv(dev);
4747 u32 len, entry, base_flags, mss;
4748 struct skb_shared_info *sp;
4749 dma_addr_t mapping;
4750
4751 len = skb_headlen(skb);
4752
4753 /* We are running in BH disabled context with netif_tx_lock
4754 * and TX reclaim runs via tp->napi.poll inside of a software
4755 * interrupt. Furthermore, IRQ processing runs lockless so we have
4756 * no IRQ context deadlocks to worry about either. Rejoice!
4757 */
4758 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4759 if (!netif_queue_stopped(dev)) {
4760 netif_stop_queue(dev);
4761
4762 /* This is a hard error, log it. */
4763 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4764 "queue awake!\n", dev->name);
4765 }
4766 return NETDEV_TX_BUSY;
4767 }
4768
4769 entry = tp->tx_prod;
4770 base_flags = 0;
4771 mss = 0;
4772 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4773 int tcp_opt_len, ip_tcp_len;
4774
4775 if (skb_header_cloned(skb) &&
4776 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4777 dev_kfree_skb(skb);
4778 goto out_unlock;
4779 }
4780
4781 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4782 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4783 else {
4784 struct iphdr *iph = ip_hdr(skb);
4785
4786 tcp_opt_len = tcp_optlen(skb);
4787 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4788
4789 iph->check = 0;
4790 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4791 mss |= (ip_tcp_len + tcp_opt_len) << 9;
4792 }
4793
4794 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4795 TXD_FLAG_CPU_POST_DMA);
4796
4797 tcp_hdr(skb)->check = 0;
4798 } else if (skb->ip_summed == CHECKSUM_PARTIAL)
4801 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4802 #if TG3_VLAN_TAG_USED
4803 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4804 base_flags |= (TXD_FLAG_VLAN |
4805 (vlan_tx_tag_get(skb) << 16));
4806 #endif
4807
4808 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4809 dev_kfree_skb(skb);
4810 goto out_unlock;
4811 }
4812
4813 sp = skb_shinfo(skb);
4814
4815 mapping = sp->dma_maps[0];
4816
4817 tp->tx_buffers[entry].skb = skb;
4818
4819 tg3_set_txd(tp, entry, mapping, len, base_flags,
4820 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4821
4822 entry = NEXT_TX(entry);
4823
4824 /* Now loop through additional data fragments, and queue them. */
4825 if (skb_shinfo(skb)->nr_frags > 0) {
4826 unsigned int i, last;
4827
4828 last = skb_shinfo(skb)->nr_frags - 1;
4829 for (i = 0; i <= last; i++) {
4830 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4831
4832 len = frag->size;
4833 mapping = sp->dma_maps[i + 1];
4834 tp->tx_buffers[entry].skb = NULL;
4835
4836 tg3_set_txd(tp, entry, mapping, len,
4837 base_flags, (i == last) | (mss << 1));
4838
4839 entry = NEXT_TX(entry);
4840 }
4841 }
4842
4843 /* Packets are ready, update Tx producer idx local and on card. */
4844 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4845
4846 tp->tx_prod = entry;
4847 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4848 netif_stop_queue(dev);
4849 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4850 netif_wake_queue(tp->dev);
4851 }
4852
4853 out_unlock:
4854 mmiowb();
4855
4856 dev->trans_start = jiffies;
4857
4858 return NETDEV_TX_OK;
4859 }
4860
4861 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4862
4863 /* Use GSO to work around a rare TSO bug that may be triggered when the
4864 * TSO header is greater than 80 bytes.
4865 */
4866 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4867 {
4868 struct sk_buff *segs, *nskb;
4869
4870 /* Estimate the number of fragments in the worst case */
4871 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4872 netif_stop_queue(tp->dev);
4873 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4874 return NETDEV_TX_BUSY;
4875
4876 netif_wake_queue(tp->dev);
4877 }
4878
4879 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4880 if (IS_ERR(segs))
4881 goto tg3_tso_bug_end;
4882
4883 do {
4884 nskb = segs;
4885 segs = segs->next;
4886 nskb->next = NULL;
4887 tg3_start_xmit_dma_bug(nskb, tp->dev);
4888 } while (segs);
4889
4890 tg3_tso_bug_end:
4891 dev_kfree_skb(skb);
4892
4893 return NETDEV_TX_OK;
4894 }
4895
4896 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4897 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4898 */
4899 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4900 {
4901 struct tg3 *tp = netdev_priv(dev);
4902 u32 len, entry, base_flags, mss;
4903 struct skb_shared_info *sp;
4904 int would_hit_hwbug;
4905 dma_addr_t mapping;
4906
4907 len = skb_headlen(skb);
4908
4909 /* We are running in BH disabled context with netif_tx_lock
4910 * and TX reclaim runs via tp->napi.poll inside of a software
4911 * interrupt. Furthermore, IRQ processing runs lockless so we have
4912 * no IRQ context deadlocks to worry about either. Rejoice!
4913 */
4914 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4915 if (!netif_queue_stopped(dev)) {
4916 netif_stop_queue(dev);
4917
4918 /* This is a hard error, log it. */
4919 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4920 "queue awake!\n", dev->name);
4921 }
4922 return NETDEV_TX_BUSY;
4923 }
4924
4925 entry = tp->tx_prod;
4926 base_flags = 0;
4927 if (skb->ip_summed == CHECKSUM_PARTIAL)
4928 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4929 mss = 0;
4930 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4931 struct iphdr *iph;
4932 int tcp_opt_len, ip_tcp_len, hdr_len;
4933
4934 if (skb_header_cloned(skb) &&
4935 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4936 dev_kfree_skb(skb);
4937 goto out_unlock;
4938 }
4939
4940 tcp_opt_len = tcp_optlen(skb);
4941 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4942
4943 hdr_len = ip_tcp_len + tcp_opt_len;
4944 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4945 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
4946 return (tg3_tso_bug(tp, skb));
4947
4948 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4949 TXD_FLAG_CPU_POST_DMA);
4950
4951 iph = ip_hdr(skb);
4952 iph->check = 0;
4953 iph->tot_len = htons(mss + hdr_len);
4954 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4955 tcp_hdr(skb)->check = 0;
4956 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4957 } else
4958 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4959 iph->daddr, 0,
4960 IPPROTO_TCP,
4961 0);
4962
4963 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4964 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4965 if (tcp_opt_len || iph->ihl > 5) {
4966 int tsflags;
4967
4968 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4969 mss |= (tsflags << 11);
4970 }
4971 } else {
4972 if (tcp_opt_len || iph->ihl > 5) {
4973 int tsflags;
4974
4975 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4976 base_flags |= tsflags << 12;
4977 }
4978 }
4979 }
4980 #if TG3_VLAN_TAG_USED
4981 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4982 base_flags |= (TXD_FLAG_VLAN |
4983 (vlan_tx_tag_get(skb) << 16));
4984 #endif
4985
4986 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4987 dev_kfree_skb(skb);
4988 goto out_unlock;
4989 }
4990
4991 sp = skb_shinfo(skb);
4992
4993 mapping = sp->dma_maps[0];
4994
4995 tp->tx_buffers[entry].skb = skb;
4996
4997 would_hit_hwbug = 0;
4998
4999 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5000 would_hit_hwbug = 1;
5001 else if (tg3_4g_overflow_test(mapping, len))
5002 would_hit_hwbug = 1;
5003
5004 tg3_set_txd(tp, entry, mapping, len, base_flags,
5005 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5006
5007 entry = NEXT_TX(entry);
5008
5009 /* Now loop through additional data fragments, and queue them. */
5010 if (skb_shinfo(skb)->nr_frags > 0) {
5011 unsigned int i, last;
5012
5013 last = skb_shinfo(skb)->nr_frags - 1;
5014 for (i = 0; i <= last; i++) {
5015 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5016
5017 len = frag->size;
5018 mapping = sp->dma_maps[i + 1];
5019
5020 tp->tx_buffers[entry].skb = NULL;
5021
5022 if (tg3_4g_overflow_test(mapping, len))
5023 would_hit_hwbug = 1;
5024
5025 if (tg3_40bit_overflow_test(tp, mapping, len))
5026 would_hit_hwbug = 1;
5027
5028 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5029 tg3_set_txd(tp, entry, mapping, len,
5030 base_flags, (i == last)|(mss << 1));
5031 else
5032 tg3_set_txd(tp, entry, mapping, len,
5033 base_flags, (i == last));
5034
5035 entry = NEXT_TX(entry);
5036 }
5037 }
5038
5039 if (would_hit_hwbug) {
5040 u32 last_plus_one = entry;
5041 u32 start;
5042
5043 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5044 start &= (TG3_TX_RING_SIZE - 1);
5045
5046 /* If the workaround fails due to memory/mapping
5047 * failure, silently drop this packet.
5048 */
5049 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
5050 &start, base_flags, mss))
5051 goto out_unlock;
5052
5053 entry = start;
5054 }
5055
5056 /* Packets are ready, update Tx producer idx local and on card. */
5057 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5058
5059 tp->tx_prod = entry;
5060 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
5061 netif_stop_queue(dev);
5062 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
5063 netif_wake_queue(tp->dev);
5064 }
5065
5066 out_unlock:
5067 mmiowb();
5068
5069 dev->trans_start = jiffies;
5070
5071 return NETDEV_TX_OK;
5072 }
5073
5074 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5075 int new_mtu)
5076 {
5077 dev->mtu = new_mtu;
5078
5079 if (new_mtu > ETH_DATA_LEN) {
5080 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5081 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5082 ethtool_op_set_tso(dev, 0);
5083 } else
5085 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5086 } else {
5087 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5088 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5089 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5090 }
5091 }
5092
5093 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5094 {
5095 struct tg3 *tp = netdev_priv(dev);
5096 int err;
5097
5098 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5099 return -EINVAL;
5100
5101 if (!netif_running(dev)) {
5102 /* We'll just catch it later when the
5103 * device is brought up.
5104 */
5105 tg3_set_mtu(dev, tp, new_mtu);
5106 return 0;
5107 }
5108
5109 tg3_phy_stop(tp);
5110
5111 tg3_netif_stop(tp);
5112
5113 tg3_full_lock(tp, 1);
5114
5115 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5116
5117 tg3_set_mtu(dev, tp, new_mtu);
5118
5119 err = tg3_restart_hw(tp, 0);
5120
5121 if (!err)
5122 tg3_netif_start(tp);
5123
5124 tg3_full_unlock(tp);
5125
5126 if (!err)
5127 tg3_phy_start(tp);
5128
5129 return err;
5130 }
5131
5132 /* Free up pending packets in all rx/tx rings.
5133 *
5134 * The chip has been shut down and the driver detached from
5135 * the networking core, so no interrupts or new tx packets will
5136 * end up in the driver. tp->{tx,}lock is not held and we are not
5137 * in an interrupt context and thus may sleep.
5138 */
5139 static void tg3_free_rings(struct tg3 *tp)
5140 {
5141 struct ring_info *rxp;
5142 int i;
5143
5144 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5145 rxp = &tp->rx_std_buffers[i];
5146
5147 if (rxp->skb == NULL)
5148 continue;
5149 pci_unmap_single(tp->pdev,
5150 pci_unmap_addr(rxp, mapping),
5151 tp->rx_pkt_buf_sz - tp->rx_offset,
5152 PCI_DMA_FROMDEVICE);
5153 dev_kfree_skb_any(rxp->skb);
5154 rxp->skb = NULL;
5155 }
5156
5157 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5158 rxp = &tp->rx_jumbo_buffers[i];
5159
5160 if (rxp->skb == NULL)
5161 continue;
5162 pci_unmap_single(tp->pdev,
5163 pci_unmap_addr(rxp, mapping),
5164 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5165 PCI_DMA_FROMDEVICE);
5166 dev_kfree_skb_any(rxp->skb);
5167 rxp->skb = NULL;
5168 }
5169
5170 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5171 struct tx_ring_info *txp;
5172 struct sk_buff *skb;
5173
5174 txp = &tp->tx_buffers[i];
5175 skb = txp->skb;
5176
5177 if (skb == NULL) {
5178 i++;
5179 continue;
5180 }
5181
5182 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5183
5184 txp->skb = NULL;
5185
5186 i += skb_shinfo(skb)->nr_frags + 1;
5187
5188 dev_kfree_skb_any(skb);
5189 }
5190 }
5191
5192 /* Initialize tx/rx rings for packet processing.
5193 *
5194 * The chip has been shut down and the driver detached from
5195 * the networking core, so no interrupts or new tx packets will
5196 * end up in the driver. tp->{tx,}lock are held and thus
5197 * we may not sleep.
5198 */
5199 static int tg3_init_rings(struct tg3 *tp)
5200 {
5201 u32 i;
5202
5203 /* Free up all the SKBs. */
5204 tg3_free_rings(tp);
5205
5206 /* Zero out all descriptors. */
5207 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
5208 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5209 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5210 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5211
5212 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
5213 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5214 (tp->dev->mtu > ETH_DATA_LEN))
5215 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
5216
5217 /* Initialize invariants of the rings; we only set this
5218 * stuff once. This works because the card does not
5219 * write into the rx buffer posting rings.
5220 */
5221 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5222 struct tg3_rx_buffer_desc *rxd;
5223
5224 rxd = &tp->rx_std[i];
5225 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
5226 << RXD_LEN_SHIFT;
5227 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5228 rxd->opaque = (RXD_OPAQUE_RING_STD |
5229 (i << RXD_OPAQUE_INDEX_SHIFT));
5230 }
5231
5232 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5233 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5234 struct tg3_rx_buffer_desc *rxd;
5235
5236 rxd = &tp->rx_jumbo[i];
5237 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5238 << RXD_LEN_SHIFT;
5239 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5240 RXD_FLAG_JUMBO;
5241 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5242 (i << RXD_OPAQUE_INDEX_SHIFT));
5243 }
5244 }
5245
5246 /* Now allocate fresh SKBs for each rx ring. */
5247 for (i = 0; i < tp->rx_pending; i++) {
5248 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5249 printk(KERN_WARNING PFX
5250 "%s: Using a smaller RX standard ring, "
5251 "only %d out of %d buffers were allocated "
5252 "successfully.\n",
5253 tp->dev->name, i, tp->rx_pending);
5254 if (i == 0)
5255 return -ENOMEM;
5256 tp->rx_pending = i;
5257 break;
5258 }
5259 }
5260
5261 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5262 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5263 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
5264 -1, i) < 0) {
5265 printk(KERN_WARNING PFX
5266 "%s: Using a smaller RX jumbo ring, "
5267 "only %d out of %d buffers were "
5268 "allocated successfully.\n",
5269 tp->dev->name, i, tp->rx_jumbo_pending);
5270 if (i == 0) {
5271 tg3_free_rings(tp);
5272 return -ENOMEM;
5273 }
5274 tp->rx_jumbo_pending = i;
5275 break;
5276 }
5277 }
5278 }
5279 return 0;
5280 }
5281
5282 /*
5283 * Must not be invoked with interrupt sources disabled and
5284 * the hardware shut down.
5285 */
5286 static void tg3_free_consistent(struct tg3 *tp)
5287 {
5288 kfree(tp->rx_std_buffers);
5289 tp->rx_std_buffers = NULL;
5290 if (tp->rx_std) {
5291 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5292 tp->rx_std, tp->rx_std_mapping);
5293 tp->rx_std = NULL;
5294 }
5295 if (tp->rx_jumbo) {
5296 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5297 tp->rx_jumbo, tp->rx_jumbo_mapping);
5298 tp->rx_jumbo = NULL;
5299 }
5300 if (tp->rx_rcb) {
5301 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5302 tp->rx_rcb, tp->rx_rcb_mapping);
5303 tp->rx_rcb = NULL;
5304 }
5305 if (tp->tx_ring) {
5306 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5307 tp->tx_ring, tp->tx_desc_mapping);
5308 tp->tx_ring = NULL;
5309 }
5310 if (tp->hw_status) {
5311 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5312 tp->hw_status, tp->status_mapping);
5313 tp->hw_status = NULL;
5314 }
5315 if (tp->hw_stats) {
5316 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5317 tp->hw_stats, tp->stats_mapping);
5318 tp->hw_stats = NULL;
5319 }
5320 }
5321
5322 /*
5323 * Must not be invoked with interrupt sources disabled and
5324 * the hardware shut down. Can sleep.
5325 */
5326 static int tg3_alloc_consistent(struct tg3 *tp)
5327 {
5328 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
5329 (TG3_RX_RING_SIZE +
5330 TG3_RX_JUMBO_RING_SIZE)) +
5331 (sizeof(struct tx_ring_info) *
5332 TG3_TX_RING_SIZE),
5333 GFP_KERNEL);
5334 if (!tp->rx_std_buffers)
5335 return -ENOMEM;
5336
5337 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5338 tp->tx_buffers = (struct tx_ring_info *)
5339 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5340
5341 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5342 &tp->rx_std_mapping);
5343 if (!tp->rx_std)
5344 goto err_out;
5345
5346 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5347 &tp->rx_jumbo_mapping);
5348
5349 if (!tp->rx_jumbo)
5350 goto err_out;
5351
5352 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5353 &tp->rx_rcb_mapping);
5354 if (!tp->rx_rcb)
5355 goto err_out;
5356
5357 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5358 &tp->tx_desc_mapping);
5359 if (!tp->tx_ring)
5360 goto err_out;
5361
5362 tp->hw_status = pci_alloc_consistent(tp->pdev,
5363 TG3_HW_STATUS_SIZE,
5364 &tp->status_mapping);
5365 if (!tp->hw_status)
5366 goto err_out;
5367
5368 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5369 sizeof(struct tg3_hw_stats),
5370 &tp->stats_mapping);
5371 if (!tp->hw_stats)
5372 goto err_out;
5373
5374 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5375 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5376
5377 return 0;
5378
5379 err_out:
5380 tg3_free_consistent(tp);
5381 return -ENOMEM;
5382 }
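/* The three bookkeeping arrays above come from a single kzalloc() that is
 * carved into regions. A standalone sketch of the same layout (the names
 * n_std, n_jumbo and n_tx are hypothetical placeholders for the ring
 * sizes used above):
 */
#if 0
void *blk = kzalloc(n_std * sizeof(struct ring_info) +
		    n_jumbo * sizeof(struct ring_info) +
		    n_tx * sizeof(struct tx_ring_info), GFP_KERNEL);
struct ring_info *std_bufs = blk;
struct ring_info *jumbo_bufs = std_bufs + n_std;
struct tx_ring_info *tx_bufs = (struct tx_ring_info *)(jumbo_bufs + n_jumbo);
#endif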
5383
5384 #define MAX_WAIT_CNT 1000
5385
5386 /* To stop a block, clear the enable bit and poll until it
5387 * clears. tp->lock is held.
5388 */
5389 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5390 {
5391 unsigned int i;
5392 u32 val;
5393
5394 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5395 switch (ofs) {
5396 case RCVLSC_MODE:
5397 case DMAC_MODE:
5398 case MBFREE_MODE:
5399 case BUFMGR_MODE:
5400 case MEMARB_MODE:
5401 /* We can't enable/disable these bits of the
5402 * 5705/5750, just say success.
5403 */
5404 return 0;
5405
5406 default:
5407 break;
5408 }
5409 }
5410
5411 val = tr32(ofs);
5412 val &= ~enable_bit;
5413 tw32_f(ofs, val);
5414
5415 for (i = 0; i < MAX_WAIT_CNT; i++) {
5416 udelay(100);
5417 val = tr32(ofs);
5418 if ((val & enable_bit) == 0)
5419 break;
5420 }
5421
5422 if (i == MAX_WAIT_CNT && !silent) {
5423 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5424 "ofs=%lx enable_bit=%x\n",
5425 ofs, enable_bit);
5426 return -ENODEV;
5427 }
5428
5429 return 0;
5430 }
5431
5432 /* tp->lock is held. */
5433 static int tg3_abort_hw(struct tg3 *tp, int silent)
5434 {
5435 int i, err;
5436
5437 tg3_disable_ints(tp);
5438
5439 tp->rx_mode &= ~RX_MODE_ENABLE;
5440 tw32_f(MAC_RX_MODE, tp->rx_mode);
5441 udelay(10);
5442
5443 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5444 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5445 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5446 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5447 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5448 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
5449
5450 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5451 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5452 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5453 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5454 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5455 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5456 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
5457
5458 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5459 tw32_f(MAC_MODE, tp->mac_mode);
5460 udelay(40);
5461
5462 tp->tx_mode &= ~TX_MODE_ENABLE;
5463 tw32_f(MAC_TX_MODE, tp->tx_mode);
5464
5465 for (i = 0; i < MAX_WAIT_CNT; i++) {
5466 udelay(100);
5467 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5468 break;
5469 }
5470 if (i >= MAX_WAIT_CNT) {
5471 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5472 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5473 tp->dev->name, tr32(MAC_TX_MODE));
5474 err |= -ENODEV;
5475 }
5476
5477 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
5478 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5479 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
5480
5481 tw32(FTQ_RESET, 0xffffffff);
5482 tw32(FTQ_RESET, 0x00000000);
5483
5484 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5485 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
5486
5487 if (tp->hw_status)
5488 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5489 if (tp->hw_stats)
5490 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5491
5492 return err;
5493 }
5494
5495 /* tp->lock is held. */
5496 static int tg3_nvram_lock(struct tg3 *tp)
5497 {
5498 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5499 int i;
5500
5501 if (tp->nvram_lock_cnt == 0) {
5502 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5503 for (i = 0; i < 8000; i++) {
5504 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5505 break;
5506 udelay(20);
5507 }
5508 if (i == 8000) {
5509 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5510 return -ENODEV;
5511 }
5512 }
5513 tp->nvram_lock_cnt++;
5514 }
5515 return 0;
5516 }
5517
5518 /* tp->lock is held. */
5519 static void tg3_nvram_unlock(struct tg3 *tp)
5520 {
5521 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5522 if (tp->nvram_lock_cnt > 0)
5523 tp->nvram_lock_cnt--;
5524 if (tp->nvram_lock_cnt == 0)
5525 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5526 }
5527 }
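/* Usage pattern for the counted hardware semaphore above (sketch): calls
 * may nest, and only the outermost lock/unlock pair actually touches the
 * NVRAM_SWARB arbitration register.
 */
#if 0
if (tg3_nvram_lock(tp) == 0) {
	/* ... access NVRAM while holding the arbitration grant ... */
	tg3_nvram_unlock(tp);
}
#endif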
5528
5529 /* tp->lock is held. */
5530 static void tg3_enable_nvram_access(struct tg3 *tp)
5531 {
5532 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5533 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5534 u32 nvaccess = tr32(NVRAM_ACCESS);
5535
5536 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5537 }
5538 }
5539
5540 /* tp->lock is held. */
5541 static void tg3_disable_nvram_access(struct tg3 *tp)
5542 {
5543 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5544 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5545 u32 nvaccess = tr32(NVRAM_ACCESS);
5546
5547 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5548 }
5549 }
5550
5551 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5552 {
5553 int i;
5554 u32 apedata;
5555
5556 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5557 if (apedata != APE_SEG_SIG_MAGIC)
5558 return;
5559
5560 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
5561 if (!(apedata & APE_FW_STATUS_READY))
5562 return;
5563
5564 /* Wait for up to 1 millisecond for APE to service previous event. */
5565 for (i = 0; i < 10; i++) {
5566 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5567 return;
5568
5569 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5570
5571 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5572 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5573 event | APE_EVENT_STATUS_EVENT_PENDING);
5574
5575 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5576
5577 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5578 break;
5579
5580 udelay(100);
5581 }
5582
5583 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5584 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5585 }
5586
5587 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5588 {
5589 u32 event;
5590 u32 apedata;
5591
5592 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5593 return;
5594
5595 switch (kind) {
5596 case RESET_KIND_INIT:
5597 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5598 APE_HOST_SEG_SIG_MAGIC);
5599 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5600 APE_HOST_SEG_LEN_MAGIC);
5601 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5602 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5603 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5604 APE_HOST_DRIVER_ID_MAGIC);
5605 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5606 APE_HOST_BEHAV_NO_PHYLOCK);
5607
5608 event = APE_EVENT_STATUS_STATE_START;
5609 break;
5610 case RESET_KIND_SHUTDOWN:
5611 /* With the interface we are currently using,
5612 * APE does not track driver state. Wiping
5613 * out the HOST SEGMENT SIGNATURE forces
5614 * the APE to assume OS absent status.
5615 */
5616 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
5617
5618 event = APE_EVENT_STATUS_STATE_UNLOAD;
5619 break;
5620 case RESET_KIND_SUSPEND:
5621 event = APE_EVENT_STATUS_STATE_SUSPEND;
5622 break;
5623 default:
5624 return;
5625 }
5626
5627 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5628
5629 tg3_ape_send_event(tp, event);
5630 }
5631
5632 /* tp->lock is held. */
5633 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5634 {
5635 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5636 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5637
5638 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5639 switch (kind) {
5640 case RESET_KIND_INIT:
5641 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5642 DRV_STATE_START);
5643 break;
5644
5645 case RESET_KIND_SHUTDOWN:
5646 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5647 DRV_STATE_UNLOAD);
5648 break;
5649
5650 case RESET_KIND_SUSPEND:
5651 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5652 DRV_STATE_SUSPEND);
5653 break;
5654
5655 default:
5656 break;
5657 }
5658 }
5659
5660 if (kind == RESET_KIND_INIT ||
5661 kind == RESET_KIND_SUSPEND)
5662 tg3_ape_driver_state_change(tp, kind);
5663 }
5664
5665 /* tp->lock is held. */
5666 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5667 {
5668 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5669 switch (kind) {
5670 case RESET_KIND_INIT:
5671 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5672 DRV_STATE_START_DONE);
5673 break;
5674
5675 case RESET_KIND_SHUTDOWN:
5676 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5677 DRV_STATE_UNLOAD_DONE);
5678 break;
5679
5680 default:
5681 break;
5682 }
5683 }
5684
5685 if (kind == RESET_KIND_SHUTDOWN)
5686 tg3_ape_driver_state_change(tp, kind);
5687 }
5688
5689 /* tp->lock is held. */
5690 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5691 {
5692 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5693 switch (kind) {
5694 case RESET_KIND_INIT:
5695 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5696 DRV_STATE_START);
5697 break;
5698
5699 case RESET_KIND_SHUTDOWN:
5700 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5701 DRV_STATE_UNLOAD);
5702 break;
5703
5704 case RESET_KIND_SUSPEND:
5705 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5706 DRV_STATE_SUSPEND);
5707 break;
5708
5709 default:
5710 break;
5711 }
5712 }
5713 }
5714
5715 static int tg3_poll_fw(struct tg3 *tp)
5716 {
5717 int i;
5718 u32 val;
5719
5720 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5721 /* Wait up to 20ms for init done. */
5722 for (i = 0; i < 200; i++) {
5723 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5724 return 0;
5725 udelay(100);
5726 }
5727 return -ENODEV;
5728 }
5729
5730 /* Wait for firmware initialization to complete. */
5731 for (i = 0; i < 100000; i++) {
5732 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5733 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5734 break;
5735 udelay(10);
5736 }
5737
5738 /* Chip might not be fitted with firmware. Some Sun onboard
5739 * parts are configured like that. So don't signal the timeout
5740 * of the above loop as an error, but do report the lack of
5741 * running firmware once.
5742 */
5743 if (i >= 100000 &&
5744 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5745 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5746
5747 printk(KERN_INFO PFX "%s: No firmware running.\n",
5748 tp->dev->name);
5749 }
5750
5751 return 0;
5752 }
5753
5754 /* Save PCI command register before chip reset */
5755 static void tg3_save_pci_state(struct tg3 *tp)
5756 {
5757 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5758 }
5759
5760 /* Restore PCI state after chip reset */
5761 static void tg3_restore_pci_state(struct tg3 *tp)
5762 {
5763 u32 val;
5764
5765 /* Re-enable indirect register accesses. */
5766 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5767 tp->misc_host_ctrl);
5768
5769 /* Set MAX PCI retry to zero. */
5770 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5771 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5772 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5773 val |= PCISTATE_RETRY_SAME_DMA;
5774 /* Allow reads and writes to the APE register and memory space. */
5775 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5776 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5777 PCISTATE_ALLOW_APE_SHMEM_WR;
5778 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5779
5780 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5781
5782 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5783 pcie_set_readrq(tp->pdev, 4096);
5784 else {
5785 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5786 tp->pci_cacheline_sz);
5787 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5788 tp->pci_lat_timer);
5789 }
5790
5791 /* Make sure PCI-X relaxed ordering bit is clear. */
5792 if (tp->pcix_cap) {
5793 u16 pcix_cmd;
5794
5795 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5796 &pcix_cmd);
5797 pcix_cmd &= ~PCI_X_CMD_ERO;
5798 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5799 pcix_cmd);
5800 }
5801
5802 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5803
5804 /* Chip reset on 5780 will reset the MSI enable bit,
5805 * so we need to restore it.
5806 */
5807 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5808 u16 ctrl;
5809
5810 pci_read_config_word(tp->pdev,
5811 tp->msi_cap + PCI_MSI_FLAGS,
5812 &ctrl);
5813 pci_write_config_word(tp->pdev,
5814 tp->msi_cap + PCI_MSI_FLAGS,
5815 ctrl | PCI_MSI_FLAGS_ENABLE);
5816 val = tr32(MSGINT_MODE);
5817 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5818 }
5819 }
5820 }
5821
5822 static void tg3_stop_fw(struct tg3 *);
5823
5824 /* tp->lock is held. */
5825 static int tg3_chip_reset(struct tg3 *tp)
5826 {
5827 u32 val;
5828 void (*write_op)(struct tg3 *, u32, u32);
5829 int err;
5830
5831 tg3_nvram_lock(tp);
5832
5833 tg3_mdio_stop(tp);
5834
5835 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
5836
5837 /* No matching tg3_nvram_unlock() after this because
5838 * chip reset below will undo the nvram lock.
5839 */
5840 tp->nvram_lock_cnt = 0;
5841
5842 /* GRC_MISC_CFG core clock reset will clear the memory
5843 * enable bit in PCI register 4 and the MSI enable bit
5844 * on some chips, so we save relevant registers here.
5845 */
5846 tg3_save_pci_state(tp);
5847
5848 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5849 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5850 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5851 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5852 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
5853 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
5854 tw32(GRC_FASTBOOT_PC, 0);
5855
5856	/*
5857	 * We must avoid the readl() that normally takes place.
5858	 * It locks up machines, causes machine checks, and other
5859	 * fun things. So, temporarily disable the 5701
5860	 * hardware workaround while we do the reset.
5861	 */
5862 write_op = tp->write32;
5863 if (write_op == tg3_write_flush_reg32)
5864 tp->write32 = tg3_write32;
5865
5866 /* Prevent the irq handler from reading or writing PCI registers
5867 * during chip reset when the memory enable bit in the PCI command
5868	 * register may be cleared. The chip does not generate interrupts
5869 * at this time, but the irq handler may still be called due to irq
5870 * sharing or irqpoll.
5871 */
5872 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
5873 if (tp->hw_status) {
5874 tp->hw_status->status = 0;
5875 tp->hw_status->status_tag = 0;
5876 }
5877 tp->last_tag = 0;
5878 smp_mb();
5879 synchronize_irq(tp->pdev->irq);
5880
5881 /* do the reset */
5882 val = GRC_MISC_CFG_CORECLK_RESET;
5883
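/* Magic, undocumented workarounds for PCI Express chips: the 0x7e2c
 * register poke and GRC_MISC_CFG bit 29 below have no public
 * symbolic names; their exact purpose is not documented here.
 */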
5884 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5885 if (tr32(0x7e2c) == 0x60) {
5886 tw32(0x7e2c, 0x20);
5887 }
5888 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5889 tw32(GRC_MISC_CFG, (1 << 29));
5890 val |= (1 << 29);
5891 }
5892 }
5893
5894 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5895 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5896 tw32(GRC_VCPU_EXT_CTRL,
5897 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5898 }
5899
5900 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5901 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5902 tw32(GRC_MISC_CFG, val);
5903
5904 /* restore 5701 hardware bug workaround write method */
5905 tp->write32 = write_op;
5906
5907	/* Unfortunately, we have to delay before the PCI read back.
5908	 * Some 575X chips will not even respond to a PCI cfg access
5909	 * when the reset command is given to the chip.
5910	 *
5911	 * How do these hardware designers expect things to work
5912	 * properly if the PCI write is posted for a long period
5913	 * of time? It is always necessary to have some method by
5914	 * which a register read back can occur to push out the
5915	 * write which does the reset.
5916	 *
5917	 * For most tg3 variants the trick below has worked.
5918	 * Ho hum...
5919	 */
5920 udelay(120);
5921
5922	/* Flush PCI posted writes. The normal MMIO registers
5923	 * are inaccessible at this time so this is the only
5924	 * way to do this reliably (actually, this is no longer
5925	 * the case, see above). I tried to use indirect
5926	 * register read/write but this upset some 5701 variants.
5927	 */
5928 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5929
5930 udelay(120);
5931
5932 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5933 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5934 int i;
5935 u32 cfg_val;
5936
5937	/* Allow 500ms (5000 * 100us) for link training to complete. */
5938 for (i = 0; i < 5000; i++)
5939 udelay(100);
5940
5941 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5942 pci_write_config_dword(tp->pdev, 0xc4,
5943 cfg_val | (1 << 15));
5944 }
5945 /* Set PCIE max payload size and clear error status. */
5946 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5947 }
5948
5949 tg3_restore_pci_state(tp);
5950
5951 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5952
5953 val = 0;
5954 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5955 val = tr32(MEMARB_MODE);
5956 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
5957
5958 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5959 tg3_stop_fw(tp);
5960 tw32(0x5000, 0x400);
5961 }
5962
5963 tw32(GRC_MODE, tp->grc_mode);
5964
5965 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
5966 val = tr32(0xc4);
5967
5968 tw32(0xc4, val | (1 << 15));
5969 }
5970
5971 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5972 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5973 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5974 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5975 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5976 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5977 }
5978
5979 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5980 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5981 tw32_f(MAC_MODE, tp->mac_mode);
5982 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5983 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5984 tw32_f(MAC_MODE, tp->mac_mode);
5985 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
5986 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
5987 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
5988 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
5989 tw32_f(MAC_MODE, tp->mac_mode);
5990 } else
5991 tw32_f(MAC_MODE, 0);
5992 udelay(40);
5993
5994 tg3_mdio_start(tp);
5995
5996 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
5997
5998 err = tg3_poll_fw(tp);
5999 if (err)
6000 return err;
6001
6002 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6003 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
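/* Another unnamed register: bit 25 of 0x7c00 is set on every PCIe
 * chip newer than the 5750 A0; its meaning is not documented here.
 */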
6004 val = tr32(0x7c00);
6005
6006 tw32(0x7c00, val | (1 << 25));
6007 }
6008
6009 /* Reprobe ASF enable state. */
6010 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6011 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6012 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6013 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6014 u32 nic_cfg;
6015
6016 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6017 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6018 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
6019 tp->last_event_jiffies = jiffies;
6020 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
6021 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6022 }
6023 }
6024
6025 return 0;
6026 }
6027
6028 /* tp->lock is held. */
6029 static void tg3_stop_fw(struct tg3 *tp)
6030 {
6031 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6032 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
6033 /* Wait for RX cpu to ACK the previous event. */
6034 tg3_wait_for_event_ack(tp);
6035
6036 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
6037
6038 tg3_generate_fw_event(tp);
6039
6040 /* Wait for RX cpu to ACK this event. */
6041 tg3_wait_for_event_ack(tp);
6042 }
6043 }
6044
6045 /* tp->lock is held. */
6046 static int tg3_halt(struct tg3 *tp, int kind, int silent)
6047 {
6048 int err;
6049
6050 tg3_stop_fw(tp);
6051
6052 tg3_write_sig_pre_reset(tp, kind);
6053
6054 tg3_abort_hw(tp, silent);
6055 err = tg3_chip_reset(tp);
6056
6057 tg3_write_sig_legacy(tp, kind);
6058 tg3_write_sig_post_reset(tp, kind);
6059
6060 if (err)
6061 return err;
6062
6063 return 0;
6064 }
6065
6066 #define TG3_FW_RELEASE_MAJOR 0x0
6067	#define TG3_FW_RELEASE_MINOR	0x0
6068 #define TG3_FW_RELEASE_FIX 0x0
6069 #define TG3_FW_START_ADDR 0x08000000
6070 #define TG3_FW_TEXT_ADDR 0x08000000
6071 #define TG3_FW_TEXT_LEN 0x9c0
6072 #define TG3_FW_RODATA_ADDR 0x080009c0
6073 #define TG3_FW_RODATA_LEN 0x60
6074 #define TG3_FW_DATA_ADDR 0x08000a40
6075 #define TG3_FW_DATA_LEN 0x20
6076 #define TG3_FW_SBSS_ADDR 0x08000a60
6077 #define TG3_FW_SBSS_LEN 0xc
6078 #define TG3_FW_BSS_ADDR 0x08000a70
6079 #define TG3_FW_BSS_LEN 0x10
6080
6081 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
6082 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
6083 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
6084 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
6085 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
6086 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
6087 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
6088 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
6089 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
6090 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
6091 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
6092 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
6093 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
6094 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
6095 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
6096 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
6097 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6098 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
6099 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
6100 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
6101 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6102 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
6103 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
6104 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6107 0, 0, 0, 0, 0, 0,
6108 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
6109 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6110 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6111 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6112 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
6113 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
6114 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
6115 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
6116 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6117 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6118 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
6119 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6120 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6121 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6122 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
6123 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
6124 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
6125 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
6126 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
6127 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
6128 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
6129 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
6130 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
6131 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
6132 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
6133 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
6134 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
6135 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
6136 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
6137 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
6138 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
6139 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
6140 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
6141 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
6142 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
6143 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
6144 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
6145 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
6146 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
6147 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
6148 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
6149 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
6150 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
6151 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
6152 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
6153 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
6154 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
6155 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
6156 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
6157 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
6158 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
6159 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
6160 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
6161 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
6162 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
6163 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
6164 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
6165 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
6166 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
6167 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
6168 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
6169 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
6170 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
6171 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
6172 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
6173 };
6174
6175 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
6176 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
6177 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
6178 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6179 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
6180 0x00000000
6181 };
6182
6183 #if 0 /* All zeros, don't eat up space with it. */
6184 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
6185 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6186 0x00000000, 0x00000000, 0x00000000, 0x00000000
6187 };
6188 #endif
6189
6190 #define RX_CPU_SCRATCH_BASE 0x30000
6191 #define RX_CPU_SCRATCH_SIZE 0x04000
6192 #define TX_CPU_SCRATCH_BASE 0x34000
6193 #define TX_CPU_SCRATCH_SIZE 0x04000
6194
6195 /* tp->lock is held. */
6196 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6197 {
6198 int i;
6199
6200 BUG_ON(offset == TX_CPU_BASE &&
6201 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6202
6203 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6204 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6205
6206 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6207 return 0;
6208 }
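/* Asserting the halt bit can take several attempts to stick, so
 * retry up to 10,000 times.  For the RX CPU the halt is asserted
 * once more at the end with a flushing write.
 */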
6209 if (offset == RX_CPU_BASE) {
6210 for (i = 0; i < 10000; i++) {
6211 tw32(offset + CPU_STATE, 0xffffffff);
6212 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6213 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6214 break;
6215 }
6216
6217 tw32(offset + CPU_STATE, 0xffffffff);
6218 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
6219 udelay(10);
6220 } else {
6221 for (i = 0; i < 10000; i++) {
6222 tw32(offset + CPU_STATE, 0xffffffff);
6223 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6224 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6225 break;
6226 }
6227 }
6228
6229 if (i >= 10000) {
6230 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
6231 "and %s CPU\n",
6232 tp->dev->name,
6233 (offset == RX_CPU_BASE ? "RX" : "TX"));
6234 return -ENODEV;
6235 }
6236
6237 /* Clear firmware's nvram arbitration. */
6238 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6239 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
6240 return 0;
6241 }
6242
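/* Describes one firmware image: text, read-only data and initialized
 * data sections, each with a load address and length.  A NULL *_data
 * pointer means the section is zero-filled when loaded.
 */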
6243 struct fw_info {
6244 unsigned int text_base;
6245 unsigned int text_len;
6246 const u32 *text_data;
6247 unsigned int rodata_base;
6248 unsigned int rodata_len;
6249 const u32 *rodata_data;
6250 unsigned int data_base;
6251 unsigned int data_len;
6252 const u32 *data_data;
6253 };
6254
6255 /* tp->lock is held. */
6256 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6257 int cpu_scratch_size, struct fw_info *info)
6258 {
6259 int err, lock_err, i;
6260 void (*write_op)(struct tg3 *, u32, u32);
6261
6262 if (cpu_base == TX_CPU_BASE &&
6263 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6264		printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6265		       "TX cpu firmware on %s, which is 5705-class.\n",
6266		       tp->dev->name);
6267 return -EINVAL;
6268 }
6269
6270 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6271 write_op = tg3_write_mem;
6272 else
6273 write_op = tg3_write_indirect_reg32;
6274
6275	/* It is possible that bootcode is still loading at this point.
6276	 * Take the nvram lock before halting the cpu.
6277	 */
6278 lock_err = tg3_nvram_lock(tp);
6279 err = tg3_halt_cpu(tp, cpu_base);
6280 if (!lock_err)
6281 tg3_nvram_unlock(tp);
6282 if (err)
6283 goto out;
6284
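/* Zero the whole scratch area, re-assert the CPU halt, then copy
 * each section in at its link-time offset (the low 16 bits of the
 * section's base address).
 */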
6285 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6286 write_op(tp, cpu_scratch_base + i, 0);
6287 tw32(cpu_base + CPU_STATE, 0xffffffff);
6288 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6289 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6290 write_op(tp, (cpu_scratch_base +
6291 (info->text_base & 0xffff) +
6292 (i * sizeof(u32))),
6293 (info->text_data ?
6294 info->text_data[i] : 0));
6295 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6296 write_op(tp, (cpu_scratch_base +
6297 (info->rodata_base & 0xffff) +
6298 (i * sizeof(u32))),
6299 (info->rodata_data ?
6300 info->rodata_data[i] : 0));
6301 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6302 write_op(tp, (cpu_scratch_base +
6303 (info->data_base & 0xffff) +
6304 (i * sizeof(u32))),
6305 (info->data_data ?
6306 info->data_data[i] : 0));
6307
6308 err = 0;
6309
6310 out:
6311 return err;
6312 }
6313
6314 /* tp->lock is held. */
6315 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6316 {
6317 struct fw_info info;
6318 int err, i;
6319
6320 info.text_base = TG3_FW_TEXT_ADDR;
6321 info.text_len = TG3_FW_TEXT_LEN;
6322 info.text_data = &tg3FwText[0];
6323 info.rodata_base = TG3_FW_RODATA_ADDR;
6324 info.rodata_len = TG3_FW_RODATA_LEN;
6325 info.rodata_data = &tg3FwRodata[0];
6326 info.data_base = TG3_FW_DATA_ADDR;
6327 info.data_len = TG3_FW_DATA_LEN;
6328 info.data_data = NULL;
6329
6330 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6331 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6332 &info);
6333 if (err)
6334 return err;
6335
6336 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6337 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6338 &info);
6339 if (err)
6340 return err;
6341
6342	/* Now start up only the RX CPU. */
6343 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6344 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6345
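/* The PC write does not always take on the first attempt; retry up
 * to five times, 1ms apart, re-halting the CPU in between.
 */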
6346 for (i = 0; i < 5; i++) {
6347 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6348 break;
6349 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6350 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
6351 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6352 udelay(1000);
6353 }
6354 if (i >= 5) {
6355		printk(KERN_ERR PFX "tg3_load_firmware failed to set "
6356		       "RX CPU PC on %s: is %08x, should be %08x\n",
6357		       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6358		       TG3_FW_TEXT_ADDR);
6359 return -ENODEV;
6360 }
6361 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6362 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
6363
6364 return 0;
6365 }
6366
6367
6368 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
6369	#define TG3_TSO_FW_RELEASE_MINOR	0x6
6370 #define TG3_TSO_FW_RELEASE_FIX 0x0
6371 #define TG3_TSO_FW_START_ADDR 0x08000000
6372 #define TG3_TSO_FW_TEXT_ADDR 0x08000000
6373 #define TG3_TSO_FW_TEXT_LEN 0x1aa0
6374 #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
6375 #define TG3_TSO_FW_RODATA_LEN 0x60
6376 #define TG3_TSO_FW_DATA_ADDR 0x08001b20
6377 #define TG3_TSO_FW_DATA_LEN 0x30
6378 #define TG3_TSO_FW_SBSS_ADDR 0x08001b50
6379 #define TG3_TSO_FW_SBSS_LEN 0x2c
6380 #define TG3_TSO_FW_BSS_ADDR 0x08001b80
6381 #define TG3_TSO_FW_BSS_LEN 0x894
6382
6383 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
6384 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6385 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6386 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6387 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6388 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6389 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6390 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6391 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6392 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6393 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6394 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6395 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6396 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6397 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6398 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6399 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6400 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6401 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6402 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6403 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6404 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6405 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6406 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6407 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6408 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6409 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6410 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6411 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6412 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6413 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6414 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6415 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6416 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6417 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6418 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6419 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6420 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6421 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6422 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6423 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6424 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6425 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6426 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6427 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6428 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6429 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6430 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6431 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6432 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6433 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6434 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6435 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6436 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6437 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6438 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6439 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6440 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6441 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6442 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6443 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6444 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6445 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6446 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6447 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6448 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6449 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6450 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6451 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6452 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6453 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6454 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6455 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6456 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6457 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6458 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6459 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6460 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6461 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6462 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6463 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6464 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6465 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6466 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6467 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6468 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6469 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6470 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6471 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6472 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6473 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6474 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6475 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6476 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6477 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6478 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6479 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6480 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6481 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6482 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6483 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6484 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6485 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6486 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6487 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6488 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6489 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6490 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6491 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6492 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6493 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6494 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6495 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6496 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6497 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6498 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6499 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6500 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6501 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6502 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6503 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6504 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6505 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6506 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6507 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6508 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6509 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6510 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6511 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6512 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6513 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6514 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6515 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6516 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6517 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6518 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6519 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6520 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6521 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6522 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6523 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6524 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6525 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6526 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6527 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6528 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6529 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6530 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6531 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6532 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6533 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6534 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6535 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6536 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6537 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6538 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6539 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6540 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6541 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6542 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6543 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6544 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6545 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6546 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6547 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6548 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6549 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6550 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6551 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6552 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6553 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6554 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6555 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6556 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6557 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6558 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6559 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6560 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6561 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6562 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6563 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6564 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6565 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6566 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6567 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6568 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6569 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6570 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6571 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6572 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6573 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6574 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6575 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6576 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6577 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6578 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6579 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6580 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6581 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6582 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6583 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6584 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6585 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6586 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6587 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6588 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6589 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6590 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6591 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6592 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6593 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6594 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6595 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6596 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6597 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6598 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6599 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6600 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6601 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6602 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6603 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6604 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6605 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6606 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6607 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6608 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6609 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6610 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6611 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6612 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6613 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6614 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6615 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6616 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6617 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6618 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6619 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6620 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6621 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6622 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6623 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6624 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6625 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6626 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6627 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6628 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6629 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6630 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6631 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6632 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6633 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6634 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6635 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6636 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6637 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6638 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6639 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6640 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6641 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6642 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6643 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6644 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6645 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6646 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6647 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6648 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6649 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6650 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6651 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6652 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6653 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6654 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6655 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6656 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6657 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6658 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6659 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6660 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6661 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6662 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6663 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6664 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6665 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6666 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6667 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6668 };
6669
6670 static const u32 tg3TsoFwRodata[] = {
6671 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6672 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6673 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6674 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6675 0x00000000,
6676 };
6677
6678 static const u32 tg3TsoFwData[] = {
6679 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6680 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6681 0x00000000,
6682 };
6683
6684 /* 5705 needs a special version of the TSO firmware. */
6685 #define TG3_TSO5_FW_RELEASE_MAJOR 0x1
6686	#define TG3_TSO5_FW_RELEASE_MINOR	0x2
6687 #define TG3_TSO5_FW_RELEASE_FIX 0x0
6688 #define TG3_TSO5_FW_START_ADDR 0x00010000
6689 #define TG3_TSO5_FW_TEXT_ADDR 0x00010000
6690 #define TG3_TSO5_FW_TEXT_LEN 0xe90
6691 #define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
6692 #define TG3_TSO5_FW_RODATA_LEN 0x50
6693 #define TG3_TSO5_FW_DATA_ADDR 0x00010f00
6694 #define TG3_TSO5_FW_DATA_LEN 0x20
6695 #define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
6696 #define TG3_TSO5_FW_SBSS_LEN 0x28
6697 #define TG3_TSO5_FW_BSS_ADDR 0x00010f50
6698 #define TG3_TSO5_FW_BSS_LEN 0x88
6699
6700 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
6701 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6702 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6703 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6704 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6705 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6706 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6707 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6708 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6709 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6710 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6711 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6712 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6713 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6714 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6715 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6716 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6717 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6718 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6719 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6720 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6721 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6722 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6723 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6724 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6725 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6726 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6727 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6728 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6729 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6730 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6731 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6732 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6733 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6734 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6735 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6736 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6737 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6738 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6739 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6740 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6741 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6742 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6743 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6744 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6745 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6746 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6747 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6748 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6749 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6750 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6751 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6752 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6753 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6754 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6755 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6756 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6757 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6758 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6759 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6760 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6761 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6762 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6763 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6764 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6765 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6766 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6767 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6768 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6769 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6770 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6771 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6772 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6773 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6774 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6775 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6776 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6777 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6778 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6779 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6780 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6781 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6782 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6783 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6784 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6785 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6786 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6787 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6788 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6789 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6790 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6791 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6792 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6793 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6794 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6795 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6796 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6797 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6798 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6799 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6800 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6801 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6802 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6803 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6804 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6805 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6806 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6807 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6808 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6809 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6810 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6811 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6812 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6813 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6814 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6815 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6816 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6817 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6818 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6819 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6820 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6821 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6822 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6823 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6824 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6825 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6826 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6827 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6828 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6829 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6830 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6831 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6832 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6833 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6834 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6835 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6836 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6837 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6838 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6839 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6840 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6841 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6842 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6843 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6844 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6845 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6846 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6847 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6848 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6849 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6850 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6851 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6852 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6853 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6854 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6855 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6856 0x00000000, 0x00000000, 0x00000000,
6857 };
6858
6859 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
6860 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6861 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6862 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6863 0x00000000, 0x00000000, 0x00000000,
6864 };
6865
6866 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
6867 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6868 0x00000000, 0x00000000, 0x00000000,
6869 };
6870
6871 /* tp->lock is held. */
6872 static int tg3_load_tso_firmware(struct tg3 *tp)
6873 {
6874 struct fw_info info;
6875 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6876 int err, i;
6877
6878 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6879 return 0;
6880
6881 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6882 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6883 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6884 info.text_data = &tg3Tso5FwText[0];
6885 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6886 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6887 info.rodata_data = &tg3Tso5FwRodata[0];
6888 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6889 info.data_len = TG3_TSO5_FW_DATA_LEN;
6890 info.data_data = &tg3Tso5FwData[0];
6891 cpu_base = RX_CPU_BASE;
6892 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6893 cpu_scratch_size = (info.text_len +
6894 info.rodata_len +
6895 info.data_len +
6896 TG3_TSO5_FW_SBSS_LEN +
6897 TG3_TSO5_FW_BSS_LEN);
6898 } else {
6899 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6900 info.text_len = TG3_TSO_FW_TEXT_LEN;
6901 info.text_data = &tg3TsoFwText[0];
6902 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6903 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6904 info.rodata_data = &tg3TsoFwRodata[0];
6905 info.data_base = TG3_TSO_FW_DATA_ADDR;
6906 info.data_len = TG3_TSO_FW_DATA_LEN;
6907 info.data_data = &tg3TsoFwData[0];
6908 cpu_base = TX_CPU_BASE;
6909 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6910 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6911 }
6912
6913 err = tg3_load_firmware_cpu(tp, cpu_base,
6914 cpu_scratch_base, cpu_scratch_size,
6915 &info);
6916 if (err)
6917 return err;
6918
6919 	/* Now start up the CPU. */
6920 tw32(cpu_base + CPU_STATE, 0xffffffff);
6921 tw32_f(cpu_base + CPU_PC, info.text_base);
6922
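	/* The PC write does not always stick on the first try, so poll it
	 * back up to five times, halting the CPU and rewriting the PC
	 * between attempts.
	 */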
6923 for (i = 0; i < 5; i++) {
6924 if (tr32(cpu_base + CPU_PC) == info.text_base)
6925 break;
6926 tw32(cpu_base + CPU_STATE, 0xffffffff);
6927 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6928 tw32_f(cpu_base + CPU_PC, info.text_base);
6929 udelay(1000);
6930 }
6931 if (i >= 5) {
6932 		printk(KERN_ERR PFX "tg3_load_tso_firmware fails to set "
6933 		       "CPU PC for %s: is %08x, should be %08x\n",
6934 tp->dev->name, tr32(cpu_base + CPU_PC),
6935 info.text_base);
6936 return -ENODEV;
6937 }
6938 tw32(cpu_base + CPU_STATE, 0xffffffff);
6939 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6940 return 0;
6941 }
6942 
6944 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6945 {
6946 struct tg3 *tp = netdev_priv(dev);
6947 struct sockaddr *addr = p;
6948 int err = 0, skip_mac_1 = 0;
6949
6950 if (!is_valid_ether_addr(addr->sa_data))
6951 return -EINVAL;
6952
6953 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6954
6955 if (!netif_running(dev))
6956 return 0;
6957
6958 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6959 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6960
6961 addr0_high = tr32(MAC_ADDR_0_HIGH);
6962 addr0_low = tr32(MAC_ADDR_0_LOW);
6963 addr1_high = tr32(MAC_ADDR_1_HIGH);
6964 addr1_low = tr32(MAC_ADDR_1_LOW);
6965
6966 /* Skip MAC addr 1 if ASF is using it. */
6967 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6968 !(addr1_high == 0 && addr1_low == 0))
6969 skip_mac_1 = 1;
6970 }
6971 spin_lock_bh(&tp->lock);
6972 __tg3_set_mac_addr(tp, skip_mac_1);
6973 spin_unlock_bh(&tp->lock);
6974
6975 return err;
6976 }
6977
6978 /* tp->lock is held. */
6979 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6980 dma_addr_t mapping, u32 maxlen_flags,
6981 u32 nic_addr)
6982 {
6983 tg3_write_mem(tp,
6984 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6985 ((u64) mapping >> 32));
6986 tg3_write_mem(tp,
6987 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6988 ((u64) mapping & 0xffffffff));
6989 tg3_write_mem(tp,
6990 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6991 maxlen_flags);
6992
6993 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6994 tg3_write_mem(tp,
6995 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6996 nic_addr);
6997 }
6998
6999 static void __tg3_set_rx_mode(struct net_device *);
7000 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7001 {
7002 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7003 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7004 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7005 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7006 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7007 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7008 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7009 }
7010 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7011 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7012 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7013 u32 val = ec->stats_block_coalesce_usecs;
7014
7015 if (!netif_carrier_ok(tp->dev))
7016 val = 0;
7017
7018 tw32(HOSTCC_STAT_COAL_TICKS, val);
7019 }
7020 }
7021
7022 /* tp->lock is held. */
7023 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7024 {
7025 u32 val, rdmac_mode;
7026 int i, err, limit;
7027
7028 tg3_disable_ints(tp);
7029
7030 tg3_stop_fw(tp);
7031
7032 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7033
7034 	if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
7035 		tg3_abort_hw(tp, 1);
7037
7038 if (reset_phy &&
7039 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
7040 tg3_phy_reset(tp);
7041
7042 err = tg3_chip_reset(tp);
7043 if (err)
7044 return err;
7045
7046 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7047
7048 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7049 val = tr32(TG3_CPMU_CTRL);
7050 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7051 tw32(TG3_CPMU_CTRL, val);
7052
7053 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7054 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7055 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7056 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7057
7058 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7059 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7060 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7061 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7062
7063 val = tr32(TG3_CPMU_HST_ACC);
7064 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7065 val |= CPMU_HST_ACC_MACCLK_6_25;
7066 tw32(TG3_CPMU_HST_ACC, val);
7067 }
7068
7069 /* This works around an issue with Athlon chipsets on
7070 * B3 tigon3 silicon. This bit has no effect on any
7071 * other revision. But do not set this on PCI Express
7072 * chips and don't even touch the clocks if the CPMU is present.
7073 */
7074 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7075 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7076 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7077 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7078 }
7079
7080 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7081 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7082 val = tr32(TG3PCI_PCISTATE);
7083 val |= PCISTATE_RETRY_SAME_DMA;
7084 tw32(TG3PCI_PCISTATE, val);
7085 }
7086
7087 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7088 /* Allow reads and writes to the
7089 * APE register and memory space.
7090 */
7091 val = tr32(TG3PCI_PCISTATE);
7092 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7093 PCISTATE_ALLOW_APE_SHMEM_WR;
7094 tw32(TG3PCI_PCISTATE, val);
7095 }
7096
7097 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7098 /* Enable some hw fixes. */
7099 val = tr32(TG3PCI_MSI_DATA);
7100 val |= (1 << 26) | (1 << 28) | (1 << 29);
7101 tw32(TG3PCI_MSI_DATA, val);
7102 }
7103
7104 /* Descriptor ring init may make accesses to the
7105 	 * NIC SRAM area to set up the TX descriptors, so we
7106 * can only do this after the hardware has been
7107 * successfully reset.
7108 */
7109 err = tg3_init_rings(tp);
7110 if (err)
7111 return err;
7112
7113 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7114 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
7115 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7116 /* This value is determined during the probe time DMA
7117 * engine test, tg3_test_dma.
7118 */
7119 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7120 }
7121
7122 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7123 GRC_MODE_4X_NIC_SEND_RINGS |
7124 GRC_MODE_NO_TX_PHDR_CSUM |
7125 GRC_MODE_NO_RX_PHDR_CSUM);
7126 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7127
7128 /* Pseudo-header checksum is done by hardware logic and not
7129 	 * the offload processors, so make the chip do the pseudo-
7130 * header checksums on receive. For transmit it is more
7131 * convenient to do the pseudo-header checksum in software
7132 * as Linux does that on transmit for us in all cases.
7133 */
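	/* For reference: the TCP/UDP pseudo-header folds in the source and
	 * destination IP addresses, the protocol number, and the L4 segment
	 * length (RFC 793 / RFC 768); that is what the stack computes for us
	 * on transmit.
	 */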
7134 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7135
7136 tw32(GRC_MODE,
7137 tp->grc_mode |
7138 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7139
7140 	/* Set up the timer prescaler register. The clock is always 66 MHz. */
7141 val = tr32(GRC_MISC_CFG);
7142 val &= ~0xff;
7143 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7144 tw32(GRC_MISC_CFG, val);
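	/* Illustrative arithmetic: writing 65 presumably makes the prescaler
	 * divide the fixed 66 MHz clock by (65 + 1), yielding a 1 MHz
	 * (1 usec resolution) timebase for the coalescing tick counters.
	 */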
7145
7146 /* Initialize MBUF/DESC pool. */
7147 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7148 /* Do nothing. */
7149 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7150 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7151 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7152 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7153 else
7154 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7155 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7156 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7157 	} else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7159 int fw_len;
7160
7161 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7162 TG3_TSO5_FW_RODATA_LEN +
7163 TG3_TSO5_FW_DATA_LEN +
7164 TG3_TSO5_FW_SBSS_LEN +
7165 TG3_TSO5_FW_BSS_LEN);
7166 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
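		/* The statement above is the standard align-up idiom:
		 * (len + 127) & ~127 rounds fw_len up to the next 128-byte
		 * boundary before it is carved out of the 5705 mbuf pool.
		 */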
7167 tw32(BUFMGR_MB_POOL_ADDR,
7168 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7169 tw32(BUFMGR_MB_POOL_SIZE,
7170 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7171 }
7172
7173 if (tp->dev->mtu <= ETH_DATA_LEN) {
7174 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7175 tp->bufmgr_config.mbuf_read_dma_low_water);
7176 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7177 tp->bufmgr_config.mbuf_mac_rx_low_water);
7178 tw32(BUFMGR_MB_HIGH_WATER,
7179 tp->bufmgr_config.mbuf_high_water);
7180 } else {
7181 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7182 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7183 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7184 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7185 tw32(BUFMGR_MB_HIGH_WATER,
7186 tp->bufmgr_config.mbuf_high_water_jumbo);
7187 }
7188 tw32(BUFMGR_DMA_LOW_WATER,
7189 tp->bufmgr_config.dma_low_water);
7190 tw32(BUFMGR_DMA_HIGH_WATER,
7191 tp->bufmgr_config.dma_high_water);
7192
7193 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7194 for (i = 0; i < 2000; i++) {
7195 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7196 break;
7197 udelay(10);
7198 }
7199 if (i >= 2000) {
7200 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7201 tp->dev->name);
7202 return -ENODEV;
7203 }
7204
7205 /* Setup replenish threshold. */
7206 val = tp->rx_pending / 8;
7207 if (val == 0)
7208 val = 1;
7209 else if (val > tp->rx_std_max_post)
7210 val = tp->rx_std_max_post;
7211 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7212 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7213 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7214
7215 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7216 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7217 }
7218
7219 tw32(RCVBDI_STD_THRESH, val);
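	/* Worked example: with 200 standard buffers pending, the threshold
	 * written above is 200 / 8 == 25, so the RX BD initiator raises its
	 * attention once roughly 25 buffers need replenishing (a sketch of
	 * the intent, not a datasheet quote).
	 */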
7220
7221 /* Initialize TG3_BDINFO's at:
7222 * RCVDBDI_STD_BD: standard eth size rx ring
7223 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7224 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7225 *
7226 * like so:
7227 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7228 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7229 * ring attribute flags
7230 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7231 *
7232 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7233 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7234 *
7235 * The size of each ring is fixed in the firmware, but the location is
7236 * configurable.
7237 */
7238 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7239 ((u64) tp->rx_std_mapping >> 32));
7240 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7241 ((u64) tp->rx_std_mapping & 0xffffffff));
7242 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7243 NIC_SRAM_RX_BUFFER_DESC);
7244
7245 /* Don't even try to program the JUMBO/MINI buffer descriptor
7246 * configs on 5705.
7247 */
7248 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7249 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7250 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7251 } else {
7252 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7253 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7254
7255 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7256 BDINFO_FLAGS_DISABLED);
7257
7258 /* Setup replenish threshold. */
7259 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7260
7261 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7262 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7263 ((u64) tp->rx_jumbo_mapping >> 32));
7264 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7265 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7266 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7267 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7268 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7269 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7270 } else {
7271 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7272 BDINFO_FLAGS_DISABLED);
7273 }
7274
7275 }
7276
7277 /* There is only one send ring on 5705/5750, no need to explicitly
7278 * disable the others.
7279 */
7280 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7281 /* Clear out send RCB ring in SRAM. */
7282 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7283 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7284 BDINFO_FLAGS_DISABLED);
7285 }
7286
7287 tp->tx_prod = 0;
7288 tp->tx_cons = 0;
7289 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7290 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7291
7292 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7293 tp->tx_desc_mapping,
7294 (TG3_TX_RING_SIZE <<
7295 BDINFO_FLAGS_MAXLEN_SHIFT),
7296 NIC_SRAM_TX_BUFFER_DESC);
7297
7298 /* There is only one receive return ring on 5705/5750, no need
7299 * to explicitly disable the others.
7300 */
7301 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7302 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7303 i += TG3_BDINFO_SIZE) {
7304 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7305 BDINFO_FLAGS_DISABLED);
7306 }
7307 }
7308
7309 tp->rx_rcb_ptr = 0;
7310 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7311
7312 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7313 tp->rx_rcb_mapping,
7314 (TG3_RX_RCB_RING_SIZE(tp) <<
7315 BDINFO_FLAGS_MAXLEN_SHIFT),
7316 0);
7317
7318 tp->rx_std_ptr = tp->rx_pending;
7319 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7320 tp->rx_std_ptr);
7321
7322 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7323 tp->rx_jumbo_pending : 0;
7324 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7325 tp->rx_jumbo_ptr);
7326
7327 /* Initialize MAC address and backoff seed. */
7328 __tg3_set_mac_addr(tp, 0);
7329
7330 /* MTU + ethernet header + FCS + optional VLAN tag */
7331 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
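	/* The extra 8 bytes cover the 4-byte FCS plus a 4-byte VLAN tag
	 * (ETH_FCS_LEN + VLAN_HLEN); ETH_HLEN already accounts for the
	 * 14-byte Ethernet header.
	 */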
7332
7333 /* The slot time is changed by tg3_setup_phy if we
7334 * run at gigabit with half duplex.
7335 */
7336 tw32(MAC_TX_LENGTHS,
7337 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7338 (6 << TX_LENGTHS_IPG_SHIFT) |
7339 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7340
7341 /* Receive rules. */
7342 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7343 tw32(RCVLPC_CONFIG, 0x0181);
7344
7345 	/* Calculate the RDMAC_MODE setting early; we need it to determine
7346 	 * the RCVLPC_STATS_ENABLE mask.
7347 */
7348 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7349 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7350 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7351 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7352 RDMAC_MODE_LNGREAD_ENAB);
7353
7354 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7355 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7356 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7357 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7358 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7359
7360 /* If statement applies to 5705 and 5750 PCI devices only */
7361 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7362 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7363 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7364 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7365 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7366 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7367 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7368 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7369 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7370 }
7371 }
7372
7373 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7374 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7375
7376 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7377 rdmac_mode |= (1 << 27);
7378
7379 /* Receive/send statistics. */
7380 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7381 val = tr32(RCVLPC_STATS_ENABLE);
7382 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7383 tw32(RCVLPC_STATS_ENABLE, val);
7384 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7385 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7386 val = tr32(RCVLPC_STATS_ENABLE);
7387 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7388 tw32(RCVLPC_STATS_ENABLE, val);
7389 } else {
7390 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7391 }
7392 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7393 tw32(SNDDATAI_STATSENAB, 0xffffff);
7394 tw32(SNDDATAI_STATSCTRL,
7395 (SNDDATAI_SCTRL_ENABLE |
7396 SNDDATAI_SCTRL_FASTUPD));
7397
7398 /* Setup host coalescing engine. */
7399 tw32(HOSTCC_MODE, 0);
7400 for (i = 0; i < 2000; i++) {
7401 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7402 break;
7403 udelay(10);
7404 }
7405
7406 __tg3_set_coalesce(tp, &tp->coal);
7407
7408 /* set status block DMA address */
7409 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7410 ((u64) tp->status_mapping >> 32));
7411 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7412 ((u64) tp->status_mapping & 0xffffffff));
7413
7414 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7415 /* Status/statistics block address. See tg3_timer,
7416 * the tg3_periodic_fetch_stats call there, and
7417 		 * tg3_get_stats for how this works on 5705/5750 chips.
7418 */
7419 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7420 ((u64) tp->stats_mapping >> 32));
7421 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7422 ((u64) tp->stats_mapping & 0xffffffff));
7423 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7424 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7425 }
7426
7427 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7428
7429 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7430 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7431 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7432 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7433
7434 /* Clear statistics/status block in chip, and status block in ram. */
7435 for (i = NIC_SRAM_STATS_BLK;
7436 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7437 i += sizeof(u32)) {
7438 tg3_write_mem(tp, i, 0);
7439 udelay(40);
7440 }
7441 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7442
7443 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7444 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7445 /* reset to prevent losing 1st rx packet intermittently */
7446 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7447 udelay(10);
7448 }
7449
7450 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7451 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7452 else
7453 tp->mac_mode = 0;
7454 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7455 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7456 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7457 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7458 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7459 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7460 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7461 udelay(40);
7462
7463 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7464 * If TG3_FLG2_IS_NIC is zero, we should read the
7465 * register to preserve the GPIO settings for LOMs. The GPIOs,
7466 * whether used as inputs or outputs, are set by boot code after
7467 * reset.
7468 */
7469 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7470 u32 gpio_mask;
7471
7472 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7473 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7474 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7475
7476 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7477 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7478 GRC_LCLCTRL_GPIO_OUTPUT3;
7479
7480 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7481 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7482
7483 tp->grc_local_ctrl &= ~gpio_mask;
7484 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7485
7486 /* GPIO1 must be driven high for eeprom write protect */
7487 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7488 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7489 GRC_LCLCTRL_GPIO_OUTPUT1);
7490 }
7491 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7492 udelay(100);
7493
7494 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7495 tp->last_tag = 0;
7496
7497 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7498 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7499 udelay(40);
7500 }
7501
7502 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7503 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7504 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7505 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7506 WDMAC_MODE_LNGREAD_ENAB);
7507
7508 /* If statement applies to 5705 and 5750 PCI devices only */
7509 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7510 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7511 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7512 		if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7513 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7514 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7515 /* nothing */
7516 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7517 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7518 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7519 val |= WDMAC_MODE_RX_ACCEL;
7520 }
7521 }
7522
7523 /* Enable host coalescing bug fix */
7524 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7525 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7526 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7527 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7528 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
7529 val |= WDMAC_MODE_STATUS_TAG_FIX;
7530
7531 tw32_f(WDMAC_MODE, val);
7532 udelay(40);
7533
7534 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7535 u16 pcix_cmd;
7536
7537 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7538 &pcix_cmd);
7539 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7540 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7541 pcix_cmd |= PCI_X_CMD_READ_2K;
7542 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7543 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7544 pcix_cmd |= PCI_X_CMD_READ_2K;
7545 }
7546 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7547 pcix_cmd);
7548 }
7549
7550 tw32_f(RDMAC_MODE, rdmac_mode);
7551 udelay(40);
7552
7553 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7554 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7555 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7556
7557 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7558 tw32(SNDDATAC_MODE,
7559 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7560 else
7561 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7562
7563 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7564 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7565 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7566 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7567 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7568 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7569 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7570 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7571
7572 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7573 err = tg3_load_5701_a0_firmware_fix(tp);
7574 if (err)
7575 return err;
7576 }
7577
7578 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7579 err = tg3_load_tso_firmware(tp);
7580 if (err)
7581 return err;
7582 }
7583
7584 tp->tx_mode = TX_MODE_ENABLE;
7585 tw32_f(MAC_TX_MODE, tp->tx_mode);
7586 udelay(100);
7587
7588 tp->rx_mode = RX_MODE_ENABLE;
7589 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7590 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7591 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7592 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7593 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7594
7595 tw32_f(MAC_RX_MODE, tp->rx_mode);
7596 udelay(10);
7597
7598 tw32(MAC_LED_CTRL, tp->led_ctrl);
7599
7600 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7601 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7602 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7603 udelay(10);
7604 }
7605 tw32_f(MAC_RX_MODE, tp->rx_mode);
7606 udelay(10);
7607
7608 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7609 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7610 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7611 			/* Set drive transmission level to 1.2V, but only
7612 			 * if the signal pre-emphasis bit is not set. */
7613 val = tr32(MAC_SERDES_CFG);
7614 val &= 0xfffff000;
7615 val |= 0x880;
7616 tw32(MAC_SERDES_CFG, val);
7617 }
7618 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7619 tw32(MAC_SERDES_CFG, 0x616000);
7620 }
7621
7622 /* Prevent chip from dropping frames when flow control
7623 * is enabled.
7624 */
7625 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7626
7627 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7628 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7629 /* Use hardware link auto-negotiation */
7630 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7631 }
7632
7633 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7634 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7635 u32 tmp;
7636
7637 tmp = tr32(SERDES_RX_CTRL);
7638 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7639 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7640 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7641 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7642 }
7643
7644 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7645 if (tp->link_config.phy_is_low_power) {
7646 tp->link_config.phy_is_low_power = 0;
7647 tp->link_config.speed = tp->link_config.orig_speed;
7648 tp->link_config.duplex = tp->link_config.orig_duplex;
7649 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7650 }
7651
7652 err = tg3_setup_phy(tp, 0);
7653 if (err)
7654 return err;
7655
7656 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7657 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7658 u32 tmp;
7659
7660 /* Clear CRC stats. */
7661 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7662 tg3_writephy(tp, MII_TG3_TEST1,
7663 tmp | MII_TG3_TEST1_CRC_EN);
7664 tg3_readphy(tp, 0x14, &tmp);
7665 }
7666 }
7667 }
7668
7669 __tg3_set_rx_mode(tp->dev);
7670
7671 /* Initialize receive rules. */
7672 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7673 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7674 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7675 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7676
7677 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7678 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7679 limit = 8;
7680 else
7681 limit = 16;
7682 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7683 limit -= 4;
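	/* The switch cases below fall through intentionally: entering at
	 * `limit' clears every receive rule register from (limit - 1) down
	 * to 4. Rules 3 and 2 are deliberately left alone (note the
	 * commented-out writes), and rules 1 and 0 were programmed above.
	 */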
7684 switch (limit) {
7685 case 16:
7686 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7687 case 15:
7688 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7689 case 14:
7690 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7691 case 13:
7692 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7693 case 12:
7694 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7695 case 11:
7696 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7697 case 10:
7698 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7699 case 9:
7700 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7701 case 8:
7702 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7703 case 7:
7704 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7705 case 6:
7706 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7707 case 5:
7708 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7709 case 4:
7710 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7711 case 3:
7712 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7713 case 2:
7714 case 1:
7716 default:
7717 break;
7718 }
7719
7720 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7721 /* Write our heartbeat update interval to APE. */
7722 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7723 APE_HOST_HEARTBEAT_INT_DISABLE);
7724
7725 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7726
7727 return 0;
7728 }
7729
7730 /* Called at device open time to get the chip ready for
7731 * packet processing. Invoked with tp->lock held.
7732 */
7733 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7734 {
7735 tg3_switch_clocks(tp);
7736
7737 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7738
7739 return tg3_reset_hw(tp, reset_phy);
7740 }
7741
7742 #define TG3_STAT_ADD32(PSTAT, REG) \
7743 do { u32 __val = tr32(REG); \
7744 (PSTAT)->low += __val; \
7745 if ((PSTAT)->low < __val) \
7746 (PSTAT)->high += 1; \
7747 } while (0)
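/* Worked example of the wraparound check above: if ->low was 0xfffffff0 and
 * the register reads 0x20, the 32-bit add leaves ->low == 0x10; since
 * 0x10 < 0x20 the carry is detected and ->high is incremented, extending the
 * hardware's 32-bit counter to 64 bits in software.
 */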
7748
7749 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7750 {
7751 struct tg3_hw_stats *sp = tp->hw_stats;
7752
7753 if (!netif_carrier_ok(tp->dev))
7754 return;
7755
7756 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7757 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7758 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7759 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7760 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7761 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7762 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7763 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7764 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7765 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7766 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7767 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7768 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7769
7770 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7771 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7772 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7773 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7774 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7775 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7776 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7777 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7778 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7779 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7780 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7781 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7782 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7783 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7784
7785 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7786 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7787 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7788 }
7789
7790 static void tg3_timer(unsigned long __opaque)
7791 {
7792 struct tg3 *tp = (struct tg3 *) __opaque;
7793
7794 if (tp->irq_sync)
7795 goto restart_timer;
7796
7797 spin_lock(&tp->lock);
7798
7799 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7800 		/* All of this garbage is because, when using non-tagged
7801 		 * IRQ status, the mailbox/status_block protocol the chip
7802 		 * uses with the CPU is race prone.
7803 */
7804 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7805 tw32(GRC_LOCAL_CTRL,
7806 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7807 } else {
7808 tw32(HOSTCC_MODE, tp->coalesce_mode |
7809 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7810 }
7811
7812 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7813 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7814 spin_unlock(&tp->lock);
7815 schedule_work(&tp->reset_task);
7816 return;
7817 }
7818 }
7819
7820 /* This part only runs once per second. */
7821 if (!--tp->timer_counter) {
7822 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7823 tg3_periodic_fetch_stats(tp);
7824
7825 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7826 u32 mac_stat;
7827 int phy_event;
7828
7829 mac_stat = tr32(MAC_STATUS);
7830
7831 phy_event = 0;
7832 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7833 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7834 phy_event = 1;
7835 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7836 phy_event = 1;
7837
7838 if (phy_event)
7839 tg3_setup_phy(tp, 0);
7840 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7841 u32 mac_stat = tr32(MAC_STATUS);
7842 int need_setup = 0;
7843
7844 if (netif_carrier_ok(tp->dev) &&
7845 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7846 need_setup = 1;
7847 }
7848 			if (!netif_carrier_ok(tp->dev) &&
7849 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7850 MAC_STATUS_SIGNAL_DET))) {
7851 need_setup = 1;
7852 }
7853 if (need_setup) {
7854 if (!tp->serdes_counter) {
7855 tw32_f(MAC_MODE,
7856 (tp->mac_mode &
7857 ~MAC_MODE_PORT_MODE_MASK));
7858 udelay(40);
7859 tw32_f(MAC_MODE, tp->mac_mode);
7860 udelay(40);
7861 }
7862 tg3_setup_phy(tp, 0);
7863 }
7864 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7865 tg3_serdes_parallel_detect(tp);
7866
7867 tp->timer_counter = tp->timer_multiplier;
7868 }
7869
7870 /* Heartbeat is only sent once every 2 seconds.
7871 *
7872 * The heartbeat is to tell the ASF firmware that the host
7873 * driver is still alive. In the event that the OS crashes,
7874 * ASF needs to reset the hardware to free up the FIFO space
7875 * that may be filled with rx packets destined for the host.
7876 * If the FIFO is full, ASF will no longer function properly.
7877 *
7878 	 * Unintended resets have been reported on real-time kernels
7879 	 * where the timer doesn't run on time. Netpoll will also have
7880 	 * the same problem.
7881 *
7882 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7883 * to check the ring condition when the heartbeat is expiring
7884 * before doing the reset. This will prevent most unintended
7885 * resets.
7886 */
7887 if (!--tp->asf_counter) {
7888 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7889 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7890 tg3_wait_for_event_ack(tp);
7891
7892 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7893 FWCMD_NICDRV_ALIVE3);
7894 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7895 /* 5 seconds timeout */
7896 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
7897
7898 tg3_generate_fw_event(tp);
7899 }
7900 tp->asf_counter = tp->asf_multiplier;
7901 }
7902
7903 spin_unlock(&tp->lock);
7904
7905 restart_timer:
7906 tp->timer.expires = jiffies + tp->timer_offset;
7907 add_timer(&tp->timer);
7908 }
7909
7910 static int tg3_request_irq(struct tg3 *tp)
7911 {
7912 irq_handler_t fn;
7913 unsigned long flags;
7914 struct net_device *dev = tp->dev;
7915
7916 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7917 fn = tg3_msi;
7918 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7919 fn = tg3_msi_1shot;
7920 flags = IRQF_SAMPLE_RANDOM;
7921 } else {
7922 fn = tg3_interrupt;
7923 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7924 fn = tg3_interrupt_tagged;
7925 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7926 }
7927 	return request_irq(tp->pdev->irq, fn, flags, dev->name, dev);
7928 }
7929
7930 static int tg3_test_interrupt(struct tg3 *tp)
7931 {
7932 struct net_device *dev = tp->dev;
7933 int err, i, intr_ok = 0;
7934
7935 if (!netif_running(dev))
7936 return -ENODEV;
7937
7938 tg3_disable_ints(tp);
7939
7940 free_irq(tp->pdev->irq, dev);
7941
7942 err = request_irq(tp->pdev->irq, tg3_test_isr,
7943 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7944 if (err)
7945 return err;
7946
7947 tp->hw_status->status &= ~SD_STATUS_UPDATED;
7948 tg3_enable_ints(tp);
7949
7950 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7951 HOSTCC_MODE_NOW);
7952
7953 for (i = 0; i < 5; i++) {
7954 u32 int_mbox, misc_host_ctrl;
7955
7956 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7957 TG3_64BIT_REG_LOW);
7958 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7959
7960 if ((int_mbox != 0) ||
7961 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7962 intr_ok = 1;
7963 break;
7964 }
7965
7966 msleep(10);
7967 }
7968
7969 tg3_disable_ints(tp);
7970
7971 free_irq(tp->pdev->irq, dev);
7972
7973 err = tg3_request_irq(tp);
7974
7975 if (err)
7976 return err;
7977
7978 if (intr_ok)
7979 return 0;
7980
7981 return -EIO;
7982 }
7983
7984 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
7985  * successfully restored.
7986 */
7987 static int tg3_test_msi(struct tg3 *tp)
7988 {
7989 struct net_device *dev = tp->dev;
7990 int err;
7991 u16 pci_cmd;
7992
7993 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7994 return 0;
7995
7996 /* Turn off SERR reporting in case MSI terminates with Master
7997 * Abort.
7998 */
7999 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8000 pci_write_config_word(tp->pdev, PCI_COMMAND,
8001 pci_cmd & ~PCI_COMMAND_SERR);
8002
8003 err = tg3_test_interrupt(tp);
8004
8005 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8006
8007 if (!err)
8008 return 0;
8009
8010 /* other failures */
8011 if (err != -EIO)
8012 return err;
8013
8014 /* MSI test failed, go back to INTx mode */
8015 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
8016 "switching to INTx mode. Please report this failure to "
8017 "the PCI maintainer and include system chipset information.\n",
8018 tp->dev->name);
8019
8020 free_irq(tp->pdev->irq, dev);
8021 pci_disable_msi(tp->pdev);
8022
8023 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8024
8025 err = tg3_request_irq(tp);
8026 if (err)
8027 return err;
8028
8029 /* Need to reset the chip because the MSI cycle may have terminated
8030 * with Master Abort.
8031 */
8032 tg3_full_lock(tp, 1);
8033
8034 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8035 err = tg3_init_hw(tp, 1);
8036
8037 tg3_full_unlock(tp);
8038
8039 if (err)
8040 free_irq(tp->pdev->irq, dev);
8041
8042 return err;
8043 }
8044
8045 static int tg3_open(struct net_device *dev)
8046 {
8047 struct tg3 *tp = netdev_priv(dev);
8048 int err;
8049
8050 netif_carrier_off(tp->dev);
8051
8052 err = tg3_set_power_state(tp, PCI_D0);
8053 if (err)
8054 return err;
8055
8056 tg3_full_lock(tp, 0);
8057
8058 tg3_disable_ints(tp);
8059 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8060
8061 tg3_full_unlock(tp);
8062
8063 /* The placement of this call is tied
8064 * to the setup and use of Host TX descriptors.
8065 */
8066 err = tg3_alloc_consistent(tp);
8067 if (err)
8068 return err;
8069
8070 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
8071 /* All MSI supporting chips should support tagged
8072 * status. Assert that this is the case.
8073 */
8074 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8075 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8076 "Not using MSI.\n", tp->dev->name);
8077 } else if (pci_enable_msi(tp->pdev) == 0) {
8078 u32 msi_mode;
8079
8080 msi_mode = tr32(MSGINT_MODE);
8081 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8082 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8083 }
8084 }
8085 err = tg3_request_irq(tp);
8086
8087 if (err) {
8088 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8089 pci_disable_msi(tp->pdev);
8090 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8091 }
8092 tg3_free_consistent(tp);
8093 return err;
8094 }
8095
8096 napi_enable(&tp->napi);
8097
8098 tg3_full_lock(tp, 0);
8099
8100 err = tg3_init_hw(tp, 1);
8101 if (err) {
8102 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8103 tg3_free_rings(tp);
8104 } else {
8105 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8106 tp->timer_offset = HZ;
8107 else
8108 tp->timer_offset = HZ / 10;
8109
8110 BUG_ON(tp->timer_offset > HZ);
8111 tp->timer_counter = tp->timer_multiplier =
8112 (HZ / tp->timer_offset);
8113 tp->asf_counter = tp->asf_multiplier =
8114 ((HZ / tp->timer_offset) * 2);
8115
8116 init_timer(&tp->timer);
8117 tp->timer.expires = jiffies + tp->timer_offset;
8118 tp->timer.data = (unsigned long) tp;
8119 tp->timer.function = tg3_timer;
8120 }
8121
8122 tg3_full_unlock(tp);
8123
8124 if (err) {
8125 napi_disable(&tp->napi);
8126 free_irq(tp->pdev->irq, dev);
8127 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8128 pci_disable_msi(tp->pdev);
8129 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8130 }
8131 tg3_free_consistent(tp);
8132 return err;
8133 }
8134
8135 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8136 err = tg3_test_msi(tp);
8137
8138 if (err) {
8139 tg3_full_lock(tp, 0);
8140
8141 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8142 pci_disable_msi(tp->pdev);
8143 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8144 }
8145 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8146 tg3_free_rings(tp);
8147 tg3_free_consistent(tp);
8148
8149 tg3_full_unlock(tp);
8150
8151 napi_disable(&tp->napi);
8152
8153 return err;
8154 }
8155
8156 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8157 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
8158 u32 val = tr32(PCIE_TRANSACTION_CFG);
8159
8160 tw32(PCIE_TRANSACTION_CFG,
8161 val | PCIE_TRANS_CFG_1SHOT_MSI);
8162 }
8163 }
8164 }
8165
8166 tg3_phy_start(tp);
8167
8168 tg3_full_lock(tp, 0);
8169
8170 add_timer(&tp->timer);
8171 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8172 tg3_enable_ints(tp);
8173
8174 tg3_full_unlock(tp);
8175
8176 netif_start_queue(dev);
8177
8178 return 0;
8179 }
8180
8181 #if 0
8182 /*static*/ void tg3_dump_state(struct tg3 *tp)
8183 {
8184 u32 val32, val32_2, val32_3, val32_4, val32_5;
8185 u16 val16;
8186 int i;
8187
8188 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8189 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8190 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8191 val16, val32);
8192
8193 /* MAC block */
8194 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8195 tr32(MAC_MODE), tr32(MAC_STATUS));
8196 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8197 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8198 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8199 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8200 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8201 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8202
8203 /* Send data initiator control block */
8204 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8205 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8206 printk(" SNDDATAI_STATSCTRL[%08x]\n",
8207 tr32(SNDDATAI_STATSCTRL));
8208
8209 /* Send data completion control block */
8210 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8211
8212 /* Send BD ring selector block */
8213 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8214 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8215
8216 /* Send BD initiator control block */
8217 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8218 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8219
8220 /* Send BD completion control block */
8221 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8222
8223 /* Receive list placement control block */
8224 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8225 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8226 printk(" RCVLPC_STATSCTRL[%08x]\n",
8227 tr32(RCVLPC_STATSCTRL));
8228
8229 /* Receive data and receive BD initiator control block */
8230 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8231 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8232
8233 /* Receive data completion control block */
8234 printk("DEBUG: RCVDCC_MODE[%08x]\n",
8235 tr32(RCVDCC_MODE));
8236
8237 /* Receive BD initiator control block */
8238 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8239 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8240
8241 /* Receive BD completion control block */
8242 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8243 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8244
8245 /* Receive list selector control block */
8246 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8247 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8248
8249 /* Mbuf cluster free block */
8250 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8251 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8252
8253 /* Host coalescing control block */
8254 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8255 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8256 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8257 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8258 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8259 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8260 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8261 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8262 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8263 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8264 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8265 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8266
8267 /* Memory arbiter control block */
8268 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8269 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8270
8271 /* Buffer manager control block */
8272 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8273 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8274 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8275 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8276 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8277 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8278 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8279 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8280
8281 /* Read DMA control block */
8282 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8283 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8284
8285 /* Write DMA control block */
8286 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8287 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8288
8289 /* DMA completion block */
8290 printk("DEBUG: DMAC_MODE[%08x]\n",
8291 tr32(DMAC_MODE));
8292
8293 /* GRC block */
8294 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8295 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8296 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8297 tr32(GRC_LOCAL_CTRL));
8298
8299 /* TG3_BDINFOs */
8300 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8301 tr32(RCVDBDI_JUMBO_BD + 0x0),
8302 tr32(RCVDBDI_JUMBO_BD + 0x4),
8303 tr32(RCVDBDI_JUMBO_BD + 0x8),
8304 tr32(RCVDBDI_JUMBO_BD + 0xc));
8305 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8306 tr32(RCVDBDI_STD_BD + 0x0),
8307 tr32(RCVDBDI_STD_BD + 0x4),
8308 tr32(RCVDBDI_STD_BD + 0x8),
8309 tr32(RCVDBDI_STD_BD + 0xc));
8310 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8311 tr32(RCVDBDI_MINI_BD + 0x0),
8312 tr32(RCVDBDI_MINI_BD + 0x4),
8313 tr32(RCVDBDI_MINI_BD + 0x8),
8314 tr32(RCVDBDI_MINI_BD + 0xc));
8315
8316 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8317 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8318 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8319 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8320 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8321 val32, val32_2, val32_3, val32_4);
8322
8323 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8324 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8325 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8326 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8327 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8328 val32, val32_2, val32_3, val32_4);
8329
8330 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8331 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8332 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8333 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8334 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8335 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8336 val32, val32_2, val32_3, val32_4, val32_5);
8337
8338 /* SW status block */
8339 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8340 tp->hw_status->status,
8341 tp->hw_status->status_tag,
8342 tp->hw_status->rx_jumbo_consumer,
8343 tp->hw_status->rx_consumer,
8344 tp->hw_status->rx_mini_consumer,
8345 tp->hw_status->idx[0].rx_producer,
8346 tp->hw_status->idx[0].tx_consumer);
8347
8348 /* SW statistics block */
8349 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8350 ((u32 *)tp->hw_stats)[0],
8351 ((u32 *)tp->hw_stats)[1],
8352 ((u32 *)tp->hw_stats)[2],
8353 ((u32 *)tp->hw_stats)[3]);
8354
8355 /* Mailboxes */
8356 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
8357 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8358 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8359 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8360 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
8361
8362 /* NIC side send descriptors. */
8363 for (i = 0; i < 6; i++) {
8364 unsigned long txd;
8365
8366 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8367 + (i * sizeof(struct tg3_tx_buffer_desc));
8368 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8369 i,
8370 readl(txd + 0x0), readl(txd + 0x4),
8371 readl(txd + 0x8), readl(txd + 0xc));
8372 }
8373
8374 /* NIC side RX descriptors. */
8375 for (i = 0; i < 6; i++) {
8376 unsigned long rxd;
8377
8378 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8379 + (i * sizeof(struct tg3_rx_buffer_desc));
8380 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8381 i,
8382 readl(rxd + 0x0), readl(rxd + 0x4),
8383 readl(rxd + 0x8), readl(rxd + 0xc));
8384 rxd += (4 * sizeof(u32));
8385 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8386 i,
8387 readl(rxd + 0x0), readl(rxd + 0x4),
8388 readl(rxd + 0x8), readl(rxd + 0xc));
8389 }
8390
8391 for (i = 0; i < 6; i++) {
8392 unsigned long rxd;
8393
8394 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8395 + (i * sizeof(struct tg3_rx_buffer_desc));
8396 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8397 i,
8398 readl(rxd + 0x0), readl(rxd + 0x4),
8399 readl(rxd + 0x8), readl(rxd + 0xc));
8400 rxd += (4 * sizeof(u32));
8401 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8402 i,
8403 readl(rxd + 0x0), readl(rxd + 0x4),
8404 readl(rxd + 0x8), readl(rxd + 0xc));
8405 }
8406 }
8407 #endif
8408
8409 static struct net_device_stats *tg3_get_stats(struct net_device *);
8410 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8411
8412 static int tg3_close(struct net_device *dev)
8413 {
8414 struct tg3 *tp = netdev_priv(dev);
8415
8416 napi_disable(&tp->napi);
8417 cancel_work_sync(&tp->reset_task);
8418
8419 netif_stop_queue(dev);
8420
8421 del_timer_sync(&tp->timer);
8422
8423 tg3_full_lock(tp, 1);
8424 #if 0
8425 tg3_dump_state(tp);
8426 #endif
8427
8428 tg3_disable_ints(tp);
8429
8430 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8431 tg3_free_rings(tp);
8432 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8433
8434 tg3_full_unlock(tp);
8435
8436 free_irq(tp->pdev->irq, dev);
8437 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8438 pci_disable_msi(tp->pdev);
8439 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8440 }
8441
8442 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8443 sizeof(tp->net_stats_prev));
8444 memcpy(&tp->estats_prev, tg3_get_estats(tp),
8445 sizeof(tp->estats_prev));
8446
8447 tg3_free_consistent(tp);
8448
8449 tg3_set_power_state(tp, PCI_D3hot);
8450
8451 netif_carrier_off(tp->dev);
8452
8453 return 0;
8454 }
8455
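/* On 32-bit hosts an unsigned long cannot hold the full 64-bit counter, so
 * only the low word is returned; get_estat64() below always widens to u64.
 */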
8456 static inline unsigned long get_stat64(tg3_stat64_t *val)
8457 {
8458 unsigned long ret;
8459
8460 #if (BITS_PER_LONG == 32)
8461 ret = val->low;
8462 #else
8463 ret = ((u64)val->high << 32) | ((u64)val->low);
8464 #endif
8465 return ret;
8466 }
8467
8468 static inline u64 get_estat64(tg3_stat64_t *val)
8469 {
8470 return ((u64)val->high << 32) | ((u64)val->low);
8471 }
8472
8473 static unsigned long calc_crc_errors(struct tg3 *tp)
8474 {
8475 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8476
8477 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8478 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8479 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
8480 u32 val;
8481
8482 spin_lock_bh(&tp->lock);
8483 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8484 tg3_writephy(tp, MII_TG3_TEST1,
8485 val | MII_TG3_TEST1_CRC_EN);
8486 tg3_readphy(tp, 0x14, &val);
8487 } else
8488 val = 0;
8489 spin_unlock_bh(&tp->lock);
8490
8491 tp->phy_crc_errors += val;
8492
8493 return tp->phy_crc_errors;
8494 }
8495
8496 return get_stat64(&hw_stats->rx_fcs_errors);
8497 }
8498
8499 #define ESTAT_ADD(member) \
8500 estats->member = old_estats->member + \
8501 get_estat64(&hw_stats->member)
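/* tg3_close() snapshots the running totals into estats_prev; ESTAT_ADD()
 * rebases the live hardware counters on top of that snapshot so the ethtool
 * statistics keep accumulating across an ifdown/ifup cycle.
 */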
8502
8503 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8504 {
8505 struct tg3_ethtool_stats *estats = &tp->estats;
8506 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8507 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8508
8509 if (!hw_stats)
8510 return old_estats;
8511
8512 ESTAT_ADD(rx_octets);
8513 ESTAT_ADD(rx_fragments);
8514 ESTAT_ADD(rx_ucast_packets);
8515 ESTAT_ADD(rx_mcast_packets);
8516 ESTAT_ADD(rx_bcast_packets);
8517 ESTAT_ADD(rx_fcs_errors);
8518 ESTAT_ADD(rx_align_errors);
8519 ESTAT_ADD(rx_xon_pause_rcvd);
8520 ESTAT_ADD(rx_xoff_pause_rcvd);
8521 ESTAT_ADD(rx_mac_ctrl_rcvd);
8522 ESTAT_ADD(rx_xoff_entered);
8523 ESTAT_ADD(rx_frame_too_long_errors);
8524 ESTAT_ADD(rx_jabbers);
8525 ESTAT_ADD(rx_undersize_packets);
8526 ESTAT_ADD(rx_in_length_errors);
8527 ESTAT_ADD(rx_out_length_errors);
8528 ESTAT_ADD(rx_64_or_less_octet_packets);
8529 ESTAT_ADD(rx_65_to_127_octet_packets);
8530 ESTAT_ADD(rx_128_to_255_octet_packets);
8531 ESTAT_ADD(rx_256_to_511_octet_packets);
8532 ESTAT_ADD(rx_512_to_1023_octet_packets);
8533 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8534 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8535 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8536 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8537 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8538
8539 ESTAT_ADD(tx_octets);
8540 ESTAT_ADD(tx_collisions);
8541 ESTAT_ADD(tx_xon_sent);
8542 ESTAT_ADD(tx_xoff_sent);
8543 ESTAT_ADD(tx_flow_control);
8544 ESTAT_ADD(tx_mac_errors);
8545 ESTAT_ADD(tx_single_collisions);
8546 ESTAT_ADD(tx_mult_collisions);
8547 ESTAT_ADD(tx_deferred);
8548 ESTAT_ADD(tx_excessive_collisions);
8549 ESTAT_ADD(tx_late_collisions);
8550 ESTAT_ADD(tx_collide_2times);
8551 ESTAT_ADD(tx_collide_3times);
8552 ESTAT_ADD(tx_collide_4times);
8553 ESTAT_ADD(tx_collide_5times);
8554 ESTAT_ADD(tx_collide_6times);
8555 ESTAT_ADD(tx_collide_7times);
8556 ESTAT_ADD(tx_collide_8times);
8557 ESTAT_ADD(tx_collide_9times);
8558 ESTAT_ADD(tx_collide_10times);
8559 ESTAT_ADD(tx_collide_11times);
8560 ESTAT_ADD(tx_collide_12times);
8561 ESTAT_ADD(tx_collide_13times);
8562 ESTAT_ADD(tx_collide_14times);
8563 ESTAT_ADD(tx_collide_15times);
8564 ESTAT_ADD(tx_ucast_packets);
8565 ESTAT_ADD(tx_mcast_packets);
8566 ESTAT_ADD(tx_bcast_packets);
8567 ESTAT_ADD(tx_carrier_sense_errors);
8568 ESTAT_ADD(tx_discards);
8569 ESTAT_ADD(tx_errors);
8570
8571 ESTAT_ADD(dma_writeq_full);
8572 ESTAT_ADD(dma_write_prioq_full);
8573 ESTAT_ADD(rxbds_empty);
8574 ESTAT_ADD(rx_discards);
8575 ESTAT_ADD(rx_errors);
8576 ESTAT_ADD(rx_threshold_hit);
8577
8578 ESTAT_ADD(dma_readq_full);
8579 ESTAT_ADD(dma_read_prioq_full);
8580 ESTAT_ADD(tx_comp_queue_full);
8581
8582 ESTAT_ADD(ring_set_send_prod_index);
8583 ESTAT_ADD(ring_status_update);
8584 ESTAT_ADD(nic_irqs);
8585 ESTAT_ADD(nic_avoided_irqs);
8586 ESTAT_ADD(nic_tx_threshold_hit);
8587
8588 return estats;
8589 }
8590
8591 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8592 {
8593 struct tg3 *tp = netdev_priv(dev);
8594 struct net_device_stats *stats = &tp->net_stats;
8595 struct net_device_stats *old_stats = &tp->net_stats_prev;
8596 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8597
8598 if (!hw_stats)
8599 return old_stats;
8600
8601 stats->rx_packets = old_stats->rx_packets +
8602 get_stat64(&hw_stats->rx_ucast_packets) +
8603 get_stat64(&hw_stats->rx_mcast_packets) +
8604 get_stat64(&hw_stats->rx_bcast_packets);
8605
8606 stats->tx_packets = old_stats->tx_packets +
8607 get_stat64(&hw_stats->tx_ucast_packets) +
8608 get_stat64(&hw_stats->tx_mcast_packets) +
8609 get_stat64(&hw_stats->tx_bcast_packets);
8610
8611 stats->rx_bytes = old_stats->rx_bytes +
8612 get_stat64(&hw_stats->rx_octets);
8613 stats->tx_bytes = old_stats->tx_bytes +
8614 get_stat64(&hw_stats->tx_octets);
8615
8616 stats->rx_errors = old_stats->rx_errors +
8617 get_stat64(&hw_stats->rx_errors);
8618 stats->tx_errors = old_stats->tx_errors +
8619 get_stat64(&hw_stats->tx_errors) +
8620 get_stat64(&hw_stats->tx_mac_errors) +
8621 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8622 get_stat64(&hw_stats->tx_discards);
8623
8624 stats->multicast = old_stats->multicast +
8625 get_stat64(&hw_stats->rx_mcast_packets);
8626 stats->collisions = old_stats->collisions +
8627 get_stat64(&hw_stats->tx_collisions);
8628
8629 stats->rx_length_errors = old_stats->rx_length_errors +
8630 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8631 get_stat64(&hw_stats->rx_undersize_packets);
8632
8633 stats->rx_over_errors = old_stats->rx_over_errors +
8634 get_stat64(&hw_stats->rxbds_empty);
8635 stats->rx_frame_errors = old_stats->rx_frame_errors +
8636 get_stat64(&hw_stats->rx_align_errors);
8637 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8638 get_stat64(&hw_stats->tx_discards);
8639 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8640 get_stat64(&hw_stats->tx_carrier_sense_errors);
8641
8642 stats->rx_crc_errors = old_stats->rx_crc_errors +
8643 calc_crc_errors(tp);
8644
8645 stats->rx_missed_errors = old_stats->rx_missed_errors +
8646 get_stat64(&hw_stats->rx_discards);
8647
8648 return stats;
8649 }
8650
8651 static inline u32 calc_crc(unsigned char *buf, int len)
8652 {
8653 u32 reg;
8654 u32 tmp;
8655 int j, k;
8656
8657 reg = 0xffffffff;
8658
8659 for (j = 0; j < len; j++) {
8660 reg ^= buf[j];
8661
8662 for (k = 0; k < 8; k++) {
8663 tmp = reg & 0x01;
8664
8665 reg >>= 1;
8666
8667 if (tmp)
8668 reg ^= 0xedb88320;
8670 }
8671 }
8672
8673 return ~reg;
8674 }
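/* calc_crc() is the standard bit-reflected Ethernet CRC-32
 * (polynomial 0xedb88320, init 0xffffffff, final inversion). Apart
 * from the final inversion it should match ether_crc_le() from
 * <linux/crc32.h>; keeping a private copy presumably avoids a
 * dependency on the generic CRC32 code.
 */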
8675
8676 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8677 {
8678 /* accept or reject all multicast frames */
8679 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8680 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8681 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8682 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8683 }
8684
8685 static void __tg3_set_rx_mode(struct net_device *dev)
8686 {
8687 struct tg3 *tp = netdev_priv(dev);
8688 u32 rx_mode;
8689
8690 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8691 RX_MODE_KEEP_VLAN_TAG);
8692
8693 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8694 * flag clear.
8695 */
8696 #if TG3_VLAN_TAG_USED
8697 if (!tp->vlgrp &&
8698 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8699 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8700 #else
8701 /* By definition, VLAN is always disabled in this
8702 * case.
8703 */
8704 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8705 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8706 #endif
8707
8708 if (dev->flags & IFF_PROMISC) {
8709 /* Promiscuous mode. */
8710 rx_mode |= RX_MODE_PROMISC;
8711 } else if (dev->flags & IFF_ALLMULTI) {
8712 /* Accept all multicast. */
8713 tg3_set_multi(tp, 1);
8714 } else if (dev->mc_count < 1) {
8715 /* Reject all multicast. */
8716 tg3_set_multi(tp, 0);
8717 } else {
8718 /* Accept one or more multicast(s). */
8719 struct dev_mc_list *mclist;
8720 unsigned int i;
8721 u32 mc_filter[4] = { 0, };
8722 u32 regidx;
8723 u32 bit;
8724 u32 crc;
8725
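/* The four MAC_HASH_REG_* registers form a 128-bit multicast hash
 * filter: the inverted low 7 bits of the address CRC select one of
 * 128 filter bits, bits 6:5 picking the register and bits 4:0 the
 * bit within it. Worked example (value illustrative): crc =
 * 0xffffff2c gives bit = ~crc & 0x7f = 0x53, so regidx = 2 and
 * mc_filter[2] |= 1 << 0x13.
 */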
8726 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8727 i++, mclist = mclist->next) {
8728
8729 crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
8730 bit = ~crc & 0x7f;
8731 regidx = (bit & 0x60) >> 5;
8732 bit &= 0x1f;
8733 mc_filter[regidx] |= (1 << bit);
8734 }
8735
8736 tw32(MAC_HASH_REG_0, mc_filter[0]);
8737 tw32(MAC_HASH_REG_1, mc_filter[1]);
8738 tw32(MAC_HASH_REG_2, mc_filter[2]);
8739 tw32(MAC_HASH_REG_3, mc_filter[3]);
8740 }
8741
8742 if (rx_mode != tp->rx_mode) {
8743 tp->rx_mode = rx_mode;
8744 tw32_f(MAC_RX_MODE, rx_mode);
8745 udelay(10);
8746 }
8747 }
8748
8749 static void tg3_set_rx_mode(struct net_device *dev)
8750 {
8751 struct tg3 *tp = netdev_priv(dev);
8752
8753 if (!netif_running(dev))
8754 return;
8755
8756 tg3_full_lock(tp, 0);
8757 __tg3_set_rx_mode(dev);
8758 tg3_full_unlock(tp);
8759 }
8760
8761 #define TG3_REGDUMP_LEN (32 * 1024)
8762
8763 static int tg3_get_regs_len(struct net_device *dev)
8764 {
8765 return TG3_REGDUMP_LEN;
8766 }
8767
8768 static void tg3_get_regs(struct net_device *dev,
8769 struct ethtool_regs *regs, void *_p)
8770 {
8771 u32 *p = _p;
8772 struct tg3 *tp = netdev_priv(dev);
8773 u8 *orig_p = _p;
8774 int i;
8775
8776 regs->version = 0;
8777
8778 memset(p, 0, TG3_REGDUMP_LEN);
8779
8780 if (tp->link_config.phy_is_low_power)
8781 return;
8782
8783 tg3_full_lock(tp, 0);
8784
8785 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
8786 #define GET_REG32_LOOP(base,len) \
8787 do { p = (u32 *)(orig_p + (base)); \
8788 for (i = 0; i < len; i += 4) \
8789 __GET_REG32((base) + i); \
8790 } while (0)
8791 #define GET_REG32_1(reg) \
8792 do { p = (u32 *)(orig_p + (reg)); \
8793 __GET_REG32((reg)); \
8794 } while (0)
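/* Note the dump layout: p is re-aimed at orig_p + offset before each
 * read, so every register value lands at its own register offset
 * within the 32 KB buffer and unread regions stay zero from the
 * memset above, keeping the dump offset-addressable.
 */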
8795
8796 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8797 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8798 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8799 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8800 GET_REG32_1(SNDDATAC_MODE);
8801 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8802 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8803 GET_REG32_1(SNDBDC_MODE);
8804 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8805 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8806 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8807 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8808 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8809 GET_REG32_1(RCVDCC_MODE);
8810 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8811 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8812 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8813 GET_REG32_1(MBFREE_MODE);
8814 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8815 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8816 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8817 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8818 GET_REG32_LOOP(WDMAC_MODE, 0x08);
8819 GET_REG32_1(RX_CPU_MODE);
8820 GET_REG32_1(RX_CPU_STATE);
8821 GET_REG32_1(RX_CPU_PGMCTR);
8822 GET_REG32_1(RX_CPU_HWBKPT);
8823 GET_REG32_1(TX_CPU_MODE);
8824 GET_REG32_1(TX_CPU_STATE);
8825 GET_REG32_1(TX_CPU_PGMCTR);
8826 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8827 GET_REG32_LOOP(FTQ_RESET, 0x120);
8828 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8829 GET_REG32_1(DMAC_MODE);
8830 GET_REG32_LOOP(GRC_MODE, 0x4c);
8831 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8832 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8833
8834 #undef __GET_REG32
8835 #undef GET_REG32_LOOP
8836 #undef GET_REG32_1
8837
8838 tg3_full_unlock(tp);
8839 }
8840
8841 static int tg3_get_eeprom_len(struct net_device *dev)
8842 {
8843 struct tg3 *tp = netdev_priv(dev);
8844
8845 return tp->nvram_size;
8846 }
8847
8848 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8849 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8850 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8851
8852 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8853 {
8854 struct tg3 *tp = netdev_priv(dev);
8855 int ret;
8856 u8 *pd;
8857 u32 i, offset, len, b_offset, b_count;
8858 __le32 val;
8859
8860 if (tp->link_config.phy_is_low_power)
8861 return -EAGAIN;
8862
8863 offset = eeprom->offset;
8864 len = eeprom->len;
8865 eeprom->len = 0;
8866
8867 eeprom->magic = TG3_EEPROM_MAGIC;
8868
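/* NVRAM reads are 32-bit word granular, so an arbitrary byte range
 * is served in up to three phases: a partial word to reach 4 byte
 * alignment, whole aligned words, then a trailing partial word.
 * E.g. offset=1 len=9: read the word at 0 and keep bytes 1-3, read
 * the word at 4 whole, then read the word at 8 and keep bytes 8-9.
 */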
8869 if (offset & 3) {
8870 /* adjustments to start on required 4 byte boundary */
8871 b_offset = offset & 3;
8872 b_count = 4 - b_offset;
8873 if (b_count > len) {
8874 /* i.e. offset=1 len=2 */
8875 b_count = len;
8876 }
8877 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
8878 if (ret)
8879 return ret;
8880 memcpy(data, ((char*)&val) + b_offset, b_count);
8881 len -= b_count;
8882 offset += b_count;
8883 eeprom->len += b_count;
8884 }
8885
8886 /* read bytes up to the last 4 byte boundary */
8887 pd = &data[eeprom->len];
8888 for (i = 0; i < (len - (len & 3)); i += 4) {
8889 ret = tg3_nvram_read_le(tp, offset + i, &val);
8890 if (ret) {
8891 eeprom->len += i;
8892 return ret;
8893 }
8894 memcpy(pd + i, &val, 4);
8895 }
8896 eeprom->len += i;
8897
8898 if (len & 3) {
8899 /* read last bytes not ending on 4 byte boundary */
8900 pd = &data[eeprom->len];
8901 b_count = len & 3;
8902 b_offset = offset + len - b_count;
8903 ret = tg3_nvram_read_le(tp, b_offset, &val);
8904 if (ret)
8905 return ret;
8906 memcpy(pd, &val, b_count);
8907 eeprom->len += b_count;
8908 }
8909 return 0;
8910 }
8911
8912 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8913
8914 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8915 {
8916 struct tg3 *tp = netdev_priv(dev);
8917 int ret;
8918 u32 offset, len, b_offset, odd_len;
8919 u8 *buf;
8920 __le32 start, end;
8921
8922 if (tp->link_config.phy_is_low_power)
8923 return -EAGAIN;
8924
8925 if (eeprom->magic != TG3_EEPROM_MAGIC)
8926 return -EINVAL;
8927
8928 offset = eeprom->offset;
8929 len = eeprom->len;
8930
8931 if ((b_offset = (offset & 3))) {
8932 /* adjustments to start on required 4 byte boundary */
8933 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
8934 if (ret)
8935 return ret;
8936 len += b_offset;
8937 offset &= ~3;
8938 if (len < 4)
8939 len = 4;
8940 }
8941
8942 odd_len = 0;
8943 if (len & 3) {
8944 /* adjustments to end on required 4 byte boundary */
8945 odd_len = 1;
8946 len = (len + 3) & ~3;
8947 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
8948 if (ret)
8949 return ret;
8950 }
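/* NVRAM writes are likewise word granular: when the request starts
 * or ends mid-word, the boundary words ('start'/'end' read above)
 * are merged with the caller's bytes into a bounce buffer so whole
 * words can be written without clobbering neighbouring data.
 */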
8951
8952 buf = data;
8953 if (b_offset || odd_len) {
8954 buf = kmalloc(len, GFP_KERNEL);
8955 if (!buf)
8956 return -ENOMEM;
8957 if (b_offset)
8958 memcpy(buf, &start, 4);
8959 if (odd_len)
8960 memcpy(buf+len-4, &end, 4);
8961 memcpy(buf + b_offset, data, eeprom->len);
8962 }
8963
8964 ret = tg3_nvram_write_block(tp, offset, len, buf);
8965
8966 if (buf != data)
8967 kfree(buf);
8968
8969 return ret;
8970 }
8971
8972 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8973 {
8974 struct tg3 *tp = netdev_priv(dev);
8975
8976 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8977 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8978 return -EAGAIN;
8979 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
8980 }
8981
8982 cmd->supported = (SUPPORTED_Autoneg);
8983
8984 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8985 cmd->supported |= (SUPPORTED_1000baseT_Half |
8986 SUPPORTED_1000baseT_Full);
8987
8988 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8989 cmd->supported |= (SUPPORTED_100baseT_Half |
8990 SUPPORTED_100baseT_Full |
8991 SUPPORTED_10baseT_Half |
8992 SUPPORTED_10baseT_Full |
8993 SUPPORTED_TP);
8994 cmd->port = PORT_TP;
8995 } else {
8996 cmd->supported |= SUPPORTED_FIBRE;
8997 cmd->port = PORT_FIBRE;
8998 }
8999
9000 cmd->advertising = tp->link_config.advertising;
9001 if (netif_running(dev)) {
9002 cmd->speed = tp->link_config.active_speed;
9003 cmd->duplex = tp->link_config.active_duplex;
9004 }
9005 cmd->phy_address = PHY_ADDR;
9006 cmd->transceiver = 0;
9007 cmd->autoneg = tp->link_config.autoneg;
9008 cmd->maxtxpkt = 0;
9009 cmd->maxrxpkt = 0;
9010 return 0;
9011 }
9012
9013 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9014 {
9015 struct tg3 *tp = netdev_priv(dev);
9016
9017 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9018 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9019 return -EAGAIN;
9020 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
9021 }
9022
9023 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
9024 /* These are the only valid advertisement bits allowed. */
9025 if (cmd->autoneg == AUTONEG_ENABLE &&
9026 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
9027 ADVERTISED_1000baseT_Full |
9028 ADVERTISED_Autoneg |
9029 ADVERTISED_FIBRE)))
9030 return -EINVAL;
9031 /* Fiber can only do SPEED_1000. */
9032 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9033 (cmd->speed != SPEED_1000))
9034 return -EINVAL;
9035 /* Copper cannot force SPEED_1000. */
9036 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9037 (cmd->speed == SPEED_1000))
9038 return -EINVAL;
9039 else if ((cmd->speed == SPEED_1000) &&
9040 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9041 return -EINVAL;
9042
9043 tg3_full_lock(tp, 0);
9044
9045 tp->link_config.autoneg = cmd->autoneg;
9046 if (cmd->autoneg == AUTONEG_ENABLE) {
9047 tp->link_config.advertising = (cmd->advertising |
9048 ADVERTISED_Autoneg);
9049 tp->link_config.speed = SPEED_INVALID;
9050 tp->link_config.duplex = DUPLEX_INVALID;
9051 } else {
9052 tp->link_config.advertising = 0;
9053 tp->link_config.speed = cmd->speed;
9054 tp->link_config.duplex = cmd->duplex;
9055 }
9056
9057 tp->link_config.orig_speed = tp->link_config.speed;
9058 tp->link_config.orig_duplex = tp->link_config.duplex;
9059 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9060
9061 if (netif_running(dev))
9062 tg3_setup_phy(tp, 1);
9063
9064 tg3_full_unlock(tp);
9065
9066 return 0;
9067 }
9068
9069 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9070 {
9071 struct tg3 *tp = netdev_priv(dev);
9072
9073 strcpy(info->driver, DRV_MODULE_NAME);
9074 strcpy(info->version, DRV_MODULE_VERSION);
9075 strcpy(info->fw_version, tp->fw_ver);
9076 strcpy(info->bus_info, pci_name(tp->pdev));
9077 }
9078
9079 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9080 {
9081 struct tg3 *tp = netdev_priv(dev);
9082
9083 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9084 device_can_wakeup(&tp->pdev->dev))
9085 wol->supported = WAKE_MAGIC;
9086 else
9087 wol->supported = 0;
9088 wol->wolopts = 0;
9089 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
9090 wol->wolopts = WAKE_MAGIC;
9091 memset(&wol->sopass, 0, sizeof(wol->sopass));
9092 }
9093
9094 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9095 {
9096 struct tg3 *tp = netdev_priv(dev);
9097 struct device *dp = &tp->pdev->dev;
9098
9099 if (wol->wolopts & ~WAKE_MAGIC)
9100 return -EINVAL;
9101 if ((wol->wolopts & WAKE_MAGIC) &&
9102 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9103 return -EINVAL;
9104
9105 spin_lock_bh(&tp->lock);
9106 if (wol->wolopts & WAKE_MAGIC) {
9107 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9108 device_set_wakeup_enable(dp, true);
9109 } else {
9110 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9111 device_set_wakeup_enable(dp, false);
9112 }
9113 spin_unlock_bh(&tp->lock);
9114
9115 return 0;
9116 }
9117
9118 static u32 tg3_get_msglevel(struct net_device *dev)
9119 {
9120 struct tg3 *tp = netdev_priv(dev);
9121 return tp->msg_enable;
9122 }
9123
9124 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9125 {
9126 struct tg3 *tp = netdev_priv(dev);
9127 tp->msg_enable = value;
9128 }
9129
9130 static int tg3_set_tso(struct net_device *dev, u32 value)
9131 {
9132 struct tg3 *tp = netdev_priv(dev);
9133
9134 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9135 if (value)
9136 return -EINVAL;
9137 return 0;
9138 }
9139 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
9140 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
9141 if (value) {
9142 dev->features |= NETIF_F_TSO6;
9143 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9144 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9145 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9146 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9147 dev->features |= NETIF_F_TSO_ECN;
9148 } else
9149 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9150 }
9151 return ethtool_op_set_tso(dev, value);
9152 }
9153
9154 static int tg3_nway_reset(struct net_device *dev)
9155 {
9156 struct tg3 *tp = netdev_priv(dev);
9157 int r;
9158
9159 if (!netif_running(dev))
9160 return -EAGAIN;
9161
9162 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9163 return -EINVAL;
9164
9165 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9166 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9167 return -EAGAIN;
9168 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
9169 } else {
9170 u32 bmcr;
9171
9172 spin_lock_bh(&tp->lock);
9173 r = -EINVAL;
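/* The first BMCR read is deliberately discarded; back-to-back reads
 * are assumed to be needed because the first access can return a
 * stale value on some tg3 PHYs.
 */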
9174 tg3_readphy(tp, MII_BMCR, &bmcr);
9175 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9176 ((bmcr & BMCR_ANENABLE) ||
9177 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9178 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9179 BMCR_ANENABLE);
9180 r = 0;
9181 }
9182 spin_unlock_bh(&tp->lock);
9183 }
9184
9185 return r;
9186 }
9187
9188 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9189 {
9190 struct tg3 *tp = netdev_priv(dev);
9191
9192 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9193 ering->rx_mini_max_pending = 0;
9194 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9195 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9196 else
9197 ering->rx_jumbo_max_pending = 0;
9198
9199 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9200
9201 ering->rx_pending = tp->rx_pending;
9202 ering->rx_mini_pending = 0;
9203 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9204 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9205 else
9206 ering->rx_jumbo_pending = 0;
9207
9208 ering->tx_pending = tp->tx_pending;
9209 }
9210
9211 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9212 {
9213 struct tg3 *tp = netdev_priv(dev);
9214 int irq_sync = 0, err = 0;
9215
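/* Sanity limits: each ring must fit the fixed hardware ring sizes,
 * and the TX ring must at least hold one maximally fragmented skb
 * (more than MAX_SKB_FRAGS descriptors, tripled on TSO_BUG chips,
 * presumably as headroom for the driver's TSO workaround).
 */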
9216 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9217 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9218 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9219 (ering->tx_pending <= MAX_SKB_FRAGS) ||
9220 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9221 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
9222 return -EINVAL;
9223
9224 if (netif_running(dev)) {
9225 tg3_phy_stop(tp);
9226 tg3_netif_stop(tp);
9227 irq_sync = 1;
9228 }
9229
9230 tg3_full_lock(tp, irq_sync);
9231
9232 tp->rx_pending = ering->rx_pending;
9233
9234 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9235 tp->rx_pending > 63)
9236 tp->rx_pending = 63;
9237 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9238 tp->tx_pending = ering->tx_pending;
9239
9240 if (netif_running(dev)) {
9241 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9242 err = tg3_restart_hw(tp, 1);
9243 if (!err)
9244 tg3_netif_start(tp);
9245 }
9246
9247 tg3_full_unlock(tp);
9248
9249 if (irq_sync && !err)
9250 tg3_phy_start(tp);
9251
9252 return err;
9253 }
9254
9255 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9256 {
9257 struct tg3 *tp = netdev_priv(dev);
9258
9259 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9260
9261 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9262 epause->rx_pause = 1;
9263 else
9264 epause->rx_pause = 0;
9265
9266 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9267 epause->tx_pause = 1;
9268 else
9269 epause->tx_pause = 0;
9270 }
9271
9272 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9273 {
9274 struct tg3 *tp = netdev_priv(dev);
9275 int err = 0;
9276
9277 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9278 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9279 return -EAGAIN;
9280
9281 if (epause->autoneg) {
9282 u32 newadv;
9283 struct phy_device *phydev;
9284
9285 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
9286
9287 if (epause->rx_pause) {
9288 if (epause->tx_pause)
9289 newadv = ADVERTISED_Pause;
9290 else
9291 newadv = ADVERTISED_Pause |
9292 ADVERTISED_Asym_Pause;
9293 } else if (epause->tx_pause) {
9294 newadv = ADVERTISED_Asym_Pause;
9295 } else
9296 newadv = 0;
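/* Map the requested rx/tx pause pair onto 802.3 Annex 28B
 * advertisement bits:
 *   rx && tx -> Pause (symmetric)
 *   rx only  -> Pause | Asym_Pause
 *   tx only  -> Asym_Pause
 *   neither  -> nothing advertised
 */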
9297
9298 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9299 u32 oldadv = phydev->advertising &
9300 (ADVERTISED_Pause |
9301 ADVERTISED_Asym_Pause);
9302 if (oldadv != newadv) {
9303 phydev->advertising &=
9304 ~(ADVERTISED_Pause |
9305 ADVERTISED_Asym_Pause);
9306 phydev->advertising |= newadv;
9307 err = phy_start_aneg(phydev);
9308 }
9309 } else {
9310 tp->link_config.advertising &=
9311 ~(ADVERTISED_Pause |
9312 ADVERTISED_Asym_Pause);
9313 tp->link_config.advertising |= newadv;
9314 }
9315 } else {
9316 if (epause->rx_pause)
9317 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9318 else
9319 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9320
9321 if (epause->tx_pause)
9322 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9323 else
9324 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9325
9326 if (netif_running(dev))
9327 tg3_setup_flow_control(tp, 0, 0);
9328 }
9329 } else {
9330 int irq_sync = 0;
9331
9332 if (netif_running(dev)) {
9333 tg3_netif_stop(tp);
9334 irq_sync = 1;
9335 }
9336
9337 tg3_full_lock(tp, irq_sync);
9338
9339 if (epause->autoneg)
9340 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9341 else
9342 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9343 if (epause->rx_pause)
9344 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9345 else
9346 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9347 if (epause->tx_pause)
9348 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9349 else
9350 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9351
9352 if (netif_running(dev)) {
9353 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9354 err = tg3_restart_hw(tp, 1);
9355 if (!err)
9356 tg3_netif_start(tp);
9357 }
9358
9359 tg3_full_unlock(tp);
9360 }
9361
9362 return err;
9363 }
9364
9365 static u32 tg3_get_rx_csum(struct net_device *dev)
9366 {
9367 struct tg3 *tp = netdev_priv(dev);
9368 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9369 }
9370
9371 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9372 {
9373 struct tg3 *tp = netdev_priv(dev);
9374
9375 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9376 if (data != 0)
9377 return -EINVAL;
9378 return 0;
9379 }
9380
9381 spin_lock_bh(&tp->lock);
9382 if (data)
9383 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9384 else
9385 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9386 spin_unlock_bh(&tp->lock);
9387
9388 return 0;
9389 }
9390
9391 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9392 {
9393 struct tg3 *tp = netdev_priv(dev);
9394
9395 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9396 if (data != 0)
9397 return -EINVAL;
9398 return 0;
9399 }
9400
9401 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9402 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9403 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9404 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9405 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9406 ethtool_op_set_tx_ipv6_csum(dev, data);
9407 else
9408 ethtool_op_set_tx_csum(dev, data);
9409
9410 return 0;
9411 }
9412
9413 static int tg3_get_sset_count(struct net_device *dev, int sset)
9414 {
9415 switch (sset) {
9416 case ETH_SS_TEST:
9417 return TG3_NUM_TEST;
9418 case ETH_SS_STATS:
9419 return TG3_NUM_STATS;
9420 default:
9421 return -EOPNOTSUPP;
9422 }
9423 }
9424
9425 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9426 {
9427 switch (stringset) {
9428 case ETH_SS_STATS:
9429 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9430 break;
9431 case ETH_SS_TEST:
9432 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9433 break;
9434 default:
9435 WARN_ON(1); /* we need a WARN() */
9436 break;
9437 }
9438 }
9439
9440 static int tg3_phys_id(struct net_device *dev, u32 data)
9441 {
9442 struct tg3 *tp = netdev_priv(dev);
9443 int i;
9444
9445 if (!netif_running(tp->dev))
9446 return -EAGAIN;
9447
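/* ethtool passes data == 0 to mean "blink until interrupted", which
 * is approximated with an effectively unbounded count. Each pass of
 * the loop below toggles the LEDs every 500 ms, i.e. one full blink
 * per second for 'data' seconds.
 */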
9448 if (data == 0)
9449 data = UINT_MAX / 2;
9450
9451 for (i = 0; i < (data * 2); i++) {
9452 if ((i % 2) == 0)
9453 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9454 LED_CTRL_1000MBPS_ON |
9455 LED_CTRL_100MBPS_ON |
9456 LED_CTRL_10MBPS_ON |
9457 LED_CTRL_TRAFFIC_OVERRIDE |
9458 LED_CTRL_TRAFFIC_BLINK |
9459 LED_CTRL_TRAFFIC_LED);
9460
9461 else
9462 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9463 LED_CTRL_TRAFFIC_OVERRIDE);
9464
9465 if (msleep_interruptible(500))
9466 break;
9467 }
9468 tw32(MAC_LED_CTRL, tp->led_ctrl);
9469 return 0;
9470 }
9471
9472 static void tg3_get_ethtool_stats(struct net_device *dev,
9473 struct ethtool_stats *estats, u64 *tmp_stats)
9474 {
9475 struct tg3 *tp = netdev_priv(dev);
9476 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9477 }
9478
9479 #define NVRAM_TEST_SIZE 0x100
9480 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
9481 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
9482 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
9483 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9484 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9485
9486 static int tg3_test_nvram(struct tg3 *tp)
9487 {
9488 u32 csum, magic;
9489 __le32 *buf;
9490 int i, j, k, err = 0, size;
9491
9492 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9493 return -EIO;
9494
9495 if (magic == TG3_EEPROM_MAGIC)
9496 size = NVRAM_TEST_SIZE;
9497 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
9498 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9499 TG3_EEPROM_SB_FORMAT_1) {
9500 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9501 case TG3_EEPROM_SB_REVISION_0:
9502 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9503 break;
9504 case TG3_EEPROM_SB_REVISION_2:
9505 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9506 break;
9507 case TG3_EEPROM_SB_REVISION_3:
9508 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9509 break;
9510 default:
9511 return 0;
9512 }
9513 } else
9514 return 0;
9515 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9516 size = NVRAM_SELFBOOT_HW_SIZE;
9517 else
9518 return -EIO;
9519
9520 buf = kmalloc(size, GFP_KERNEL);
9521 if (buf == NULL)
9522 return -ENOMEM;
9523
9524 err = -EIO;
9525 for (i = 0, j = 0; i < size; i += 4, j++) {
9526 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
9527 break;
9528 }
9529 if (i < size)
9530 goto out;
9531
9532 /* Selfboot format */
9533 magic = swab32(le32_to_cpu(buf[0]));
9534 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
9535 TG3_EEPROM_MAGIC_FW) {
9536 u8 *buf8 = (u8 *) buf, csum8 = 0;
9537
9538 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
9539 TG3_EEPROM_SB_REVISION_2) {
9540 /* For rev 2, the csum doesn't include the MBA. */
9541 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9542 csum8 += buf8[i];
9543 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9544 csum8 += buf8[i];
9545 } else {
9546 for (i = 0; i < size; i++)
9547 csum8 += buf8[i];
9548 }
9549
9550 if (csum8 == 0) {
9551 err = 0;
9552 goto out;
9553 }
9554
9555 err = -EIO;
9556 goto out;
9557 }
9558
9559 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
9560 TG3_EEPROM_MAGIC_HW) {
9561 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9562 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9563 u8 *buf8 = (u8 *) buf;
9564
9565 /* Separate the parity bits and the data bytes. */
9566 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9567 if ((i == 0) || (i == 8)) {
9568 int l;
9569 u8 msk;
9570
9571 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9572 parity[k++] = buf8[i] & msk;
9573 i++;
9574 } else if (i == 16) {
9576 int l;
9577 u8 msk;
9578
9579 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9580 parity[k++] = buf8[i] & msk;
9581 i++;
9582
9583 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9584 parity[k++] = buf8[i] & msk;
9585 i++;
9586 }
9587 data[j++] = buf8[i];
9588 }
9589
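/* The HW selfboot image stores 28 data bytes protected by 28 parity
 * bits packed into bytes 0, 8, 16 and 17. Odd parity over each data
 * byte plus its parity bit is required: a data byte with an odd
 * number of set bits must have its parity bit clear, an even one
 * must have it set, which is what the hweight8() checks below
 * enforce.
 */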
9590 err = -EIO;
9591 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9592 u8 hw8 = hweight8(data[i]);
9593
9594 if ((hw8 & 0x1) && parity[i])
9595 goto out;
9596 else if (!(hw8 & 0x1) && !parity[i])
9597 goto out;
9598 }
9599 err = 0;
9600 goto out;
9601 }
9602
9603 /* Bootstrap checksum at offset 0x10 */
9604 csum = calc_crc((unsigned char *) buf, 0x10);
9605 if (csum != le32_to_cpu(buf[0x10/4]))
9606 goto out;
9607
9608 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9609 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
9610 if (csum != le32_to_cpu(buf[0xfc/4]))
9611 goto out;
9612
9613 err = 0;
9614
9615 out:
9616 kfree(buf);
9617 return err;
9618 }
9619
9620 #define TG3_SERDES_TIMEOUT_SEC 2
9621 #define TG3_COPPER_TIMEOUT_SEC 6
9622
9623 static int tg3_test_link(struct tg3 *tp)
9624 {
9625 int i, max;
9626
9627 if (!netif_running(tp->dev))
9628 return -ENODEV;
9629
9630 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9631 max = TG3_SERDES_TIMEOUT_SEC;
9632 else
9633 max = TG3_COPPER_TIMEOUT_SEC;
9634
9635 for (i = 0; i < max; i++) {
9636 if (netif_carrier_ok(tp->dev))
9637 return 0;
9638
9639 if (msleep_interruptible(1000))
9640 break;
9641 }
9642
9643 return -EIO;
9644 }
9645
9646 /* Only test the commonly used registers */
9647 static int tg3_test_registers(struct tg3 *tp)
9648 {
9649 int i, is_5705, is_5750;
9650 u32 offset, read_mask, write_mask, val, save_val, read_val;
9651 static struct {
9652 u16 offset;
9653 u16 flags;
9654 #define TG3_FL_5705 0x1
9655 #define TG3_FL_NOT_5705 0x2
9656 #define TG3_FL_NOT_5788 0x4
9657 #define TG3_FL_NOT_5750 0x8
9658 u32 read_mask;
9659 u32 write_mask;
9660 } reg_tbl[] = {
9661 /* MAC Control Registers */
9662 { MAC_MODE, TG3_FL_NOT_5705,
9663 0x00000000, 0x00ef6f8c },
9664 { MAC_MODE, TG3_FL_5705,
9665 0x00000000, 0x01ef6b8c },
9666 { MAC_STATUS, TG3_FL_NOT_5705,
9667 0x03800107, 0x00000000 },
9668 { MAC_STATUS, TG3_FL_5705,
9669 0x03800100, 0x00000000 },
9670 { MAC_ADDR_0_HIGH, 0x0000,
9671 0x00000000, 0x0000ffff },
9672 { MAC_ADDR_0_LOW, 0x0000,
9673 0x00000000, 0xffffffff },
9674 { MAC_RX_MTU_SIZE, 0x0000,
9675 0x00000000, 0x0000ffff },
9676 { MAC_TX_MODE, 0x0000,
9677 0x00000000, 0x00000070 },
9678 { MAC_TX_LENGTHS, 0x0000,
9679 0x00000000, 0x00003fff },
9680 { MAC_RX_MODE, TG3_FL_NOT_5705,
9681 0x00000000, 0x000007fc },
9682 { MAC_RX_MODE, TG3_FL_5705,
9683 0x00000000, 0x000007dc },
9684 { MAC_HASH_REG_0, 0x0000,
9685 0x00000000, 0xffffffff },
9686 { MAC_HASH_REG_1, 0x0000,
9687 0x00000000, 0xffffffff },
9688 { MAC_HASH_REG_2, 0x0000,
9689 0x00000000, 0xffffffff },
9690 { MAC_HASH_REG_3, 0x0000,
9691 0x00000000, 0xffffffff },
9692
9693 /* Receive Data and Receive BD Initiator Control Registers. */
9694 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9695 0x00000000, 0xffffffff },
9696 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9697 0x00000000, 0xffffffff },
9698 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9699 0x00000000, 0x00000003 },
9700 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9701 0x00000000, 0xffffffff },
9702 { RCVDBDI_STD_BD+0, 0x0000,
9703 0x00000000, 0xffffffff },
9704 { RCVDBDI_STD_BD+4, 0x0000,
9705 0x00000000, 0xffffffff },
9706 { RCVDBDI_STD_BD+8, 0x0000,
9707 0x00000000, 0xffff0002 },
9708 { RCVDBDI_STD_BD+0xc, 0x0000,
9709 0x00000000, 0xffffffff },
9710
9711 /* Receive BD Initiator Control Registers. */
9712 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9713 0x00000000, 0xffffffff },
9714 { RCVBDI_STD_THRESH, TG3_FL_5705,
9715 0x00000000, 0x000003ff },
9716 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9717 0x00000000, 0xffffffff },
9718
9719 /* Host Coalescing Control Registers. */
9720 { HOSTCC_MODE, TG3_FL_NOT_5705,
9721 0x00000000, 0x00000004 },
9722 { HOSTCC_MODE, TG3_FL_5705,
9723 0x00000000, 0x000000f6 },
9724 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9725 0x00000000, 0xffffffff },
9726 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9727 0x00000000, 0x000003ff },
9728 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9729 0x00000000, 0xffffffff },
9730 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9731 0x00000000, 0x000003ff },
9732 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9733 0x00000000, 0xffffffff },
9734 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9735 0x00000000, 0x000000ff },
9736 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9737 0x00000000, 0xffffffff },
9738 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9739 0x00000000, 0x000000ff },
9740 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9741 0x00000000, 0xffffffff },
9742 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9743 0x00000000, 0xffffffff },
9744 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9745 0x00000000, 0xffffffff },
9746 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9747 0x00000000, 0x000000ff },
9748 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9749 0x00000000, 0xffffffff },
9750 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9751 0x00000000, 0x000000ff },
9752 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9753 0x00000000, 0xffffffff },
9754 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9755 0x00000000, 0xffffffff },
9756 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9757 0x00000000, 0xffffffff },
9758 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9759 0x00000000, 0xffffffff },
9760 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9761 0x00000000, 0xffffffff },
9762 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9763 0xffffffff, 0x00000000 },
9764 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9765 0xffffffff, 0x00000000 },
9766
9767 /* Buffer Manager Control Registers. */
9768 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9769 0x00000000, 0x007fff80 },
9770 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9771 0x00000000, 0x007fffff },
9772 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9773 0x00000000, 0x0000003f },
9774 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9775 0x00000000, 0x000001ff },
9776 { BUFMGR_MB_HIGH_WATER, 0x0000,
9777 0x00000000, 0x000001ff },
9778 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9779 0xffffffff, 0x00000000 },
9780 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9781 0xffffffff, 0x00000000 },
9782
9783 /* Mailbox Registers */
9784 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9785 0x00000000, 0x000001ff },
9786 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9787 0x00000000, 0x000001ff },
9788 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9789 0x00000000, 0x000007ff },
9790 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9791 0x00000000, 0x000001ff },
9792
9793 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9794 };
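/* For each entry, read_mask covers the read-only bits (whose value
 * must survive any write) and write_mask the read/write bits. The
 * loop below writes all-zeros and then all-ones through the masks,
 * verifies both classes of bits behave, and restores the original
 * register value afterwards.
 */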
9795
9796 is_5705 = is_5750 = 0;
9797 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9798 is_5705 = 1;
9799 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9800 is_5750 = 1;
9801 }
9802
9803 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9804 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9805 continue;
9806
9807 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9808 continue;
9809
9810 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9811 (reg_tbl[i].flags & TG3_FL_NOT_5788))
9812 continue;
9813
9814 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9815 continue;
9816
9817 offset = (u32) reg_tbl[i].offset;
9818 read_mask = reg_tbl[i].read_mask;
9819 write_mask = reg_tbl[i].write_mask;
9820
9821 /* Save the original register content */
9822 save_val = tr32(offset);
9823
9824 /* Determine the read-only value. */
9825 read_val = save_val & read_mask;
9826
9827 /* Write zero to the register, then make sure the read-only bits
9828 * are not changed and the read/write bits are all zeros.
9829 */
9830 tw32(offset, 0);
9831
9832 val = tr32(offset);
9833
9834 /* Test the read-only and read/write bits. */
9835 if (((val & read_mask) != read_val) || (val & write_mask))
9836 goto out;
9837
9838 /* Write ones to all the bits defined by RdMask and WrMask, then
9839 * make sure the read-only bits are not changed and the
9840 * read/write bits are all ones.
9841 */
9842 tw32(offset, read_mask | write_mask);
9843
9844 val = tr32(offset);
9845
9846 /* Test the read-only bits. */
9847 if ((val & read_mask) != read_val)
9848 goto out;
9849
9850 /* Test the read/write bits. */
9851 if ((val & write_mask) != write_mask)
9852 goto out;
9853
9854 tw32(offset, save_val);
9855 }
9856
9857 return 0;
9858
9859 out:
9860 if (netif_msg_hw(tp))
9861 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9862 offset);
9863 tw32(offset, save_val);
9864 return -EIO;
9865 }
9866
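/* Walk the test patterns (all-zeros, all-ones, 0xaa55a55a) through
 * every 32-bit word of the given on-chip memory window, reading each
 * word back immediately; any mismatch fails the memory test.
 */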
9867 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9868 {
9869 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9870 int i;
9871 u32 j;
9872
9873 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9874 for (j = 0; j < len; j += 4) {
9875 u32 val;
9876
9877 tg3_write_mem(tp, offset + j, test_pattern[i]);
9878 tg3_read_mem(tp, offset + j, &val);
9879 if (val != test_pattern[i])
9880 return -EIO;
9881 }
9882 }
9883 return 0;
9884 }
9885
9886 static int tg3_test_memory(struct tg3 *tp)
9887 {
9888 static struct mem_entry {
9889 u32 offset;
9890 u32 len;
9891 } mem_tbl_570x[] = {
9892 { 0x00000000, 0x00b50},
9893 { 0x00002000, 0x1c000},
9894 { 0xffffffff, 0x00000}
9895 }, mem_tbl_5705[] = {
9896 { 0x00000100, 0x0000c},
9897 { 0x00000200, 0x00008},
9898 { 0x00004000, 0x00800},
9899 { 0x00006000, 0x01000},
9900 { 0x00008000, 0x02000},
9901 { 0x00010000, 0x0e000},
9902 { 0xffffffff, 0x00000}
9903 }, mem_tbl_5755[] = {
9904 { 0x00000200, 0x00008},
9905 { 0x00004000, 0x00800},
9906 { 0x00006000, 0x00800},
9907 { 0x00008000, 0x02000},
9908 { 0x00010000, 0x0c000},
9909 { 0xffffffff, 0x00000}
9910 }, mem_tbl_5906[] = {
9911 { 0x00000200, 0x00008},
9912 { 0x00004000, 0x00400},
9913 { 0x00006000, 0x00400},
9914 { 0x00008000, 0x01000},
9915 { 0x00010000, 0x01000},
9916 { 0xffffffff, 0x00000}
9917 };
9918 struct mem_entry *mem_tbl;
9919 int err = 0;
9920 int i;
9921
9922 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9923 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9924 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9925 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9926 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9927 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9928 mem_tbl = mem_tbl_5755;
9929 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9930 mem_tbl = mem_tbl_5906;
9931 else
9932 mem_tbl = mem_tbl_5705;
9933 } else
9934 mem_tbl = mem_tbl_570x;
9935
9936 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9937 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9938 mem_tbl[i].len)) != 0)
9939 break;
9940 }
9941
9942 return err;
9943 }
9944
9945 #define TG3_MAC_LOOPBACK 0
9946 #define TG3_PHY_LOOPBACK 1
9947
9948 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9949 {
9950 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
9951 u32 desc_idx;
9952 struct sk_buff *skb, *rx_skb;
9953 u8 *tx_data;
9954 dma_addr_t map;
9955 int num_pkts, tx_len, rx_len, i, err;
9956 struct tg3_rx_buffer_desc *desc;
9957
9958 if (loopback_mode == TG3_MAC_LOOPBACK) {
9959 /* HW erratum - MAC loopback fails in some cases on 5780.
9960 * Normal traffic and PHY loopback are not affected by
9961 * this erratum.
9962 */
9963 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9964 return 0;
9965
9966 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
9967 MAC_MODE_PORT_INT_LPBACK;
9968 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9969 mac_mode |= MAC_MODE_LINK_POLARITY;
9970 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9971 mac_mode |= MAC_MODE_PORT_MODE_MII;
9972 else
9973 mac_mode |= MAC_MODE_PORT_MODE_GMII;
9974 tw32(MAC_MODE, mac_mode);
9975 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
9976 u32 val;
9977
9978 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9979 u32 phytest;
9980
9981 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
9982 u32 phy;
9983
9984 tg3_writephy(tp, MII_TG3_EPHY_TEST,
9985 phytest | MII_TG3_EPHY_SHADOW_EN);
9986 if (!tg3_readphy(tp, 0x1b, &phy))
9987 tg3_writephy(tp, 0x1b, phy & ~0x20);
9988 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
9989 }
9990 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
9991 } else
9992 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
9993
9994 tg3_phy_toggle_automdix(tp, 0);
9995
9996 tg3_writephy(tp, MII_BMCR, val);
9997 udelay(40);
9998
9999 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10000 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10001 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
10002 mac_mode |= MAC_MODE_PORT_MODE_MII;
10003 } else
10004 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10005
10006 /* Reset the RX MAC to avoid intermittently losing the first received packet. */
10007 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10008 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10009 udelay(10);
10010 tw32_f(MAC_RX_MODE, tp->rx_mode);
10011 }
10012 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10013 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
10014 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10015 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
10016 mac_mode |= MAC_MODE_LINK_POLARITY;
10017 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10018 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10019 }
10020 tw32(MAC_MODE, mac_mode);
10021 } else
10023 return -EINVAL;
10024
10025 err = -EIO;
10026
10027 tx_len = 1514;
10028 skb = netdev_alloc_skb(tp->dev, tx_len);
10029 if (!skb)
10030 return -ENOMEM;
10031
10032 tx_data = skb_put(skb, tx_len);
10033 memcpy(tx_data, tp->dev->dev_addr, 6);
10034 memset(tx_data + 6, 0x0, 8);
10035
10036 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10037
10038 for (i = 14; i < tx_len; i++)
10039 tx_data[i] = (u8) (i & 0xff);
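/* Loopback frame layout: bytes 0-5 carry the device's own MAC
 * address as destination, bytes 6-13 (source address and EtherType)
 * are zero, and bytes 14..tx_len-1 hold the incrementing pattern
 * verified byte-for-byte on receive. The RX length check later
 * subtracts 4 to strip the FCS the MAC appends.
 */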
10040
10041 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10042
10043 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10044 HOSTCC_MODE_NOW);
10045
10046 udelay(10);
10047
10048 rx_start_idx = tp->hw_status->idx[0].rx_producer;
10049
10050 num_pkts = 0;
10051
10052 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
10053
10054 tp->tx_prod++;
10055 num_pkts++;
10056
10057 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
10058 tp->tx_prod);
10059 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
10060
10061 udelay(10);
10062
10063 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
10064 for (i = 0; i < 25; i++) {
10065 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10066 HOSTCC_MODE_NOW);
10067
10068 udelay(10);
10069
10070 tx_idx = tp->hw_status->idx[0].tx_consumer;
10071 rx_idx = tp->hw_status->idx[0].rx_producer;
10072 if ((tx_idx == tp->tx_prod) &&
10073 (rx_idx == (rx_start_idx + num_pkts)))
10074 break;
10075 }
10076
10077 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10078 dev_kfree_skb(skb);
10079
10080 if (tx_idx != tp->tx_prod)
10081 goto out;
10082
10083 if (rx_idx != rx_start_idx + num_pkts)
10084 goto out;
10085
10086 desc = &tp->rx_rcb[rx_start_idx];
10087 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10088 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10089 if (opaque_key != RXD_OPAQUE_RING_STD)
10090 goto out;
10091
10092 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10093 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10094 goto out;
10095
10096 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10097 if (rx_len != tx_len)
10098 goto out;
10099
10100 rx_skb = tp->rx_std_buffers[desc_idx].skb;
10101
10102 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
10103 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10104
10105 for (i = 14; i < tx_len; i++) {
10106 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10107 goto out;
10108 }
10109 err = 0;
10110
10111 /* tg3_free_rings will unmap and free the rx_skb */
10112 out:
10113 return err;
10114 }
10115
10116 #define TG3_MAC_LOOPBACK_FAILED 1
10117 #define TG3_PHY_LOOPBACK_FAILED 2
10118 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10119 TG3_PHY_LOOPBACK_FAILED)
10120
10121 static int tg3_test_loopback(struct tg3 *tp)
10122 {
10123 int err = 0;
10124 u32 cpmuctrl = 0;
10125
10126 if (!netif_running(tp->dev))
10127 return TG3_LOOPBACK_FAILED;
10128
10129 err = tg3_reset_hw(tp, 1);
10130 if (err)
10131 return TG3_LOOPBACK_FAILED;
10132
10133 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10134 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10135 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
10136 int i;
10137 u32 status;
10138
10139 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10140
10141 /* Wait for up to 40 microseconds to acquire lock. */
10142 for (i = 0; i < 4; i++) {
10143 status = tr32(TG3_CPMU_MUTEX_GNT);
10144 if (status == CPMU_MUTEX_GNT_DRIVER)
10145 break;
10146 udelay(10);
10147 }
10148
10149 if (status != CPMU_MUTEX_GNT_DRIVER)
10150 return TG3_LOOPBACK_FAILED;
10151
10152 /* Turn off link-based power management. */
10153 cpmuctrl = tr32(TG3_CPMU_CTRL);
10154 tw32(TG3_CPMU_CTRL,
10155 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10156 CPMU_CTRL_LINK_AWARE_MODE));
10157 }
10158
10159 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10160 err |= TG3_MAC_LOOPBACK_FAILED;
10161
10162 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10163 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10164 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
10165 tw32(TG3_CPMU_CTRL, cpmuctrl);
10166
10167 /* Release the mutex */
10168 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10169 }
10170
10171 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10172 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10173 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10174 err |= TG3_PHY_LOOPBACK_FAILED;
10175 }
10176
10177 return err;
10178 }
10179
10180 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10181 u64 *data)
10182 {
10183 struct tg3 *tp = netdev_priv(dev);
10184
10185 if (tp->link_config.phy_is_low_power)
10186 tg3_set_power_state(tp, PCI_D0);
10187
10188 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10189
10190 if (tg3_test_nvram(tp) != 0) {
10191 etest->flags |= ETH_TEST_FL_FAILED;
10192 data[0] = 1;
10193 }
10194 if (tg3_test_link(tp) != 0) {
10195 etest->flags |= ETH_TEST_FL_FAILED;
10196 data[1] = 1;
10197 }
10198 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10199 int err, err2 = 0, irq_sync = 0;
10200
10201 if (netif_running(dev)) {
10202 tg3_phy_stop(tp);
10203 tg3_netif_stop(tp);
10204 irq_sync = 1;
10205 }
10206
10207 tg3_full_lock(tp, irq_sync);
10208
10209 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10210 err = tg3_nvram_lock(tp);
10211 tg3_halt_cpu(tp, RX_CPU_BASE);
10212 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10213 tg3_halt_cpu(tp, TX_CPU_BASE);
10214 if (!err)
10215 tg3_nvram_unlock(tp);
10216
10217 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10218 tg3_phy_reset(tp);
10219
10220 if (tg3_test_registers(tp) != 0) {
10221 etest->flags |= ETH_TEST_FL_FAILED;
10222 data[2] = 1;
10223 }
10224 if (tg3_test_memory(tp) != 0) {
10225 etest->flags |= ETH_TEST_FL_FAILED;
10226 data[3] = 1;
10227 }
10228 if ((data[4] = tg3_test_loopback(tp)) != 0)
10229 etest->flags |= ETH_TEST_FL_FAILED;
10230
10231 tg3_full_unlock(tp);
10232
10233 if (tg3_test_interrupt(tp) != 0) {
10234 etest->flags |= ETH_TEST_FL_FAILED;
10235 data[5] = 1;
10236 }
10237
10238 tg3_full_lock(tp, 0);
10239
10240 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10241 if (netif_running(dev)) {
10242 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10243 err2 = tg3_restart_hw(tp, 1);
10244 if (!err2)
10245 tg3_netif_start(tp);
10246 }
10247
10248 tg3_full_unlock(tp);
10249
10250 if (irq_sync && !err2)
10251 tg3_phy_start(tp);
10252 }
10253 if (tp->link_config.phy_is_low_power)
10254 tg3_set_power_state(tp, PCI_D3hot);
10256 }
10257
10258 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10259 {
10260 struct mii_ioctl_data *data = if_mii(ifr);
10261 struct tg3 *tp = netdev_priv(dev);
10262 int err;
10263
10264 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10265 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10266 return -EAGAIN;
10267 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
10268 }
10269
10270 switch (cmd) {
10271 case SIOCGMIIPHY:
10272 data->phy_id = PHY_ADDR;
10273
10274 /* fallthru */
10275 case SIOCGMIIREG: {
10276 u32 mii_regval;
10277
10278 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10279 break; /* We have no PHY */
10280
10281 if (tp->link_config.phy_is_low_power)
10282 return -EAGAIN;
10283
10284 spin_lock_bh(&tp->lock);
10285 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10286 spin_unlock_bh(&tp->lock);
10287
10288 data->val_out = mii_regval;
10289
10290 return err;
10291 }
10292
10293 case SIOCSMIIREG:
10294 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10295 break; /* We have no PHY */
10296
10297 if (!capable(CAP_NET_ADMIN))
10298 return -EPERM;
10299
10300 if (tp->link_config.phy_is_low_power)
10301 return -EAGAIN;
10302
10303 spin_lock_bh(&tp->lock);
10304 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10305 spin_unlock_bh(&tp->lock);
10306
10307 return err;
10308
10309 default:
10310 /* do nothing */
10311 break;
10312 }
10313 return -EOPNOTSUPP;
10314 }
10315
10316 #if TG3_VLAN_TAG_USED
10317 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10318 {
10319 struct tg3 *tp = netdev_priv(dev);
10320
10321 if (netif_running(dev))
10322 tg3_netif_stop(tp);
10323
10324 tg3_full_lock(tp, 0);
10325
10326 tp->vlgrp = grp;
10327
10328 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10329 __tg3_set_rx_mode(dev);
10330
10331 if (netif_running(dev))
10332 tg3_netif_start(tp);
10333
10334 tg3_full_unlock(tp);
10335 }
10336 #endif
10337
10338 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10339 {
10340 struct tg3 *tp = netdev_priv(dev);
10341
10342 memcpy(ec, &tp->coal, sizeof(*ec));
10343 return 0;
10344 }
10345
10346 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10347 {
10348 struct tg3 *tp = netdev_priv(dev);
10349 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10350 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10351
10352 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10353 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10354 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10355 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10356 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10357 }
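/* On 5705+ chips the *_irq and stats-block limits above stay zero,
 * which forces the corresponding ethtool fields to zero in the range
 * check below - presumably because those coalescing timers are
 * absent on these chips.
 */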
10358
10359 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10360 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10361 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10362 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10363 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10364 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10365 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10366 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10367 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10368 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10369 return -EINVAL;
10370
10371 /* No rx interrupts will be generated if both are zero */
10372 if ((ec->rx_coalesce_usecs == 0) &&
10373 (ec->rx_max_coalesced_frames == 0))
10374 return -EINVAL;
10375
10376 /* No tx interrupts will be generated if both are zero */
10377 if ((ec->tx_coalesce_usecs == 0) &&
10378 (ec->tx_max_coalesced_frames == 0))
10379 return -EINVAL;
10380
10381 /* Only copy relevant parameters, ignore all others. */
10382 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10383 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10384 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10385 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10386 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10387 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10388 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10389 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10390 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10391
10392 if (netif_running(dev)) {
10393 tg3_full_lock(tp, 0);
10394 __tg3_set_coalesce(tp, &tp->coal);
10395 tg3_full_unlock(tp);
10396 }
10397 return 0;
10398 }
10399
10400 static const struct ethtool_ops tg3_ethtool_ops = {
10401 .get_settings = tg3_get_settings,
10402 .set_settings = tg3_set_settings,
10403 .get_drvinfo = tg3_get_drvinfo,
10404 .get_regs_len = tg3_get_regs_len,
10405 .get_regs = tg3_get_regs,
10406 .get_wol = tg3_get_wol,
10407 .set_wol = tg3_set_wol,
10408 .get_msglevel = tg3_get_msglevel,
10409 .set_msglevel = tg3_set_msglevel,
10410 .nway_reset = tg3_nway_reset,
10411 .get_link = ethtool_op_get_link,
10412 .get_eeprom_len = tg3_get_eeprom_len,
10413 .get_eeprom = tg3_get_eeprom,
10414 .set_eeprom = tg3_set_eeprom,
10415 .get_ringparam = tg3_get_ringparam,
10416 .set_ringparam = tg3_set_ringparam,
10417 .get_pauseparam = tg3_get_pauseparam,
10418 .set_pauseparam = tg3_set_pauseparam,
10419 .get_rx_csum = tg3_get_rx_csum,
10420 .set_rx_csum = tg3_set_rx_csum,
10421 .set_tx_csum = tg3_set_tx_csum,
10422 .set_sg = ethtool_op_set_sg,
10423 .set_tso = tg3_set_tso,
10424 .self_test = tg3_self_test,
10425 .get_strings = tg3_get_strings,
10426 .phys_id = tg3_phys_id,
10427 .get_ethtool_stats = tg3_get_ethtool_stats,
10428 .get_coalesce = tg3_get_coalesce,
10429 .set_coalesce = tg3_set_coalesce,
10430 .get_sset_count = tg3_get_sset_count,
10431 };
10432
10433 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10434 {
10435 u32 cursize, val, magic;
10436
10437 tp->nvram_size = EEPROM_CHIP_SIZE;
10438
10439 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
10440 return;
10441
10442 if ((magic != TG3_EEPROM_MAGIC) &&
10443 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10444 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10445 return;
10446
10447 /*
10448 * Size the chip by reading offsets at increasing powers of two.
10449 * When we encounter our validation signature, we know the addressing
10450 * has wrapped around, and thus have our chip size.
10451 */
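/* E.g. on a 16 KB part (size illustrative), reads at 0x10, 0x20, ...
 * return arbitrary data until cursize reaches 0x4000, where the
 * address wraps and the magic word at offset 0 reads back.
 */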
10452 cursize = 0x10;
10453
10454 while (cursize < tp->nvram_size) {
10455 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
10456 return;
10457
10458 if (val == magic)
10459 break;
10460
10461 cursize <<= 1;
10462 }
10463
10464 tp->nvram_size = cursize;
10465 }
10466
10467 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10468 {
10469 u32 val;
10470
10471 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
10472 return;
10473
10474 /* Selfboot format */
10475 if (val != TG3_EEPROM_MAGIC) {
10476 tg3_get_eeprom_size(tp);
10477 return;
10478 }
10479
10480 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10481 if (val != 0) {
10482 tp->nvram_size = (val >> 16) * 1024;
10483 return;
10484 }
10485 }
10486 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10487 }
10488
10489 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10490 {
10491 u32 nvcfg1;
10492
10493 nvcfg1 = tr32(NVRAM_CFG1);
10494 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10495 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10496 } else {
10498 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10499 tw32(NVRAM_CFG1, nvcfg1);
10500 }
10501
10502 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10503 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10504 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10505 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10506 tp->nvram_jedecnum = JEDEC_ATMEL;
10507 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10508 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10509 break;
10510 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10511 tp->nvram_jedecnum = JEDEC_ATMEL;
10512 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10513 break;
10514 case FLASH_VENDOR_ATMEL_EEPROM:
10515 tp->nvram_jedecnum = JEDEC_ATMEL;
10516 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10517 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10518 break;
10519 case FLASH_VENDOR_ST:
10520 tp->nvram_jedecnum = JEDEC_ST;
10521 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10522 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10523 break;
10524 case FLASH_VENDOR_SAIFUN:
10525 tp->nvram_jedecnum = JEDEC_SAIFUN;
10526 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10527 break;
10528 case FLASH_VENDOR_SST_SMALL:
10529 case FLASH_VENDOR_SST_LARGE:
10530 tp->nvram_jedecnum = JEDEC_SST;
10531 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10532 break;
10533 }
10534 } else {
10536 tp->nvram_jedecnum = JEDEC_ATMEL;
10537 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10538 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10539 }
10540 }
10541
10542 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10543 {
10544 u32 nvcfg1;
10545
10546 nvcfg1 = tr32(NVRAM_CFG1);
10547
10548 /* NVRAM protection for TPM */
10549 if (nvcfg1 & (1 << 27))
10550 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10551
10552 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10553 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10554 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10555 tp->nvram_jedecnum = JEDEC_ATMEL;
10556 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10557 break;
10558 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10559 tp->nvram_jedecnum = JEDEC_ATMEL;
10560 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10561 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10562 break;
10563 case FLASH_5752VENDOR_ST_M45PE10:
10564 case FLASH_5752VENDOR_ST_M45PE20:
10565 case FLASH_5752VENDOR_ST_M45PE40:
10566 tp->nvram_jedecnum = JEDEC_ST;
10567 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10568 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10569 break;
10570 }
10571
10572 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10573 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10574 case FLASH_5752PAGE_SIZE_256:
10575 tp->nvram_pagesize = 256;
10576 break;
10577 case FLASH_5752PAGE_SIZE_512:
10578 tp->nvram_pagesize = 512;
10579 break;
10580 case FLASH_5752PAGE_SIZE_1K:
10581 tp->nvram_pagesize = 1024;
10582 break;
10583 case FLASH_5752PAGE_SIZE_2K:
10584 tp->nvram_pagesize = 2048;
10585 break;
10586 case FLASH_5752PAGE_SIZE_4K:
10587 tp->nvram_pagesize = 4096;
10588 break;
10589 case FLASH_5752PAGE_SIZE_264:
10590 tp->nvram_pagesize = 264;
10591 break;
10592 }
10593 } else {
10595 /* For eeprom, set pagesize to maximum eeprom size */
10596 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10597
10598 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10599 tw32(NVRAM_CFG1, nvcfg1);
10600 }
10601 }
10602
10603 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10604 {
10605 u32 nvcfg1, protect = 0;
10606
10607 nvcfg1 = tr32(NVRAM_CFG1);
10608
10609 /* NVRAM protection for TPM */
10610 if (nvcfg1 & (1 << 27)) {
10611 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10612 protect = 1;
10613 }
10614
10615 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10616 switch (nvcfg1) {
10617 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10618 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10619 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10620 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10621 tp->nvram_jedecnum = JEDEC_ATMEL;
10622 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10623 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10624 tp->nvram_pagesize = 264;
10625 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10626 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10627 tp->nvram_size = (protect ? 0x3e200 :
10628 TG3_NVRAM_SIZE_512KB);
10629 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10630 tp->nvram_size = (protect ? 0x1f200 :
10631 TG3_NVRAM_SIZE_256KB);
10632 else
10633 tp->nvram_size = (protect ? 0x1f200 :
10634 TG3_NVRAM_SIZE_128KB);
10635 break;
10636 case FLASH_5752VENDOR_ST_M45PE10:
10637 case FLASH_5752VENDOR_ST_M45PE20:
10638 case FLASH_5752VENDOR_ST_M45PE40:
10639 tp->nvram_jedecnum = JEDEC_ST;
10640 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10641 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10642 tp->nvram_pagesize = 256;
10643 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10644 tp->nvram_size = (protect ?
10645 TG3_NVRAM_SIZE_64KB :
10646 TG3_NVRAM_SIZE_128KB);
10647 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10648 tp->nvram_size = (protect ?
10649 TG3_NVRAM_SIZE_64KB :
10650 TG3_NVRAM_SIZE_256KB);
10651 else
10652 tp->nvram_size = (protect ?
10653 TG3_NVRAM_SIZE_128KB :
10654 TG3_NVRAM_SIZE_512KB);
10655 break;
10656 }
10657 }
10658
10659 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10660 {
10661 u32 nvcfg1;
10662
10663 nvcfg1 = tr32(NVRAM_CFG1);
10664
10665 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10666 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10667 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10668 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10669 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10670 tp->nvram_jedecnum = JEDEC_ATMEL;
10671 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10672 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10673
10674 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10675 tw32(NVRAM_CFG1, nvcfg1);
10676 break;
10677 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10678 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10679 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10680 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10681 tp->nvram_jedecnum = JEDEC_ATMEL;
10682 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10683 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10684 tp->nvram_pagesize = 264;
10685 break;
10686 case FLASH_5752VENDOR_ST_M45PE10:
10687 case FLASH_5752VENDOR_ST_M45PE20:
10688 case FLASH_5752VENDOR_ST_M45PE40:
10689 tp->nvram_jedecnum = JEDEC_ST;
10690 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10691 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10692 tp->nvram_pagesize = 256;
10693 break;
10694 }
10695 }
10696
10697 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10698 {
10699 u32 nvcfg1, protect = 0;
10700
10701 nvcfg1 = tr32(NVRAM_CFG1);
10702
10703 /* NVRAM protection for TPM */
10704 if (nvcfg1 & (1 << 27)) {
10705 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10706 protect = 1;
10707 }
10708
10709 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10710 switch (nvcfg1) {
10711 case FLASH_5761VENDOR_ATMEL_ADB021D:
10712 case FLASH_5761VENDOR_ATMEL_ADB041D:
10713 case FLASH_5761VENDOR_ATMEL_ADB081D:
10714 case FLASH_5761VENDOR_ATMEL_ADB161D:
10715 case FLASH_5761VENDOR_ATMEL_MDB021D:
10716 case FLASH_5761VENDOR_ATMEL_MDB041D:
10717 case FLASH_5761VENDOR_ATMEL_MDB081D:
10718 case FLASH_5761VENDOR_ATMEL_MDB161D:
10719 tp->nvram_jedecnum = JEDEC_ATMEL;
10720 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10721 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10722 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10723 tp->nvram_pagesize = 256;
10724 break;
10725 case FLASH_5761VENDOR_ST_A_M45PE20:
10726 case FLASH_5761VENDOR_ST_A_M45PE40:
10727 case FLASH_5761VENDOR_ST_A_M45PE80:
10728 case FLASH_5761VENDOR_ST_A_M45PE16:
10729 case FLASH_5761VENDOR_ST_M_M45PE20:
10730 case FLASH_5761VENDOR_ST_M_M45PE40:
10731 case FLASH_5761VENDOR_ST_M_M45PE80:
10732 case FLASH_5761VENDOR_ST_M_M45PE16:
10733 tp->nvram_jedecnum = JEDEC_ST;
10734 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10735 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10736 tp->nvram_pagesize = 256;
10737 break;
10738 }
10739
10740 if (protect) {
10741 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10742 } else {
10743 switch (nvcfg1) {
10744 case FLASH_5761VENDOR_ATMEL_ADB161D:
10745 case FLASH_5761VENDOR_ATMEL_MDB161D:
10746 case FLASH_5761VENDOR_ST_A_M45PE16:
10747 case FLASH_5761VENDOR_ST_M_M45PE16:
10748 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10749 break;
10750 case FLASH_5761VENDOR_ATMEL_ADB081D:
10751 case FLASH_5761VENDOR_ATMEL_MDB081D:
10752 case FLASH_5761VENDOR_ST_A_M45PE80:
10753 case FLASH_5761VENDOR_ST_M_M45PE80:
10754 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10755 break;
10756 case FLASH_5761VENDOR_ATMEL_ADB041D:
10757 case FLASH_5761VENDOR_ATMEL_MDB041D:
10758 case FLASH_5761VENDOR_ST_A_M45PE40:
10759 case FLASH_5761VENDOR_ST_M_M45PE40:
10760 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10761 break;
10762 case FLASH_5761VENDOR_ATMEL_ADB021D:
10763 case FLASH_5761VENDOR_ATMEL_MDB021D:
10764 case FLASH_5761VENDOR_ST_A_M45PE20:
10765 case FLASH_5761VENDOR_ST_M_M45PE20:
10766 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10767 break;
10768 }
10769 }
10770 }
10771
10772 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10773 {
10774 tp->nvram_jedecnum = JEDEC_ATMEL;
10775 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10776 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10777 }
10778
10779 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10780 static void __devinit tg3_nvram_init(struct tg3 *tp)
10781 {
10782 tw32_f(GRC_EEPROM_ADDR,
10783 (EEPROM_ADDR_FSM_RESET |
10784 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10785 EEPROM_ADDR_CLKPERD_SHIFT)));
10786
10787 msleep(1);
10788
10789 /* Enable seeprom accesses. */
10790 tw32_f(GRC_LOCAL_CTRL,
10791 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10792 udelay(100);
10793
10794 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10795 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10796 tp->tg3_flags |= TG3_FLAG_NVRAM;
10797
10798 if (tg3_nvram_lock(tp)) {
10799 printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
10800 "tg3_nvram_init failed.\n", tp->dev->name);
10801 return;
10802 }
10803 tg3_enable_nvram_access(tp);
10804
10805 tp->nvram_size = 0;
10806
10807 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10808 tg3_get_5752_nvram_info(tp);
10809 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10810 tg3_get_5755_nvram_info(tp);
10811 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10812 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10813 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10814 tg3_get_5787_nvram_info(tp);
10815 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10816 tg3_get_5761_nvram_info(tp);
10817 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10818 tg3_get_5906_nvram_info(tp);
10819 else
10820 tg3_get_nvram_info(tp);
10821
10822 if (tp->nvram_size == 0)
10823 tg3_get_nvram_size(tp);
10824
10825 tg3_disable_nvram_access(tp);
10826 tg3_nvram_unlock(tp);
10827
10828 } else {
10829 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10830
10831 tg3_get_eeprom_size(tp);
10832 }
10833 }
10834
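/* Legacy serial-EEPROM read path: the word address is programmed into
 * GRC_EEPROM_ADDR together with EEPROM_ADDR_READ | EEPROM_ADDR_START,
 * EEPROM_ADDR_COMPLETE is then polled for up to roughly one second
 * (1000 iterations of msleep(1)), and the word is finally taken from
 * GRC_EEPROM_DATA.
 */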
10835 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10836 u32 offset, u32 *val)
10837 {
10838 u32 tmp;
10839 int i;
10840
10841 if (offset > EEPROM_ADDR_ADDR_MASK ||
10842 (offset % 4) != 0)
10843 return -EINVAL;
10844
10845 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10846 EEPROM_ADDR_DEVID_MASK |
10847 EEPROM_ADDR_READ);
10848 tw32(GRC_EEPROM_ADDR,
10849 tmp |
10850 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10851 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10852 EEPROM_ADDR_ADDR_MASK) |
10853 EEPROM_ADDR_READ | EEPROM_ADDR_START);
10854
10855 for (i = 0; i < 1000; i++) {
10856 tmp = tr32(GRC_EEPROM_ADDR);
10857
10858 if (tmp & EEPROM_ADDR_COMPLETE)
10859 break;
10860 msleep(1);
10861 }
10862 if (!(tmp & EEPROM_ADDR_COMPLETE))
10863 return -EBUSY;
10864
10865 *val = tr32(GRC_EEPROM_DATA);
10866 return 0;
10867 }
10868
10869 #define NVRAM_CMD_TIMEOUT 10000
10870
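/* Kick a command into the NVRAM state machine and poll NVRAM_CMD_DONE.
 * The loop below allows NVRAM_CMD_TIMEOUT iterations of udelay(10),
 * i.e. roughly 100 ms in the worst case, before giving up with -EBUSY.
 */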
10871 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10872 {
10873 int i;
10874
10875 tw32(NVRAM_CMD, nvram_cmd);
10876 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10877 udelay(10);
10878 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10879 udelay(10);
10880 break;
10881 }
10882 }
10883 if (i == NVRAM_CMD_TIMEOUT) {
10884 return -EBUSY;
10885 }
10886 return 0;
10887 }
10888
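/* Translate a linear NVRAM offset into the page-oriented physical
 * address used by buffered Atmel AT45DB flashes, whose page index sits
 * at bit ATMEL_AT45DB0X1B_PAGE_POS. As a worked example, assuming the
 * usual 264-byte page size and a page position of 9: linear offset
 * 1000 is page 3, byte 208, so the physical address becomes
 * (3 << 9) + 208 = 1744.
 */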
10889 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10890 {
10891 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10892 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10893 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10894 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10895 (tp->nvram_jedecnum == JEDEC_ATMEL))
10896
10897 addr = ((addr / tp->nvram_pagesize) <<
10898 ATMEL_AT45DB0X1B_PAGE_POS) +
10899 (addr % tp->nvram_pagesize);
10900
10901 return addr;
10902 }
10903
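/* Inverse of tg3_nvram_phys_addr(): recover the linear offset from a
 * page-oriented physical address. Continuing the example above
 * (264-byte pages, page position 9): physical 1744 gives page
 * 1744 >> 9 = 3 and byte 1744 & 511 = 208, so the linear offset is
 * 3 * 264 + 208 = 1000.
 */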
10904 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10905 {
10906 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10907 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10908 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10909 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10910 (tp->nvram_jedecnum == JEDEC_ATMEL))
10911
10912 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10913 tp->nvram_pagesize) +
10914 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10915
10916 return addr;
10917 }
10918
10919 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10920 {
10921 int ret;
10922
10923 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10924 return tg3_nvram_read_using_eeprom(tp, offset, val);
10925
10926 offset = tg3_nvram_phys_addr(tp, offset);
10927
10928 if (offset > NVRAM_ADDR_MSK)
10929 return -EINVAL;
10930
10931 ret = tg3_nvram_lock(tp);
10932 if (ret)
10933 return ret;
10934
10935 tg3_enable_nvram_access(tp);
10936
10937 tw32(NVRAM_ADDR, offset);
10938 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10939 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10940
10941 if (ret == 0)
10942 *val = swab32(tr32(NVRAM_RDDATA));
10943
10944 tg3_disable_nvram_access(tp);
10945
10946 tg3_nvram_unlock(tp);
10947
10948 return ret;
10949 }
10950
10951 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10952 {
10953 u32 v;
10954 int res = tg3_nvram_read(tp, offset, &v);
10955 if (!res)
10956 *val = cpu_to_le32(v);
10957 return res;
10958 }
10959
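/* Read a word and undo the swab32() applied by tg3_nvram_read();
 * callers use this raw form for magic-number and header checks.
 */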
10960 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10961 {
10962 int err;
10963 u32 tmp;
10964
10965 err = tg3_nvram_read(tp, offset, &tmp);
10966 *val = swab32(tmp);
10967 return err;
10968 }
10969
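/* Legacy serial-EEPROM write path, the mirror image of the read path
 * above: each dword is loaded into GRC_EEPROM_DATA, the address is
 * programmed with EEPROM_ADDR_START | EEPROM_ADDR_WRITE, and
 * EEPROM_ADDR_COMPLETE is polled for up to ~1 second per word.
 */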
10970 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10971 u32 offset, u32 len, u8 *buf)
10972 {
10973 int i, j, rc = 0;
10974 u32 val;
10975
10976 for (i = 0; i < len; i += 4) {
10977 u32 addr;
10978 __le32 data;
10979
10980 addr = offset + i;
10981
10982 memcpy(&data, buf + i, 4);
10983
10984 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
10985
10986 val = tr32(GRC_EEPROM_ADDR);
10987 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10988
10989 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10990 EEPROM_ADDR_READ);
10991 tw32(GRC_EEPROM_ADDR, val |
10992 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10993 (addr & EEPROM_ADDR_ADDR_MASK) |
10994 EEPROM_ADDR_START |
10995 EEPROM_ADDR_WRITE);
10996
10997 for (j = 0; j < 1000; j++) {
10998 val = tr32(GRC_EEPROM_ADDR);
10999
11000 if (val & EEPROM_ADDR_COMPLETE)
11001 break;
11002 msleep(1);
11003 }
11004 if (!(val & EEPROM_ADDR_COMPLETE)) {
11005 rc = -EBUSY;
11006 break;
11007 }
11008 }
11009
11010 return rc;
11011 }
11012
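/* Unbuffered flash parts cannot be modified in place: each affected
 * page is read back into a temporary buffer, merged with the new data,
 * erased, and then rewritten dword by dword with NVRAM_CMD_FIRST and
 * NVRAM_CMD_LAST marking the page boundaries.
 */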
11013 /* offset and length are dword aligned */
11014 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11015 u8 *buf)
11016 {
11017 int ret = 0;
11018 u32 pagesize = tp->nvram_pagesize;
11019 u32 pagemask = pagesize - 1;
11020 u32 nvram_cmd;
11021 u8 *tmp;
11022
11023 tmp = kmalloc(pagesize, GFP_KERNEL);
11024 if (tmp == NULL)
11025 return -ENOMEM;
11026
11027 while (len) {
11028 int j;
11029 u32 phy_addr, page_off, size;
11030
11031 phy_addr = offset & ~pagemask;
11032
11033 for (j = 0; j < pagesize; j += 4) {
11034 if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
11035 (__le32 *) (tmp + j))))
11036 break;
11037 }
11038 if (ret)
11039 break;
11040
11041 page_off = offset & pagemask;
11042 size = pagesize;
11043 if (len < size)
11044 size = len;
11045
11046 len -= size;
11047
11048 memcpy(tmp + page_off, buf, size);
11049
11050 offset = offset + (pagesize - page_off);
11051
11052 tg3_enable_nvram_access(tp);
11053
11054 /*
11055 * Before we can erase the flash page, we need
11056 * to issue a special "write enable" command.
11057 */
11058 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11059
11060 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11061 break;
11062
11063 /* Erase the target page */
11064 tw32(NVRAM_ADDR, phy_addr);
11065
11066 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11067 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11068
11069 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11070 break;
11071
11072 /* Issue another write enable to start the write. */
11073 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11074
11075 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11076 break;
11077
11078 for (j = 0; j < pagesize; j += 4) {
11079 __be32 data;
11080
11081 data = *((__be32 *) (tmp + j));
11082 /* swab32(le32_to_cpu(data)), actually */
11083 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11084
11085 tw32(NVRAM_ADDR, phy_addr + j);
11086
11087 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11088 NVRAM_CMD_WR;
11089
11090 if (j == 0)
11091 nvram_cmd |= NVRAM_CMD_FIRST;
11092 else if (j == (pagesize - 4))
11093 nvram_cmd |= NVRAM_CMD_LAST;
11094
11095 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11096 break;
11097 }
11098 if (ret)
11099 break;
11100 }
11101
11102 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11103 tg3_nvram_exec_cmd(tp, nvram_cmd);
11104
11105 kfree(tmp);
11106
11107 return ret;
11108 }
11109
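/* Buffered flash parts and EEPROMs need no explicit erase cycle here;
 * dwords are streamed out one at a time, with NVRAM_CMD_FIRST and
 * NVRAM_CMD_LAST flagging page and transfer boundaries (plain EEPROMs
 * get both flags on every word).
 */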
11110 /* offset and length are dword aligned */
11111 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11112 u8 *buf)
11113 {
11114 int i, ret = 0;
11115
11116 for (i = 0; i < len; i += 4, offset += 4) {
11117 u32 page_off, phy_addr, nvram_cmd;
11118 __be32 data;
11119
11120 memcpy(&data, buf + i, 4);
11121 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11122
11123 page_off = offset % tp->nvram_pagesize;
11124
11125 phy_addr = tg3_nvram_phys_addr(tp, offset);
11126
11127 tw32(NVRAM_ADDR, phy_addr);
11128
11129 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11130
11131 if ((page_off == 0) || (i == 0))
11132 nvram_cmd |= NVRAM_CMD_FIRST;
11133 if (page_off == (tp->nvram_pagesize - 4))
11134 nvram_cmd |= NVRAM_CMD_LAST;
11135
11136 if (i == (len - 4))
11137 nvram_cmd |= NVRAM_CMD_LAST;
11138
11139 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
11140 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
11141 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
11142 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
11143 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
11144 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
11145 (tp->nvram_jedecnum == JEDEC_ST) &&
11146 (nvram_cmd & NVRAM_CMD_FIRST)) {
11147
11148 if ((ret = tg3_nvram_exec_cmd(tp,
11149 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11150 NVRAM_CMD_DONE)))
11151
11152 break;
11153 }
11154 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11155 /* We always do complete word writes to eeprom. */
11156 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11157 }
11158
11159 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11160 break;
11161 }
11162 return ret;
11163 }
11164
11165 /* offset and length are dword aligned */
11166 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11167 {
11168 int ret;
11169
11170 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11171 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11172 ~GRC_LCLCTRL_GPIO_OUTPUT1);
11173 udelay(40);
11174 }
11175
11176 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11177 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11178 } else {
11180 u32 grc_mode;
11181
11182 ret = tg3_nvram_lock(tp);
11183 if (ret)
11184 return ret;
11185
11186 tg3_enable_nvram_access(tp);
11187 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11188 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
11189 tw32(NVRAM_WRITE1, 0x406);
11190
11191 grc_mode = tr32(GRC_MODE);
11192 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11193
11194 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11195 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11196
11197 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11198 buf);
11199 } else {
11201 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11202 buf);
11203 }
11204
11205 grc_mode = tr32(GRC_MODE);
11206 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11207
11208 tg3_disable_nvram_access(tp);
11209 tg3_nvram_unlock(tp);
11210 }
11211
11212 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11213 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
11214 udelay(40);
11215 }
11216
11217 return ret;
11218 }
11219
11220 struct subsys_tbl_ent {
11221 u16 subsys_vendor, subsys_devid;
11222 u32 phy_id;
11223 };
11224
11225 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11226 /* Broadcom boards. */
11227 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11228 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11229 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11230 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
11231 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11232 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11233 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
11234 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11235 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11236 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11237 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11238
11239 /* 3com boards. */
11240 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11241 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11242 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
11243 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11244 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11245
11246 /* DELL boards. */
11247 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11248 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11249 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11250 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11251
11252 /* Compaq boards. */
11253 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11254 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11255 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
11256 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11257 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11258
11259 /* IBM boards. */
11260 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11261 };
11262
11263 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11264 {
11265 int i;
11266
11267 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11268 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11269 tp->pdev->subsystem_vendor) &&
11270 (subsys_id_to_phy_id[i].subsys_devid ==
11271 tp->pdev->subsystem_device))
11272 return &subsys_id_to_phy_id[i];
11273 }
11274 return NULL;
11275 }
11276
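/* Pull the hardware configuration that bootcode left in NIC SRAM: if
 * the signature at NIC_SRAM_DATA_SIG checks out, the config words are
 * mined for the PHY ID, LED mode, WOL capability, and the ASF/APE
 * feature flags; 5906 parts use the VCPU shadow register instead.
 */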
11277 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11278 {
11279 u32 val;
11280 u16 pmcsr;
11281
11282 /* On some early chips the SRAM cannot be accessed in D3hot state,
11283 * so we need to make sure we're in D0.
11284 */
11285 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11286 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11287 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11288 msleep(1);
11289
11290 /* Make sure register accesses (indirect or otherwise)
11291 * will function correctly.
11292 */
11293 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11294 tp->misc_host_ctrl);
11295
11296 /* The memory arbiter has to be enabled in order for SRAM accesses
11297 * to succeed. Normally on powerup the tg3 chip firmware will make
11298 * sure it is enabled, but other entities such as system netboot
11299 * code might disable it.
11300 */
11301 val = tr32(MEMARB_MODE);
11302 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11303
11304 tp->phy_id = PHY_ID_INVALID;
11305 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11306
11307 /* Assume an onboard, WOL-capable device by default. */
11308 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
11309
11310 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11311 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
11312 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11313 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11314 }
11315 val = tr32(VCPU_CFGSHDW);
11316 if (val & VCPU_CFGSHDW_ASPM_DBNC)
11317 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11318 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
11319 (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
11320 device_may_wakeup(&tp->pdev->dev))
11321 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11322 return;
11323 }
11324
11325 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11326 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11327 u32 nic_cfg, led_cfg;
11328 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
11329 int eeprom_phy_serdes = 0;
11330
11331 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11332 tp->nic_sram_data_cfg = nic_cfg;
11333
11334 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11335 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11336 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11337 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11338 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11339 (ver > 0) && (ver < 0x100))
11340 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11341
11342 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11343 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11344
11345 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11346 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11347 eeprom_phy_serdes = 1;
11348
11349 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11350 if (nic_phy_id != 0) {
11351 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11352 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
11353
11354 eeprom_phy_id = (id1 >> 16) << 10;
11355 eeprom_phy_id |= (id2 & 0xfc00) << 16;
11356 eeprom_phy_id |= (id2 & 0x03ff) << 0;
11357 } else
11358 eeprom_phy_id = 0;
11359
11360 tp->phy_id = eeprom_phy_id;
11361 if (eeprom_phy_serdes) {
11362 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
11363 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11364 else
11365 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11366 }
11367
11368 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11369 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11370 SHASTA_EXT_LED_MODE_MASK);
11371 else
11372 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11373
11374 switch (led_cfg) {
11375 default:
11376 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11377 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11378 break;
11379
11380 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11381 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11382 break;
11383
11384 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11385 tp->led_ctrl = LED_CTRL_MODE_MAC;
11386
11387 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11388 * read, as happens with some older 5700/5701 bootcode.
11389 */
11390 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11391 ASIC_REV_5700 ||
11392 GET_ASIC_REV(tp->pci_chip_rev_id) ==
11393 ASIC_REV_5701)
11394 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11395
11396 break;
11397
11398 case SHASTA_EXT_LED_SHARED:
11399 tp->led_ctrl = LED_CTRL_MODE_SHARED;
11400 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11401 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11402 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11403 LED_CTRL_MODE_PHY_2);
11404 break;
11405
11406 case SHASTA_EXT_LED_MAC:
11407 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11408 break;
11409
11410 case SHASTA_EXT_LED_COMBO:
11411 tp->led_ctrl = LED_CTRL_MODE_COMBO;
11412 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11413 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11414 LED_CTRL_MODE_PHY_2);
11415 break;
11416
11417 }
11418
11419 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11420 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11421 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11422 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11423
11424 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11425 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11426
11427 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
11428 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
11429 if ((tp->pdev->subsystem_vendor ==
11430 PCI_VENDOR_ID_ARIMA) &&
11431 (tp->pdev->subsystem_device == 0x205a ||
11432 tp->pdev->subsystem_device == 0x2063))
11433 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11434 } else {
11435 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11436 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11437 }
11438
11439 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11440 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
11441 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11442 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11443 }
11444
11445 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
11446 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11447 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
11448
11449 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11450 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11451 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
11452
11453 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
11454 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE) &&
11455 device_may_wakeup(&tp->pdev->dev))
11456 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11457
11458 if (cfg2 & (1 << 17))
11459 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11460
11461 /* SerDes signal pre-emphasis in register 0x590 is set by
11462 * the bootcode if bit 18 is set. */
11463 if (cfg2 & (1 << 18))
11464 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
11465
11466 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11467 u32 cfg3;
11468
11469 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11470 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11471 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11472 }
11473
11474 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11475 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11476 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11477 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11478 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11479 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
11480 }
11481 }
11482
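/* Drive the OTP state machine: the command is written once with
 * OTP_CTRL_OTP_CMD_START set and then once without it, after which
 * OTP_STATUS_CMD_DONE is polled for up to ~1 ms (100 x udelay(10)).
 */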
11483 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11484 {
11485 int i;
11486 u32 val;
11487
11488 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11489 tw32(OTP_CTRL, cmd);
11490
11491 /* Wait for up to 1 ms for command to execute. */
11492 for (i = 0; i < 100; i++) {
11493 val = tr32(OTP_STATUS);
11494 if (val & OTP_STATUS_CMD_DONE)
11495 break;
11496 udelay(10);
11497 }
11498
11499 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11500 }
11501
11502 /* Read the gphy configuration from the OTP region of the chip. The gphy
11503 * configuration is a 32-bit value that straddles the alignment boundary.
11504 * We do two 32-bit reads and then shift and merge the results.
11505 */
11506 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11507 {
11508 u32 bhalf_otp, thalf_otp;
11509
11510 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11511
11512 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11513 return 0;
11514
11515 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11516
11517 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11518 return 0;
11519
11520 thalf_otp = tr32(OTP_READ_DATA);
11521
11522 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11523
11524 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11525 return 0;
11526
11527 bhalf_otp = tr32(OTP_READ_DATA);
11528
11529 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11530 }
11531
11532 static int __devinit tg3_phy_probe(struct tg3 *tp)
11533 {
11534 u32 hw_phy_id_1, hw_phy_id_2;
11535 u32 hw_phy_id, hw_phy_id_masked;
11536 int err;
11537
11538 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11539 return tg3_phy_init(tp);
11540
11541 /* Reading the PHY ID register can conflict with ASF
11542 * firmware access to the PHY hardware.
11543 */
11544 err = 0;
11545 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11546 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
11547 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11548 } else {
11549 /* Now read the physical PHY_ID from the chip and verify
11550 * that it is sane. If it doesn't look good, we fall back
11551 * to the value found in the eeprom area or, failing
11552 * that, the hard-coded subsystem-ID table.
11553 */
11554 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11555 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11556
11557 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11558 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11559 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
11560
11561 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11562 }
11563
11564 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11565 tp->phy_id = hw_phy_id;
11566 if (hw_phy_id_masked == PHY_ID_BCM8002)
11567 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11568 else
11569 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
11570 } else {
11571 if (tp->phy_id != PHY_ID_INVALID) {
11572 /* Do nothing, phy ID already set up in
11573 * tg3_get_eeprom_hw_cfg().
11574 */
11575 } else {
11576 struct subsys_tbl_ent *p;
11577
11578 /* No eeprom signature? Try the hardcoded
11579 * subsys device table.
11580 */
11581 p = lookup_by_subsys(tp);
11582 if (!p)
11583 return -ENODEV;
11584
11585 tp->phy_id = p->phy_id;
11586 if (!tp->phy_id ||
11587 tp->phy_id == PHY_ID_BCM8002)
11588 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11589 }
11590 }
11591
11592 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
11593 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
11594 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
11595 u32 bmsr, adv_reg, tg3_ctrl, mask;
11596
11597 tg3_readphy(tp, MII_BMSR, &bmsr);
11598 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11599 (bmsr & BMSR_LSTATUS))
11600 goto skip_phy_reset;
11601
11602 err = tg3_phy_reset(tp);
11603 if (err)
11604 return err;
11605
11606 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11607 ADVERTISE_100HALF | ADVERTISE_100FULL |
11608 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11609 tg3_ctrl = 0;
11610 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11611 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11612 MII_TG3_CTRL_ADV_1000_FULL);
11613 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11614 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11615 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11616 MII_TG3_CTRL_ENABLE_AS_MASTER);
11617 }
11618
11619 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11620 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11621 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11622 if (!tg3_copper_is_advertising_all(tp, mask)) {
11623 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11624
11625 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11626 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11627
11628 tg3_writephy(tp, MII_BMCR,
11629 BMCR_ANENABLE | BMCR_ANRESTART);
11630 }
11631 tg3_phy_set_wirespeed(tp);
11632
11633 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11634 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11635 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11636 }
11637
11638 skip_phy_reset:
11639 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11640 err = tg3_init_5401phy_dsp(tp);
11641 if (err)
11642 return err;
11643 }
11644
11645 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11646 err = tg3_init_5401phy_dsp(tp);
11647 }
11648
11649 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
11650 tp->link_config.advertising =
11651 (ADVERTISED_1000baseT_Half |
11652 ADVERTISED_1000baseT_Full |
11653 ADVERTISED_Autoneg |
11654 ADVERTISED_FIBRE);
11655 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11656 tp->link_config.advertising &=
11657 ~(ADVERTISED_1000baseT_Half |
11658 ADVERTISED_1000baseT_Full);
11659
11660 return err;
11661 }
11662
11663 static void __devinit tg3_read_partno(struct tg3 *tp)
11664 {
11665 unsigned char vpd_data[256];
11666 unsigned int i;
11667 u32 magic;
11668
11669 if (tg3_nvram_read_swab(tp, 0x0, &magic))
11670 goto out_not_found;
11671
11672 if (magic == TG3_EEPROM_MAGIC) {
11673 for (i = 0; i < 256; i += 4) {
11674 u32 tmp;
11675
11676 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11677 goto out_not_found;
11678
11679 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
11680 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
11681 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11682 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11683 }
11684 } else {
11685 int vpd_cap;
11686
11687 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11688 for (i = 0; i < 256; i += 4) {
11689 u32 tmp, j = 0;
11690 __le32 v;
11691 u16 tmp16;
11692
11693 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11694 i);
11695 while (j++ < 100) {
11696 pci_read_config_word(tp->pdev, vpd_cap +
11697 PCI_VPD_ADDR, &tmp16);
11698 if (tmp16 & 0x8000)
11699 break;
11700 msleep(1);
11701 }
11702 if (!(tmp16 & 0x8000))
11703 goto out_not_found;
11704
11705 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11706 &tmp);
11707 v = cpu_to_le32(tmp);
11708 memcpy(&vpd_data[i], &v, 4);
11709 }
11710 }
11711
11712 /* Now parse and find the part number. */
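/* VPD is a sequence of tagged resources: tag 0x82 introduces the
 * identifier string, 0x90 the read-only (VPD-R) section, and 0x91 the
 * read-write (VPD-W) section. The part number is the VPD-R item with
 * the two-character keyword "PN".
 */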
11713 for (i = 0; i < 254; ) {
11714 unsigned char val = vpd_data[i];
11715 unsigned int block_end;
11716
11717 if (val == 0x82 || val == 0x91) {
11718 i = (i + 3 +
11719 (vpd_data[i + 1] +
11720 (vpd_data[i + 2] << 8)));
11721 continue;
11722 }
11723
11724 if (val != 0x90)
11725 goto out_not_found;
11726
11727 block_end = (i + 3 +
11728 (vpd_data[i + 1] +
11729 (vpd_data[i + 2] << 8)));
11730 i += 3;
11731
11732 if (block_end > 256)
11733 goto out_not_found;
11734
11735 while (i < (block_end - 2)) {
11736 if (vpd_data[i + 0] == 'P' &&
11737 vpd_data[i + 1] == 'N') {
11738 int partno_len = vpd_data[i + 2];
11739
11740 i += 3;
11741 if (partno_len > 24 || (partno_len + i) > 256)
11742 goto out_not_found;
11743
11744 memcpy(tp->board_part_number,
11745 &vpd_data[i], partno_len);
11746
11747 /* Success. */
11748 return;
11749 }
11750 i += 3 + vpd_data[i + 2];
11751 }
11752
11753 /* Part number not found. */
11754 goto out_not_found;
11755 }
11756
11757 out_not_found:
11758 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11759 strcpy(tp->board_part_number, "BCM95906");
11760 else
11761 strcpy(tp->board_part_number, "none");
11762 }
11763
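/* A firmware image is considered valid when its first dword carries
 * 0x0c000000 in the top six bits and the following dword is zero;
 * anything else is treated as garbage and skipped.
 */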
11764 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11765 {
11766 u32 val;
11767
11768 if (tg3_nvram_read_swab(tp, offset, &val) ||
11769 (val & 0xfc000000) != 0x0c000000 ||
11770 tg3_nvram_read_swab(tp, offset + 4, &val) ||
11771 val != 0)
11772 return 0;
11773
11774 return 1;
11775 }
11776
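/* Assemble tp->fw_ver: the bootcode version string is located via the
 * image start word at NVRAM offset 0x4 and the image offset word at
 * 0xc; when ASF is enabled without APE, the ASF initialization
 * firmware version found through the NVM directory is appended after
 * a ", " separator.
 */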
11777 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11778 {
11779 u32 val, offset, start;
11780 u32 ver_offset;
11781 int i, bcnt;
11782
11783 if (tg3_nvram_read_swab(tp, 0, &val))
11784 return;
11785
11786 if (val != TG3_EEPROM_MAGIC)
11787 return;
11788
11789 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11790 tg3_nvram_read_swab(tp, 0x4, &start))
11791 return;
11792
11793 offset = tg3_nvram_logical_addr(tp, offset);
11794
11795 if (!tg3_fw_img_is_valid(tp, offset) ||
11796 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
11797 return;
11798
11799 offset = offset + ver_offset - start;
11800 for (i = 0; i < 16; i += 4) {
11801 __le32 v;
11802 if (tg3_nvram_read_le(tp, offset + i, &v))
11803 return;
11804
11805 memcpy(tp->fw_ver + i, &v, 4);
11806 }
11807
11808 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11809 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
11810 return;
11811
11812 for (offset = TG3_NVM_DIR_START;
11813 offset < TG3_NVM_DIR_END;
11814 offset += TG3_NVM_DIRENT_SIZE) {
11815 if (tg3_nvram_read_swab(tp, offset, &val))
11816 return;
11817
11818 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11819 break;
11820 }
11821
11822 if (offset == TG3_NVM_DIR_END)
11823 return;
11824
11825 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11826 start = 0x08000000;
11827 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11828 return;
11829
11830 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11831 !tg3_fw_img_is_valid(tp, offset) ||
11832 tg3_nvram_read_swab(tp, offset + 8, &val))
11833 return;
11834
11835 offset += val - start;
11836
11837 bcnt = strlen(tp->fw_ver);
11838
11839 tp->fw_ver[bcnt++] = ',';
11840 tp->fw_ver[bcnt++] = ' ';
11841
11842 for (i = 0; i < 4; i++) {
11843 __le32 v;
11844 if (tg3_nvram_read_le(tp, offset, &v))
11845 return;
11846
11847 offset += sizeof(v);
11848
11849 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11850 memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
11851 break;
11852 }
11853
11854 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11855 bcnt += sizeof(v);
11856 }
11857
11858 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
11859 }
11860
11861 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11862
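/* Probe the chip revision, bus type, and the long list of
 * chipset-specific erratum flags, and select the register/mailbox
 * access methods the rest of the driver will use.
 */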
11863 static int __devinit tg3_get_invariants(struct tg3 *tp)
11864 {
11865 static struct pci_device_id write_reorder_chipsets[] = {
11866 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11867 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11868 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11869 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11870 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11871 PCI_DEVICE_ID_VIA_8385_0) },
11872 { },
11873 };
11874 u32 misc_ctrl_reg;
11875 u32 cacheline_sz_reg;
11876 u32 pci_state_reg, grc_misc_cfg;
11877 u32 val;
11878 u16 pci_cmd;
11879 int err, pcie_cap;
11880
11881 /* Force memory write invalidate off. If we leave it on,
11882 * then on 5700_BX chips we have to enable a workaround.
11883 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11884 * to match the cacheline size. The Broadcom driver has this
11885 * workaround but turns MWI off at all times, so it never gets
11886 * used. This seems to suggest that the workaround is insufficient.
11887 */
11888 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11889 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11890 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11891
11892 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11893 * has the register indirect write enable bit set before
11894 * we try to access any of the MMIO registers. It is also
11895 * critical that the PCI-X hw workaround situation is decided
11896 * before that as well.
11897 */
11898 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11899 &misc_ctrl_reg);
11900
11901 tp->pci_chip_rev_id = (misc_ctrl_reg >>
11902 MISC_HOST_CTRL_CHIPREV_SHIFT);
11903 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11904 u32 prod_id_asic_rev;
11905
11906 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11907 &prod_id_asic_rev);
11908 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11909 }
11910
11911 /* Wrong chip ID in 5752 A0. This code can be removed later
11912 * as A0 is not in production.
11913 */
11914 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11915 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11916
11917 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11918 * we need to disable memory and use config. cycles
11919 * only to access all registers. The 5702/03 chips
11920 * can mistakenly decode the special cycles from the
11921 * ICH chipsets as memory write cycles, causing corruption
11922 * of register and memory space. Only certain ICH bridges
11923 * will drive special cycles with non-zero data during the
11924 * address phase which can fall within the 5703's address
11925 * range. This is not an ICH bug as the PCI spec allows
11926 * non-zero address during special cycles. However, only
11927 * these ICH bridges are known to drive non-zero addresses
11928 * during special cycles.
11929 *
11930 * Since special cycles do not cross PCI bridges, we only
11931 * enable this workaround if the 5703 is on the secondary
11932 * bus of these ICH bridges.
11933 */
11934 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11935 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11936 static struct tg3_dev_id {
11937 u32 vendor;
11938 u32 device;
11939 u32 rev;
11940 } ich_chipsets[] = {
11941 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11942 PCI_ANY_ID },
11943 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11944 PCI_ANY_ID },
11945 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11946 0xa },
11947 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11948 PCI_ANY_ID },
11949 { },
11950 };
11951 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11952 struct pci_dev *bridge = NULL;
11953
11954 while (pci_id->vendor != 0) {
11955 bridge = pci_get_device(pci_id->vendor, pci_id->device,
11956 bridge);
11957 if (!bridge) {
11958 pci_id++;
11959 continue;
11960 }
11961 if (pci_id->rev != PCI_ANY_ID) {
11962 if (bridge->revision > pci_id->rev)
11963 continue;
11964 }
11965 if (bridge->subordinate &&
11966 (bridge->subordinate->number ==
11967 tp->pdev->bus->number)) {
11968
11969 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11970 pci_dev_put(bridge);
11971 break;
11972 }
11973 }
11974 }
11975
11976 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11977 static struct tg3_dev_id {
11978 u32 vendor;
11979 u32 device;
11980 } bridge_chipsets[] = {
11981 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11982 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11983 { },
11984 };
11985 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11986 struct pci_dev *bridge = NULL;
11987
11988 while (pci_id->vendor != 0) {
11989 bridge = pci_get_device(pci_id->vendor,
11990 pci_id->device,
11991 bridge);
11992 if (!bridge) {
11993 pci_id++;
11994 continue;
11995 }
11996 if (bridge->subordinate &&
11997 (bridge->subordinate->number <=
11998 tp->pdev->bus->number) &&
11999 (bridge->subordinate->subordinate >=
12000 tp->pdev->bus->number)) {
12001 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12002 pci_dev_put(bridge);
12003 break;
12004 }
12005 }
12006 }
12007
12008 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12009 * DMA addresses > 40-bit. This bridge may have additional
12010 * 57xx devices behind it, in some 4-port NIC designs for example.
12011 * Any tg3 device found behind the bridge will also need the 40-bit
12012 * DMA workaround.
12013 */
12014 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12015 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12016 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
12017 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12018 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12019 } else {
12021 struct pci_dev *bridge = NULL;
12022
12023 do {
12024 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12025 PCI_DEVICE_ID_SERVERWORKS_EPB,
12026 bridge);
12027 if (bridge && bridge->subordinate &&
12028 (bridge->subordinate->number <=
12029 tp->pdev->bus->number) &&
12030 (bridge->subordinate->subordinate >=
12031 tp->pdev->bus->number)) {
12032 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12033 pci_dev_put(bridge);
12034 break;
12035 }
12036 } while (bridge);
12037 }
12038
12039 /* Initialize misc host control in PCI block. */
12040 tp->misc_host_ctrl |= (misc_ctrl_reg &
12041 MISC_HOST_CTRL_CHIPREV);
12042 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12043 tp->misc_host_ctrl);
12044
12045 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12046 &cacheline_sz_reg);
12047
12048 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
12049 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
12050 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
12051 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
12052
12053 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12054 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12055 tp->pdev_peer = tg3_find_peer(tp);
12056
12057 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12058 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12059 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12060 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12061 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12062 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12063 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12064 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12065 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12066 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12067
12068 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12069 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12070 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12071
12072 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12073 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12074 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12075 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12076 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12077 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12078 tp->pdev_peer == tp->pdev))
12079 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12080
12081 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12082 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12083 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12084 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12085 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12086 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12087 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12088 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12089 } else {
12090 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12091 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12092 ASIC_REV_5750 &&
12093 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12094 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12095 }
12096 }
12097
12098 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12099 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12100 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12101
12102 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12103 if (pcie_cap != 0) {
12104 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12105
12106 pcie_set_readrq(tp->pdev, 4096);
12107
12108 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12109 u16 lnkctl;
12110
12111 pci_read_config_word(tp->pdev,
12112 pcie_cap + PCI_EXP_LNKCTL,
12113 &lnkctl);
12114 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
12115 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12116 }
12117 }
12118
12119 /* If we have an AMD 762 or VIA K8T800 chipset, write
12120 * reordering to the mailbox registers done by the host
12121 * controller can cause major troubles. We read back from
12122 * every mailbox register write to force the writes to be
12123 * posted to the chip in order.
12124 */
12125 if (pci_dev_present(write_reorder_chipsets) &&
12126 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12127 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12128
12129 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12130 tp->pci_lat_timer < 64) {
12131 tp->pci_lat_timer = 64;
12132
12133 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
12134 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
12135 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
12136 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
12137
12138 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12139 cacheline_sz_reg);
12140 }
12141
12142 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12143 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12144 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12145 if (!tp->pcix_cap) {
12146 printk(KERN_ERR PFX "Cannot find PCI-X "
12147 "capability, aborting.\n");
12148 return -EIO;
12149 }
12150 }
12151
12152 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12153 &pci_state_reg);
12154
12155 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
12156 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12157
12158 /* If this is a 5700 BX chipset, and we are in PCI-X
12159 * mode, enable register write workaround.
12160 *
12161 * The workaround is to use indirect register accesses
12162 * for all chip writes not to mailbox registers.
12163 */
12164 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12165 u32 pm_reg;
12166
12167 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12168
12169 /* The chip can have its power management PCI config
12170 * space registers clobbered due to this bug.
12171 * So explicitly force the chip into D0 here.
12172 */
12173 pci_read_config_dword(tp->pdev,
12174 tp->pm_cap + PCI_PM_CTRL,
12175 &pm_reg);
12176 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12177 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12178 pci_write_config_dword(tp->pdev,
12179 tp->pm_cap + PCI_PM_CTRL,
12180 pm_reg);
12181
12182 /* Also, force SERR#/PERR# in PCI command. */
12183 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12184 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12185 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12186 }
12187 }
12188
12189 /* 5700 BX chips need to have their TX producer index mailboxes
12190 * written twice to workaround a bug.
12191 */
12192 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
12193 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12194
12195 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12196 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12197 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12198 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12199
12200 /* Chip-specific fixup from Broadcom driver */
12201 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12202 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12203 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12204 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12205 }
12206
12207 /* Default fast path register access methods */
12208 tp->read32 = tg3_read32;
12209 tp->write32 = tg3_write32;
12210 tp->read32_mbox = tg3_read32;
12211 tp->write32_mbox = tg3_write32;
12212 tp->write32_tx_mbox = tg3_write32;
12213 tp->write32_rx_mbox = tg3_write32;
12214
12215 /* Various workaround register access methods */
12216 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12217 tp->write32 = tg3_write_indirect_reg32;
12218 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12219 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12220 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12221 /*
12222 * Back-to-back register writes can cause problems on these
12223 * chips; the workaround is to read back all reg writes
12224 * except those to mailbox regs.
12225 *
12226 * See tg3_write_indirect_reg32().
12227 */
12228 tp->write32 = tg3_write_flush_reg32;
12229 }
12230
12231
12232 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12233 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12234 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12235 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12236 tp->write32_rx_mbox = tg3_write_flush_reg32;
12237 }
12238
12239 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12240 tp->read32 = tg3_read_indirect_reg32;
12241 tp->write32 = tg3_write_indirect_reg32;
12242 tp->read32_mbox = tg3_read_indirect_mbox;
12243 tp->write32_mbox = tg3_write_indirect_mbox;
12244 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12245 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12246
12247 iounmap(tp->regs);
12248 tp->regs = NULL;
12249
12250 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12251 pci_cmd &= ~PCI_COMMAND_MEMORY;
12252 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12253 }
12254 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12255 tp->read32_mbox = tg3_read32_mbox_5906;
12256 tp->write32_mbox = tg3_write32_mbox_5906;
12257 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12258 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12259 }
12260
12261 if (tp->write32 == tg3_write_indirect_reg32 ||
12262 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12263 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12264 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12265 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12266
12267 /* Get eeprom hw config before calling tg3_set_power_state().
12268 * In particular, the TG3_FLG2_IS_NIC flag must be
12269 * determined before calling tg3_set_power_state() so that
12270 * we know whether or not to switch out of Vaux power.
12271 * When the flag is set, it means that GPIO1 is used for eeprom
12272 * write protect and also implies that it is a LOM where GPIOs
12273 * are not used to switch power.
12274 */
12275 tg3_get_eeprom_hw_cfg(tp);
12276
12277 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12278 /* Allow reads and writes to the
12279 * APE register and memory space.
12280 */
12281 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12282 PCISTATE_ALLOW_APE_SHMEM_WR;
12283 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12284 pci_state_reg);
12285 }
12286
12287 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12288 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12289 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12290 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12291
12292 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12293 * GPIO1 driven high will bring 5700's external PHY out of reset.
12294 * It is also used as eeprom write protect on LOMs.
12295 */
12296 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12297 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12298 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12299 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12300 GRC_LCLCTRL_GPIO_OUTPUT1);
12301 /* Unused GPIO3 must be driven as output on 5752 because there
12302 * are no pull-up resistors on unused GPIO pins.
12303 */
12304 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12305 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12306
12307 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12308 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12309
12310 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12311 /* Turn off the debug UART. */
12312 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12313 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12314 /* Keep VMain power. */
12315 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12316 GRC_LCLCTRL_GPIO_OUTPUT0;
12317 }
12318
12319 /* Force the chip into D0. */
12320 err = tg3_set_power_state(tp, PCI_D0);
12321 if (err) {
12322 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12323 pci_name(tp->pdev));
12324 return err;
12325 }
12326
12327 /* 5700 B0 chips do not support checksumming correctly due
12328 * to hardware bugs.
12329 */
12330 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12331 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12332
12333 /* Derive the initial jumbo mode from the MTU assigned in
12334 * ether_setup() via the alloc_etherdev() call
12335 */
12336 if (tp->dev->mtu > ETH_DATA_LEN &&
12337 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12338 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12339
12340 /* Determine WakeOnLan speed to use. */
12341 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12342 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12343 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12344 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12345 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12346 } else {
12347 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12348 }
12349
12350 	/* A few boards don't want the Ethernet@WireSpeed PHY feature */
12351 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12352 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12353 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12354 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12355 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
12356 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12357 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12358
12359 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12360 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12361 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12362 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12363 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12364
12365 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12366 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12367 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12368 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12369 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12370 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12371 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12372 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12373 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12374 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12375 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12376 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
12377 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12378 }
12379
12380 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12381 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12382 tp->phy_otp = tg3_read_otp_phycfg(tp);
12383 if (tp->phy_otp == 0)
12384 tp->phy_otp = TG3_OTP_DEFAULT;
12385 }
12386
12387 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12388 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12389 else
12390 tp->mi_mode = MAC_MI_MODE_BASE;
12391
12392 tp->coalesce_mode = 0;
12393 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12394 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12395 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12396
12397 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12398 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12399
12400 err = tg3_mdio_init(tp);
12401 if (err)
12402 return err;
12403
12404 /* Initialize data/descriptor byte/word swapping. */
12405 val = tr32(GRC_MODE);
12406 val &= GRC_MODE_HOST_STACKUP;
12407 tw32(GRC_MODE, val | tp->grc_mode);
12408
12409 tg3_switch_clocks(tp);
12410
12411 /* Clear this out for sanity. */
12412 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12413
12414 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12415 &pci_state_reg);
12416 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12417 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12418 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12419
12420 if (chiprevid == CHIPREV_ID_5701_A0 ||
12421 chiprevid == CHIPREV_ID_5701_B0 ||
12422 chiprevid == CHIPREV_ID_5701_B2 ||
12423 chiprevid == CHIPREV_ID_5701_B5) {
12424 void __iomem *sram_base;
12425
12426 			/* Write some dummy words into the SRAM status block
12427 			 * area and see if they read back correctly. If the
12428 			 * read-back value is bad, force-enable the PCIX workaround.
12429 			 */
12430 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12431
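			/* On a chip with the PCIX target bug, the back-to-back
			 * writes to sram_base + 4 can clobber the adjacent
			 * word at sram_base; the readl() check below detects
			 * exactly that.
			 */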
12432 writel(0x00000000, sram_base);
12433 writel(0x00000000, sram_base + 4);
12434 writel(0xffffffff, sram_base + 4);
12435 if (readl(sram_base) != 0x00000000)
12436 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12437 }
12438 }
12439
12440 udelay(50);
12441 tg3_nvram_init(tp);
12442
12443 grc_misc_cfg = tr32(GRC_MISC_CFG);
12444 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12445
12446 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12447 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12448 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12449 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12450
12451 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12452 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12453 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12454 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12455 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12456 HOSTCC_MODE_CLRTICK_TXBD);
12457
12458 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12459 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12460 tp->misc_host_ctrl);
12461 }
12462
12463 /* Preserve the APE MAC_MODE bits */
12464 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12465 tp->mac_mode = tr32(MAC_MODE) |
12466 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12467 else
12468 tp->mac_mode = TG3_DEF_MAC_MODE;
12469
12470 /* these are limited to 10/100 only */
12471 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12472 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12473 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12474 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12475 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12476 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12477 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12478 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12479 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12480 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12481 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12482 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12483 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12484
12485 err = tg3_phy_probe(tp);
12486 if (err) {
12487 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12488 pci_name(tp->pdev), err);
12489 /* ... but do not return immediately ... */
12490 tg3_mdio_fini(tp);
12491 }
12492
12493 tg3_read_partno(tp);
12494 tg3_read_fw_ver(tp);
12495
12496 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12497 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12498 } else {
12499 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12500 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12501 else
12502 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12503 }
12504
12505 /* 5700 {AX,BX} chips have a broken status block link
12506 * change bit implementation, so we must use the
12507 * status register in those cases.
12508 */
12509 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12510 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12511 else
12512 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12513
12514 	/* The led_ctrl is set during tg3_phy_probe; here we may
12515 	 * have to force the link status polling mechanism based
12516 	 * upon subsystem IDs.
12517 	 */
12518 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12519 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12520 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12521 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12522 TG3_FLAG_USE_LINKCHG_REG);
12523 }
12524
12525 /* For all SERDES we poll the MAC status register. */
12526 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12527 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12528 else
12529 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12530
12531 	/* On all but the newer chips below, TX buffers that straddle
12532 	 * the 4GB address boundary can confuse the chip in some cases.
12533 	 */
12534 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12535 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12536 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12537 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12538 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12539 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12540 tp->dev->hard_start_xmit = tg3_start_xmit;
12541 else
12542 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
12543
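	/* The 2-byte offset aligns the IP header on a 4-byte boundary;
	 * 5701 in PCIX mode apparently cannot DMA to an offset RX
	 * buffer, so it gets no prepad.
	 */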
12544 tp->rx_offset = 2;
12545 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12546 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12547 tp->rx_offset = 0;
12548
12549 tp->rx_std_max_post = TG3_RX_RING_SIZE;
12550
12551 	/* Increment the rx prod index on the rx std ring by at most
12552 	 * 8 for these chips to work around hw errata.
12553 	 */
12554 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12555 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12556 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12557 tp->rx_std_max_post = 8;
12558
12559 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12560 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12561 PCIE_PWR_MGMT_L1_THRESH_MSK;
12562
12563 return err;
12564 }
12565
12566 #ifdef CONFIG_SPARC
12567 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12568 {
12569 struct net_device *dev = tp->dev;
12570 struct pci_dev *pdev = tp->pdev;
12571 struct device_node *dp = pci_device_to_OF_node(pdev);
12572 const unsigned char *addr;
12573 int len;
12574
12575 addr = of_get_property(dp, "local-mac-address", &len);
12576 if (addr && len == 6) {
12577 memcpy(dev->dev_addr, addr, 6);
12578 memcpy(dev->perm_addr, dev->dev_addr, 6);
12579 return 0;
12580 }
12581 return -ENODEV;
12582 }
12583
12584 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12585 {
12586 struct net_device *dev = tp->dev;
12587
12588 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12589 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12590 return 0;
12591 }
12592 #endif
12593
12594 static int __devinit tg3_get_device_address(struct tg3 *tp)
12595 {
12596 struct net_device *dev = tp->dev;
12597 u32 hi, lo, mac_offset;
12598 int addr_ok = 0;
12599
12600 #ifdef CONFIG_SPARC
12601 if (!tg3_get_macaddr_sparc(tp))
12602 return 0;
12603 #endif
12604
12605 mac_offset = 0x7c;
12606 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12607 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12608 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12609 mac_offset = 0xcc;
12610 if (tg3_nvram_lock(tp))
12611 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12612 else
12613 tg3_nvram_unlock(tp);
12614 }
12615 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12616 mac_offset = 0x10;
12617
12618 /* First try to get it from MAC address mailbox. */
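	/* Bootcode apparently stamps a valid mailbox address with
	 * 0x484b ("HK" in ASCII) in the upper half of the high word.
	 */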
12619 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
12620 if ((hi >> 16) == 0x484b) {
12621 dev->dev_addr[0] = (hi >> 8) & 0xff;
12622 dev->dev_addr[1] = (hi >> 0) & 0xff;
12623
12624 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12625 dev->dev_addr[2] = (lo >> 24) & 0xff;
12626 dev->dev_addr[3] = (lo >> 16) & 0xff;
12627 dev->dev_addr[4] = (lo >> 8) & 0xff;
12628 dev->dev_addr[5] = (lo >> 0) & 0xff;
12629
12630 /* Some old bootcode may report a 0 MAC address in SRAM */
12631 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12632 }
12633 if (!addr_ok) {
12634 /* Next, try NVRAM. */
12635 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
12636 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12637 dev->dev_addr[0] = ((hi >> 16) & 0xff);
12638 dev->dev_addr[1] = ((hi >> 24) & 0xff);
12639 dev->dev_addr[2] = ((lo >> 0) & 0xff);
12640 dev->dev_addr[3] = ((lo >> 8) & 0xff);
12641 dev->dev_addr[4] = ((lo >> 16) & 0xff);
12642 dev->dev_addr[5] = ((lo >> 24) & 0xff);
12643 }
12644 /* Finally just fetch it out of the MAC control regs. */
12645 else {
12646 hi = tr32(MAC_ADDR_0_HIGH);
12647 lo = tr32(MAC_ADDR_0_LOW);
12648
12649 dev->dev_addr[5] = lo & 0xff;
12650 dev->dev_addr[4] = (lo >> 8) & 0xff;
12651 dev->dev_addr[3] = (lo >> 16) & 0xff;
12652 dev->dev_addr[2] = (lo >> 24) & 0xff;
12653 dev->dev_addr[1] = hi & 0xff;
12654 dev->dev_addr[0] = (hi >> 8) & 0xff;
12655 }
12656 }
12657
12658 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
12659 #ifdef CONFIG_SPARC
12660 if (!tg3_get_default_macaddr_sparc(tp))
12661 return 0;
12662 #endif
12663 return -EINVAL;
12664 }
12665 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
12666 return 0;
12667 }
12668
12669 #define BOUNDARY_SINGLE_CACHELINE 1
12670 #define BOUNDARY_MULTI_CACHELINE 2
12671
12672 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12673 {
12674 int cacheline_size;
12675 u8 byte;
12676 int goal;
12677
12678 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12679 if (byte == 0)
12680 cacheline_size = 1024;
12681 else
12682 cacheline_size = (int) byte * 4;
12683
12684 /* On 5703 and later chips, the boundary bits have no
12685 * effect.
12686 */
12687 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12688 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12689 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12690 goto out;
12691
12692 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12693 goal = BOUNDARY_MULTI_CACHELINE;
12694 #else
12695 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12696 goal = BOUNDARY_SINGLE_CACHELINE;
12697 #else
12698 goal = 0;
12699 #endif
12700 #endif
12701
12702 if (!goal)
12703 goto out;
12704
12705 /* PCI controllers on most RISC systems tend to disconnect
12706 * when a device tries to burst across a cache-line boundary.
12707 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12708 *
12709 * Unfortunately, for PCI-E there are only limited
12710 * write-side controls for this, and thus for reads
12711 * we will still get the disconnects. We'll also waste
12712 * these PCI cycles for both read and write for chips
12713 * other than 5700 and 5701 which do not implement the
12714 * boundary bits.
12715 */
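	/* Worked example: a host reporting a PCI_CACHE_LINE_SIZE of 16
	 * dwords gives cacheline_size = 64; with the single-cacheline
	 * goal on conventional PCI, the switch below selects the
	 * 64-byte read and write boundary bits.
	 */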
12716 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12717 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12718 switch (cacheline_size) {
12719 case 16:
12720 case 32:
12721 case 64:
12722 case 128:
12723 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12724 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12725 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12726 } else {
12727 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12728 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12729 }
12730 break;
12731
12732 case 256:
12733 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12734 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12735 break;
12736
12737 default:
12738 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12739 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12740 break;
12741 }
12742 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12743 switch (cacheline_size) {
12744 case 16:
12745 case 32:
12746 case 64:
12747 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12748 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12749 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12750 break;
12751 }
12752 /* fallthrough */
12753 case 128:
12754 default:
12755 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12756 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12757 break;
12758 }
12759 } else {
12760 switch (cacheline_size) {
12761 case 16:
12762 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12763 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12764 DMA_RWCTRL_WRITE_BNDRY_16);
12765 break;
12766 }
12767 /* fallthrough */
12768 case 32:
12769 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12770 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12771 DMA_RWCTRL_WRITE_BNDRY_32);
12772 break;
12773 }
12774 /* fallthrough */
12775 case 64:
12776 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12777 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12778 DMA_RWCTRL_WRITE_BNDRY_64);
12779 break;
12780 }
12781 /* fallthrough */
12782 case 128:
12783 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12784 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12785 DMA_RWCTRL_WRITE_BNDRY_128);
12786 break;
12787 }
12788 /* fallthrough */
12789 case 256:
12790 val |= (DMA_RWCTRL_READ_BNDRY_256 |
12791 DMA_RWCTRL_WRITE_BNDRY_256);
12792 break;
12793 case 512:
12794 val |= (DMA_RWCTRL_READ_BNDRY_512 |
12795 DMA_RWCTRL_WRITE_BNDRY_512);
12796 break;
12797 case 1024:
12798 default:
12799 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12800 DMA_RWCTRL_WRITE_BNDRY_1024);
12801 break;
12802 }
12803 }
12804
12805 out:
12806 return val;
12807 }
12808
12809 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12810 {
12811 struct tg3_internal_buffer_desc test_desc;
12812 u32 sram_dma_descs;
12813 int i, ret;
12814
12815 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12816
12817 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12818 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12819 tw32(RDMAC_STATUS, 0);
12820 tw32(WDMAC_STATUS, 0);
12821
12822 tw32(BUFMGR_MODE, 0);
12823 tw32(FTQ_RESET, 0);
12824
12825 test_desc.addr_hi = ((u64) buf_dma) >> 32;
12826 test_desc.addr_lo = buf_dma & 0xffffffff;
12827 test_desc.nic_mbuf = 0x00002100;
12828 test_desc.len = size;
12829
12830 /*
12831 	 * HP ZX1 systems saw test failures for 5701 cards running at
12832 	 * 33MHz the *second* time the tg3 driver was loaded after an
12833 	 * initial scan.
12834 *
12835 * Broadcom tells me:
12836 * ...the DMA engine is connected to the GRC block and a DMA
12837 * reset may affect the GRC block in some unpredictable way...
12838 * The behavior of resets to individual blocks has not been tested.
12839 *
12840 * Broadcom noted the GRC reset will also reset all sub-components.
12841 */
12842 if (to_device) {
12843 test_desc.cqid_sqid = (13 << 8) | 2;
12844
12845 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12846 udelay(40);
12847 } else {
12848 test_desc.cqid_sqid = (16 << 8) | 7;
12849
12850 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12851 udelay(40);
12852 }
12853 test_desc.flags = 0x00000005;
12854
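	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the PCI memory window, then reset the window base.
	 */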
12855 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12856 u32 val;
12857
12858 val = *(((u32 *)&test_desc) + i);
12859 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12860 sram_dma_descs + (i * sizeof(u32)));
12861 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12862 }
12863 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12864
12865 if (to_device) {
12866 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12867 } else {
12868 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12869 }
12870
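	/* Poll the matching completion FIFO for up to 4ms (40 * 100us),
	 * waiting for our descriptor to show up, i.e. for the DMA to
	 * finish.
	 */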
12871 ret = -ENODEV;
12872 for (i = 0; i < 40; i++) {
12873 u32 val;
12874
12875 if (to_device)
12876 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12877 else
12878 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12879 if ((val & 0xffff) == sram_dma_descs) {
12880 ret = 0;
12881 break;
12882 }
12883
12884 udelay(100);
12885 }
12886
12887 return ret;
12888 }
12889
12890 #define TEST_BUFFER_SIZE 0x2000
12891
12892 static int __devinit tg3_test_dma(struct tg3 *tp)
12893 {
12894 dma_addr_t buf_dma;
12895 u32 *buf, saved_dma_rwctrl;
12896 int ret;
12897
12898 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12899 if (!buf) {
12900 ret = -ENOMEM;
12901 goto out_nofree;
12902 }
12903
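	/* Seed dma_rwctrl with the PCI write (0x7) and read (0x6)
	 * command codes, then merge in the burst-boundary bits derived
	 * from the host cache line size.
	 */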
12904 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12905 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12906
12907 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12908
12909 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12910 /* DMA read watermark not used on PCIE */
12911 tp->dma_rwctrl |= 0x00180000;
12912 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12913 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12914 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12915 tp->dma_rwctrl |= 0x003f0000;
12916 else
12917 tp->dma_rwctrl |= 0x003f000f;
12918 } else {
12919 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12920 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12921 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12922 u32 read_water = 0x7;
12923
12924 /* If the 5704 is behind the EPB bridge, we can
12925 * do the less restrictive ONE_DMA workaround for
12926 * better performance.
12927 */
12928 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12929 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12930 tp->dma_rwctrl |= 0x8000;
12931 else if (ccval == 0x6 || ccval == 0x7)
12932 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12933
12934 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12935 read_water = 4;
12936 /* Set bit 23 to enable PCIX hw bug fix */
12937 tp->dma_rwctrl |=
12938 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12939 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12940 (1 << 23);
12941 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12942 /* 5780 always in PCIX mode */
12943 tp->dma_rwctrl |= 0x00144000;
12944 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12945 /* 5714 always in PCIX mode */
12946 tp->dma_rwctrl |= 0x00148000;
12947 } else {
12948 tp->dma_rwctrl |= 0x001b000f;
12949 }
12950 }
12951
12952 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12953 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12954 tp->dma_rwctrl &= 0xfffffff0;
12955
12956 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12957 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12958 /* Remove this if it causes problems for some boards. */
12959 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12960
12961 /* On 5700/5701 chips, we need to set this bit.
12962 * Otherwise the chip will issue cacheline transactions
12963 * to streamable DMA memory with not all the byte
12964 * enables turned on. This is an error on several
12965 * RISC PCI controllers, in particular sparc64.
12966 *
12967 * On 5703/5704 chips, this bit has been reassigned
12968 * a different meaning. In particular, it is used
12969 * on those chips to enable a PCI-X workaround.
12970 */
12971 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12972 }
12973
12974 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12975
12976 #if 0
12977 /* Unneeded, already done by tg3_get_invariants. */
12978 tg3_switch_clocks(tp);
12979 #endif
12980
12981 ret = 0;
12982 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12983 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12984 goto out;
12985
12986 /* It is best to perform DMA test with maximum write burst size
12987 * to expose the 5700/5701 write DMA bug.
12988 */
12989 saved_dma_rwctrl = tp->dma_rwctrl;
12990 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12991 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12992
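	/* Fill the buffer with a known pattern, DMA it to the chip and
	 * back, then verify.  On corruption the write boundary is
	 * clamped to 16 bytes and the test retried; corruption at the
	 * 16-byte boundary is fatal.
	 */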
12993 while (1) {
12994 u32 *p = buf, i;
12995
12996 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12997 p[i] = i;
12998
12999 /* Send the buffer to the chip. */
13000 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
13001 if (ret) {
13002 			printk(KERN_ERR "tg3_test_dma() write to device failed, err %d\n", ret);
13003 break;
13004 }
13005
13006 #if 0
13007 /* validate data reached card RAM correctly. */
13008 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13009 u32 val;
13010 tg3_read_mem(tp, 0x2100 + (i*4), &val);
13011 if (le32_to_cpu(val) != p[i]) {
13012 				printk(KERN_ERR "tg3_test_dma() card buffer corrupted on write! (%u != %u)\n", le32_to_cpu(val), i);
13013 /* ret = -ENODEV here? */
13014 }
13015 p[i] = 0;
13016 }
13017 #endif
13018 /* Now read it back. */
13019 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13020 if (ret) {
13021 			printk(KERN_ERR "tg3_test_dma() read from device failed, err %d\n", ret);
13022
13023 break;
13024 }
13025
13026 /* Verify it. */
13027 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13028 if (p[i] == i)
13029 continue;
13030
13031 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13032 DMA_RWCTRL_WRITE_BNDRY_16) {
13033 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13034 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13035 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13036 break;
13037 } else {
13038 				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%u != %u)\n", p[i], i);
13039 ret = -ENODEV;
13040 goto out;
13041 }
13042 }
13043
13044 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13045 /* Success. */
13046 ret = 0;
13047 break;
13048 }
13049 }
13050 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13051 DMA_RWCTRL_WRITE_BNDRY_16) {
13052 static struct pci_device_id dma_wait_state_chipsets[] = {
13053 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13054 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13055 { },
13056 };
13057
13058 /* DMA test passed without adjusting DMA boundary,
13059 * now look for chipsets that are known to expose the
13060 * DMA bug without failing the test.
13061 */
13062 if (pci_dev_present(dma_wait_state_chipsets)) {
13063 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13064 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13065 }
13066 else
13067 /* Safe to use the calculated DMA boundary. */
13068 tp->dma_rwctrl = saved_dma_rwctrl;
13069
13070 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13071 }
13072
13073 out:
13074 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13075 out_nofree:
13076 return ret;
13077 }
13078
13079 static void __devinit tg3_init_link_config(struct tg3 *tp)
13080 {
13081 tp->link_config.advertising =
13082 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13083 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13084 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13085 ADVERTISED_Autoneg | ADVERTISED_MII);
13086 tp->link_config.speed = SPEED_INVALID;
13087 tp->link_config.duplex = DUPLEX_INVALID;
13088 tp->link_config.autoneg = AUTONEG_ENABLE;
13089 tp->link_config.active_speed = SPEED_INVALID;
13090 tp->link_config.active_duplex = DUPLEX_INVALID;
13091 tp->link_config.phy_is_low_power = 0;
13092 tp->link_config.orig_speed = SPEED_INVALID;
13093 tp->link_config.orig_duplex = DUPLEX_INVALID;
13094 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13095 }
13096
13097 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13098 {
13099 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13100 tp->bufmgr_config.mbuf_read_dma_low_water =
13101 DEFAULT_MB_RDMA_LOW_WATER_5705;
13102 tp->bufmgr_config.mbuf_mac_rx_low_water =
13103 DEFAULT_MB_MACRX_LOW_WATER_5705;
13104 tp->bufmgr_config.mbuf_high_water =
13105 DEFAULT_MB_HIGH_WATER_5705;
13106 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13107 tp->bufmgr_config.mbuf_mac_rx_low_water =
13108 DEFAULT_MB_MACRX_LOW_WATER_5906;
13109 tp->bufmgr_config.mbuf_high_water =
13110 DEFAULT_MB_HIGH_WATER_5906;
13111 }
13112
13113 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13114 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13115 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13116 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13117 tp->bufmgr_config.mbuf_high_water_jumbo =
13118 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13119 } else {
13120 tp->bufmgr_config.mbuf_read_dma_low_water =
13121 DEFAULT_MB_RDMA_LOW_WATER;
13122 tp->bufmgr_config.mbuf_mac_rx_low_water =
13123 DEFAULT_MB_MACRX_LOW_WATER;
13124 tp->bufmgr_config.mbuf_high_water =
13125 DEFAULT_MB_HIGH_WATER;
13126
13127 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13128 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13129 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13130 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13131 tp->bufmgr_config.mbuf_high_water_jumbo =
13132 DEFAULT_MB_HIGH_WATER_JUMBO;
13133 }
13134
13135 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13136 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13137 }
13138
13139 static char * __devinit tg3_phy_string(struct tg3 *tp)
13140 {
13141 switch (tp->phy_id & PHY_ID_MASK) {
13142 case PHY_ID_BCM5400: return "5400";
13143 case PHY_ID_BCM5401: return "5401";
13144 case PHY_ID_BCM5411: return "5411";
13145 case PHY_ID_BCM5701: return "5701";
13146 case PHY_ID_BCM5703: return "5703";
13147 case PHY_ID_BCM5704: return "5704";
13148 case PHY_ID_BCM5705: return "5705";
13149 case PHY_ID_BCM5750: return "5750";
13150 case PHY_ID_BCM5752: return "5752";
13151 case PHY_ID_BCM5714: return "5714";
13152 case PHY_ID_BCM5780: return "5780";
13153 case PHY_ID_BCM5755: return "5755";
13154 case PHY_ID_BCM5787: return "5787";
13155 case PHY_ID_BCM5784: return "5784";
13156 case PHY_ID_BCM5756: return "5722/5756";
13157 case PHY_ID_BCM5906: return "5906";
13158 case PHY_ID_BCM5761: return "5761";
13159 case PHY_ID_BCM8002: return "8002/serdes";
13160 case 0: return "serdes";
13161 default: return "unknown";
13162 }
13163 }
13164
13165 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13166 {
13167 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13168 strcpy(str, "PCI Express");
13169 return str;
13170 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13171 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13172
13173 strcpy(str, "PCIX:");
13174
13175 if ((clock_ctrl == 7) ||
13176 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13177 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13178 strcat(str, "133MHz");
13179 else if (clock_ctrl == 0)
13180 strcat(str, "33MHz");
13181 else if (clock_ctrl == 2)
13182 strcat(str, "50MHz");
13183 else if (clock_ctrl == 4)
13184 strcat(str, "66MHz");
13185 else if (clock_ctrl == 6)
13186 strcat(str, "100MHz");
13187 } else {
13188 strcpy(str, "PCI:");
13189 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13190 strcat(str, "66MHz");
13191 else
13192 strcat(str, "33MHz");
13193 }
13194 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13195 strcat(str, ":32-bit");
13196 else
13197 strcat(str, ":64-bit");
13198 return str;
13199 }
13200
13201 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13202 {
13203 struct pci_dev *peer;
13204 unsigned int func, devnr = tp->pdev->devfn & ~7;
13205
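	/* devfn & ~7 masks off the function number, yielding function 0
	 * of this slot; scan all eight functions for the sibling port.
	 */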
13206 for (func = 0; func < 8; func++) {
13207 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13208 if (peer && peer != tp->pdev)
13209 break;
13210 pci_dev_put(peer);
13211 }
13212 	/* 5704 can be configured in single-port mode; set peer to
13213 	 * tp->pdev in that case.
13214 	 */
13215 if (!peer) {
13216 peer = tp->pdev;
13217 return peer;
13218 }
13219
13220 /*
13221 * We don't need to keep the refcount elevated; there's no way
13222 * to remove one half of this device without removing the other
13223 */
13224 pci_dev_put(peer);
13225
13226 return peer;
13227 }
13228
13229 static void __devinit tg3_init_coal(struct tg3 *tp)
13230 {
13231 struct ethtool_coalesce *ec = &tp->coal;
13232
13233 memset(ec, 0, sizeof(*ec));
13234 ec->cmd = ETHTOOL_GCOALESCE;
13235 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13236 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13237 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13238 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13239 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13240 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13241 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13242 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13243 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13244
13245 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13246 HOSTCC_MODE_CLRTICK_TXBD)) {
13247 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13248 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13249 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13250 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13251 }
13252
13253 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13254 ec->rx_coalesce_usecs_irq = 0;
13255 ec->tx_coalesce_usecs_irq = 0;
13256 ec->stats_block_coalesce_usecs = 0;
13257 }
13258 }
13259
13260 static int __devinit tg3_init_one(struct pci_dev *pdev,
13261 const struct pci_device_id *ent)
13262 {
13263 static int tg3_version_printed = 0;
13264 resource_size_t tg3reg_len;
13265 struct net_device *dev;
13266 struct tg3 *tp;
13267 int err, pm_cap;
13268 char str[40];
13269 u64 dma_mask, persist_dma_mask;
13270
13271 if (tg3_version_printed++ == 0)
13272 printk(KERN_INFO "%s", version);
13273
13274 err = pci_enable_device(pdev);
13275 if (err) {
13276 printk(KERN_ERR PFX "Cannot enable PCI device, "
13277 "aborting.\n");
13278 return err;
13279 }
13280
13281 if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM)) {
13282 printk(KERN_ERR PFX "Cannot find proper PCI device "
13283 "base address, aborting.\n");
13284 err = -ENODEV;
13285 goto err_out_disable_pdev;
13286 }
13287
13288 err = pci_request_regions(pdev, DRV_MODULE_NAME);
13289 if (err) {
13290 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13291 "aborting.\n");
13292 goto err_out_disable_pdev;
13293 }
13294
13295 pci_set_master(pdev);
13296
13297 /* Find power-management capability. */
13298 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13299 if (pm_cap == 0) {
13300 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
13301 "aborting.\n");
13302 err = -EIO;
13303 goto err_out_free_res;
13304 }
13305
13306 dev = alloc_etherdev(sizeof(*tp));
13307 if (!dev) {
13308 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13309 err = -ENOMEM;
13310 goto err_out_free_res;
13311 }
13312
13313 SET_NETDEV_DEV(dev, &pdev->dev);
13314
13315 #if TG3_VLAN_TAG_USED
13316 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
13317 dev->vlan_rx_register = tg3_vlan_rx_register;
13318 #endif
13319
13320 tp = netdev_priv(dev);
13321 tp->pdev = pdev;
13322 tp->dev = dev;
13323 tp->pm_cap = pm_cap;
13324 tp->rx_mode = TG3_DEF_RX_MODE;
13325 tp->tx_mode = TG3_DEF_TX_MODE;
13326
13327 if (tg3_debug > 0)
13328 tp->msg_enable = tg3_debug;
13329 else
13330 tp->msg_enable = TG3_DEF_MSG_ENABLE;
13331
13332 /* The word/byte swap controls here control register access byte
13333 * swapping. DMA data byte swapping is controlled in the GRC_MODE
13334 * setting below.
13335 */
13336 tp->misc_host_ctrl =
13337 MISC_HOST_CTRL_MASK_PCI_INT |
13338 MISC_HOST_CTRL_WORD_SWAP |
13339 MISC_HOST_CTRL_INDIR_ACCESS |
13340 MISC_HOST_CTRL_PCISTATE_RW;
13341
13342 /* The NONFRM (non-frame) byte/word swap controls take effect
13343 * on descriptor entries, anything which isn't packet data.
13344 *
13345 * The StrongARM chips on the board (one for tx, one for rx)
13346 * are running in big-endian mode.
13347 */
13348 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13349 GRC_MODE_WSWAP_NONFRM_DATA);
13350 #ifdef __BIG_ENDIAN
13351 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13352 #endif
13353 spin_lock_init(&tp->lock);
13354 spin_lock_init(&tp->indirect_lock);
13355 INIT_WORK(&tp->reset_task, tg3_reset_task);
13356
13357 dev->mem_start = pci_resource_start(pdev, BAR_0);
13358 tg3reg_len = pci_resource_len(pdev, BAR_0);
13359 dev->mem_end = dev->mem_start + tg3reg_len;
13360
13361 tp->regs = ioremap_nocache(dev->mem_start, tg3reg_len);
13362 if (!tp->regs) {
13363 printk(KERN_ERR PFX "Cannot map device registers, "
13364 "aborting.\n");
13365 err = -ENOMEM;
13366 goto err_out_free_dev;
13367 }
13368
13369 tg3_init_link_config(tp);
13370
13371 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13372 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13373 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
13374
13375 dev->open = tg3_open;
13376 dev->stop = tg3_close;
13377 dev->get_stats = tg3_get_stats;
13378 dev->set_multicast_list = tg3_set_rx_mode;
13379 dev->set_mac_address = tg3_set_mac_addr;
13380 dev->do_ioctl = tg3_ioctl;
13381 dev->tx_timeout = tg3_tx_timeout;
13382 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
13383 dev->ethtool_ops = &tg3_ethtool_ops;
13384 dev->watchdog_timeo = TG3_TX_TIMEOUT;
13385 dev->change_mtu = tg3_change_mtu;
13386 dev->irq = pdev->irq;
13387 #ifdef CONFIG_NET_POLL_CONTROLLER
13388 dev->poll_controller = tg3_poll_controller;
13389 #endif
13390
13391 err = tg3_get_invariants(tp);
13392 if (err) {
13393 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
13394 "aborting.\n");
13395 goto err_out_iounmap;
13396 }
13397
13398 /* The EPB bridge inside 5714, 5715, and 5780 and any
13399 * device behind the EPB cannot support DMA addresses > 40-bit.
13400 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
13401 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
13402 * do DMA address check in tg3_start_xmit().
13403 */
13404 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
13405 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
13406 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
13407 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
13408 #ifdef CONFIG_HIGHMEM
13409 dma_mask = DMA_64BIT_MASK;
13410 #endif
13411 } else
13412 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
13413
13414 /* Configure DMA attributes. */
13415 if (dma_mask > DMA_32BIT_MASK) {
13416 err = pci_set_dma_mask(pdev, dma_mask);
13417 if (!err) {
13418 dev->features |= NETIF_F_HIGHDMA;
13419 err = pci_set_consistent_dma_mask(pdev,
13420 persist_dma_mask);
13421 if (err < 0) {
13422 printk(KERN_ERR PFX "Unable to obtain 64 bit "
13423 "DMA for consistent allocations\n");
13424 goto err_out_iounmap;
13425 }
13426 }
13427 }
13428 if (err || dma_mask == DMA_32BIT_MASK) {
13429 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
13430 if (err) {
13431 printk(KERN_ERR PFX "No usable DMA configuration, "
13432 "aborting.\n");
13433 goto err_out_iounmap;
13434 }
13435 }
13436
13437 tg3_init_bufmgr_config(tp);
13438
13439 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13440 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
13441 }
13442 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13443 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13444 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
13445 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13446 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13447 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13448 } else {
13449 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
13450 }
13451
13452 /* TSO is on by default on chips that support hardware TSO.
13453 * Firmware TSO on older chips gives lower performance, so it
13454 * is off by default, but can be enabled using ethtool.
13455 */
13456 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13457 dev->features |= NETIF_F_TSO;
13458 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
13459 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
13460 dev->features |= NETIF_F_TSO6;
13461 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13462 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13463 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13464 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13465 dev->features |= NETIF_F_TSO_ECN;
13466 }
13467
13468
13469 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13470 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13471 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13472 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13473 tp->rx_pending = 63;
13474 }
13475
13476 err = tg3_get_device_address(tp);
13477 if (err) {
13478 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13479 "aborting.\n");
13480 goto err_out_iounmap;
13481 }
13482
13483 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13484 if (!(pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM)) {
13485 printk(KERN_ERR PFX "Cannot find proper PCI device "
13486 "base address for APE, aborting.\n");
13487 err = -ENODEV;
13488 goto err_out_iounmap;
13489 }
13490
13491 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
13492 if (!tp->aperegs) {
13493 printk(KERN_ERR PFX "Cannot map APE registers, "
13494 "aborting.\n");
13495 err = -ENOMEM;
13496 goto err_out_iounmap;
13497 }
13498
13499 tg3_ape_lock_init(tp);
13500 }
13501
13502 /*
13503 	 * Reset the chip in case a UNDI or EFI driver did not shut it
13504 	 * down cleanly; the DMA self test will enable WDMAC and we'd
13505 	 * see (spurious) pending DMA on the PCI bus at that point.
13506 */
13507 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13508 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13509 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13510 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13511 }
13512
13513 err = tg3_test_dma(tp);
13514 if (err) {
13515 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13516 goto err_out_apeunmap;
13517 }
13518
13519 	/* Tigon3 can offload IPv4 checksums only... and some chips
13520 	 * have buggy checksumming.
13521 	 */
13522 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
13523 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13524 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13525 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13526 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13527 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13528 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13529 dev->features |= NETIF_F_IPV6_CSUM;
13530
13531 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13532 } else
13533 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
13534
13535 /* flow control autonegotiation is default behavior */
13536 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
13537 tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
13538
13539 tg3_init_coal(tp);
13540
13541 pci_set_drvdata(pdev, dev);
13542
13543 err = register_netdev(dev);
13544 if (err) {
13545 printk(KERN_ERR PFX "Cannot register net device, "
13546 "aborting.\n");
13547 goto err_out_apeunmap;
13548 }
13549
13550 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
13551 "(%s) %s Ethernet %pM\n",
13552 dev->name,
13553 tp->board_part_number,
13554 tp->pci_chip_rev_id,
13555 tg3_phy_string(tp),
13556 tg3_bus_string(tp, str),
13557 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13558 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13559 "10/100/1000Base-T")),
13560 dev->dev_addr);
13561
13562 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
13563 "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
13564 dev->name,
13565 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13566 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13567 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13568 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
13569 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
13570 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
13571 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13572 dev->name, tp->dma_rwctrl,
13573 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13574 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
13575
13576 return 0;
13577
13578 err_out_apeunmap:
13579 if (tp->aperegs) {
13580 iounmap(tp->aperegs);
13581 tp->aperegs = NULL;
13582 }
13583
13584 err_out_iounmap:
13585 if (tp->regs) {
13586 iounmap(tp->regs);
13587 tp->regs = NULL;
13588 }
13589
13590 err_out_free_dev:
13591 free_netdev(dev);
13592
13593 err_out_free_res:
13594 pci_release_regions(pdev);
13595
13596 err_out_disable_pdev:
13597 pci_disable_device(pdev);
13598 pci_set_drvdata(pdev, NULL);
13599 return err;
13600 }
13601
13602 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13603 {
13604 struct net_device *dev = pci_get_drvdata(pdev);
13605
13606 if (dev) {
13607 struct tg3 *tp = netdev_priv(dev);
13608
13609 flush_scheduled_work();
13610
13611 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13612 tg3_phy_fini(tp);
13613 tg3_mdio_fini(tp);
13614 }
13615
13616 unregister_netdev(dev);
13617 if (tp->aperegs) {
13618 iounmap(tp->aperegs);
13619 tp->aperegs = NULL;
13620 }
13621 if (tp->regs) {
13622 iounmap(tp->regs);
13623 tp->regs = NULL;
13624 }
13625 free_netdev(dev);
13626 pci_release_regions(pdev);
13627 pci_disable_device(pdev);
13628 pci_set_drvdata(pdev, NULL);
13629 }
13630 }
13631
13632 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13633 {
13634 struct net_device *dev = pci_get_drvdata(pdev);
13635 struct tg3 *tp = netdev_priv(dev);
13636 pci_power_t target_state;
13637 int err;
13638
13639 /* PCI register 4 needs to be saved whether netif_running() or not.
13640 * MSI address and data need to be saved if using MSI and
13641 * netif_running().
13642 */
13643 pci_save_state(pdev);
13644
13645 if (!netif_running(dev))
13646 return 0;
13647
13648 flush_scheduled_work();
13649 tg3_phy_stop(tp);
13650 tg3_netif_stop(tp);
13651
13652 del_timer_sync(&tp->timer);
13653
13654 tg3_full_lock(tp, 1);
13655 tg3_disable_ints(tp);
13656 tg3_full_unlock(tp);
13657
13658 netif_device_detach(dev);
13659
13660 tg3_full_lock(tp, 0);
13661 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13662 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
13663 tg3_full_unlock(tp);
13664
13665 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
13666
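	/* If the power transition fails, restart the hardware and
	 * reattach the interface so a failed suspend still leaves a
	 * working device.
	 */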
13667 err = tg3_set_power_state(tp, target_state);
13668 if (err) {
13669 int err2;
13670
13671 tg3_full_lock(tp, 0);
13672
13673 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13674 err2 = tg3_restart_hw(tp, 1);
13675 if (err2)
13676 goto out;
13677
13678 tp->timer.expires = jiffies + tp->timer_offset;
13679 add_timer(&tp->timer);
13680
13681 netif_device_attach(dev);
13682 tg3_netif_start(tp);
13683
13684 out:
13685 tg3_full_unlock(tp);
13686
13687 if (!err2)
13688 tg3_phy_start(tp);
13689 }
13690
13691 return err;
13692 }
13693
13694 static int tg3_resume(struct pci_dev *pdev)
13695 {
13696 struct net_device *dev = pci_get_drvdata(pdev);
13697 struct tg3 *tp = netdev_priv(dev);
13698 int err;
13699
13700 pci_restore_state(tp->pdev);
13701
13702 if (!netif_running(dev))
13703 return 0;
13704
13705 err = tg3_set_power_state(tp, PCI_D0);
13706 if (err)
13707 return err;
13708
13709 netif_device_attach(dev);
13710
13711 tg3_full_lock(tp, 0);
13712
13713 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13714 err = tg3_restart_hw(tp, 1);
13715 if (err)
13716 goto out;
13717
13718 tp->timer.expires = jiffies + tp->timer_offset;
13719 add_timer(&tp->timer);
13720
13721 tg3_netif_start(tp);
13722
13723 out:
13724 tg3_full_unlock(tp);
13725
13726 if (!err)
13727 tg3_phy_start(tp);
13728
13729 return err;
13730 }
13731
13732 static struct pci_driver tg3_driver = {
13733 .name = DRV_MODULE_NAME,
13734 .id_table = tg3_pci_tbl,
13735 .probe = tg3_init_one,
13736 .remove = __devexit_p(tg3_remove_one),
13737 .suspend = tg3_suspend,
13738 .resume = tg3_resume
13739 };
13740
13741 static int __init tg3_init(void)
13742 {
13743 return pci_register_driver(&tg3_driver);
13744 }
13745
13746 static void __exit tg3_cleanup(void)
13747 {
13748 pci_unregister_driver(&tg3_driver);
13749 }
13750
13751 module_init(tg3_init);
13752 module_exit(tg3_cleanup);