[TG3]: Add TSO workaround using GSO
[deliverable/linux.git] / drivers / net / tg3.c
CommitLineData
1da177e4
LT
1/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005 Broadcom Corporation.
8 *
9 * Firmware is:
49cabf49
MC
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
1da177e4
LT
16 */
17
18#include <linux/config.h>
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/kernel.h>
23#include <linux/types.h>
24#include <linux/compiler.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
14c85021 27#include <linux/in.h>
1da177e4
LT
28#include <linux/init.h>
29#include <linux/ioport.h>
30#include <linux/pci.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/skbuff.h>
34#include <linux/ethtool.h>
35#include <linux/mii.h>
36#include <linux/if_vlan.h>
37#include <linux/ip.h>
38#include <linux/tcp.h>
39#include <linux/workqueue.h>
61487480 40#include <linux/prefetch.h>
f9a5f7d3 41#include <linux/dma-mapping.h>
1da177e4
LT
42
43#include <net/checksum.h>
44
45#include <asm/system.h>
46#include <asm/io.h>
47#include <asm/byteorder.h>
48#include <asm/uaccess.h>
49
50#ifdef CONFIG_SPARC64
51#include <asm/idprom.h>
52#include <asm/oplib.h>
53#include <asm/pbm.h>
54#endif
55
56#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
57#define TG3_VLAN_TAG_USED 1
58#else
59#define TG3_VLAN_TAG_USED 0
60#endif
61
62#ifdef NETIF_F_TSO
63#define TG3_TSO_SUPPORT 1
64#else
65#define TG3_TSO_SUPPORT 0
66#endif
67
68#include "tg3.h"
69
70#define DRV_MODULE_NAME "tg3"
71#define PFX DRV_MODULE_NAME ": "
9cb3528c
MC
72#define DRV_MODULE_VERSION "3.60"
73#define DRV_MODULE_RELDATE "June 17, 2006"
1da177e4
LT
74
75#define TG3_DEF_MAC_MODE 0
76#define TG3_DEF_RX_MODE 0
77#define TG3_DEF_TX_MODE 0
78#define TG3_DEF_MSG_ENABLE \
79 (NETIF_MSG_DRV | \
80 NETIF_MSG_PROBE | \
81 NETIF_MSG_LINK | \
82 NETIF_MSG_TIMER | \
83 NETIF_MSG_IFDOWN | \
84 NETIF_MSG_IFUP | \
85 NETIF_MSG_RX_ERR | \
86 NETIF_MSG_TX_ERR)
87
88/* length of time before we decide the hardware is borked,
89 * and dev->tx_timeout() should be called to fix the problem
90 */
91#define TG3_TX_TIMEOUT (5 * HZ)
92
93/* hardware minimum and maximum for a single frame's data payload */
94#define TG3_MIN_MTU 60
95#define TG3_MAX_MTU(tp) \
0f893dc6 96 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
1da177e4
LT
97
98/* These numbers seem to be hard coded in the NIC firmware somehow.
99 * You can't change the ring sizes, but you can change where you place
100 * them in the NIC onboard memory.
101 */
102#define TG3_RX_RING_SIZE 512
103#define TG3_DEF_RX_RING_PENDING 200
104#define TG3_RX_JUMBO_RING_SIZE 256
105#define TG3_DEF_RX_JUMBO_RING_PENDING 100
106
107/* Do not place this n-ring entries value into the tp struct itself,
108 * we really want to expose these constants to GCC so that modulo et
109 * al. operations are done with shifts and masks instead of with
110 * hw multiply/modulo instructions. Another solution would be to
111 * replace things like '% foo' with '& (foo - 1)'.
112 */
113#define TG3_RX_RCB_RING_SIZE(tp) \
114 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
115
116#define TG3_TX_RING_SIZE 512
117#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
118
119#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
120 TG3_RX_RING_SIZE)
121#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122 TG3_RX_JUMBO_RING_SIZE)
123#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124 TG3_RX_RCB_RING_SIZE(tp))
125#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
126 TG3_TX_RING_SIZE)
1da177e4 127#define TX_BUFFS_AVAIL(TP) \
51b91468
MC
128 ((TP)->tx_pending - \
129 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
1da177e4
LT
130#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
131
132#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
133#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
134
135/* minimum number of free TX descriptors required to wake up TX process */
136#define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
137
138/* number of ETHTOOL_GSTATS u64's */
139#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
4cafd3f5
MC
141#define TG3_NUM_TEST 6
142
1da177e4
LT
/* Banner printed once at module load / first probe. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Bitmap of NETIF_MSG_* categories; -1 means "use TG3_DEF_MSG_ENABLE". */
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154
/* PCI IDs this driver binds to.  Every entry matches any subsystem
 * vendor/device (PCI_ANY_ID); the table is terminated by a zero entry.
 */
static struct pci_device_id tg3_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	/* SysKonnect and Altima boards are rebadged Tigon3 parts. */
	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }	/* terminator */
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
270
/* Names reported for ETHTOOL_GSTATS; order must match the u64 layout of
 * struct tg3_ethtool_stats (TG3_NUM_STATS entries).
 */
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
351
4cafd3f5
MC
/* Names reported for ETHTOOL_TEST; order must match the result slots
 * filled in by the self-test code (TG3_NUM_TEST entries).
 */
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "loopback test (offline)" },
	{ "interrupt test (offline)" },
};
362
b401e9e2
MC
/* Plain (posted) MMIO register write; no read-back flush. */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
367
/* Plain MMIO register read. */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off));
}
372
1da177e4
LT
/* Write a chip register through PCI config space (register-base/data
 * window) instead of MMIO.  indirect_lock serializes the two config
 * cycles so concurrent users cannot interleave base/data pairs.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
382
/* MMIO write followed by a read-back of the same register to flush the
 * posted write out to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
388
/* Read a chip register through the PCI config-space window; paired with
 * tg3_write_indirect_reg32 and protected by the same indirect_lock.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
400
/* Write a mailbox register via PCI config space.  Two mailboxes (RX
 * return consumer and RX std producer) have dedicated config-space
 * aliases and bypass the windowed path; everything else goes through
 * the register window at mailbox offset + 0x5600.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
430
/* Read a mailbox register through the config-space window (mailboxes
 * live at register offset + 0x5600).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
442
b401e9e2
MC
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods: tp->write32 is already non-posted,
		 * so no read-back flush is needed.
		 */
		tp->write32(tp, off, val);
	else {
		/* Posted method: flush with a read, delaying first if asked. */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
467
09ee929c
MC
/* Mailbox write with an optional read-back flush; skipped when the
 * chipset reorders mailbox writes anyway or the ICH workaround forces
 * indirect (non-posted) access.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
475
/* TX mailbox write.  TXD_MBOX_HWBUG chips need the value written twice;
 * MBOX_WRITE_REORDER chips need a read-back to force ordering.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
485
20094930 486#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
09ee929c 487#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
20094930
MC
488#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
489#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
09ee929c 490#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
20094930
MC
491
492#define tw32(reg,val) tp->write32(tp, reg, val)
b401e9e2
MC
493#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
494#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
20094930 495#define tr32(reg) tp->read32(tp, reg)
1da177e4
LT
496
/* Write a word of NIC-internal SRAM through the memory window, either
 * via PCI config space (SRAM_USE_CONFIG) or via MMIO with flushes.
 * The window base is restored to zero afterwards in both paths.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
517
1da177e4
LT
/* Read a word of NIC-internal SRAM through the memory window; mirror
 * image of tg3_write_mem.  Result is stored through *val.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
538
/* Mask PCI interrupts at the host-control level and write 1 to the
 * interrupt mailbox to disable chip interrupt generation.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
545
546static inline void tg3_cond_int(struct tg3 *tp)
547{
38f3843e
MC
548 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
549 (tp->hw_status->status & SD_STATUS_UPDATED))
1da177e4
LT
550 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
551}
552
/* Re-enable chip interrupts: clear irq_sync (published before the MMIO
 * writes via wmb), unmask PCI interrupts, and restore the last tag to
 * the interrupt mailbox (twice for 1-shot MSI chips).  Finishes with
 * tg3_cond_int to kick a pending, unsignalled status update.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
567
04237ddd
MC
568static inline unsigned int tg3_has_work(struct tg3 *tp)
569{
570 struct tg3_hw_status *sblk = tp->hw_status;
571 unsigned int work_exists = 0;
572
573 /* check for phy events */
574 if (!(tp->tg3_flags &
575 (TG3_FLAG_USE_LINKCHG_REG |
576 TG3_FLAG_POLL_SERDES))) {
577 if (sblk->status & SD_STATUS_LINK_CHG)
578 work_exists = 1;
579 }
580 /* check for RX/TX work to do */
581 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
582 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
583 work_exists = 1;
584
585 return work_exists;
586}
587
/* tg3_restart_ints
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
608
/* Quiesce the netdev: refresh trans_start first so the watchdog does
 * not fire while the queue is deliberately stopped, then disable
 * polling and the TX queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_poll_disable(tp->dev);
	netif_tx_disable(tp->dev);
}
615
/* Undo tg3_netif_stop: wake the TX queue, re-enable polling, mark the
 * status block updated and re-enable interrupts so pending events are
 * processed immediately.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	netif_poll_enable(tp->dev);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
627
/* Step the core clock back to its base frequency.  Each write uses
 * tw32_wait_f with a 40us settle time because clock changes cannot be
 * read back immediately.  5780-class chips manage clocks themselves.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		return;

	orig_clock_ctrl = clock_ctrl;
	/* Preserve only the CLKRUN controls and low-order divider bits. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Two-step transition: 44MHz+ALTCLK, then ALTCLK alone. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
658
659#define PHY_BUSY_LOOPS 5000
660
/* Read a PHY register over MDIO via the MAC_MI_COM interface.
 * Auto-polling is suspended around the transaction and restored after.
 * Returns 0 with *val set on success, -EBUSY on timeout (*val is 0).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll MI_COM_BUSY; re-read once more after it clears to pick up
	 * the data latched by the completed transaction.
	 */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
709
/* Write a PHY register over MDIO via MAC_MI_COM; mirror image of
 * tg3_readphy.  Returns 0 on success, -EBUSY on timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
754
755static void tg3_phy_set_wirespeed(struct tg3 *tp)
756{
757 u32 val;
758
759 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
760 return;
761
762 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
763 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
764 tg3_writephy(tp, MII_TG3_AUX_CTRL,
765 (val | (1 << 15) | (1 << 4)));
766}
767
768static int tg3_bmcr_reset(struct tg3 *tp)
769{
770 u32 phy_control;
771 int limit, err;
772
773 /* OK, reset it, and poll the BMCR_RESET bit until it
774 * clears or we time out.
775 */
776 phy_control = BMCR_RESET;
777 err = tg3_writephy(tp, MII_BMCR, phy_control);
778 if (err != 0)
779 return -EBUSY;
780
781 limit = 5000;
782 while (limit--) {
783 err = tg3_readphy(tp, MII_BMCR, &phy_control);
784 if (err != 0)
785 return -EBUSY;
786
787 if ((phy_control & BMCR_RESET) == 0) {
788 udelay(40);
789 break;
790 }
791 udelay(10);
792 }
793 if (limit <= 0)
794 return -EBUSY;
795
796 return 0;
797}
798
799static int tg3_wait_macro_done(struct tg3 *tp)
800{
801 int limit = 100;
802
803 while (limit--) {
804 u32 tmp32;
805
806 if (!tg3_readphy(tp, 0x16, &tmp32)) {
807 if ((tmp32 & 0x1000) == 0)
808 break;
809 }
810 }
811 if (limit <= 0)
812 return -EBUSY;
813
814 return 0;
815}
816
/* Write a DSP test pattern into each of the four PHY channels, read it
 * back, and verify.  On any mismatch or macro timeout, *resetp is set
 * (or recovery values are written) and -EBUSY is returned.  Part of the
 * 5703/4/5 PHY workaround sequence.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select the channel's DSP block and load the pattern. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and switch to read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Values come back as low/high pairs; compare masked. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
882
/* Zero the DSP pattern registers of all four PHY channels, waiting for
 * the macro to finish after each channel.  Returns 0 or -EBUSY.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
902
/* PHY reset workaround for 5703/5704/5705: repeatedly resets the PHY
 * and runs the DSP test-pattern check (up to 10 retries), then clears
 * the channel patterns and restores normal PHY operating state.
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB. */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access and leave the DSP address cleared. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the caller-visible master/slave setting. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
978
c8e1e82b
MC
979static void tg3_link_report(struct tg3 *);
980
1da177e4
LT
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 *
 * Returns 0 on success, -EBUSY if the PHY cannot be read, or the
 * error from the chip-specific reset helper.  After the reset the
 * various per-chip PHY bug workarounds are reapplied.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	/* BMSR is read twice so the second read reflects current
	 * state (the first read can return latched status).
	 */
	err = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* A reset drops the link; report it down immediately. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need a special reset sequence. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* Reapply per-chip PHY bug workarounds via DSP writes. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* Written twice intentionally. */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	tg3_phy_set_wirespeed(tp);
	return 0;
}
1069
/* Configure the GRC local control GPIOs that switch the board between
 * main power and auxiliary (Vaux) power.  On dual-port devices (5704,
 * 5714) the two functions share the power circuitry, so the peer
 * device's WOL/ASF state is consulted as well.  Each tw32_wait_f()
 * step is a timed stage of the hardware's power-switching sequence.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* EEPROM-write-protected (LOM) boards manage this themselves. */
	if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		/* Either port needs aux power: drive the GPIOs to keep
		 * Vaux available.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Let the peer own the sequence if it has
			 * already finished initialization.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			/* Raise GPIO0, then (if usable) drop GPIO2 to
			 * complete the switch-over, 100us per stage.
			 */
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* No WOL/ASF on either port: pulse GPIO1 to release
		 * the aux power supply (not needed on 5700/5701).
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1165
1166static int tg3_setup_phy(struct tg3 *, int);
1167
1168#define RESET_KIND_SHUTDOWN 0
1169#define RESET_KIND_INIT 1
1170#define RESET_KIND_SUSPEND 2
1171
1172static void tg3_write_sig_post_reset(struct tg3 *, int);
1173static int tg3_halt_cpu(struct tg3 *, u32);
6921d201
MC
1174static int tg3_nvram_lock(struct tg3 *);
1175static void tg3_nvram_unlock(struct tg3 *);
1da177e4 1176
15c3b696
MC
1177static void tg3_power_down_phy(struct tg3 *tp)
1178{
1179 /* The PHY should not be powered down on some chips because
1180 * of bugs.
1181 */
1182 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1183 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1184 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1185 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1186 return;
1187 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1188}
1189
bc1c7567 1190static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1da177e4
LT
1191{
1192 u32 misc_host_ctrl;
1193 u16 power_control, power_caps;
1194 int pm = tp->pm_cap;
1195
1196 /* Make sure register accesses (indirect or otherwise)
1197 * will function correctly.
1198 */
1199 pci_write_config_dword(tp->pdev,
1200 TG3PCI_MISC_HOST_CTRL,
1201 tp->misc_host_ctrl);
1202
1203 pci_read_config_word(tp->pdev,
1204 pm + PCI_PM_CTRL,
1205 &power_control);
1206 power_control |= PCI_PM_CTRL_PME_STATUS;
1207 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1208 switch (state) {
bc1c7567 1209 case PCI_D0:
1da177e4
LT
1210 power_control |= 0;
1211 pci_write_config_word(tp->pdev,
1212 pm + PCI_PM_CTRL,
1213 power_control);
8c6bda1a
MC
1214 udelay(100); /* Delay after power state change */
1215
1216 /* Switch out of Vaux if it is not a LOM */
b401e9e2
MC
1217 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1218 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1da177e4
LT
1219
1220 return 0;
1221
bc1c7567 1222 case PCI_D1:
1da177e4
LT
1223 power_control |= 1;
1224 break;
1225
bc1c7567 1226 case PCI_D2:
1da177e4
LT
1227 power_control |= 2;
1228 break;
1229
bc1c7567 1230 case PCI_D3hot:
1da177e4
LT
1231 power_control |= 3;
1232 break;
1233
1234 default:
1235 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1236 "requested.\n",
1237 tp->dev->name, state);
1238 return -EINVAL;
1239 };
1240
1241 power_control |= PCI_PM_CTRL_PME_ENABLE;
1242
1243 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1244 tw32(TG3PCI_MISC_HOST_CTRL,
1245 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1246
1247 if (tp->link_config.phy_is_low_power == 0) {
1248 tp->link_config.phy_is_low_power = 1;
1249 tp->link_config.orig_speed = tp->link_config.speed;
1250 tp->link_config.orig_duplex = tp->link_config.duplex;
1251 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1252 }
1253
747e8f8b 1254 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1da177e4
LT
1255 tp->link_config.speed = SPEED_10;
1256 tp->link_config.duplex = DUPLEX_HALF;
1257 tp->link_config.autoneg = AUTONEG_ENABLE;
1258 tg3_setup_phy(tp, 0);
1259 }
1260
6921d201
MC
1261 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1262 int i;
1263 u32 val;
1264
1265 for (i = 0; i < 200; i++) {
1266 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1267 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1268 break;
1269 msleep(1);
1270 }
1271 }
1272 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1273 WOL_DRV_STATE_SHUTDOWN |
1274 WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1275
1da177e4
LT
1276 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1277
1278 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1279 u32 mac_mode;
1280
1281 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1282 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1283 udelay(40);
1284
1285 mac_mode = MAC_MODE_PORT_MODE_MII;
1286
1287 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1288 !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1289 mac_mode |= MAC_MODE_LINK_POLARITY;
1290 } else {
1291 mac_mode = MAC_MODE_PORT_MODE_TBI;
1292 }
1293
cbf46853 1294 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1da177e4
LT
1295 tw32(MAC_LED_CTRL, tp->led_ctrl);
1296
1297 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1298 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1299 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1300
1301 tw32_f(MAC_MODE, mac_mode);
1302 udelay(100);
1303
1304 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1305 udelay(10);
1306 }
1307
1308 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1309 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1310 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1311 u32 base_val;
1312
1313 base_val = tp->pci_clock_ctrl;
1314 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1315 CLOCK_CTRL_TXCLK_DISABLE);
1316
b401e9e2
MC
1317 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1318 CLOCK_CTRL_PWRDOWN_PLL133, 40);
a4e2b347 1319 } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4cf78e4f 1320 /* do nothing */
85e94ced 1321 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1da177e4
LT
1322 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1323 u32 newbits1, newbits2;
1324
1325 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1326 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1327 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1328 CLOCK_CTRL_TXCLK_DISABLE |
1329 CLOCK_CTRL_ALTCLK);
1330 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1331 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1332 newbits1 = CLOCK_CTRL_625_CORE;
1333 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1334 } else {
1335 newbits1 = CLOCK_CTRL_ALTCLK;
1336 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1337 }
1338
b401e9e2
MC
1339 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1340 40);
1da177e4 1341
b401e9e2
MC
1342 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1343 40);
1da177e4
LT
1344
1345 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1346 u32 newbits3;
1347
1348 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1349 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1350 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1351 CLOCK_CTRL_TXCLK_DISABLE |
1352 CLOCK_CTRL_44MHZ_CORE);
1353 } else {
1354 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1355 }
1356
b401e9e2
MC
1357 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1358 tp->pci_clock_ctrl | newbits3, 40);
1da177e4
LT
1359 }
1360 }
1361
6921d201
MC
1362 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1363 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1364 /* Turn off the PHY */
1365 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1366 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1367 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1368 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
15c3b696 1369 tg3_power_down_phy(tp);
6921d201
MC
1370 }
1371 }
1372
1da177e4
LT
1373 tg3_frob_aux_power(tp);
1374
1375 /* Workaround for unstable PLL clock */
1376 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1377 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1378 u32 val = tr32(0x7d00);
1379
1380 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1381 tw32(0x7d00, val);
6921d201 1382 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
ec41c7df
MC
1383 int err;
1384
1385 err = tg3_nvram_lock(tp);
1da177e4 1386 tg3_halt_cpu(tp, RX_CPU_BASE);
ec41c7df
MC
1387 if (!err)
1388 tg3_nvram_unlock(tp);
6921d201 1389 }
1da177e4
LT
1390 }
1391
bbadf503
MC
1392 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1393
1da177e4
LT
1394 /* Finally, set the new power state. */
1395 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
8c6bda1a 1396 udelay(100); /* Delay after power state change */
1da177e4 1397
1da177e4
LT
1398 return 0;
1399}
1400
1401static void tg3_link_report(struct tg3 *tp)
1402{
1403 if (!netif_carrier_ok(tp->dev)) {
1404 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1405 } else {
1406 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1407 tp->dev->name,
1408 (tp->link_config.active_speed == SPEED_1000 ?
1409 1000 :
1410 (tp->link_config.active_speed == SPEED_100 ?
1411 100 : 10)),
1412 (tp->link_config.active_duplex == DUPLEX_FULL ?
1413 "full" : "half"));
1414
1415 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1416 "%s for RX.\n",
1417 tp->dev->name,
1418 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1419 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1420 }
1421}
1422
/* Resolve RX/TX pause from the local and link-partner advertisement
 * registers and program MAC_RX_MODE/MAC_TX_MODE accordingly.  When
 * pause autoneg is disabled, the previously configured flags are
 * kept.  The MAC registers are only rewritten if their value changed.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
	u32 new_tg3_flags = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {

		/* Convert 1000BaseX flow control bits to 1000BaseT
		 * bits before resolving flow control.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			local_adv &= ~(ADVERTISE_PAUSE_CAP |
				       ADVERTISE_PAUSE_ASYM);
			remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			if (local_adv & ADVERTISE_1000XPAUSE)
				local_adv |= ADVERTISE_PAUSE_CAP;
			if (local_adv & ADVERTISE_1000XPSE_ASYM)
				local_adv |= ADVERTISE_PAUSE_ASYM;
			if (remote_adv & LPA_1000XPAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (remote_adv & LPA_1000XPAUSE_ASYM)
				remote_adv |= LPA_PAUSE_ASYM;
		}

		/* Standard pause resolution from the PAUSE/ASYM bit
		 * pairs advertised by each end.
		 */
		if (local_adv & ADVERTISE_PAUSE_CAP) {
			if (local_adv & ADVERTISE_PAUSE_ASYM) {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						 TG3_FLAG_TX_PAUSE);
				else if (remote_adv & LPA_PAUSE_ASYM)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE);
			} else {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						 TG3_FLAG_TX_PAUSE);
			}
		} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if ((remote_adv & LPA_PAUSE_CAP) &&
			    (remote_adv & LPA_PAUSE_ASYM))
				new_tg3_flags |= TG3_FLAG_TX_PAUSE;
		}

		tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
		tp->tg3_flags |= new_tg3_flags;
	} else {
		new_tg3_flags = tp->tg3_flags;
	}

	if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode) {
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode) {
		tw32_f(MAC_TX_MODE, tp->tx_mode);
	}
}
1494
1495static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1496{
1497 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1498 case MII_TG3_AUX_STAT_10HALF:
1499 *speed = SPEED_10;
1500 *duplex = DUPLEX_HALF;
1501 break;
1502
1503 case MII_TG3_AUX_STAT_10FULL:
1504 *speed = SPEED_10;
1505 *duplex = DUPLEX_FULL;
1506 break;
1507
1508 case MII_TG3_AUX_STAT_100HALF:
1509 *speed = SPEED_100;
1510 *duplex = DUPLEX_HALF;
1511 break;
1512
1513 case MII_TG3_AUX_STAT_100FULL:
1514 *speed = SPEED_100;
1515 *duplex = DUPLEX_FULL;
1516 break;
1517
1518 case MII_TG3_AUX_STAT_1000HALF:
1519 *speed = SPEED_1000;
1520 *duplex = DUPLEX_HALF;
1521 break;
1522
1523 case MII_TG3_AUX_STAT_1000FULL:
1524 *speed = SPEED_1000;
1525 *duplex = DUPLEX_FULL;
1526 break;
1527
1528 default:
1529 *speed = SPEED_INVALID;
1530 *duplex = DUPLEX_INVALID;
1531 break;
1532 };
1533}
1534
/* Program the copper PHY's advertisement registers and (re)start
 * link negotiation.  Three cases: low-power mode (advertise only
 * 10baseT, plus 100baseT if WOL needs it), unconfigured speed
 * (advertise everything the chip supports), or a user-forced
 * speed/duplex (program BMCR directly).
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode. Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific speed requested: advertise all modes. */
		tp->link_config.advertising =
			(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg | ADVERTISED_MII);

		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 must negotiate as master. */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		/* Only reprogram BMCR if it would actually change;
		 * loopback is entered first and we wait (up to 15ms)
		 * for the stale link to drop before the final write.
		 */
		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1673
1674static int tg3_init_5401phy_dsp(struct tg3 *tp)
1675{
1676 int err;
1677
1678 /* Turn off tap power management. */
1679 /* Set Extended packet length bit */
1680 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1681
1682 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1683 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1684
1685 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1686 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1687
1688 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1689 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1690
1691 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1692 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1693
1694 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1695 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1696
1697 udelay(40);
1698
1699 return err;
1700}
1701
1702static int tg3_copper_is_advertising_all(struct tg3 *tp)
1703{
1704 u32 adv_reg, all_mask;
1705
1706 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1707 return 0;
1708
1709 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1710 ADVERTISE_100HALF | ADVERTISE_100FULL);
1711 if ((adv_reg & all_mask) != all_mask)
1712 return 0;
1713 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1714 u32 tg3_ctrl;
1715
1716 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1717 return 0;
1718
1719 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1720 MII_TG3_CTRL_ADV_1000_FULL);
1721 if ((tg3_ctrl & all_mask) != all_mask)
1722 return 0;
1723 }
1724 return 1;
1725}
1726
/* Bring up (or re-evaluate) the copper PHY link: apply chip/PHY
 * specific workarounds, optionally reset the PHY, poll for link,
 * resolve speed/duplex and flow control, and program the MAC mode
 * registers to match.  Updates the net_device carrier state and
 * logs any link change.  Always returns 0.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Clear any latched link/config change status. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* 5401 needs its DSP reinitialized whenever the
			 * link is down; then poll (up to 10ms) for link.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll (up to 100 iterations) for link up. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero aux status, then decode the
		 * negotiated speed/duplex from it.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a sane BMCR value (0x7fff can be returned
		 * transiently).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: link only counts if it matches
			 * the requested speed/duplex.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong. Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* Reprogram advertisements / restart negotiation and
		 * re-check link state.
		 */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Mirror the resolved link into the MAC mode register. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
		    (current_link_up == 1 &&
		     tp->link_config.active_speed == SPEED_10))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	} else {
		if (current_link_up == 1)
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2005
/* Software state for the fiber autonegotiation state machine
 * driven by tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
	int state;		/* current ANEG_STATE_* below */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;		/* MR_* mode/result bits below */
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Timestamps (in state-machine ticks) of last link event and
	 * current tick; used to time state transitions.
	 */
	unsigned long link_time, cur_time;

	/* Last RX config word seen and how many consecutive times. */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Match flags derived from the received config stream. */
	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* ANEG_CFG_* encoded config words */
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
/* Advance the software 1000BASE-X autonegotiation state machine by one
 * tick.  Samples the MAC's received-config register, updates the match
 * latches in @ap, and then runs one transition of the state machine.
 *
 * Returns ANEG_OK (keep ticking), ANEG_TIMER_ENAB (keep ticking, a settle
 * timer is running), ANEG_DONE (negotiation finished) or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First call: clear all soft state before the machine starts. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the link partner's config word, if one is being received.
	 * ability_match latches only after the same word repeats, filtering
	 * out transient garbage.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config words: the line is idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Transmit an all-zero config word to restart negotiation. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Hold the restart state for the settle time. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex + symmetric pause. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait until a stable non-zero ability word is received. */
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Echo the partner's word back with ACK set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Partner acked; its word (minus ACK) must still
			 * match what we latched, else restart.
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			/* Partner went back to idle words: restart. */
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		/* Reserved bits set in the config word mean a bad partner. */
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the link partner's advertised abilities into flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)	/* toggle bit from partner */
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is unimplemented; only
				 * proceed when neither side wants one.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for the line to idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	};

	return ret;
}
2317
2318static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2319{
2320 int res = 0;
2321 struct tg3_fiber_aneginfo aninfo;
2322 int status = ANEG_FAILED;
2323 unsigned int tick;
2324 u32 tmp;
2325
2326 tw32_f(MAC_TX_AUTO_NEG, 0);
2327
2328 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2329 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2330 udelay(40);
2331
2332 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2333 udelay(40);
2334
2335 memset(&aninfo, 0, sizeof(aninfo));
2336 aninfo.flags |= MR_AN_ENABLE;
2337 aninfo.state = ANEG_STATE_UNKNOWN;
2338 aninfo.cur_time = 0;
2339 tick = 0;
2340 while (++tick < 195000) {
2341 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2342 if (status == ANEG_DONE || status == ANEG_FAILED)
2343 break;
2344
2345 udelay(1);
2346 }
2347
2348 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2349 tw32_f(MAC_MODE, tp->mac_mode);
2350 udelay(40);
2351
2352 *flags = aninfo.flags;
2353
2354 if (status == ANEG_DONE &&
2355 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2356 MR_LP_ADV_FULL_DUPLEX)))
2357 res = 1;
2358
2359 return res;
2360}
2361
/* One-time initialization sequence for the BCM8002 SerDes PHY.  The
 * register numbers and values below are vendor magic; the write order
 * and delays matter, so do not reorder.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2411
2412static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2413{
2414 u32 sg_dig_ctrl, sg_dig_status;
2415 u32 serdes_cfg, expected_sg_dig_ctrl;
2416 int workaround, port_a;
2417 int current_link_up;
2418
2419 serdes_cfg = 0;
2420 expected_sg_dig_ctrl = 0;
2421 workaround = 0;
2422 port_a = 1;
2423 current_link_up = 0;
2424
2425 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2426 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2427 workaround = 1;
2428 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2429 port_a = 0;
2430
2431 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2432 /* preserve bits 20-23 for voltage regulator */
2433 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2434 }
2435
2436 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2437
2438 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2439 if (sg_dig_ctrl & (1 << 31)) {
2440 if (workaround) {
2441 u32 val = serdes_cfg;
2442
2443 if (port_a)
2444 val |= 0xc010000;
2445 else
2446 val |= 0x4010000;
2447 tw32_f(MAC_SERDES_CFG, val);
2448 }
2449 tw32_f(SG_DIG_CTRL, 0x01388400);
2450 }
2451 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2452 tg3_setup_flow_control(tp, 0, 0);
2453 current_link_up = 1;
2454 }
2455 goto out;
2456 }
2457
2458 /* Want auto-negotiation. */
2459 expected_sg_dig_ctrl = 0x81388400;
2460
2461 /* Pause capability */
2462 expected_sg_dig_ctrl |= (1 << 11);
2463
2464 /* Asymettric pause */
2465 expected_sg_dig_ctrl |= (1 << 12);
2466
2467 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2468 if (workaround)
2469 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2470 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2471 udelay(5);
2472 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2473
2474 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2475 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2476 MAC_STATUS_SIGNAL_DET)) {
2477 int i;
2478
2479 /* Giver time to negotiate (~200ms) */
2480 for (i = 0; i < 40000; i++) {
2481 sg_dig_status = tr32(SG_DIG_STATUS);
2482 if (sg_dig_status & (0x3))
2483 break;
2484 udelay(5);
2485 }
2486 mac_status = tr32(MAC_STATUS);
2487
2488 if ((sg_dig_status & (1 << 1)) &&
2489 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2490 u32 local_adv, remote_adv;
2491
2492 local_adv = ADVERTISE_PAUSE_CAP;
2493 remote_adv = 0;
2494 if (sg_dig_status & (1 << 19))
2495 remote_adv |= LPA_PAUSE_CAP;
2496 if (sg_dig_status & (1 << 20))
2497 remote_adv |= LPA_PAUSE_ASYM;
2498
2499 tg3_setup_flow_control(tp, local_adv, remote_adv);
2500 current_link_up = 1;
2501 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2502 } else if (!(sg_dig_status & (1 << 1))) {
2503 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2504 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2505 else {
2506 if (workaround) {
2507 u32 val = serdes_cfg;
2508
2509 if (port_a)
2510 val |= 0xc010000;
2511 else
2512 val |= 0x4010000;
2513
2514 tw32_f(MAC_SERDES_CFG, val);
2515 }
2516
2517 tw32_f(SG_DIG_CTRL, 0x01388400);
2518 udelay(40);
2519
2520 /* Link parallel detection - link is up */
2521 /* only if we have PCS_SYNC and not */
2522 /* receiving config code words */
2523 mac_status = tr32(MAC_STATUS);
2524 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2525 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2526 tg3_setup_flow_control(tp, 0, 0);
2527 current_link_up = 1;
2528 }
2529 }
2530 }
2531 }
2532
2533out:
2534 return current_link_up;
2535}
2536
/* Bring up a fiber link without the hardware SG_DIG autoneg block:
 * either run the software autoneg state machine (fiber_autoneg) or, if
 * autoneg is disabled, force a 1000FD link.  @mac_status is the caller's
 * sampled MAC_STATUS.  Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync: no usable signal, bail out with link down. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
		tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
		goto out;
	}

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			/* Translate the negotiated MR_* pause bits into
			 * MII-style advertisement words for flow control.
			 */
			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
			current_link_up = 1;
		}
		/* Clear the sticky sync/config-changed bits; they are
		 * write-one-to-clear, so loop until they stay clear.
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg produced no link but we have sync and the line is
		 * idle: link came up by parallel detection.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;
		tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);
	}

out:
	return current_link_up;
}
2594
/* Top-level link setup for TBI (fiber) interfaces.  Chooses between the
 * hardware SG_DIG autoneg path and the by-hand/software path, updates
 * carrier state and LEDs, and reports link changes.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember current settings so we can report a change even when
	 * carrier state itself does not flip.
	 */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, link already up and healthy
	 * (synced, signal present, no config words) - nothing to do.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the MAC in TBI mode, full duplex. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Mask out the stale link-change bit in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Clear the sticky sync/config-changed bits until they stay clear. */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED)) == 0)
			break;
	}

	/* Lost sync after all that: declare link down and, with autoneg
	 * on, pulse SEND_CONFIGS so the partner restarts negotiation.
	 */
	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links are always 1000FD when up; drive the LEDs to match. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		/* Carrier unchanged, but report if pause/speed/duplex moved. */
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2708
747e8f8b
MC
/* Link setup for SerDes parts that are driven through an MII-style PHY
 * interface (1000BASE-X registers: ADVERTISE_1000X*).  Handles both
 * autoneg and forced-mode configuration, decodes the negotiated result,
 * and updates MAC duplex mode and carrier state.
 *
 * Returns the OR of all tg3_readphy() error codes (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Clear all sticky link-state bits before probing. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR link status is latched-low: read twice for current state. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	/* 5714: the PHY's BMSR link bit is unreliable; trust the MAC's
	 * TX status instead.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Build the 1000BASE-X advertisement from link_config. */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* Advertisement changed or autoneg was off: (re)start
		 * autoneg and return early; the timer will re-poll.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: disable autoneg, program duplex. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Restart autoneg with nothing advertised
				 * so the partner drops the link cleanly.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
						       BMCR_ANRESTART |
						       BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched-low: read BMSR twice for fresh status. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			/* 5714 workaround, as above. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			/* Resolve duplex and flow control from the
			 * intersection of both sides' advertisements.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				current_link_up = 0;
		}
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
2875
/* Periodic (timer-driven) parallel-detection handler for MII SerDes.
 * If autoneg is on but the link is down and the partner is not sending
 * config words, force the link up 1000FD (parallel detect).  Conversely,
 * if a parallel-detected link starts receiving config words, re-enable
 * autoneg.  Register 0x1c/0x17/0x15 accesses are PHY shadow/expansion
 * register selects (vendor-specific).
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
		/* Give autoneg time to complete. */
		tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
2933
1da177e4
LT
/* Dispatch link setup to the fiber, MII-SerDes or copper implementation
 * based on the PHY flavor, then program speed/duplex-dependent MAC TX
 * timing and statistics coalescing.  Returns the setup routine's error.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	/* 1000/half needs a longer slot time (0xff vs 32). */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Pre-5705 chips: only collect stats while the link is up. */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	return err;
}
2969
df3e6548
MC
2970/* This is called whenever we suspect that the system chipset is re-
2971 * ordering the sequence of MMIO to the tx send mailbox. The symptom
2972 * is bogus tx completions. We try to recover by setting the
2973 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
2974 * in the workqueue.
2975 */
2976static void tg3_tx_recover(struct tg3 *tp)
2977{
2978 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
2979 tp->write32_tx_mbox == tg3_write_indirect_mbox);
2980
2981 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
2982 "mapped I/O cycles to the network device, attempting to "
2983 "recover. Please report the problem to the driver maintainer "
2984 "and include system chipset information.\n", tp->dev->name);
2985
2986 spin_lock(&tp->lock);
df3e6548 2987 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
df3e6548
MC
2988 spin_unlock(&tp->lock);
2989}
2990
1da177e4
LT
/* Tigon3 never reports partial packet sends. So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
/* Reclaim completed TX descriptors: walk from the software consumer
 * index to the hardware's, unmapping each skb's head and fragments and
 * freeing the skb.  Detects inconsistent ring state (missing skb, frag
 * slot still occupied) and triggers tg3_tx_recover().  Wakes the queue
 * when enough descriptors become free.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means the hardware and
		 * software views of the ring have diverged.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Unmap the linear (head) portion. */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap each paged fragment; fragment slots must be empty
		 * and must not run past the hardware index.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Re-check under tx_lock to close the race with hard_start_xmit
	 * stopping the queue after we sampled it.
	 */
	if (unlikely(netif_queue_stopped(tp->dev))) {
		spin_lock(&tp->tx_lock);
		if (netif_queue_stopped(tp->dev) &&
		    (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
			netif_wake_queue(tp->dev);
		spin_unlock(&tp->tx_lock);
	}
}
3050
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses. For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address). Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
			    int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	/* Resolve ring, descriptor, bookkeeping slot and buffer size from
	 * the opaque ring key.
	 */
	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tp->rx_std[dest_idx];
		map = &tp->rx_std_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_std_buffers[src_idx];
		skb_size = tp->rx_pkt_buf_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tp->rx_jumbo[dest_idx];
		map = &tp->rx_jumbo_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_jumbo_buffers[src_idx];
		skb_size = RX_JUMBO_PKT_BUF_SZ;
		break;

	default:
		return -EINVAL;
	};

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = dev_alloc_skb(skb_size);
	if (skb == NULL)
		return -ENOMEM;

	skb->dev = tp->dev;
	skb_reserve(skb, tp->rx_offset);

	/* NOTE(review): pci_map_single() result is not checked for a
	 * mapping error here - confirm against the DMA API expectations.
	 */
	mapping = pci_map_single(tp->pdev, skb->data,
				 skb_size - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	/* Publish the DMA address to the chip's descriptor. */
	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
3123
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant. See notes above
 * tg3_alloc_rx_skb for full details.
 */
/* Move an already-mapped RX buffer from slot @src_idx to @dest_idx in
 * the same ring (std or jumbo), transferring skb pointer, DMA unmap
 * address and descriptor address.  Used to repost a buffer the chip
 * returned in error without reallocating.
 */
static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
			   int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		dest_desc = &tp->rx_std[dest_idx];
		dest_map = &tp->rx_std_buffers[dest_idx];
		src_desc = &tp->rx_std[src_idx];
		src_map = &tp->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		dest_desc = &tp->rx_jumbo[dest_idx];
		dest_map = &tp->rx_jumbo_buffers[dest_idx];
		src_desc = &tp->rx_jumbo[src_idx];
		src_map = &tp->rx_jumbo_buffers[src_idx];
		break;

	default:
		return;
	};

	dest_map->skb = src_map->skb;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Source slot no longer owns the skb. */
	src_map->skb = NULL;
}
3164
#if TG3_VLAN_TAG_USED
/* Hand a received skb with a hardware-extracted VLAN tag up the stack
 * via the VLAN acceleration path.
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3171
3172/* The RX ring scheme is composed of multiple rings which post fresh
3173 * buffers to the chip, and one special ring the chip uses to report
3174 * status back to the host.
3175 *
3176 * The special ring reports the status of received packets to the
3177 * host. The chip does not write into the original descriptor the
3178 * RX buffer was obtained from. The chip simply takes the original
3179 * descriptor as provided by the host, updates the status and length
3180 * field, then writes this into the next status ring entry.
3181 *
3182 * Each ring the host uses to post buffers to the chip is described
3183 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
3184 * it is first placed into the on-chip ram. When the packet's length
3185 * is known, it walks down the TG3_BDINFO entries to select the ring.
3186 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3187 * which is within the range of the new packet's length is chosen.
3188 *
3189 * The "separate ring for rx status" scheme may sound queer, but it makes
3190 * sense from a cache coherency perspective. If only the host writes
3191 * to the buffer post rings, and only the chip writes to the rx status
3192 * rings, then cache lines never move beyond shared-modified state.
3193 * If both the host and chip were to write into the same ring, cache line
3194 * eviction could occur since both entities want it in an exclusive state.
3195 */
3196static int tg3_rx(struct tg3 *tp, int budget)
3197{
f92905de 3198 u32 work_mask, rx_std_posted = 0;
483ba50b
MC
3199 u32 sw_idx = tp->rx_rcb_ptr;
3200 u16 hw_idx;
1da177e4
LT
3201 int received;
3202
3203 hw_idx = tp->hw_status->idx[0].rx_producer;
3204 /*
3205 * We need to order the read of hw_idx and the read of
3206 * the opaque cookie.
3207 */
3208 rmb();
1da177e4
LT
3209 work_mask = 0;
3210 received = 0;
3211 while (sw_idx != hw_idx && budget > 0) {
3212 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3213 unsigned int len;
3214 struct sk_buff *skb;
3215 dma_addr_t dma_addr;
3216 u32 opaque_key, desc_idx, *post_ptr;
3217
3218 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3219 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3220 if (opaque_key == RXD_OPAQUE_RING_STD) {
3221 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3222 mapping);
3223 skb = tp->rx_std_buffers[desc_idx].skb;
3224 post_ptr = &tp->rx_std_ptr;
f92905de 3225 rx_std_posted++;
1da177e4
LT
3226 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3227 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3228 mapping);
3229 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3230 post_ptr = &tp->rx_jumbo_ptr;
3231 }
3232 else {
3233 goto next_pkt_nopost;
3234 }
3235
3236 work_mask |= opaque_key;
3237
3238 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3239 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3240 drop_it:
3241 tg3_recycle_rx(tp, opaque_key,
3242 desc_idx, *post_ptr);
3243 drop_it_no_recycle:
3244 /* Other statistics kept track of by card. */
3245 tp->net_stats.rx_dropped++;
3246 goto next_pkt;
3247 }
3248
3249 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3250
3251 if (len > RX_COPY_THRESHOLD
3252 && tp->rx_offset == 2
3253 /* rx_offset != 2 iff this is a 5701 card running
3254 * in PCI-X mode [see tg3_get_invariants()] */
3255 ) {
3256 int skb_size;
3257
3258 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3259 desc_idx, *post_ptr);
3260 if (skb_size < 0)
3261 goto drop_it;
3262
3263 pci_unmap_single(tp->pdev, dma_addr,
3264 skb_size - tp->rx_offset,
3265 PCI_DMA_FROMDEVICE);
3266
3267 skb_put(skb, len);
3268 } else {
3269 struct sk_buff *copy_skb;
3270
3271 tg3_recycle_rx(tp, opaque_key,
3272 desc_idx, *post_ptr);
3273
3274 copy_skb = dev_alloc_skb(len + 2);
3275 if (copy_skb == NULL)
3276 goto drop_it_no_recycle;
3277
3278 copy_skb->dev = tp->dev;
3279 skb_reserve(copy_skb, 2);
3280 skb_put(copy_skb, len);
3281 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3282 memcpy(copy_skb->data, skb->data, len);
3283 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3284
3285 /* We'll reuse the original ring buffer. */
3286 skb = copy_skb;
3287 }
3288
3289 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3290 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3291 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3292 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3293 skb->ip_summed = CHECKSUM_UNNECESSARY;
3294 else
3295 skb->ip_summed = CHECKSUM_NONE;
3296
3297 skb->protocol = eth_type_trans(skb, tp->dev);
3298#if TG3_VLAN_TAG_USED
3299 if (tp->vlgrp != NULL &&
3300 desc->type_flags & RXD_FLAG_VLAN) {
3301 tg3_vlan_rx(tp, skb,
3302 desc->err_vlan & RXD_VLAN_MASK);
3303 } else
3304#endif
3305 netif_receive_skb(skb);
3306
3307 tp->dev->last_rx = jiffies;
3308 received++;
3309 budget--;
3310
3311next_pkt:
3312 (*post_ptr)++;
f92905de
MC
3313
3314 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3315 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3316
3317 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3318 TG3_64BIT_REG_LOW, idx);
3319 work_mask &= ~RXD_OPAQUE_RING_STD;
3320 rx_std_posted = 0;
3321 }
1da177e4 3322next_pkt_nopost:
483ba50b
MC
3323 sw_idx++;
3324 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
52f6d697
MC
3325
3326 /* Refresh hw_idx to see if there is new work */
3327 if (sw_idx == hw_idx) {
3328 hw_idx = tp->hw_status->idx[0].rx_producer;
3329 rmb();
3330 }
1da177e4
LT
3331 }
3332
3333 /* ACK the status ring. */
483ba50b
MC
3334 tp->rx_rcb_ptr = sw_idx;
3335 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
1da177e4
LT
3336
3337 /* Refill RX ring(s). */
3338 if (work_mask & RXD_OPAQUE_RING_STD) {
3339 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3340 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3341 sw_idx);
3342 }
3343 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3344 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3345 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3346 sw_idx);
3347 }
3348 mmiowb();
3349
3350 return received;
3351}
3352
3353static int tg3_poll(struct net_device *netdev, int *budget)
3354{
3355 struct tg3 *tp = netdev_priv(netdev);
3356 struct tg3_hw_status *sblk = tp->hw_status;
1da177e4
LT
3357 int done;
3358
1da177e4
LT
3359 /* handle link change and other phy events */
3360 if (!(tp->tg3_flags &
3361 (TG3_FLAG_USE_LINKCHG_REG |
3362 TG3_FLAG_POLL_SERDES))) {
3363 if (sblk->status & SD_STATUS_LINK_CHG) {
3364 sblk->status = SD_STATUS_UPDATED |
3365 (sblk->status & ~SD_STATUS_LINK_CHG);
f47c11ee 3366 spin_lock(&tp->lock);
1da177e4 3367 tg3_setup_phy(tp, 0);
f47c11ee 3368 spin_unlock(&tp->lock);
1da177e4
LT
3369 }
3370 }
3371
3372 /* run TX completion thread */
3373 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
1da177e4 3374 tg3_tx(tp);
df3e6548
MC
3375 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
3376 netif_rx_complete(netdev);
3377 schedule_work(&tp->reset_task);
3378 return 0;
3379 }
1da177e4
LT
3380 }
3381
1da177e4
LT
3382 /* run RX thread, within the bounds set by NAPI.
3383 * All RX "locking" is done by ensuring outside
3384 * code synchronizes with dev->poll()
3385 */
1da177e4
LT
3386 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3387 int orig_budget = *budget;
3388 int work_done;
3389
3390 if (orig_budget > netdev->quota)
3391 orig_budget = netdev->quota;
3392
3393 work_done = tg3_rx(tp, orig_budget);
3394
3395 *budget -= work_done;
3396 netdev->quota -= work_done;
1da177e4
LT
3397 }
3398
38f3843e 3399 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
f7383c22 3400 tp->last_tag = sblk->status_tag;
38f3843e
MC
3401 rmb();
3402 } else
3403 sblk->status &= ~SD_STATUS_UPDATED;
f7383c22 3404
1da177e4 3405 /* if no more work, tell net stack and NIC we're done */
f7383c22 3406 done = !tg3_has_work(tp);
1da177e4 3407 if (done) {
f47c11ee 3408 netif_rx_complete(netdev);
1da177e4 3409 tg3_restart_ints(tp);
1da177e4
LT
3410 }
3411
3412 return (done ? 0 : 1);
3413}
3414
f47c11ee
DM
3415static void tg3_irq_quiesce(struct tg3 *tp)
3416{
3417 BUG_ON(tp->irq_sync);
3418
3419 tp->irq_sync = 1;
3420 smp_mb();
3421
3422 synchronize_irq(tp->pdev->irq);
3423}
3424
3425static inline int tg3_irq_sync(struct tg3 *tp)
3426{
3427 return tp->irq_sync;
3428}
3429
3430/* Fully shutdown all tg3 driver activity elsewhere in the system.
3431 * If irq_sync is non-zero, then the IRQ handler must be synchronized
3432 * with as well. Most of the time, this is not necessary except when
3433 * shutting down the device.
3434 */
3435static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3436{
3437 if (irq_sync)
3438 tg3_irq_quiesce(tp);
3439 spin_lock_bh(&tp->lock);
f47c11ee
DM
3440}
3441
3442static inline void tg3_full_unlock(struct tg3 *tp)
3443{
f47c11ee
DM
3444 spin_unlock_bh(&tp->lock);
3445}
3446
fcfa0a32
MC
3447/* One-shot MSI handler - Chip automatically disables interrupt
3448 * after sending MSI so driver doesn't have to do it.
3449 */
3450static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3451{
3452 struct net_device *dev = dev_id;
3453 struct tg3 *tp = netdev_priv(dev);
3454
3455 prefetch(tp->hw_status);
3456 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3457
3458 if (likely(!tg3_irq_sync(tp)))
3459 netif_rx_schedule(dev); /* schedule NAPI poll */
3460
3461 return IRQ_HANDLED;
3462}
3463
88b06bc2
MC
3464/* MSI ISR - No need to check for interrupt sharing and no need to
3465 * flush status block and interrupt mailbox. PCI ordering rules
3466 * guarantee that MSI will arrive after the status block.
3467 */
3468static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3469{
3470 struct net_device *dev = dev_id;
3471 struct tg3 *tp = netdev_priv(dev);
88b06bc2 3472
61487480
MC
3473 prefetch(tp->hw_status);
3474 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
88b06bc2 3475 /*
fac9b83e 3476 * Writing any value to intr-mbox-0 clears PCI INTA# and
88b06bc2 3477 * chip-internal interrupt pending events.
fac9b83e 3478 * Writing non-zero to intr-mbox-0 additional tells the
88b06bc2
MC
3479 * NIC to stop sending us irqs, engaging "in-intr-handler"
3480 * event coalescing.
3481 */
3482 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
61487480 3483 if (likely(!tg3_irq_sync(tp)))
88b06bc2 3484 netif_rx_schedule(dev); /* schedule NAPI poll */
61487480 3485
88b06bc2
MC
3486 return IRQ_RETVAL(1);
3487}
3488
1da177e4
LT
3489static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3490{
3491 struct net_device *dev = dev_id;
3492 struct tg3 *tp = netdev_priv(dev);
3493 struct tg3_hw_status *sblk = tp->hw_status;
1da177e4
LT
3494 unsigned int handled = 1;
3495
1da177e4
LT
3496 /* In INTx mode, it is possible for the interrupt to arrive at
3497 * the CPU before the status block posted prior to the interrupt.
3498 * Reading the PCI State register will confirm whether the
3499 * interrupt is ours and will flush the status block.
3500 */
3501 if ((sblk->status & SD_STATUS_UPDATED) ||
3502 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3503 /*
fac9b83e 3504 * Writing any value to intr-mbox-0 clears PCI INTA# and
1da177e4 3505 * chip-internal interrupt pending events.
fac9b83e 3506 * Writing non-zero to intr-mbox-0 additional tells the
1da177e4
LT
3507 * NIC to stop sending us irqs, engaging "in-intr-handler"
3508 * event coalescing.
3509 */
3510 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3511 0x00000001);
f47c11ee
DM
3512 if (tg3_irq_sync(tp))
3513 goto out;
fac9b83e 3514 sblk->status &= ~SD_STATUS_UPDATED;
61487480
MC
3515 if (likely(tg3_has_work(tp))) {
3516 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
fac9b83e 3517 netif_rx_schedule(dev); /* schedule NAPI poll */
61487480 3518 } else {
fac9b83e
DM
3519 /* No work, shared interrupt perhaps? re-enable
3520 * interrupts, and flush that PCI write
3521 */
09ee929c 3522 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
fac9b83e 3523 0x00000000);
fac9b83e
DM
3524 }
3525 } else { /* shared interrupt */
3526 handled = 0;
3527 }
f47c11ee 3528out:
fac9b83e
DM
3529 return IRQ_RETVAL(handled);
3530}
3531
3532static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3533{
3534 struct net_device *dev = dev_id;
3535 struct tg3 *tp = netdev_priv(dev);
3536 struct tg3_hw_status *sblk = tp->hw_status;
fac9b83e
DM
3537 unsigned int handled = 1;
3538
fac9b83e
DM
3539 /* In INTx mode, it is possible for the interrupt to arrive at
3540 * the CPU before the status block posted prior to the interrupt.
3541 * Reading the PCI State register will confirm whether the
3542 * interrupt is ours and will flush the status block.
3543 */
38f3843e 3544 if ((sblk->status_tag != tp->last_tag) ||
fac9b83e 3545 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
1da177e4 3546 /*
fac9b83e
DM
3547 * writing any value to intr-mbox-0 clears PCI INTA# and
3548 * chip-internal interrupt pending events.
3549 * writing non-zero to intr-mbox-0 additional tells the
3550 * NIC to stop sending us irqs, engaging "in-intr-handler"
3551 * event coalescing.
1da177e4 3552 */
fac9b83e
DM
3553 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3554 0x00000001);
f47c11ee
DM
3555 if (tg3_irq_sync(tp))
3556 goto out;
38f3843e 3557 if (netif_rx_schedule_prep(dev)) {
61487480 3558 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
38f3843e
MC
3559 /* Update last_tag to mark that this status has been
3560 * seen. Because interrupt may be shared, we may be
3561 * racing with tg3_poll(), so only update last_tag
3562 * if tg3_poll() is not scheduled.
1da177e4 3563 */
38f3843e
MC
3564 tp->last_tag = sblk->status_tag;
3565 __netif_rx_schedule(dev);
1da177e4
LT
3566 }
3567 } else { /* shared interrupt */
3568 handled = 0;
3569 }
f47c11ee 3570out:
1da177e4
LT
3571 return IRQ_RETVAL(handled);
3572}
3573
7938109f
MC
3574/* ISR for interrupt test */
3575static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3576 struct pt_regs *regs)
3577{
3578 struct net_device *dev = dev_id;
3579 struct tg3 *tp = netdev_priv(dev);
3580 struct tg3_hw_status *sblk = tp->hw_status;
3581
f9804ddb
MC
3582 if ((sblk->status & SD_STATUS_UPDATED) ||
3583 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7938109f
MC
3584 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3585 0x00000001);
3586 return IRQ_RETVAL(1);
3587 }
3588 return IRQ_RETVAL(0);
3589}
3590
8e7a22e3 3591static int tg3_init_hw(struct tg3 *, int);
944d980e 3592static int tg3_halt(struct tg3 *, int, int);
1da177e4
LT
3593
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point: invoke the interrupt handler directly so the
 * stack can poll the device with interrupts disabled.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev, NULL);
}
#endif
3602
3603static void tg3_reset_task(void *_data)
3604{
3605 struct tg3 *tp = _data;
3606 unsigned int restart_timer;
3607
7faa006f
MC
3608 tg3_full_lock(tp, 0);
3609 tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3610
3611 if (!netif_running(tp->dev)) {
3612 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3613 tg3_full_unlock(tp);
3614 return;
3615 }
3616
3617 tg3_full_unlock(tp);
3618
1da177e4
LT
3619 tg3_netif_stop(tp);
3620
f47c11ee 3621 tg3_full_lock(tp, 1);
1da177e4
LT
3622
3623 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3624 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3625
df3e6548
MC
3626 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
3627 tp->write32_tx_mbox = tg3_write32_tx_mbox;
3628 tp->write32_rx_mbox = tg3_write_flush_reg32;
3629 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
3630 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
3631 }
3632
944d980e 3633 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
8e7a22e3 3634 tg3_init_hw(tp, 1);
1da177e4
LT
3635
3636 tg3_netif_start(tp);
3637
1da177e4
LT
3638 if (restart_timer)
3639 mod_timer(&tp->timer, jiffies + 1);
7faa006f
MC
3640
3641 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3642
3643 tg3_full_unlock(tp);
1da177e4
LT
3644}
3645
3646static void tg3_tx_timeout(struct net_device *dev)
3647{
3648 struct tg3 *tp = netdev_priv(dev);
3649
3650 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3651 dev->name);
3652
3653 schedule_work(&tp->reset_task);
3654}
3655
c58ec932
MC
3656/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3657static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3658{
3659 u32 base = (u32) mapping & 0xffffffff;
3660
3661 return ((base > 0xffffdcc0) &&
3662 (base + len + 8 < base));
3663}
3664
72f2afb8
MC
3665/* Test for DMA addresses > 40-bit */
3666static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3667 int len)
3668{
3669#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6728a8e2 3670 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
72f2afb8
MC
3671 return (((u64) mapping + len) > DMA_40BIT_MASK);
3672 return 0;
3673#else
3674 return 0;
3675#endif
3676}
3677
1da177e4
LT
3678static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3679
72f2afb8
MC
3680/* Workaround 4GB and 40-bit hardware DMA bugs. */
3681static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
c58ec932
MC
3682 u32 last_plus_one, u32 *start,
3683 u32 base_flags, u32 mss)
1da177e4
LT
3684{
3685 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
c58ec932 3686 dma_addr_t new_addr = 0;
1da177e4 3687 u32 entry = *start;
c58ec932 3688 int i, ret = 0;
1da177e4
LT
3689
3690 if (!new_skb) {
c58ec932
MC
3691 ret = -1;
3692 } else {
3693 /* New SKB is guaranteed to be linear. */
3694 entry = *start;
3695 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3696 PCI_DMA_TODEVICE);
3697 /* Make sure new skb does not cross any 4G boundaries.
3698 * Drop the packet if it does.
3699 */
3700 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3701 ret = -1;
3702 dev_kfree_skb(new_skb);
3703 new_skb = NULL;
3704 } else {
3705 tg3_set_txd(tp, entry, new_addr, new_skb->len,
3706 base_flags, 1 | (mss << 1));
3707 *start = NEXT_TX(entry);
3708 }
1da177e4
LT
3709 }
3710
1da177e4
LT
3711 /* Now clean up the sw ring entries. */
3712 i = 0;
3713 while (entry != last_plus_one) {
3714 int len;
3715
3716 if (i == 0)
3717 len = skb_headlen(skb);
3718 else
3719 len = skb_shinfo(skb)->frags[i-1].size;
3720 pci_unmap_single(tp->pdev,
3721 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3722 len, PCI_DMA_TODEVICE);
3723 if (i == 0) {
3724 tp->tx_buffers[entry].skb = new_skb;
3725 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3726 } else {
3727 tp->tx_buffers[entry].skb = NULL;
3728 }
3729 entry = NEXT_TX(entry);
3730 i++;
3731 }
3732
3733 dev_kfree_skb(skb);
3734
c58ec932 3735 return ret;
1da177e4
LT
3736}
3737
3738static void tg3_set_txd(struct tg3 *tp, int entry,
3739 dma_addr_t mapping, int len, u32 flags,
3740 u32 mss_and_is_end)
3741{
3742 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3743 int is_end = (mss_and_is_end & 0x1);
3744 u32 mss = (mss_and_is_end >> 1);
3745 u32 vlan_tag = 0;
3746
3747 if (is_end)
3748 flags |= TXD_FLAG_END;
3749 if (flags & TXD_FLAG_VLAN) {
3750 vlan_tag = flags >> 16;
3751 flags &= 0xffff;
3752 }
3753 vlan_tag |= (mss << TXD_MSS_SHIFT);
3754
3755 txd->addr_hi = ((u64) mapping >> 32);
3756 txd->addr_lo = ((u64) mapping & 0xffffffff);
3757 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3758 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3759}
3760
5a6f3074
MC
3761/* hard_start_xmit for devices that don't have any bugs and
3762 * support TG3_FLG2_HW_TSO_2 only.
3763 */
1da177e4 3764static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5a6f3074
MC
3765{
3766 struct tg3 *tp = netdev_priv(dev);
3767 dma_addr_t mapping;
3768 u32 len, entry, base_flags, mss;
3769
3770 len = skb_headlen(skb);
3771
00b70504
MC
3772 /* We are running in BH disabled context with netif_tx_lock
3773 * and TX reclaim runs via tp->poll inside of a software
5a6f3074
MC
3774 * interrupt. Furthermore, IRQ processing runs lockless so we have
3775 * no IRQ context deadlocks to worry about either. Rejoice!
3776 */
5a6f3074
MC
3777 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3778 if (!netif_queue_stopped(dev)) {
3779 netif_stop_queue(dev);
3780
3781 /* This is a hard error, log it. */
3782 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3783 "queue awake!\n", dev->name);
3784 }
5a6f3074
MC
3785 return NETDEV_TX_BUSY;
3786 }
3787
3788 entry = tp->tx_prod;
3789 base_flags = 0;
3790#if TG3_TSO_SUPPORT != 0
3791 mss = 0;
3792 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
7967168c 3793 (mss = skb_shinfo(skb)->gso_size) != 0) {
5a6f3074
MC
3794 int tcp_opt_len, ip_tcp_len;
3795
3796 if (skb_header_cloned(skb) &&
3797 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3798 dev_kfree_skb(skb);
3799 goto out_unlock;
3800 }
3801
3802 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3803 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3804
3805 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3806 TXD_FLAG_CPU_POST_DMA);
3807
3808 skb->nh.iph->check = 0;
3809 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3810
3811 skb->h.th->check = 0;
3812
3813 mss |= (ip_tcp_len + tcp_opt_len) << 9;
3814 }
3815 else if (skb->ip_summed == CHECKSUM_HW)
3816 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3817#else
3818 mss = 0;
3819 if (skb->ip_summed == CHECKSUM_HW)
3820 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3821#endif
3822#if TG3_VLAN_TAG_USED
3823 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3824 base_flags |= (TXD_FLAG_VLAN |
3825 (vlan_tx_tag_get(skb) << 16));
3826#endif
3827
3828 /* Queue skb data, a.k.a. the main skb fragment. */
3829 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3830
3831 tp->tx_buffers[entry].skb = skb;
3832 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3833
3834 tg3_set_txd(tp, entry, mapping, len, base_flags,
3835 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3836
3837 entry = NEXT_TX(entry);
3838
3839 /* Now loop through additional data fragments, and queue them. */
3840 if (skb_shinfo(skb)->nr_frags > 0) {
3841 unsigned int i, last;
3842
3843 last = skb_shinfo(skb)->nr_frags - 1;
3844 for (i = 0; i <= last; i++) {
3845 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3846
3847 len = frag->size;
3848 mapping = pci_map_page(tp->pdev,
3849 frag->page,
3850 frag->page_offset,
3851 len, PCI_DMA_TODEVICE);
3852
3853 tp->tx_buffers[entry].skb = NULL;
3854 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3855
3856 tg3_set_txd(tp, entry, mapping, len,
3857 base_flags, (i == last) | (mss << 1));
3858
3859 entry = NEXT_TX(entry);
3860 }
3861 }
3862
3863 /* Packets are ready, update Tx producer idx local and on card. */
3864 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3865
3866 tp->tx_prod = entry;
00b70504
MC
3867 if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
3868 spin_lock(&tp->tx_lock);
5a6f3074
MC
3869 netif_stop_queue(dev);
3870 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3871 netif_wake_queue(tp->dev);
00b70504 3872 spin_unlock(&tp->tx_lock);
5a6f3074
MC
3873 }
3874
3875out_unlock:
3876 mmiowb();
5a6f3074
MC
3877
3878 dev->trans_start = jiffies;
3879
3880 return NETDEV_TX_OK;
3881}
3882
#if TG3_TSO_SUPPORT != 0
static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Segments the skb in software and transmits each resulting segment
 * through the slow-path xmit routine.  The original skb is always
 * consumed.  Returns NETDEV_TX_BUSY (queue stopped) only when there
 * is clearly not enough ring space for the worst case.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
		netif_stop_queue(tp->dev);
		return NETDEV_TX_BUSY;
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	/* skb_gso_segment() may return an ERR_PTR *or* NULL; either way
	 * there is nothing to transmit, so drop the packet instead of
	 * dereferencing an invalid pointer in the loop below.
	 */
	if (unlikely(IS_ERR(segs) || segs == NULL))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
#endif
3916
5a6f3074
MC
3917/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3918 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3919 */
3920static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
1da177e4
LT
3921{
3922 struct tg3 *tp = netdev_priv(dev);
3923 dma_addr_t mapping;
1da177e4
LT
3924 u32 len, entry, base_flags, mss;
3925 int would_hit_hwbug;
1da177e4
LT
3926
3927 len = skb_headlen(skb);
3928
00b70504
MC
3929 /* We are running in BH disabled context with netif_tx_lock
3930 * and TX reclaim runs via tp->poll inside of a software
f47c11ee
DM
3931 * interrupt. Furthermore, IRQ processing runs lockless so we have
3932 * no IRQ context deadlocks to worry about either. Rejoice!
1da177e4 3933 */
1da177e4 3934 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
1f064a87
SH
3935 if (!netif_queue_stopped(dev)) {
3936 netif_stop_queue(dev);
3937
3938 /* This is a hard error, log it. */
3939 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3940 "queue awake!\n", dev->name);
3941 }
1da177e4
LT
3942 return NETDEV_TX_BUSY;
3943 }
3944
3945 entry = tp->tx_prod;
3946 base_flags = 0;
3947 if (skb->ip_summed == CHECKSUM_HW)
3948 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3949#if TG3_TSO_SUPPORT != 0
3950 mss = 0;
3951 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
7967168c 3952 (mss = skb_shinfo(skb)->gso_size) != 0) {
52c0fd83 3953 int tcp_opt_len, ip_tcp_len, hdr_len;
1da177e4
LT
3954
3955 if (skb_header_cloned(skb) &&
3956 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3957 dev_kfree_skb(skb);
3958 goto out_unlock;
3959 }
3960
3961 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3962 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3963
52c0fd83
MC
3964 hdr_len = ip_tcp_len + tcp_opt_len;
3965 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
3966 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
3967 return (tg3_tso_bug(tp, skb));
3968
1da177e4
LT
3969 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3970 TXD_FLAG_CPU_POST_DMA);
3971
3972 skb->nh.iph->check = 0;
52c0fd83 3973 skb->nh.iph->tot_len = htons(mss + hdr_len);
1da177e4
LT
3974 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3975 skb->h.th->check = 0;
3976 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3977 }
3978 else {
3979 skb->h.th->check =
3980 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3981 skb->nh.iph->daddr,
3982 0, IPPROTO_TCP, 0);
3983 }
3984
3985 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3986 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3987 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3988 int tsflags;
3989
3990 tsflags = ((skb->nh.iph->ihl - 5) +
3991 (tcp_opt_len >> 2));
3992 mss |= (tsflags << 11);
3993 }
3994 } else {
3995 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3996 int tsflags;
3997
3998 tsflags = ((skb->nh.iph->ihl - 5) +
3999 (tcp_opt_len >> 2));
4000 base_flags |= tsflags << 12;
4001 }
4002 }
4003 }
4004#else
4005 mss = 0;
4006#endif
4007#if TG3_VLAN_TAG_USED
4008 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4009 base_flags |= (TXD_FLAG_VLAN |
4010 (vlan_tx_tag_get(skb) << 16));
4011#endif
4012
4013 /* Queue skb data, a.k.a. the main skb fragment. */
4014 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4015
4016 tp->tx_buffers[entry].skb = skb;
4017 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4018
4019 would_hit_hwbug = 0;
4020
4021 if (tg3_4g_overflow_test(mapping, len))
c58ec932 4022 would_hit_hwbug = 1;
1da177e4
LT
4023
4024 tg3_set_txd(tp, entry, mapping, len, base_flags,
4025 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4026
4027 entry = NEXT_TX(entry);
4028
4029 /* Now loop through additional data fragments, and queue them. */
4030 if (skb_shinfo(skb)->nr_frags > 0) {
4031 unsigned int i, last;
4032
4033 last = skb_shinfo(skb)->nr_frags - 1;
4034 for (i = 0; i <= last; i++) {
4035 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4036
4037 len = frag->size;
4038 mapping = pci_map_page(tp->pdev,
4039 frag->page,
4040 frag->page_offset,
4041 len, PCI_DMA_TODEVICE);
4042
4043 tp->tx_buffers[entry].skb = NULL;
4044 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4045
c58ec932
MC
4046 if (tg3_4g_overflow_test(mapping, len))
4047 would_hit_hwbug = 1;
1da177e4 4048
72f2afb8
MC
4049 if (tg3_40bit_overflow_test(tp, mapping, len))
4050 would_hit_hwbug = 1;
4051
1da177e4
LT
4052 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4053 tg3_set_txd(tp, entry, mapping, len,
4054 base_flags, (i == last)|(mss << 1));
4055 else
4056 tg3_set_txd(tp, entry, mapping, len,
4057 base_flags, (i == last));
4058
4059 entry = NEXT_TX(entry);
4060 }
4061 }
4062
4063 if (would_hit_hwbug) {
4064 u32 last_plus_one = entry;
4065 u32 start;
1da177e4 4066
c58ec932
MC
4067 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4068 start &= (TG3_TX_RING_SIZE - 1);
1da177e4
LT
4069
4070 /* If the workaround fails due to memory/mapping
4071 * failure, silently drop this packet.
4072 */
72f2afb8 4073 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
c58ec932 4074 &start, base_flags, mss))
1da177e4
LT
4075 goto out_unlock;
4076
4077 entry = start;
4078 }
4079
4080 /* Packets are ready, update Tx producer idx local and on card. */
4081 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4082
4083 tp->tx_prod = entry;
00b70504
MC
4084 if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
4085 spin_lock(&tp->tx_lock);
1da177e4 4086 netif_stop_queue(dev);
51b91468
MC
4087 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
4088 netif_wake_queue(tp->dev);
00b70504 4089 spin_unlock(&tp->tx_lock);
51b91468 4090 }
1da177e4
LT
4091
4092out_unlock:
4093 mmiowb();
1da177e4
LT
4094
4095 dev->trans_start = jiffies;
4096
4097 return NETDEV_TX_OK;
4098}
4099
4100static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4101 int new_mtu)
4102{
4103 dev->mtu = new_mtu;
4104
ef7f5ec0 4105 if (new_mtu > ETH_DATA_LEN) {
a4e2b347 4106 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
ef7f5ec0
MC
4107 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4108 ethtool_op_set_tso(dev, 0);
4109 }
4110 else
4111 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4112 } else {
a4e2b347 4113 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
ef7f5ec0 4114 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
0f893dc6 4115 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
ef7f5ec0 4116 }
1da177e4
LT
4117}
4118
4119static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4120{
4121 struct tg3 *tp = netdev_priv(dev);
4122
4123 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4124 return -EINVAL;
4125
4126 if (!netif_running(dev)) {
4127 /* We'll just catch it later when the
4128 * device is up'd.
4129 */
4130 tg3_set_mtu(dev, tp, new_mtu);
4131 return 0;
4132 }
4133
4134 tg3_netif_stop(tp);
f47c11ee
DM
4135
4136 tg3_full_lock(tp, 1);
1da177e4 4137
944d980e 4138 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
4139
4140 tg3_set_mtu(dev, tp, new_mtu);
4141
8e7a22e3 4142 tg3_init_hw(tp, 0);
1da177e4
LT
4143
4144 tg3_netif_start(tp);
4145
f47c11ee 4146 tg3_full_unlock(tp);
1da177e4
LT
4147
4148 return 0;
4149}
4150
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Standard RX ring: unmap each posted buffer and drop the skb. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Jumbo RX ring: same, but with the fixed jumbo buffer size. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* TX ring: each skb occupies one head slot plus one slot per
	 * fragment, so the index is advanced manually inside the loop.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* Head slot maps the linear part of the skb. */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		/* Following slots map the paged fragments; the mask
		 * handles wrap-around of the ring index.
		 */
		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		/* Free the skb only after all of its slots are unmapped. */
		dev_kfree_skb_any(skb);
	}
}
4222
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static void tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips use jumbo-sized buffers in the standard ring
	 * when the MTU exceeds the normal Ethernet payload.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		/* NOTE(review): the "- 64" headroom reservation matches the
		 * hardware's expectation for this descriptor format — confirm
		 * against the standard-ring buffer allocation path.
		 */
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
				       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring.  Allocation failure
	 * simply stops posting; the ring runs with fewer buffers.
	 */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
				     -1, i) < 0)
			break;
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0)
				break;
		}
	}
}
4292
/*
 * Free all ring bookkeeping and DMA-coherent ring memory.
 *
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * Safe to call on a partially-allocated state (each pointer is
 * checked and NULLed), which is why tg3_alloc_consistent() can use
 * it as its error-path cleanup.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	/* rx_jumbo_buffers and tx_buffers live inside this one
	 * allocation (see tg3_alloc_consistent), so a single kfree
	 * releases all three bookkeeping arrays.
	 */
	kfree(tp->rx_std_buffers);
	tp->rx_std_buffers = NULL;
	if (tp->rx_std) {
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
				    tp->rx_std, tp->rx_std_mapping);
		tp->rx_std = NULL;
	}
	if (tp->rx_jumbo) {
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
				    tp->rx_jumbo, tp->rx_jumbo_mapping);
		tp->rx_jumbo = NULL;
	}
	if (tp->rx_rcb) {
		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
				    tp->rx_rcb, tp->rx_rcb_mapping);
		tp->rx_rcb = NULL;
	}
	if (tp->tx_ring) {
		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
				    tp->tx_ring, tp->tx_desc_mapping);
		tp->tx_ring = NULL;
	}
	if (tp->hw_status) {
		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
				    tp->hw_status, tp->status_mapping);
		tp->hw_status = NULL;
	}
	if (tp->hw_stats) {
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
4332
/*
 * Allocate ring bookkeeping arrays and DMA-coherent ring memory.
 *
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Returns 0 on success or -ENOMEM; on failure everything already
 * allocated is released via tg3_free_consistent().
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	/* One allocation holds the std-RX, jumbo-RX and TX bookkeeping
	 * arrays back to back; the two derived pointers below are set
	 * into that same buffer.
	 */
	tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
				      (TG3_RX_RING_SIZE +
				       TG3_RX_JUMBO_RING_SIZE)) +
				     (sizeof(struct tx_ring_info) *
				      TG3_TX_RING_SIZE),
				     GFP_KERNEL);
	if (!tp->rx_std_buffers)
		return -ENOMEM;

	memset(tp->rx_std_buffers, 0,
	       (sizeof(struct ring_info) *
		(TG3_RX_RING_SIZE +
		 TG3_RX_JUMBO_RING_SIZE)) +
	       (sizeof(struct tx_ring_info) *
		TG3_TX_RING_SIZE));

	tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
	tp->tx_buffers = (struct tx_ring_info *)
		&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

	/* DMA-coherent descriptor rings shared with the chip. */
	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
					  &tp->rx_std_mapping);
	if (!tp->rx_std)
		goto err_out;

	tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
					    &tp->rx_jumbo_mapping);

	if (!tp->rx_jumbo)
		goto err_out;

	tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
					  &tp->rx_rcb_mapping);
	if (!tp->rx_rcb)
		goto err_out;

	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
					   &tp->tx_desc_mapping);
	if (!tp->tx_ring)
		goto err_out;

	/* Status block and statistics block, written by the chip. */
	tp->hw_status = pci_alloc_consistent(tp->pdev,
					     TG3_HW_STATUS_SIZE,
					     &tp->status_mapping);
	if (!tp->hw_status)
		goto err_out;

	tp->hw_stats = pci_alloc_consistent(tp->pdev,
					    sizeof(struct tg3_hw_stats),
					    &tp->stats_mapping);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
4401
4402#define MAX_WAIT_CNT 1000
4403
4404/* To stop a block, clear the enable bit and poll till it
4405 * clears. tp->lock is held.
4406 */
b3b7d6be 4407static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
1da177e4
LT
4408{
4409 unsigned int i;
4410 u32 val;
4411
4412 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4413 switch (ofs) {
4414 case RCVLSC_MODE:
4415 case DMAC_MODE:
4416 case MBFREE_MODE:
4417 case BUFMGR_MODE:
4418 case MEMARB_MODE:
4419 /* We can't enable/disable these bits of the
4420 * 5705/5750, just say success.
4421 */
4422 return 0;
4423
4424 default:
4425 break;
4426 };
4427 }
4428
4429 val = tr32(ofs);
4430 val &= ~enable_bit;
4431 tw32_f(ofs, val);
4432
4433 for (i = 0; i < MAX_WAIT_CNT; i++) {
4434 udelay(100);
4435 val = tr32(ofs);
4436 if ((val & enable_bit) == 0)
4437 break;
4438 }
4439
b3b7d6be 4440 if (i == MAX_WAIT_CNT && !silent) {
1da177e4
LT
4441 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4442 "ofs=%lx enable_bit=%x\n",
4443 ofs, enable_bit);
4444 return -ENODEV;
4445 }
4446
4447 return 0;
4448}
4449
/* tp->lock is held.
 *
 * Orderly shutdown of the chip's DMA engines: receive path first,
 * then the transmit path, then host coalescing and the memory
 * arbiter.  Errors from individual blocks are OR-ed together so the
 * whole sequence always runs; the ordering of the stop calls follows
 * the hardware's data path and must not be rearranged.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Stop the receive-side blocks. */
	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Stop the send-side blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	/* Wait for the MAC transmitter to actually go idle. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the flow-through queue reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear the chip-written status and statistics blocks. */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4512
4513/* tp->lock is held. */
/* Acquire the NVRAM software arbitration semaphore.
 *
 * tp->lock is held.  The lock is recursive via nvram_lock_cnt: only
 * the first holder touches the hardware arbiter; nested calls just
 * bump the count.  Returns 0 on success, -ENODEV if the arbiter
 * grant never arrives (polled up to 8000 * 20us).
 */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				/* Withdraw the request before failing. */
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
4535
4536/* tp->lock is held. */
4537static void tg3_nvram_unlock(struct tg3 *tp)
4538{
ec41c7df
MC
4539 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4540 if (tp->nvram_lock_cnt > 0)
4541 tp->nvram_lock_cnt--;
4542 if (tp->nvram_lock_cnt == 0)
4543 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4544 }
1da177e4
LT
4545}
4546
e6af301b
MC
4547/* tp->lock is held. */
4548static void tg3_enable_nvram_access(struct tg3 *tp)
4549{
4550 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4551 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4552 u32 nvaccess = tr32(NVRAM_ACCESS);
4553
4554 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4555 }
4556}
4557
4558/* tp->lock is held. */
4559static void tg3_disable_nvram_access(struct tg3 *tp)
4560{
4561 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4562 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4563 u32 nvaccess = tr32(NVRAM_ACCESS);
4564
4565 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4566 }
4567}
4568
1da177e4
LT
4569/* tp->lock is held. */
4570static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4571{
f49639e6
DM
4572 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4573 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1da177e4
LT
4574
4575 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4576 switch (kind) {
4577 case RESET_KIND_INIT:
4578 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4579 DRV_STATE_START);
4580 break;
4581
4582 case RESET_KIND_SHUTDOWN:
4583 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4584 DRV_STATE_UNLOAD);
4585 break;
4586
4587 case RESET_KIND_SUSPEND:
4588 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4589 DRV_STATE_SUSPEND);
4590 break;
4591
4592 default:
4593 break;
4594 };
4595 }
4596}
4597
4598/* tp->lock is held. */
4599static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4600{
4601 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4602 switch (kind) {
4603 case RESET_KIND_INIT:
4604 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4605 DRV_STATE_START_DONE);
4606 break;
4607
4608 case RESET_KIND_SHUTDOWN:
4609 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4610 DRV_STATE_UNLOAD_DONE);
4611 break;
4612
4613 default:
4614 break;
4615 };
4616 }
4617}
4618
4619/* tp->lock is held. */
4620static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4621{
4622 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4623 switch (kind) {
4624 case RESET_KIND_INIT:
4625 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4626 DRV_STATE_START);
4627 break;
4628
4629 case RESET_KIND_SHUTDOWN:
4630 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4631 DRV_STATE_UNLOAD);
4632 break;
4633
4634 case RESET_KIND_SUSPEND:
4635 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4636 DRV_STATE_SUSPEND);
4637 break;
4638
4639 default:
4640 break;
4641 };
4642 }
4643}
4644
4645static void tg3_stop_fw(struct tg3 *);
4646
/* tp->lock is held.
 *
 * Perform a full core-clock reset of the chip and bring the PCI
 * interface, MAC mode and firmware handshake state back to a sane
 * baseline.  The sequence of register writes and delays below is
 * timing-sensitive; do not reorder.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* NOTE(review): 0x7e2c and bit 29 below are undocumented
		 * PCIE workaround registers/bits — confirm against vendor
		 * errata before touching.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status. */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_restore_state(tp->pdev);

	/* Make sure PCI-X relaxed ordering bit is clear. */
	pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
	val &= ~PCIX_CAPS_RELAXED_ORDERING;
	pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		u32 val;

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}

		val = tr32(MEMARB_MODE);
		tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	} else
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		u32 val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Restore the MAC port mode according to the PHY type. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		u32 val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state. */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
4857
4858/* tp->lock is held. */
4859static void tg3_stop_fw(struct tg3 *tp)
4860{
4861 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4862 u32 val;
4863 int i;
4864
4865 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4866 val = tr32(GRC_RX_CPU_EVENT);
4867 val |= (1 << 14);
4868 tw32(GRC_RX_CPU_EVENT, val);
4869
4870 /* Wait for RX cpu to ACK the event. */
4871 for (i = 0; i < 100; i++) {
4872 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4873 break;
4874 udelay(1);
4875 }
4876 }
4877}
4878
/* tp->lock is held.
 *
 * Full halt sequence: pause the ASF firmware, signal the reset kind
 * to firmware, abort the hardware engines, reset the chip, then
 * publish the post-reset state.  @kind is one of RESET_KIND_*;
 * @silent suppresses tg3_abort_hw's timeout messages.  Returns the
 * result of tg3_chip_reset().
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
4899
4900#define TG3_FW_RELEASE_MAJOR 0x0
4901#define TG3_FW_RELASE_MINOR 0x0
4902#define TG3_FW_RELEASE_FIX 0x0
4903#define TG3_FW_START_ADDR 0x08000000
4904#define TG3_FW_TEXT_ADDR 0x08000000
4905#define TG3_FW_TEXT_LEN 0x9c0
4906#define TG3_FW_RODATA_ADDR 0x080009c0
4907#define TG3_FW_RODATA_LEN 0x60
4908#define TG3_FW_DATA_ADDR 0x08000a40
4909#define TG3_FW_DATA_LEN 0x20
4910#define TG3_FW_SBSS_ADDR 0x08000a60
4911#define TG3_FW_SBSS_LEN 0xc
4912#define TG3_FW_BSS_ADDR 0x08000a70
4913#define TG3_FW_BSS_LEN 0x10
4914
/* Firmware .text image loaded into the on-chip CPUs by
 * tg3_load_5701_a0_firmware_fix() (via tg3_load_firmware_cpu).
 * Opaque binary data — presumably MIPS machine code for the chip's
 * embedded CPUs (the load addresses above start at 0x08000000);
 * do not edit by hand.
 */
static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5008
/* Firmware .rodata image (see tg3FwText above).  The words look like
 * little-endian-packed ASCII tags — presumably event/error names used
 * by the firmware; opaque data, do not edit by hand.
 */
static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5016
5017#if 0 /* All zeros, don't eat up space with it. */
5018u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5019 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5020 0x00000000, 0x00000000, 0x00000000, 0x00000000
5021};
5022#endif
5023
5024#define RX_CPU_SCRATCH_BASE 0x30000
5025#define RX_CPU_SCRATCH_SIZE 0x04000
5026#define TX_CPU_SCRATCH_BASE 0x34000
5027#define TX_CPU_SCRATCH_SIZE 0x04000
5028
/* tp->lock is held.
 *
 * Halt one of the on-chip CPUs (@offset is RX_CPU_BASE or
 * TX_CPU_BASE).  5705-plus chips have no independent TX CPU, hence
 * the BUG_ON.  Returns 0 on success, -ENODEV if the CPU never
 * reports the halted state.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));

	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* Issue one final flushed halt for the RX CPU. */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
		       "and %s CPU\n",
		       tp->dev->name,
		       (offset == RX_CPU_BASE ? "RX" : "TX"));
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
5070
/* Describes a firmware image to load into an on-chip CPU: three
 * sections (text/rodata/data), each with its load address, length in
 * bytes, and the word array holding its contents.  A NULL data
 * pointer means the section is all zeros (see tg3_load_firmware_cpu).
 */
struct fw_info {
	unsigned int text_base;		/* .text load address */
	unsigned int text_len;		/* .text length in bytes */
	u32 *text_data;			/* .text words, or NULL for zeros */
	unsigned int rodata_base;	/* .rodata load address */
	unsigned int rodata_len;	/* .rodata length in bytes */
	u32 *rodata_data;		/* .rodata words, or NULL for zeros */
	unsigned int data_base;		/* .data load address */
	unsigned int data_len;		/* .data length in bytes */
	u32 *data_data;			/* .data words, or NULL for zeros */
};
5082
/* tp->lock is held.
 *
 * Load @info's firmware sections into the scratch memory of the CPU
 * at @cpu_base, after halting that CPU.  The scratch area is zeroed
 * first, then each section is written at (section base & 0xffff)
 * within the scratch region.  Returns 0 on success or a negative
 * errno.  Does not start the CPU — the caller does that.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
				 int cpu_scratch_size, struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
		       "TX cpu firmware on %s which is 5705.\n",
		       tp->dev->name);
		return -EINVAL;
	}

	/* 5705-plus chips must be written through the SRAM window;
	 * older chips use indirect register writes.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	/* Zero the whole scratch area, then write each section.
	 * NULL section data means "all zeros".
	 */
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->text_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->text_base & 0xffff) +
			      (i * sizeof(u32))),
			 (info->text_data ?
			  info->text_data[i] : 0));
	for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->rodata_base & 0xffff) +
			      (i * sizeof(u32))),
			 (info->rodata_data ?
			  info->rodata_data[i] : 0));
	for (i = 0; i < (info->data_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->data_base & 0xffff) +
			      (i * sizeof(u32))),
			 (info->data_data ?
			  info->data_data[i] : 0));

	err = 0;

out:
	return err;
}
5141
5142/* tp->lock is held. */
5143static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5144{
5145 struct fw_info info;
5146 int err, i;
5147
5148 info.text_base = TG3_FW_TEXT_ADDR;
5149 info.text_len = TG3_FW_TEXT_LEN;
5150 info.text_data = &tg3FwText[0];
5151 info.rodata_base = TG3_FW_RODATA_ADDR;
5152 info.rodata_len = TG3_FW_RODATA_LEN;
5153 info.rodata_data = &tg3FwRodata[0];
5154 info.data_base = TG3_FW_DATA_ADDR;
5155 info.data_len = TG3_FW_DATA_LEN;
5156 info.data_data = NULL;
5157
5158 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5159 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5160 &info);
5161 if (err)
5162 return err;
5163
5164 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5165 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5166 &info);
5167 if (err)
5168 return err;
5169
5170 /* Now startup only the RX cpu. */
5171 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5172 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5173
5174 for (i = 0; i < 5; i++) {
5175 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5176 break;
5177 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5178 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
5179 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5180 udelay(1000);
5181 }
5182 if (i >= 5) {
5183 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5184 "to set RX CPU PC, is %08x should be %08x\n",
5185 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5186 TG3_FW_TEXT_ADDR);
5187 return -ENODEV;
5188 }
5189 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5190 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
5191
5192 return 0;
5193}
5194
#if TG3_TSO_SUPPORT != 0

/* Section layout (load addresses and lengths) of the TSO firmware image
 * defined by the tg3TsoFw* arrays below.
 *
 * NOTE(review): "RELASE" in TG3_TSO_FW_RELASE_MINOR is a historical
 * misspelling of "RELEASE"; the name is kept as-is so any existing
 * references elsewhere in the file keep compiling.
 */
#define TG3_TSO_FW_RELEASE_MAJOR	0x1
#define TG3_TSO_FW_RELASE_MINOR		0x6
#define TG3_TSO_FW_RELEASE_FIX		0x0
#define TG3_TSO_FW_START_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_LEN		0x1aa0
#define TG3_TSO_FW_RODATA_ADDR		0x08001aa0
#define TG3_TSO_FW_RODATA_LEN		0x60
#define TG3_TSO_FW_DATA_ADDR		0x08001b20
#define TG3_TSO_FW_DATA_LEN		0x30
#define TG3_TSO_FW_SBSS_ADDR		0x08001b50
#define TG3_TSO_FW_SBSS_LEN		0x2c
#define TG3_TSO_FW_BSS_ADDR		0x08001b80
#define TG3_TSO_FW_BSS_LEN		0x894

/* Text (instruction) section of the TSO firmware image.  Opaque blob:
 * never edit by hand; must stay consistent with TG3_TSO_FW_TEXT_ADDR /
 * TG3_TSO_FW_TEXT_LEN above.
 */
static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
	0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
	0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
	0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
	0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
	0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
	0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
	0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
	0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
	0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
	0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
	0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
	0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
	0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
	0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
	0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
	0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
	0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
	0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
	0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
	0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
	0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
	0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
	0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
	0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
	0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
	0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
	0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
	0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
	0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
	0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
	0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
	0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
	0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
	0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
	0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
	0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
	0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
	0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
	0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
	0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
	0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
	0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
	0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
	0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
	0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
	0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
	0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
	0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
	0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
	0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
	0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
	0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
	0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
	0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
	0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
	0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
	0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
	0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
	0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
	0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
	0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
	0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
	0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
	0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
	0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
	0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
	0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
	0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
	0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
	0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
	0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
	0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
	0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
	0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
	0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
	0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
	0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
	0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
	0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
	0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
	0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
	0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
	0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
	0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
	0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
	0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
	0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
	0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
	0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
	0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
	0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
	0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
	0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
	0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
	0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
	0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
	0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
	0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
	0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
	0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
	0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
	0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
	0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
	0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
	0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
	0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
	0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
	0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
	0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
	0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
	0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
	0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
	0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
	0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
	0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
	0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
	0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
	0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
	0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
	0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
	0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
	0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
	0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
	0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
	0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
	0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
	0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
	0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
	0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
	0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
	0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
	0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
	0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
	0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
	0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
	0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
	0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
	0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
	0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
	0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
	0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
	0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
	0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
	0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
	0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
	0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
	0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
	0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
	0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
	0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
	0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
	0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
	0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
	0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
	0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
	0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
	0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
	0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
	0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
	0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
	0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
	0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
	0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
	0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
	0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
	0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
	0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
	0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
	0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
	0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
	0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
	0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
	0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
	0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
	0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
	0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
	0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
	0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
	0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
	0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
	0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
	0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
	0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
	0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
	0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
	0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
	0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
	0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
	0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
	0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
	0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
	0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
	0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
	0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
	0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
	0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
	0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
	0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
	0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
	0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
	0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
	0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
	0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
	0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
	0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
	0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
	0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
	0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
	0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
	0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
	0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
	0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
	0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
	0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
	0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
	0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
	0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
	0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
	0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
	0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
	0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
	0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
	0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
	0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
	0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
	0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
	0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
	0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
	0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
	0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
	0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
	0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
	0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
	0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
	0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
	0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
	0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
	0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
	0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
	0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
	0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
	0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
	0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
	0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
	0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
	0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
	0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
	0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
	0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
	0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
	0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
	0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
	0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
	0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
	0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
	0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
	0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
	0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
	0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
	0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
	0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
	0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
	0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
	0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
	0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
	0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
	0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
	0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
	0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
	0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
	0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
	0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
	0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
	0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
};
5498
/* Read-only data section of the TSO firmware image (the words decode to
 * short ASCII tags such as "Main"/"fatalErr").  Opaque blob: never edit
 * by hand; must stay consistent with TG3_TSO_FW_RODATA_ADDR/LEN above.
 */
static u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
5506
/* Initialized-data section of the TSO firmware image (contains an ASCII
 * version tag).  Opaque blob: never edit by hand; must stay consistent
 * with TG3_TSO_FW_DATA_ADDR/LEN above.
 */
static u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
5512
/* 5705 needs a special version of the TSO firmware.  Section layout
 * (load addresses and lengths) of that image follows.
 *
 * NOTE(review): "RELASE" in TG3_TSO5_FW_RELASE_MINOR repeats the
 * historical misspelling used for the main TSO image; the name is kept
 * as-is so any existing references elsewhere in the file keep compiling.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR	0x1
#define TG3_TSO5_FW_RELASE_MINOR	0x2
#define TG3_TSO5_FW_RELEASE_FIX		0x0
#define TG3_TSO5_FW_START_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_LEN		0xe90
#define TG3_TSO5_FW_RODATA_ADDR		0x00010e90
#define TG3_TSO5_FW_RODATA_LEN		0x50
#define TG3_TSO5_FW_DATA_ADDR		0x00010f00
#define TG3_TSO5_FW_DATA_LEN		0x20
#define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
#define TG3_TSO5_FW_SBSS_LEN		0x28
#define TG3_TSO5_FW_BSS_ADDR		0x00010f50
#define TG3_TSO5_FW_BSS_LEN		0x88
5528
5529static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5530 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5531 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5532 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5533 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5534 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5535 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5536 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5537 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5538 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5539 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5540 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5541 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5542 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5543 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5544 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5545 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5546 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5547 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5548 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5549 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5550 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5551 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5552 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5553 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5554 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5555 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5556 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5557 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5558 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5559 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5560 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5561 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5562 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5563 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5564 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5565 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5566 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5567 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5568 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5569 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5570 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5571 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5572 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5573 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5574 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5575 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5576 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5577 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5578 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5579 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5580 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5581 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5582 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5583 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5584 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5585 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5586 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5587 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5588 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5589 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5590 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5591 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5592 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5593 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5594 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5595 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5596 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5597 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5598 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5599 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5600 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5601 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5602 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5603 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5604 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5605 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5606 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5607 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5608 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5609 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5610 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5611 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5612 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5613 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5614 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5615 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5616 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5617 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5618 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5619 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5620 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5621 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5622 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5623 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5624 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5625 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5626 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5627 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5628 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5629 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5630 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5631 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5632 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5633 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5634 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5635 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5636 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5637 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5638 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5639 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5640 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5641 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5642 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5643 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5644 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5645 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5646 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5647 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5648 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5649 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5650 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5651 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5652 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5653 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5654 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5655 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5656 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5657 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5658 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5659 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5660 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5661 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5662 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5663 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5664 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5665 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5666 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5667 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5668 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5669 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5670 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5671 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5672 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5673 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5674 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5675 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5676 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5677 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5678 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5679 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5680 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5681 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5682 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5683 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5684 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5685 0x00000000, 0x00000000, 0x00000000,
5686};
5687
/* Read-only data segment of the 5705 TSO firmware image, loaded at
 * TG3_TSO5_FW_RODATA_ADDR by tg3_load_tso_firmware().  The words are
 * ASCII tags the firmware uses for diagnostics ("MainCpuB", "MainCpuA",
 * "stkoffld", "fatalErr").  The "+ 1" pads the array with a trailing
 * zero word in case the length macro is not an exact multiple of 4.
 */
static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
5694
/* Initialized data segment of the 5705 TSO firmware image, loaded at
 * TG3_TSO5_FW_DATA_ADDR.  Contains the firmware version string
 * "stkoffld_v1.2.0" in ASCII.
 */
static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
5699
/* tp->lock is held.
 *
 * Download the TSO offload firmware into the appropriate on-chip CPU
 * and start it running.
 *
 * Chips with hardware TSO (TG3_FLG2_HW_TSO) need no firmware, so this
 * returns 0 immediately for them.  On the 5705 the firmware runs on the
 * RX CPU using part of the mbuf-pool SRAM as scratch space; on all
 * other TSO-capable chips it runs on the TX CPU's dedicated scratch
 * memory.  After loading, the CPU program counter is pointed at the
 * firmware entry point and the CPU is released from halt.
 *
 * Returns 0 on success, a negative error from tg3_load_firmware_cpu(),
 * or -ENODEV if the CPU refuses to latch the new program counter.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		info.text_base = TG3_TSO5_FW_TEXT_ADDR;
		info.text_len = TG3_TSO5_FW_TEXT_LEN;
		info.text_data = &tg3Tso5FwText[0];
		info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
		info.rodata_data = &tg3Tso5FwRodata[0];
		info.data_base = TG3_TSO5_FW_DATA_ADDR;
		info.data_len = TG3_TSO5_FW_DATA_LEN;
		info.data_data = &tg3Tso5FwData[0];
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
		/* Scratch area must hold the full firmware footprint:
		 * text + rodata + data + sbss + bss.
		 */
		cpu_scratch_size = (info.text_len +
				    info.rodata_len +
				    info.data_len +
				    TG3_TSO5_FW_SBSS_LEN +
				    TG3_TSO5_FW_BSS_LEN);
	} else {
		info.text_base = TG3_TSO_FW_TEXT_ADDR;
		info.text_len = TG3_TSO_FW_TEXT_LEN;
		info.text_data = &tg3TsoFwText[0];
		info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO_FW_RODATA_LEN;
		info.rodata_data = &tg3TsoFwRodata[0];
		info.data_base = TG3_TSO_FW_DATA_ADDR;
		info.data_len = TG3_TSO_FW_DATA_LEN;
		info.data_data = &tg3TsoFwData[0];
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.text_base);

	/* Give the CPU up to 5 tries (1 ms apart) to latch the new PC,
	 * re-halting and re-writing it on each attempt.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.text_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.text_base);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
		       "to set CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.text_base);
		return -ENODEV;
	}
	/* Clear pending state and release the CPU from halt. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
5771
5772#endif /* TG3_TSO_SUPPORT != 0 */
5773
5774/* tp->lock is held. */
5775static void __tg3_set_mac_addr(struct tg3 *tp)
5776{
5777 u32 addr_high, addr_low;
5778 int i;
5779
5780 addr_high = ((tp->dev->dev_addr[0] << 8) |
5781 tp->dev->dev_addr[1]);
5782 addr_low = ((tp->dev->dev_addr[2] << 24) |
5783 (tp->dev->dev_addr[3] << 16) |
5784 (tp->dev->dev_addr[4] << 8) |
5785 (tp->dev->dev_addr[5] << 0));
5786 for (i = 0; i < 4; i++) {
5787 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5788 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5789 }
5790
5791 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5792 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5793 for (i = 0; i < 12; i++) {
5794 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5795 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5796 }
5797 }
5798
5799 addr_high = (tp->dev->dev_addr[0] +
5800 tp->dev->dev_addr[1] +
5801 tp->dev->dev_addr[2] +
5802 tp->dev->dev_addr[3] +
5803 tp->dev->dev_addr[4] +
5804 tp->dev->dev_addr[5]) &
5805 TX_BACKOFF_SEED_MASK;
5806 tw32(MAC_TX_BACKOFF_SEED, addr_high);
5807}
5808
/* net_device hook: change the interface MAC address.
 *
 * @dev: network device being updated
 * @p:   struct sockaddr holding the new hardware address
 *
 * Validates and copies the new address into dev->dev_addr.  If the
 * interface is down that is all that is needed; the address is written
 * to hardware on the next open.  If the interface is up, the chip is
 * either fully reset (when ASF management firmware is enabled, so ASF
 * can re-initialize the MAC addresses it tracks) or just reprogrammed
 * under tp->lock via __tg3_set_mac_addr().
 *
 * Returns 0 on success or -EINVAL for an invalid ethernet address.
 */
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		/* Reset chip so that ASF can re-init any MAC addresses it
		 * needs.
		 */
		tg3_netif_stop(tp);
		tg3_full_lock(tp, 1);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* NOTE(review): tg3_init_hw()'s result is ignored here; if
		 * re-init fails the device is left dead while we report
		 * success — consider propagating its return value.
		 */
		tg3_init_hw(tp, 0);

		tg3_netif_start(tp);
		tg3_full_unlock(tp);
	} else {
		spin_lock_bh(&tp->lock);
		__tg3_set_mac_addr(tp);
		spin_unlock_bh(&tp->lock);
	}

	return 0;
}
5842
5843/* tp->lock is held. */
5844static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5845 dma_addr_t mapping, u32 maxlen_flags,
5846 u32 nic_addr)
5847{
5848 tg3_write_mem(tp,
5849 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5850 ((u64) mapping >> 32));
5851 tg3_write_mem(tp,
5852 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5853 ((u64) mapping & 0xffffffff));
5854 tg3_write_mem(tp,
5855 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5856 maxlen_flags);
5857
5858 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5859 tg3_write_mem(tp,
5860 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5861 nic_addr);
5862}
5863
5864static void __tg3_set_rx_mode(struct net_device *);
d244c892 5865static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
15f9850d
DM
5866{
5867 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5868 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5869 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5870 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5871 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5872 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5873 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5874 }
5875 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5876 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5877 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5878 u32 val = ec->stats_block_coalesce_usecs;
5879
5880 if (!netif_carrier_ok(tp->dev))
5881 val = 0;
5882
5883 tw32(HOSTCC_STAT_COAL_TICKS, val);
5884 }
5885}
1da177e4
LT
5886
5887/* tp->lock is held. */
8e7a22e3 5888static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
5889{
5890 u32 val, rdmac_mode;
5891 int i, err, limit;
5892
5893 tg3_disable_ints(tp);
5894
5895 tg3_stop_fw(tp);
5896
5897 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5898
5899 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
e6de8ad1 5900 tg3_abort_hw(tp, 1);
1da177e4
LT
5901 }
5902
8e7a22e3 5903 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
d4d2c558
MC
5904 tg3_phy_reset(tp);
5905
1da177e4
LT
5906 err = tg3_chip_reset(tp);
5907 if (err)
5908 return err;
5909
5910 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5911
5912 /* This works around an issue with Athlon chipsets on
5913 * B3 tigon3 silicon. This bit has no effect on any
5914 * other revision. But do not set this on PCI Express
5915 * chips.
5916 */
5917 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5918 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5919 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5920
5921 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5922 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5923 val = tr32(TG3PCI_PCISTATE);
5924 val |= PCISTATE_RETRY_SAME_DMA;
5925 tw32(TG3PCI_PCISTATE, val);
5926 }
5927
5928 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5929 /* Enable some hw fixes. */
5930 val = tr32(TG3PCI_MSI_DATA);
5931 val |= (1 << 26) | (1 << 28) | (1 << 29);
5932 tw32(TG3PCI_MSI_DATA, val);
5933 }
5934
5935 /* Descriptor ring init may make accesses to the
5936 * NIC SRAM area to setup the TX descriptors, so we
5937 * can only do this after the hardware has been
5938 * successfully reset.
5939 */
5940 tg3_init_rings(tp);
5941
5942 /* This value is determined during the probe time DMA
5943 * engine test, tg3_test_dma.
5944 */
5945 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5946
5947 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5948 GRC_MODE_4X_NIC_SEND_RINGS |
5949 GRC_MODE_NO_TX_PHDR_CSUM |
5950 GRC_MODE_NO_RX_PHDR_CSUM);
5951 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
d2d746f8
MC
5952
5953 /* Pseudo-header checksum is done by hardware logic and not
5954 * the offload processers, so make the chip do the pseudo-
5955 * header checksums on receive. For transmit it is more
5956 * convenient to do the pseudo-header checksum in software
5957 * as Linux does that on transmit for us in all cases.
5958 */
5959 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
1da177e4
LT
5960
5961 tw32(GRC_MODE,
5962 tp->grc_mode |
5963 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5964
5965 /* Setup the timer prescalar register. Clock is always 66Mhz. */
5966 val = tr32(GRC_MISC_CFG);
5967 val &= ~0xff;
5968 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5969 tw32(GRC_MISC_CFG, val);
5970
5971 /* Initialize MBUF/DESC pool. */
cbf46853 5972 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
1da177e4
LT
5973 /* Do nothing. */
5974 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5975 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5976 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5977 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5978 else
5979 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5980 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5981 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5982 }
5983#if TG3_TSO_SUPPORT != 0
5984 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5985 int fw_len;
5986
5987 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5988 TG3_TSO5_FW_RODATA_LEN +
5989 TG3_TSO5_FW_DATA_LEN +
5990 TG3_TSO5_FW_SBSS_LEN +
5991 TG3_TSO5_FW_BSS_LEN);
5992 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5993 tw32(BUFMGR_MB_POOL_ADDR,
5994 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5995 tw32(BUFMGR_MB_POOL_SIZE,
5996 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5997 }
5998#endif
5999
0f893dc6 6000 if (tp->dev->mtu <= ETH_DATA_LEN) {
1da177e4
LT
6001 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6002 tp->bufmgr_config.mbuf_read_dma_low_water);
6003 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6004 tp->bufmgr_config.mbuf_mac_rx_low_water);
6005 tw32(BUFMGR_MB_HIGH_WATER,
6006 tp->bufmgr_config.mbuf_high_water);
6007 } else {
6008 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6009 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6010 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6011 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6012 tw32(BUFMGR_MB_HIGH_WATER,
6013 tp->bufmgr_config.mbuf_high_water_jumbo);
6014 }
6015 tw32(BUFMGR_DMA_LOW_WATER,
6016 tp->bufmgr_config.dma_low_water);
6017 tw32(BUFMGR_DMA_HIGH_WATER,
6018 tp->bufmgr_config.dma_high_water);
6019
6020 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6021 for (i = 0; i < 2000; i++) {
6022 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6023 break;
6024 udelay(10);
6025 }
6026 if (i >= 2000) {
6027 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6028 tp->dev->name);
6029 return -ENODEV;
6030 }
6031
6032 /* Setup replenish threshold. */
f92905de
MC
6033 val = tp->rx_pending / 8;
6034 if (val == 0)
6035 val = 1;
6036 else if (val > tp->rx_std_max_post)
6037 val = tp->rx_std_max_post;
6038
6039 tw32(RCVBDI_STD_THRESH, val);
1da177e4
LT
6040
6041 /* Initialize TG3_BDINFO's at:
6042 * RCVDBDI_STD_BD: standard eth size rx ring
6043 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6044 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6045 *
6046 * like so:
6047 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6048 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6049 * ring attribute flags
6050 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6051 *
6052 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6053 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6054 *
6055 * The size of each ring is fixed in the firmware, but the location is
6056 * configurable.
6057 */
6058 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6059 ((u64) tp->rx_std_mapping >> 32));
6060 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6061 ((u64) tp->rx_std_mapping & 0xffffffff));
6062 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6063 NIC_SRAM_RX_BUFFER_DESC);
6064
6065 /* Don't even try to program the JUMBO/MINI buffer descriptor
6066 * configs on 5705.
6067 */
6068 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6069 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6070 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6071 } else {
6072 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6073 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6074
6075 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6076 BDINFO_FLAGS_DISABLED);
6077
6078 /* Setup replenish threshold. */
6079 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6080
0f893dc6 6081 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
6082 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6083 ((u64) tp->rx_jumbo_mapping >> 32));
6084 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6085 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6086 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6087 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6088 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6089 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6090 } else {
6091 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6092 BDINFO_FLAGS_DISABLED);
6093 }
6094
6095 }
6096
6097 /* There is only one send ring on 5705/5750, no need to explicitly
6098 * disable the others.
6099 */
6100 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6101 /* Clear out send RCB ring in SRAM. */
6102 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6103 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6104 BDINFO_FLAGS_DISABLED);
6105 }
6106
6107 tp->tx_prod = 0;
6108 tp->tx_cons = 0;
6109 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6110 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6111
6112 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6113 tp->tx_desc_mapping,
6114 (TG3_TX_RING_SIZE <<
6115 BDINFO_FLAGS_MAXLEN_SHIFT),
6116 NIC_SRAM_TX_BUFFER_DESC);
6117
6118 /* There is only one receive return ring on 5705/5750, no need
6119 * to explicitly disable the others.
6120 */
6121 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6122 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6123 i += TG3_BDINFO_SIZE) {
6124 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6125 BDINFO_FLAGS_DISABLED);
6126 }
6127 }
6128
6129 tp->rx_rcb_ptr = 0;
6130 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6131
6132 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6133 tp->rx_rcb_mapping,
6134 (TG3_RX_RCB_RING_SIZE(tp) <<
6135 BDINFO_FLAGS_MAXLEN_SHIFT),
6136 0);
6137
6138 tp->rx_std_ptr = tp->rx_pending;
6139 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6140 tp->rx_std_ptr);
6141
0f893dc6 6142 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
1da177e4
LT
6143 tp->rx_jumbo_pending : 0;
6144 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6145 tp->rx_jumbo_ptr);
6146
6147 /* Initialize MAC address and backoff seed. */
6148 __tg3_set_mac_addr(tp);
6149
6150 /* MTU + ethernet header + FCS + optional VLAN tag */
6151 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6152
6153 /* The slot time is changed by tg3_setup_phy if we
6154 * run at gigabit with half duplex.
6155 */
6156 tw32(MAC_TX_LENGTHS,
6157 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6158 (6 << TX_LENGTHS_IPG_SHIFT) |
6159 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6160
6161 /* Receive rules. */
6162 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6163 tw32(RCVLPC_CONFIG, 0x0181);
6164
6165 /* Calculate RDMAC_MODE setting early, we need it to determine
6166 * the RCVLPC_STATE_ENABLE mask.
6167 */
6168 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6169 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6170 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6171 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6172 RDMAC_MODE_LNGREAD_ENAB);
6173 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6174 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
85e94ced
MC
6175
6176 /* If statement applies to 5705 and 5750 PCI devices only */
6177 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6178 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6179 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
1da177e4
LT
6180 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6181 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6182 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6183 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6184 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6185 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6186 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6187 }
6188 }
6189
85e94ced
MC
6190 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6191 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6192
1da177e4
LT
6193#if TG3_TSO_SUPPORT != 0
6194 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6195 rdmac_mode |= (1 << 27);
6196#endif
6197
6198 /* Receive/send statistics. */
1661394e
MC
6199 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6200 val = tr32(RCVLPC_STATS_ENABLE);
6201 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6202 tw32(RCVLPC_STATS_ENABLE, val);
6203 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6204 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
1da177e4
LT
6205 val = tr32(RCVLPC_STATS_ENABLE);
6206 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6207 tw32(RCVLPC_STATS_ENABLE, val);
6208 } else {
6209 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6210 }
6211 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6212 tw32(SNDDATAI_STATSENAB, 0xffffff);
6213 tw32(SNDDATAI_STATSCTRL,
6214 (SNDDATAI_SCTRL_ENABLE |
6215 SNDDATAI_SCTRL_FASTUPD));
6216
6217 /* Setup host coalescing engine. */
6218 tw32(HOSTCC_MODE, 0);
6219 for (i = 0; i < 2000; i++) {
6220 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6221 break;
6222 udelay(10);
6223 }
6224
d244c892 6225 __tg3_set_coalesce(tp, &tp->coal);
1da177e4
LT
6226
6227 /* set status block DMA address */
6228 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6229 ((u64) tp->status_mapping >> 32));
6230 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6231 ((u64) tp->status_mapping & 0xffffffff));
6232
6233 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6234 /* Status/statistics block address. See tg3_timer,
6235 * the tg3_periodic_fetch_stats call there, and
6236 * tg3_get_stats to see how this works for 5705/5750 chips.
6237 */
1da177e4
LT
6238 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6239 ((u64) tp->stats_mapping >> 32));
6240 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6241 ((u64) tp->stats_mapping & 0xffffffff));
6242 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6243 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6244 }
6245
6246 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6247
6248 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6249 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6250 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6251 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6252
6253 /* Clear statistics/status block in chip, and status block in ram. */
6254 for (i = NIC_SRAM_STATS_BLK;
6255 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6256 i += sizeof(u32)) {
6257 tg3_write_mem(tp, i, 0);
6258 udelay(40);
6259 }
6260 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6261
c94e3941
MC
6262 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6263 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6264 /* reset to prevent losing 1st rx packet intermittently */
6265 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6266 udelay(10);
6267 }
6268
1da177e4
LT
6269 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6270 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6271 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6272 udelay(40);
6273
314fba34
MC
6274 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6275 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6276 * register to preserve the GPIO settings for LOMs. The GPIOs,
6277 * whether used as inputs or outputs, are set by boot code after
6278 * reset.
6279 */
6280 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6281 u32 gpio_mask;
6282
6283 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6284 GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
3e7d83bc
MC
6285
6286 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6287 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6288 GRC_LCLCTRL_GPIO_OUTPUT3;
6289
af36e6b6
MC
6290 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6291 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6292
314fba34
MC
6293 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6294
6295 /* GPIO1 must be driven high for eeprom write protect */
1da177e4
LT
6296 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6297 GRC_LCLCTRL_GPIO_OUTPUT1);
314fba34 6298 }
1da177e4
LT
6299 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6300 udelay(100);
6301
09ee929c 6302 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
fac9b83e 6303 tp->last_tag = 0;
1da177e4
LT
6304
6305 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6306 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6307 udelay(40);
6308 }
6309
6310 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6311 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6312 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6313 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6314 WDMAC_MODE_LNGREAD_ENAB);
6315
85e94ced
MC
6316 /* If statement applies to 5705 and 5750 PCI devices only */
6317 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6318 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6319 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1da177e4
LT
6320 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6321 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6322 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6323 /* nothing */
6324 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6325 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6326 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6327 val |= WDMAC_MODE_RX_ACCEL;
6328 }
6329 }
6330
d9ab5ad1 6331 /* Enable host coalescing bug fix */
af36e6b6
MC
6332 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6333 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
d9ab5ad1
MC
6334 val |= (1 << 29);
6335
1da177e4
LT
6336 tw32_f(WDMAC_MODE, val);
6337 udelay(40);
6338
6339 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6340 val = tr32(TG3PCI_X_CAPS);
6341 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6342 val &= ~PCIX_CAPS_BURST_MASK;
6343 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6344 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6345 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6346 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6347 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6348 val |= (tp->split_mode_max_reqs <<
6349 PCIX_CAPS_SPLIT_SHIFT);
6350 }
6351 tw32(TG3PCI_X_CAPS, val);
6352 }
6353
6354 tw32_f(RDMAC_MODE, rdmac_mode);
6355 udelay(40);
6356
6357 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6358 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6359 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6360 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6361 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6362 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6363 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6364 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6365#if TG3_TSO_SUPPORT != 0
6366 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6367 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6368#endif
6369 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6370 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6371
6372 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6373 err = tg3_load_5701_a0_firmware_fix(tp);
6374 if (err)
6375 return err;
6376 }
6377
6378#if TG3_TSO_SUPPORT != 0
6379 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6380 err = tg3_load_tso_firmware(tp);
6381 if (err)
6382 return err;
6383 }
6384#endif
6385
6386 tp->tx_mode = TX_MODE_ENABLE;
6387 tw32_f(MAC_TX_MODE, tp->tx_mode);
6388 udelay(100);
6389
6390 tp->rx_mode = RX_MODE_ENABLE;
af36e6b6
MC
6391 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6392 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6393
1da177e4
LT
6394 tw32_f(MAC_RX_MODE, tp->rx_mode);
6395 udelay(10);
6396
6397 if (tp->link_config.phy_is_low_power) {
6398 tp->link_config.phy_is_low_power = 0;
6399 tp->link_config.speed = tp->link_config.orig_speed;
6400 tp->link_config.duplex = tp->link_config.orig_duplex;
6401 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6402 }
6403
6404 tp->mi_mode = MAC_MI_MODE_BASE;
6405 tw32_f(MAC_MI_MODE, tp->mi_mode);
6406 udelay(80);
6407
6408 tw32(MAC_LED_CTRL, tp->led_ctrl);
6409
6410 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
c94e3941 6411 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1da177e4
LT
6412 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6413 udelay(10);
6414 }
6415 tw32_f(MAC_RX_MODE, tp->rx_mode);
6416 udelay(10);
6417
6418 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6419 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6420 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6421 /* Set drive transmission level to 1.2V */
6422 /* only if the signal pre-emphasis bit is not set */
6423 val = tr32(MAC_SERDES_CFG);
6424 val &= 0xfffff000;
6425 val |= 0x880;
6426 tw32(MAC_SERDES_CFG, val);
6427 }
6428 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6429 tw32(MAC_SERDES_CFG, 0x616000);
6430 }
6431
6432 /* Prevent chip from dropping frames when flow control
6433 * is enabled.
6434 */
6435 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6436
6437 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6438 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6439 /* Use hardware link auto-negotiation */
6440 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6441 }
6442
d4d2c558
MC
6443 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6444 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6445 u32 tmp;
6446
6447 tmp = tr32(SERDES_RX_CTRL);
6448 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6449 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6450 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6451 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6452 }
6453
8e7a22e3 6454 err = tg3_setup_phy(tp, reset_phy);
1da177e4
LT
6455 if (err)
6456 return err;
6457
6458 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6459 u32 tmp;
6460
6461 /* Clear CRC stats. */
6462 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6463 tg3_writephy(tp, 0x1e, tmp | 0x8000);
6464 tg3_readphy(tp, 0x14, &tmp);
6465 }
6466 }
6467
6468 __tg3_set_rx_mode(tp->dev);
6469
6470 /* Initialize receive rules. */
6471 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
6472 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6473 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
6474 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6475
4cf78e4f 6476 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
a4e2b347 6477 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
1da177e4
LT
6478 limit = 8;
6479 else
6480 limit = 16;
6481 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6482 limit -= 4;
6483 switch (limit) {
6484 case 16:
6485 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
6486 case 15:
6487 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
6488 case 14:
6489 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
6490 case 13:
6491 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
6492 case 12:
6493 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
6494 case 11:
6495 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
6496 case 10:
6497 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
6498 case 9:
6499 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
6500 case 8:
6501 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
6502 case 7:
6503 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
6504 case 6:
6505 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
6506 case 5:
6507 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
6508 case 4:
6509 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6510 case 3:
6511 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6512 case 2:
6513 case 1:
6514
6515 default:
6516 break;
6517 };
6518
6519 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6520
1da177e4
LT
6521 return 0;
6522}
6523
6524/* Called at device open time to get the chip ready for
6525 * packet processing. Invoked with tp->lock held.
6526 */
8e7a22e3 6527static int tg3_init_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
6528{
6529 int err;
6530
6531 /* Force the chip into D0. */
bc1c7567 6532 err = tg3_set_power_state(tp, PCI_D0);
1da177e4
LT
6533 if (err)
6534 goto out;
6535
6536 tg3_switch_clocks(tp);
6537
6538 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6539
8e7a22e3 6540 err = tg3_reset_hw(tp, reset_phy);
1da177e4
LT
6541
6542out:
6543 return err;
6544}
6545
/* Accumulate a 32-bit hardware statistics register (read via tr32)
 * into a 64-bit software counter kept as a high/low pair.  The
 * unsigned low-word addition wraps on overflow; when the new low
 * value is smaller than the value just added, a wrap occurred and
 * the carry is propagated into the high word.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
6552
/* Periodically (from tg3_timer, once per second) fold the chip's
 * 32-bit MAC and receive-list-placement statistics registers into
 * the 64-bit software counters in tp->hw_stats.  Skipped while the
 * link is down, since the counters cannot advance without carrier.
 * Caller holds tp->lock.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side MAC counters. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side MAC counters. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	/* Receive list placement discard/error counters. */
	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
6593
/* Driver watchdog timer, rearmed every tp->timer_offset jiffies.
 * Responsibilities: kick the interrupt path when using the racy
 * non-tagged status protocol, detect a stopped write-DMA engine and
 * schedule a full chip reset, poll link state once per second, fetch
 * hardware statistics on 5705+ chips, and send the ASF firmware
 * heartbeat every two seconds.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Interrupt handling is being synchronized/disabled elsewhere;
	 * do no work this tick, just rearm.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Status block pending: force an interrupt so it
			 * gets serviced.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Nothing pending: ask the coalescing engine to
			 * update the status block now.
			 */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* If the write-DMA engine died, schedule a chip reset
		 * from process context and bail out without rearming;
		 * the reset task restarts the timer.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			/* Poll MAC_STATUS for link/PHY change events. */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Carrier up but link state changed, or carrier
			 * down while the SERDES reports sync/signal:
			 * renegotiate.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the port mode before rerunning
				 * PHY setup.
				 */
				tw32_f(MAC_MODE,
				     (tp->mac_mode &
				      ~MAC_MODE_PORT_MODE_MASK));
				udelay(40);
				tw32_f(MAC_MODE, tp->mac_mode);
				udelay(40);
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds. */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			/* Tell the ASF firmware the driver is alive. */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE2);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			/* Raise the RX CPU event to deliver the command. */
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
6695
81789ef5 6696static int tg3_request_irq(struct tg3 *tp)
fcfa0a32
MC
6697{
6698 irqreturn_t (*fn)(int, void *, struct pt_regs *);
6699 unsigned long flags;
6700 struct net_device *dev = tp->dev;
6701
6702 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6703 fn = tg3_msi;
6704 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6705 fn = tg3_msi_1shot;
6706 flags = SA_SAMPLE_RANDOM;
6707 } else {
6708 fn = tg3_interrupt;
6709 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6710 fn = tg3_interrupt_tagged;
6711 flags = SA_SHIRQ | SA_SAMPLE_RANDOM;
6712 }
6713 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6714}
6715
7938109f
MC
6716static int tg3_test_interrupt(struct tg3 *tp)
6717{
6718 struct net_device *dev = tp->dev;
6719 int err, i;
6720 u32 int_mbox = 0;
6721
d4bc3927
MC
6722 if (!netif_running(dev))
6723 return -ENODEV;
6724
7938109f
MC
6725 tg3_disable_ints(tp);
6726
6727 free_irq(tp->pdev->irq, dev);
6728
6729 err = request_irq(tp->pdev->irq, tg3_test_isr,
f4d0ee98 6730 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
7938109f
MC
6731 if (err)
6732 return err;
6733
38f3843e 6734 tp->hw_status->status &= ~SD_STATUS_UPDATED;
7938109f
MC
6735 tg3_enable_ints(tp);
6736
6737 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6738 HOSTCC_MODE_NOW);
6739
6740 for (i = 0; i < 5; i++) {
09ee929c
MC
6741 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6742 TG3_64BIT_REG_LOW);
7938109f
MC
6743 if (int_mbox != 0)
6744 break;
6745 msleep(10);
6746 }
6747
6748 tg3_disable_ints(tp);
6749
6750 free_irq(tp->pdev->irq, dev);
6751
fcfa0a32 6752 err = tg3_request_irq(tp);
7938109f
MC
6753
6754 if (err)
6755 return err;
6756
6757 if (int_mbox != 0)
6758 return 0;
6759
6760 return -EIO;
6761}
6762
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test when MSI is not in use. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the saved PCI command word (re-enables SERR). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
	       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	/* Reinstall the handler in INTx mode. */
	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
6823
1da177e4
LT
6824static int tg3_open(struct net_device *dev)
6825{
6826 struct tg3 *tp = netdev_priv(dev);
6827 int err;
6828
f47c11ee 6829 tg3_full_lock(tp, 0);
1da177e4 6830
bc1c7567
MC
6831 err = tg3_set_power_state(tp, PCI_D0);
6832 if (err)
6833 return err;
6834
1da177e4
LT
6835 tg3_disable_ints(tp);
6836 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6837
f47c11ee 6838 tg3_full_unlock(tp);
1da177e4
LT
6839
6840 /* The placement of this call is tied
6841 * to the setup and use of Host TX descriptors.
6842 */
6843 err = tg3_alloc_consistent(tp);
6844 if (err)
6845 return err;
6846
88b06bc2
MC
6847 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6848 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
d4d2c558
MC
6849 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6850 !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6851 (tp->pdev_peer == tp->pdev))) {
fac9b83e
DM
6852 /* All MSI supporting chips should support tagged
6853 * status. Assert that this is the case.
6854 */
6855 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6856 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6857 "Not using MSI.\n", tp->dev->name);
6858 } else if (pci_enable_msi(tp->pdev) == 0) {
88b06bc2
MC
6859 u32 msi_mode;
6860
6861 msi_mode = tr32(MSGINT_MODE);
6862 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6863 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6864 }
6865 }
fcfa0a32 6866 err = tg3_request_irq(tp);
1da177e4
LT
6867
6868 if (err) {
88b06bc2
MC
6869 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6870 pci_disable_msi(tp->pdev);
6871 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6872 }
1da177e4
LT
6873 tg3_free_consistent(tp);
6874 return err;
6875 }
6876
f47c11ee 6877 tg3_full_lock(tp, 0);
1da177e4 6878
8e7a22e3 6879 err = tg3_init_hw(tp, 1);
1da177e4 6880 if (err) {
944d980e 6881 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
6882 tg3_free_rings(tp);
6883 } else {
fac9b83e
DM
6884 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6885 tp->timer_offset = HZ;
6886 else
6887 tp->timer_offset = HZ / 10;
6888
6889 BUG_ON(tp->timer_offset > HZ);
6890 tp->timer_counter = tp->timer_multiplier =
6891 (HZ / tp->timer_offset);
6892 tp->asf_counter = tp->asf_multiplier =
28fbef78 6893 ((HZ / tp->timer_offset) * 2);
1da177e4
LT
6894
6895 init_timer(&tp->timer);
6896 tp->timer.expires = jiffies + tp->timer_offset;
6897 tp->timer.data = (unsigned long) tp;
6898 tp->timer.function = tg3_timer;
1da177e4
LT
6899 }
6900
f47c11ee 6901 tg3_full_unlock(tp);
1da177e4
LT
6902
6903 if (err) {
88b06bc2
MC
6904 free_irq(tp->pdev->irq, dev);
6905 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6906 pci_disable_msi(tp->pdev);
6907 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6908 }
1da177e4
LT
6909 tg3_free_consistent(tp);
6910 return err;
6911 }
6912
7938109f
MC
6913 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6914 err = tg3_test_msi(tp);
fac9b83e 6915
7938109f 6916 if (err) {
f47c11ee 6917 tg3_full_lock(tp, 0);
7938109f
MC
6918
6919 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6920 pci_disable_msi(tp->pdev);
6921 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6922 }
944d980e 6923 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7938109f
MC
6924 tg3_free_rings(tp);
6925 tg3_free_consistent(tp);
6926
f47c11ee 6927 tg3_full_unlock(tp);
7938109f
MC
6928
6929 return err;
6930 }
fcfa0a32
MC
6931
6932 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6933 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6934 u32 val = tr32(0x7c04);
6935
6936 tw32(0x7c04, val | (1 << 29));
6937 }
6938 }
7938109f
MC
6939 }
6940
f47c11ee 6941 tg3_full_lock(tp, 0);
1da177e4 6942
7938109f
MC
6943 add_timer(&tp->timer);
6944 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
1da177e4
LT
6945 tg3_enable_ints(tp);
6946
f47c11ee 6947 tg3_full_unlock(tp);
1da177e4
LT
6948
6949 netif_start_queue(dev);
6950
6951 return 0;
6952}
6953
#if 0
/* Debug-only register/descriptor dump, compiled out by default.
 * Intended to be called (e.g. from tg3_close) with the chip quiesced;
 * reads and printk()s every major block's MODE/STATUS registers, the
 * BDINFO structures, on-chip SRAM control blocks, the host status and
 * statistics blocks, and the first few NIC-side TX/RX descriptors.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	/* On-chip SRAM ring control blocks. */
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
7181
7182static struct net_device_stats *tg3_get_stats(struct net_device *);
7183static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7184
/* net_device stop handler: wait out any in-flight reset task, stop
 * the TX queue and watchdog timer, halt and reset the chip, release
 * the IRQ (and MSI vector), snapshot the final statistics so they
 * survive the ring teardown, free all DMA memory, and power the
 * device down to D3hot.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
		msleep(1);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &=
		~(TG3_FLAG_INIT_COMPLETE |
		  TG3_FLAG_GOT_SERDES_FLOWCTL);

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Preserve cumulative statistics across close/open; the hw
	 * stats block is about to be freed.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7234
7235static inline unsigned long get_stat64(tg3_stat64_t *val)
7236{
7237 unsigned long ret;
7238
7239#if (BITS_PER_LONG == 32)
7240 ret = val->low;
7241#else
7242 ret = ((u64)val->high << 32) | ((u64)val->low);
7243#endif
7244 return ret;
7245}
7246
/* Return the cumulative RX CRC error count.  On 5700/5701 with a
 * copper PHY the counter lives in the PHY rather than the MAC stats
 * block: reg 0x1e is read and written back with bit 15 set
 * (NOTE(review): appears to enable shadow/expansion access — confirm
 * against the PHY datasheet) and the count is then read from reg
 * 0x14 and accumulated in tp->phy_crc_errors.  All other chips use
 * the hardware rx_fcs_errors statistic.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, 0x1e, &val)) {
			tg3_writephy(tp, 0x1e, val | 0x8000);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;	/* PHY read failed; add nothing */
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7271
/* estats->member = saved count from before the last close/reset
 * (old_estats) plus the live 64-bit hardware counter, so ethtool
 * statistics stay cumulative across device restarts.
 */
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
		get_stat64(&hw_stats->member)
7275
/* Build the ethtool statistics snapshot in tp->estats by adding the
 * live hardware counters onto the totals saved at the last close
 * (tp->estats_prev).  If the hw stats block is not allocated (device
 * closed), return the saved totals unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	/* Receive-side counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit-side counters. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* Internal DMA / receive-list-placement counters. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	/* Host coalescing / interrupt bookkeeping counters. */
	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7363
/* net_device get_stats handler: map the chip's detailed hardware
 * counters onto the generic struct net_device_stats fields, adding
 * them to the totals saved at the last close (tp->net_stats_prev) so
 * counts stay cumulative across restarts.  If the hw stats block is
 * not allocated (device closed), return the saved totals.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* Packet totals are the sum of the unicast/multicast/broadcast
	 * hardware counters.
	 */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on some chips. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
7423
7424static inline u32 calc_crc(unsigned char *buf, int len)
7425{
7426 u32 reg;
7427 u32 tmp;
7428 int j, k;
7429
7430 reg = 0xffffffff;
7431
7432 for (j = 0; j < len; j++) {
7433 reg ^= buf[j];
7434
7435 for (k = 0; k < 8; k++) {
7436 tmp = reg & 0x01;
7437
7438 reg >>= 1;
7439
7440 if (tmp) {
7441 reg ^= 0xedb88320;
7442 }
7443 }
7444 }
7445
7446 return ~reg;
7447}
7448
7449static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7450{
7451 /* accept or reject all multicast frames */
7452 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7453 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7454 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7455 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7456}
7457
7458static void __tg3_set_rx_mode(struct net_device *dev)
7459{
7460 struct tg3 *tp = netdev_priv(dev);
7461 u32 rx_mode;
7462
7463 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7464 RX_MODE_KEEP_VLAN_TAG);
7465
7466 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7467 * flag clear.
7468 */
7469#if TG3_VLAN_TAG_USED
7470 if (!tp->vlgrp &&
7471 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7472 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7473#else
7474 /* By definition, VLAN is disabled always in this
7475 * case.
7476 */
7477 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7478 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7479#endif
7480
7481 if (dev->flags & IFF_PROMISC) {
7482 /* Promiscuous mode. */
7483 rx_mode |= RX_MODE_PROMISC;
7484 } else if (dev->flags & IFF_ALLMULTI) {
7485 /* Accept all multicast. */
7486 tg3_set_multi (tp, 1);
7487 } else if (dev->mc_count < 1) {
7488 /* Reject all multicast. */
7489 tg3_set_multi (tp, 0);
7490 } else {
7491 /* Accept one or more multicast(s). */
7492 struct dev_mc_list *mclist;
7493 unsigned int i;
7494 u32 mc_filter[4] = { 0, };
7495 u32 regidx;
7496 u32 bit;
7497 u32 crc;
7498
7499 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7500 i++, mclist = mclist->next) {
7501
7502 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7503 bit = ~crc & 0x7f;
7504 regidx = (bit & 0x60) >> 5;
7505 bit &= 0x1f;
7506 mc_filter[regidx] |= (1 << bit);
7507 }
7508
7509 tw32(MAC_HASH_REG_0, mc_filter[0]);
7510 tw32(MAC_HASH_REG_1, mc_filter[1]);
7511 tw32(MAC_HASH_REG_2, mc_filter[2]);
7512 tw32(MAC_HASH_REG_3, mc_filter[3]);
7513 }
7514
7515 if (rx_mode != tp->rx_mode) {
7516 tp->rx_mode = rx_mode;
7517 tw32_f(MAC_RX_MODE, rx_mode);
7518 udelay(10);
7519 }
7520}
7521
/* net_device set_rx_mode hook: take the full driver lock and apply
 * the current RX filtering configuration.  No-op if the interface
 * is not up.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
7533
7534#define TG3_REGDUMP_LEN (32 * 1024)
7535
7536static int tg3_get_regs_len(struct net_device *dev)
7537{
7538 return TG3_REGDUMP_LEN;
7539}
7540
7541static void tg3_get_regs(struct net_device *dev,
7542 struct ethtool_regs *regs, void *_p)
7543{
7544 u32 *p = _p;
7545 struct tg3 *tp = netdev_priv(dev);
7546 u8 *orig_p = _p;
7547 int i;
7548
7549 regs->version = 0;
7550
7551 memset(p, 0, TG3_REGDUMP_LEN);
7552
bc1c7567
MC
7553 if (tp->link_config.phy_is_low_power)
7554 return;
7555
f47c11ee 7556 tg3_full_lock(tp, 0);
1da177e4
LT
7557
7558#define __GET_REG32(reg) (*(p)++ = tr32(reg))
7559#define GET_REG32_LOOP(base,len) \
7560do { p = (u32 *)(orig_p + (base)); \
7561 for (i = 0; i < len; i += 4) \
7562 __GET_REG32((base) + i); \
7563} while (0)
7564#define GET_REG32_1(reg) \
7565do { p = (u32 *)(orig_p + (reg)); \
7566 __GET_REG32((reg)); \
7567} while (0)
7568
7569 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7570 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7571 GET_REG32_LOOP(MAC_MODE, 0x4f0);
7572 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7573 GET_REG32_1(SNDDATAC_MODE);
7574 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7575 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7576 GET_REG32_1(SNDBDC_MODE);
7577 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7578 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7579 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7580 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7581 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7582 GET_REG32_1(RCVDCC_MODE);
7583 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7584 GET_REG32_LOOP(RCVCC_MODE, 0x14);
7585 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7586 GET_REG32_1(MBFREE_MODE);
7587 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7588 GET_REG32_LOOP(MEMARB_MODE, 0x10);
7589 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7590 GET_REG32_LOOP(RDMAC_MODE, 0x08);
7591 GET_REG32_LOOP(WDMAC_MODE, 0x08);
091465d7
CE
7592 GET_REG32_1(RX_CPU_MODE);
7593 GET_REG32_1(RX_CPU_STATE);
7594 GET_REG32_1(RX_CPU_PGMCTR);
7595 GET_REG32_1(RX_CPU_HWBKPT);
7596 GET_REG32_1(TX_CPU_MODE);
7597 GET_REG32_1(TX_CPU_STATE);
7598 GET_REG32_1(TX_CPU_PGMCTR);
1da177e4
LT
7599 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7600 GET_REG32_LOOP(FTQ_RESET, 0x120);
7601 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7602 GET_REG32_1(DMAC_MODE);
7603 GET_REG32_LOOP(GRC_MODE, 0x4c);
7604 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7605 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7606
7607#undef __GET_REG32
7608#undef GET_REG32_LOOP
7609#undef GET_REG32_1
7610
f47c11ee 7611 tg3_full_unlock(tp);
1da177e4
LT
7612}
7613
7614static int tg3_get_eeprom_len(struct net_device *dev)
7615{
7616 struct tg3 *tp = netdev_priv(dev);
7617
7618 return tp->nvram_size;
7619}
7620
7621static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
1820180b 7622static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
1da177e4
LT
7623
7624static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7625{
7626 struct tg3 *tp = netdev_priv(dev);
7627 int ret;
7628 u8 *pd;
7629 u32 i, offset, len, val, b_offset, b_count;
7630
bc1c7567
MC
7631 if (tp->link_config.phy_is_low_power)
7632 return -EAGAIN;
7633
1da177e4
LT
7634 offset = eeprom->offset;
7635 len = eeprom->len;
7636 eeprom->len = 0;
7637
7638 eeprom->magic = TG3_EEPROM_MAGIC;
7639
7640 if (offset & 3) {
7641 /* adjustments to start on required 4 byte boundary */
7642 b_offset = offset & 3;
7643 b_count = 4 - b_offset;
7644 if (b_count > len) {
7645 /* i.e. offset=1 len=2 */
7646 b_count = len;
7647 }
7648 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7649 if (ret)
7650 return ret;
7651 val = cpu_to_le32(val);
7652 memcpy(data, ((char*)&val) + b_offset, b_count);
7653 len -= b_count;
7654 offset += b_count;
7655 eeprom->len += b_count;
7656 }
7657
7658 /* read bytes upto the last 4 byte boundary */
7659 pd = &data[eeprom->len];
7660 for (i = 0; i < (len - (len & 3)); i += 4) {
7661 ret = tg3_nvram_read(tp, offset + i, &val);
7662 if (ret) {
7663 eeprom->len += i;
7664 return ret;
7665 }
7666 val = cpu_to_le32(val);
7667 memcpy(pd + i, &val, 4);
7668 }
7669 eeprom->len += i;
7670
7671 if (len & 3) {
7672 /* read last bytes not ending on 4 byte boundary */
7673 pd = &data[eeprom->len];
7674 b_count = len & 3;
7675 b_offset = offset + len - b_count;
7676 ret = tg3_nvram_read(tp, b_offset, &val);
7677 if (ret)
7678 return ret;
7679 val = cpu_to_le32(val);
7680 memcpy(pd, ((char*)&val), b_count);
7681 eeprom->len += b_count;
7682 }
7683 return 0;
7684}
7685
7686static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7687
7688static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7689{
7690 struct tg3 *tp = netdev_priv(dev);
7691 int ret;
7692 u32 offset, len, b_offset, odd_len, start, end;
7693 u8 *buf;
7694
bc1c7567
MC
7695 if (tp->link_config.phy_is_low_power)
7696 return -EAGAIN;
7697
1da177e4
LT
7698 if (eeprom->magic != TG3_EEPROM_MAGIC)
7699 return -EINVAL;
7700
7701 offset = eeprom->offset;
7702 len = eeprom->len;
7703
7704 if ((b_offset = (offset & 3))) {
7705 /* adjustments to start on required 4 byte boundary */
7706 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7707 if (ret)
7708 return ret;
7709 start = cpu_to_le32(start);
7710 len += b_offset;
7711 offset &= ~3;
1c8594b4
MC
7712 if (len < 4)
7713 len = 4;
1da177e4
LT
7714 }
7715
7716 odd_len = 0;
1c8594b4 7717 if (len & 3) {
1da177e4
LT
7718 /* adjustments to end on required 4 byte boundary */
7719 odd_len = 1;
7720 len = (len + 3) & ~3;
7721 ret = tg3_nvram_read(tp, offset+len-4, &end);
7722 if (ret)
7723 return ret;
7724 end = cpu_to_le32(end);
7725 }
7726
7727 buf = data;
7728 if (b_offset || odd_len) {
7729 buf = kmalloc(len, GFP_KERNEL);
7730 if (buf == 0)
7731 return -ENOMEM;
7732 if (b_offset)
7733 memcpy(buf, &start, 4);
7734 if (odd_len)
7735 memcpy(buf+len-4, &end, 4);
7736 memcpy(buf + b_offset, data, eeprom->len);
7737 }
7738
7739 ret = tg3_nvram_write_block(tp, offset, len, buf);
7740
7741 if (buf != data)
7742 kfree(buf);
7743
7744 return ret;
7745}
7746
7747static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7748{
7749 struct tg3 *tp = netdev_priv(dev);
7750
7751 cmd->supported = (SUPPORTED_Autoneg);
7752
7753 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7754 cmd->supported |= (SUPPORTED_1000baseT_Half |
7755 SUPPORTED_1000baseT_Full);
7756
ef348144 7757 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1da177e4
LT
7758 cmd->supported |= (SUPPORTED_100baseT_Half |
7759 SUPPORTED_100baseT_Full |
7760 SUPPORTED_10baseT_Half |
7761 SUPPORTED_10baseT_Full |
7762 SUPPORTED_MII);
ef348144
KK
7763 cmd->port = PORT_TP;
7764 } else {
1da177e4 7765 cmd->supported |= SUPPORTED_FIBRE;
ef348144
KK
7766 cmd->port = PORT_FIBRE;
7767 }
1da177e4
LT
7768
7769 cmd->advertising = tp->link_config.advertising;
7770 if (netif_running(dev)) {
7771 cmd->speed = tp->link_config.active_speed;
7772 cmd->duplex = tp->link_config.active_duplex;
7773 }
1da177e4
LT
7774 cmd->phy_address = PHY_ADDR;
7775 cmd->transceiver = 0;
7776 cmd->autoneg = tp->link_config.autoneg;
7777 cmd->maxtxpkt = 0;
7778 cmd->maxrxpkt = 0;
7779 return 0;
7780}
7781
7782static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7783{
7784 struct tg3 *tp = netdev_priv(dev);
7785
37ff238d 7786 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
1da177e4
LT
7787 /* These are the only valid advertisement bits allowed. */
7788 if (cmd->autoneg == AUTONEG_ENABLE &&
7789 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7790 ADVERTISED_1000baseT_Full |
7791 ADVERTISED_Autoneg |
7792 ADVERTISED_FIBRE)))
7793 return -EINVAL;
37ff238d
MC
7794 /* Fiber can only do SPEED_1000. */
7795 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7796 (cmd->speed != SPEED_1000))
7797 return -EINVAL;
7798 /* Copper cannot force SPEED_1000. */
7799 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7800 (cmd->speed == SPEED_1000))
7801 return -EINVAL;
7802 else if ((cmd->speed == SPEED_1000) &&
7803 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7804 return -EINVAL;
1da177e4 7805
f47c11ee 7806 tg3_full_lock(tp, 0);
1da177e4
LT
7807
7808 tp->link_config.autoneg = cmd->autoneg;
7809 if (cmd->autoneg == AUTONEG_ENABLE) {
7810 tp->link_config.advertising = cmd->advertising;
7811 tp->link_config.speed = SPEED_INVALID;
7812 tp->link_config.duplex = DUPLEX_INVALID;
7813 } else {
7814 tp->link_config.advertising = 0;
7815 tp->link_config.speed = cmd->speed;
7816 tp->link_config.duplex = cmd->duplex;
7817 }
7818
7819 if (netif_running(dev))
7820 tg3_setup_phy(tp, 1);
7821
f47c11ee 7822 tg3_full_unlock(tp);
1da177e4
LT
7823
7824 return 0;
7825}
7826
7827static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7828{
7829 struct tg3 *tp = netdev_priv(dev);
7830
7831 strcpy(info->driver, DRV_MODULE_NAME);
7832 strcpy(info->version, DRV_MODULE_VERSION);
c4e6575c 7833 strcpy(info->fw_version, tp->fw_ver);
1da177e4
LT
7834 strcpy(info->bus_info, pci_name(tp->pdev));
7835}
7836
7837static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7838{
7839 struct tg3 *tp = netdev_priv(dev);
7840
7841 wol->supported = WAKE_MAGIC;
7842 wol->wolopts = 0;
7843 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7844 wol->wolopts = WAKE_MAGIC;
7845 memset(&wol->sopass, 0, sizeof(wol->sopass));
7846}
7847
7848static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7849{
7850 struct tg3 *tp = netdev_priv(dev);
7851
7852 if (wol->wolopts & ~WAKE_MAGIC)
7853 return -EINVAL;
7854 if ((wol->wolopts & WAKE_MAGIC) &&
7855 tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7856 !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7857 return -EINVAL;
7858
f47c11ee 7859 spin_lock_bh(&tp->lock);
1da177e4
LT
7860 if (wol->wolopts & WAKE_MAGIC)
7861 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7862 else
7863 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
f47c11ee 7864 spin_unlock_bh(&tp->lock);
1da177e4
LT
7865
7866 return 0;
7867}
7868
7869static u32 tg3_get_msglevel(struct net_device *dev)
7870{
7871 struct tg3 *tp = netdev_priv(dev);
7872 return tp->msg_enable;
7873}
7874
7875static void tg3_set_msglevel(struct net_device *dev, u32 value)
7876{
7877 struct tg3 *tp = netdev_priv(dev);
7878 tp->msg_enable = value;
7879}
7880
#if TG3_TSO_SUPPORT != 0
/* ethtool set_tso hook: chips without TSO capability may only have
 * TSO turned off; everything else is delegated to the generic helper.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE))
		return value ? -EINVAL : 0;

	return ethtool_op_set_tso(dev, value);
}
#endif
7894
7895static int tg3_nway_reset(struct net_device *dev)
7896{
7897 struct tg3 *tp = netdev_priv(dev);
7898 u32 bmcr;
7899 int r;
7900
7901 if (!netif_running(dev))
7902 return -EAGAIN;
7903
c94e3941
MC
7904 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7905 return -EINVAL;
7906
f47c11ee 7907 spin_lock_bh(&tp->lock);
1da177e4
LT
7908 r = -EINVAL;
7909 tg3_readphy(tp, MII_BMCR, &bmcr);
7910 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
c94e3941
MC
7911 ((bmcr & BMCR_ANENABLE) ||
7912 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7913 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7914 BMCR_ANENABLE);
1da177e4
LT
7915 r = 0;
7916 }
f47c11ee 7917 spin_unlock_bh(&tp->lock);
1da177e4
LT
7918
7919 return r;
7920}
7921
7922static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7923{
7924 struct tg3 *tp = netdev_priv(dev);
7925
7926 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7927 ering->rx_mini_max_pending = 0;
4f81c32b
MC
7928 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7929 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7930 else
7931 ering->rx_jumbo_max_pending = 0;
7932
7933 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
1da177e4
LT
7934
7935 ering->rx_pending = tp->rx_pending;
7936 ering->rx_mini_pending = 0;
4f81c32b
MC
7937 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7938 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7939 else
7940 ering->rx_jumbo_pending = 0;
7941
1da177e4
LT
7942 ering->tx_pending = tp->tx_pending;
7943}
7944
7945static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7946{
7947 struct tg3 *tp = netdev_priv(dev);
bbe832c0 7948 int irq_sync = 0;
1da177e4
LT
7949
7950 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7951 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7952 (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7953 return -EINVAL;
7954
bbe832c0 7955 if (netif_running(dev)) {
1da177e4 7956 tg3_netif_stop(tp);
bbe832c0
MC
7957 irq_sync = 1;
7958 }
1da177e4 7959
bbe832c0 7960 tg3_full_lock(tp, irq_sync);
1da177e4
LT
7961
7962 tp->rx_pending = ering->rx_pending;
7963
7964 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7965 tp->rx_pending > 63)
7966 tp->rx_pending = 63;
7967 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7968 tp->tx_pending = ering->tx_pending;
7969
7970 if (netif_running(dev)) {
944d980e 7971 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8e7a22e3 7972 tg3_init_hw(tp, 1);
1da177e4
LT
7973 tg3_netif_start(tp);
7974 }
7975
f47c11ee 7976 tg3_full_unlock(tp);
1da177e4
LT
7977
7978 return 0;
7979}
7980
7981static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7982{
7983 struct tg3 *tp = netdev_priv(dev);
7984
7985 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7986 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7987 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7988}
7989
7990static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7991{
7992 struct tg3 *tp = netdev_priv(dev);
bbe832c0 7993 int irq_sync = 0;
1da177e4 7994
bbe832c0 7995 if (netif_running(dev)) {
1da177e4 7996 tg3_netif_stop(tp);
bbe832c0
MC
7997 irq_sync = 1;
7998 }
1da177e4 7999
bbe832c0 8000 tg3_full_lock(tp, irq_sync);
f47c11ee 8001
1da177e4
LT
8002 if (epause->autoneg)
8003 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8004 else
8005 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8006 if (epause->rx_pause)
8007 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8008 else
8009 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8010 if (epause->tx_pause)
8011 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8012 else
8013 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8014
8015 if (netif_running(dev)) {
944d980e 8016 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8e7a22e3 8017 tg3_init_hw(tp, 1);
1da177e4
LT
8018 tg3_netif_start(tp);
8019 }
f47c11ee
DM
8020
8021 tg3_full_unlock(tp);
1da177e4
LT
8022
8023 return 0;
8024}
8025
8026static u32 tg3_get_rx_csum(struct net_device *dev)
8027{
8028 struct tg3 *tp = netdev_priv(dev);
8029 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8030}
8031
8032static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8033{
8034 struct tg3 *tp = netdev_priv(dev);
8035
8036 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8037 if (data != 0)
8038 return -EINVAL;
8039 return 0;
8040 }
8041
f47c11ee 8042 spin_lock_bh(&tp->lock);
1da177e4
LT
8043 if (data)
8044 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8045 else
8046 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
f47c11ee 8047 spin_unlock_bh(&tp->lock);
1da177e4
LT
8048
8049 return 0;
8050}
8051
8052static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8053{
8054 struct tg3 *tp = netdev_priv(dev);
8055
8056 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8057 if (data != 0)
8058 return -EINVAL;
8059 return 0;
8060 }
8061
af36e6b6
MC
8062 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8063 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9c27dbdf 8064 ethtool_op_set_tx_hw_csum(dev, data);
1da177e4 8065 else
9c27dbdf 8066 ethtool_op_set_tx_csum(dev, data);
1da177e4
LT
8067
8068 return 0;
8069}
8070
8071static int tg3_get_stats_count (struct net_device *dev)
8072{
8073 return TG3_NUM_STATS;
8074}
8075
4cafd3f5
MC
8076static int tg3_get_test_count (struct net_device *dev)
8077{
8078 return TG3_NUM_TEST;
8079}
8080
1da177e4
LT
8081static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8082{
8083 switch (stringset) {
8084 case ETH_SS_STATS:
8085 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8086 break;
4cafd3f5
MC
8087 case ETH_SS_TEST:
8088 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8089 break;
1da177e4
LT
8090 default:
8091 WARN_ON(1); /* we need a WARN() */
8092 break;
8093 }
8094}
8095
4009a93d
MC
8096static int tg3_phys_id(struct net_device *dev, u32 data)
8097{
8098 struct tg3 *tp = netdev_priv(dev);
8099 int i;
8100
8101 if (!netif_running(tp->dev))
8102 return -EAGAIN;
8103
8104 if (data == 0)
8105 data = 2;
8106
8107 for (i = 0; i < (data * 2); i++) {
8108 if ((i % 2) == 0)
8109 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8110 LED_CTRL_1000MBPS_ON |
8111 LED_CTRL_100MBPS_ON |
8112 LED_CTRL_10MBPS_ON |
8113 LED_CTRL_TRAFFIC_OVERRIDE |
8114 LED_CTRL_TRAFFIC_BLINK |
8115 LED_CTRL_TRAFFIC_LED);
8116
8117 else
8118 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8119 LED_CTRL_TRAFFIC_OVERRIDE);
8120
8121 if (msleep_interruptible(500))
8122 break;
8123 }
8124 tw32(MAC_LED_CTRL, tp->led_ctrl);
8125 return 0;
8126}
8127
1da177e4
LT
8128static void tg3_get_ethtool_stats (struct net_device *dev,
8129 struct ethtool_stats *estats, u64 *tmp_stats)
8130{
8131 struct tg3 *tp = netdev_priv(dev);
8132 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8133}
8134
566f86ad 8135#define NVRAM_TEST_SIZE 0x100
1b27777a 8136#define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
566f86ad
MC
8137
8138static int tg3_test_nvram(struct tg3 *tp)
8139{
1b27777a
MC
8140 u32 *buf, csum, magic;
8141 int i, j, err = 0, size;
566f86ad 8142
1820180b 8143 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
1b27777a
MC
8144 return -EIO;
8145
1b27777a
MC
8146 if (magic == TG3_EEPROM_MAGIC)
8147 size = NVRAM_TEST_SIZE;
8148 else if ((magic & 0xff000000) == 0xa5000000) {
8149 if ((magic & 0xe00000) == 0x200000)
8150 size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8151 else
8152 return 0;
8153 } else
8154 return -EIO;
8155
8156 buf = kmalloc(size, GFP_KERNEL);
566f86ad
MC
8157 if (buf == NULL)
8158 return -ENOMEM;
8159
1b27777a
MC
8160 err = -EIO;
8161 for (i = 0, j = 0; i < size; i += 4, j++) {
566f86ad
MC
8162 u32 val;
8163
8164 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8165 break;
8166 buf[j] = cpu_to_le32(val);
8167 }
1b27777a 8168 if (i < size)
566f86ad
MC
8169 goto out;
8170
1b27777a
MC
8171 /* Selfboot format */
8172 if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
8173 u8 *buf8 = (u8 *) buf, csum8 = 0;
8174
8175 for (i = 0; i < size; i++)
8176 csum8 += buf8[i];
8177
ad96b485
AB
8178 if (csum8 == 0) {
8179 err = 0;
8180 goto out;
8181 }
8182
8183 err = -EIO;
8184 goto out;
1b27777a 8185 }
566f86ad
MC
8186
8187 /* Bootstrap checksum at offset 0x10 */
8188 csum = calc_crc((unsigned char *) buf, 0x10);
8189 if(csum != cpu_to_le32(buf[0x10/4]))
8190 goto out;
8191
8192 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8193 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8194 if (csum != cpu_to_le32(buf[0xfc/4]))
8195 goto out;
8196
8197 err = 0;
8198
8199out:
8200 kfree(buf);
8201 return err;
8202}
8203
ca43007a
MC
8204#define TG3_SERDES_TIMEOUT_SEC 2
8205#define TG3_COPPER_TIMEOUT_SEC 6
8206
8207static int tg3_test_link(struct tg3 *tp)
8208{
8209 int i, max;
8210
8211 if (!netif_running(tp->dev))
8212 return -ENODEV;
8213
4c987487 8214 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
ca43007a
MC
8215 max = TG3_SERDES_TIMEOUT_SEC;
8216 else
8217 max = TG3_COPPER_TIMEOUT_SEC;
8218
8219 for (i = 0; i < max; i++) {
8220 if (netif_carrier_ok(tp->dev))
8221 return 0;
8222
8223 if (msleep_interruptible(1000))
8224 break;
8225 }
8226
8227 return -EIO;
8228}
8229
a71116d1 8230/* Only test the commonly used registers */
30ca3e37 8231static int tg3_test_registers(struct tg3 *tp)
a71116d1
MC
8232{
8233 int i, is_5705;
8234 u32 offset, read_mask, write_mask, val, save_val, read_val;
8235 static struct {
8236 u16 offset;
8237 u16 flags;
8238#define TG3_FL_5705 0x1
8239#define TG3_FL_NOT_5705 0x2
8240#define TG3_FL_NOT_5788 0x4
8241 u32 read_mask;
8242 u32 write_mask;
8243 } reg_tbl[] = {
8244 /* MAC Control Registers */
8245 { MAC_MODE, TG3_FL_NOT_5705,
8246 0x00000000, 0x00ef6f8c },
8247 { MAC_MODE, TG3_FL_5705,
8248 0x00000000, 0x01ef6b8c },
8249 { MAC_STATUS, TG3_FL_NOT_5705,
8250 0x03800107, 0x00000000 },
8251 { MAC_STATUS, TG3_FL_5705,
8252 0x03800100, 0x00000000 },
8253 { MAC_ADDR_0_HIGH, 0x0000,
8254 0x00000000, 0x0000ffff },
8255 { MAC_ADDR_0_LOW, 0x0000,
8256 0x00000000, 0xffffffff },
8257 { MAC_RX_MTU_SIZE, 0x0000,
8258 0x00000000, 0x0000ffff },
8259 { MAC_TX_MODE, 0x0000,
8260 0x00000000, 0x00000070 },
8261 { MAC_TX_LENGTHS, 0x0000,
8262 0x00000000, 0x00003fff },
8263 { MAC_RX_MODE, TG3_FL_NOT_5705,
8264 0x00000000, 0x000007fc },
8265 { MAC_RX_MODE, TG3_FL_5705,
8266 0x00000000, 0x000007dc },
8267 { MAC_HASH_REG_0, 0x0000,
8268 0x00000000, 0xffffffff },
8269 { MAC_HASH_REG_1, 0x0000,
8270 0x00000000, 0xffffffff },
8271 { MAC_HASH_REG_2, 0x0000,
8272 0x00000000, 0xffffffff },
8273 { MAC_HASH_REG_3, 0x0000,
8274 0x00000000, 0xffffffff },
8275
8276 /* Receive Data and Receive BD Initiator Control Registers. */
8277 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8278 0x00000000, 0xffffffff },
8279 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8280 0x00000000, 0xffffffff },
8281 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8282 0x00000000, 0x00000003 },
8283 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8284 0x00000000, 0xffffffff },
8285 { RCVDBDI_STD_BD+0, 0x0000,
8286 0x00000000, 0xffffffff },
8287 { RCVDBDI_STD_BD+4, 0x0000,
8288 0x00000000, 0xffffffff },
8289 { RCVDBDI_STD_BD+8, 0x0000,
8290 0x00000000, 0xffff0002 },
8291 { RCVDBDI_STD_BD+0xc, 0x0000,
8292 0x00000000, 0xffffffff },
8293
8294 /* Receive BD Initiator Control Registers. */
8295 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8296 0x00000000, 0xffffffff },
8297 { RCVBDI_STD_THRESH, TG3_FL_5705,
8298 0x00000000, 0x000003ff },
8299 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8300 0x00000000, 0xffffffff },
8301
8302 /* Host Coalescing Control Registers. */
8303 { HOSTCC_MODE, TG3_FL_NOT_5705,
8304 0x00000000, 0x00000004 },
8305 { HOSTCC_MODE, TG3_FL_5705,
8306 0x00000000, 0x000000f6 },
8307 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8308 0x00000000, 0xffffffff },
8309 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8310 0x00000000, 0x000003ff },
8311 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8312 0x00000000, 0xffffffff },
8313 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8314 0x00000000, 0x000003ff },
8315 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8316 0x00000000, 0xffffffff },
8317 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8318 0x00000000, 0x000000ff },
8319 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8320 0x00000000, 0xffffffff },
8321 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8322 0x00000000, 0x000000ff },
8323 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8324 0x00000000, 0xffffffff },
8325 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8326 0x00000000, 0xffffffff },
8327 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8328 0x00000000, 0xffffffff },
8329 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8330 0x00000000, 0x000000ff },
8331 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8332 0x00000000, 0xffffffff },
8333 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8334 0x00000000, 0x000000ff },
8335 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8336 0x00000000, 0xffffffff },
8337 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8338 0x00000000, 0xffffffff },
8339 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8340 0x00000000, 0xffffffff },
8341 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8342 0x00000000, 0xffffffff },
8343 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8344 0x00000000, 0xffffffff },
8345 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8346 0xffffffff, 0x00000000 },
8347 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8348 0xffffffff, 0x00000000 },
8349
8350 /* Buffer Manager Control Registers. */
8351 { BUFMGR_MB_POOL_ADDR, 0x0000,
8352 0x00000000, 0x007fff80 },
8353 { BUFMGR_MB_POOL_SIZE, 0x0000,
8354 0x00000000, 0x007fffff },
8355 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8356 0x00000000, 0x0000003f },
8357 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8358 0x00000000, 0x000001ff },
8359 { BUFMGR_MB_HIGH_WATER, 0x0000,
8360 0x00000000, 0x000001ff },
8361 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8362 0xffffffff, 0x00000000 },
8363 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8364 0xffffffff, 0x00000000 },
8365
8366 /* Mailbox Registers */
8367 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8368 0x00000000, 0x000001ff },
8369 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8370 0x00000000, 0x000001ff },
8371 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8372 0x00000000, 0x000007ff },
8373 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8374 0x00000000, 0x000001ff },
8375
8376 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8377 };
8378
8379 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8380 is_5705 = 1;
8381 else
8382 is_5705 = 0;
8383
8384 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8385 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8386 continue;
8387
8388 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8389 continue;
8390
8391 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8392 (reg_tbl[i].flags & TG3_FL_NOT_5788))
8393 continue;
8394
8395 offset = (u32) reg_tbl[i].offset;
8396 read_mask = reg_tbl[i].read_mask;
8397 write_mask = reg_tbl[i].write_mask;
8398
8399 /* Save the original register content */
8400 save_val = tr32(offset);
8401
8402 /* Determine the read-only value. */
8403 read_val = save_val & read_mask;
8404
8405 /* Write zero to the register, then make sure the read-only bits
8406 * are not changed and the read/write bits are all zeros.
8407 */
8408 tw32(offset, 0);
8409
8410 val = tr32(offset);
8411
8412 /* Test the read-only and read/write bits. */
8413 if (((val & read_mask) != read_val) || (val & write_mask))
8414 goto out;
8415
8416 /* Write ones to all the bits defined by RdMask and WrMask, then
8417 * make sure the read-only bits are not changed and the
8418 * read/write bits are all ones.
8419 */
8420 tw32(offset, read_mask | write_mask);
8421
8422 val = tr32(offset);
8423
8424 /* Test the read-only bits. */
8425 if ((val & read_mask) != read_val)
8426 goto out;
8427
8428 /* Test the read/write bits. */
8429 if ((val & write_mask) != write_mask)
8430 goto out;
8431
8432 tw32(offset, save_val);
8433 }
8434
8435 return 0;
8436
8437out:
8438 printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8439 tw32(offset, save_val);
8440 return -EIO;
8441}
8442
7942e1db
MC
8443static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8444{
f71e1309 8445 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7942e1db
MC
8446 int i;
8447 u32 j;
8448
8449 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8450 for (j = 0; j < len; j += 4) {
8451 u32 val;
8452
8453 tg3_write_mem(tp, offset + j, test_pattern[i]);
8454 tg3_read_mem(tp, offset + j, &val);
8455 if (val != test_pattern[i])
8456 return -EIO;
8457 }
8458 }
8459 return 0;
8460}
8461
8462static int tg3_test_memory(struct tg3 *tp)
8463{
8464 static struct mem_entry {
8465 u32 offset;
8466 u32 len;
8467 } mem_tbl_570x[] = {
38690194 8468 { 0x00000000, 0x00b50},
7942e1db
MC
8469 { 0x00002000, 0x1c000},
8470 { 0xffffffff, 0x00000}
8471 }, mem_tbl_5705[] = {
8472 { 0x00000100, 0x0000c},
8473 { 0x00000200, 0x00008},
7942e1db
MC
8474 { 0x00004000, 0x00800},
8475 { 0x00006000, 0x01000},
8476 { 0x00008000, 0x02000},
8477 { 0x00010000, 0x0e000},
8478 { 0xffffffff, 0x00000}
79f4d13a
MC
8479 }, mem_tbl_5755[] = {
8480 { 0x00000200, 0x00008},
8481 { 0x00004000, 0x00800},
8482 { 0x00006000, 0x00800},
8483 { 0x00008000, 0x02000},
8484 { 0x00010000, 0x0c000},
8485 { 0xffffffff, 0x00000}
7942e1db
MC
8486 };
8487 struct mem_entry *mem_tbl;
8488 int err = 0;
8489 int i;
8490
79f4d13a 8491 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
af36e6b6
MC
8492 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8493 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
79f4d13a
MC
8494 mem_tbl = mem_tbl_5755;
8495 else
8496 mem_tbl = mem_tbl_5705;
8497 } else
7942e1db
MC
8498 mem_tbl = mem_tbl_570x;
8499
8500 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8501 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8502 mem_tbl[i].len)) != 0)
8503 break;
8504 }
8505
8506 return err;
8507}
8508
9f40dead
MC
8509#define TG3_MAC_LOOPBACK 0
8510#define TG3_PHY_LOOPBACK 1
8511
/* Run a single-packet internal loopback test.
 *
 * Builds one 1514-byte test frame (destination = our MAC, payload bytes
 * i & 0xff), transmits it with the MAC or PHY looped back on itself, and
 * verifies the frame comes back on the standard RX ring with the same
 * length and payload.
 *
 * @tp: driver private state (caller must have the device configured)
 * @loopback_mode: TG3_MAC_LOOPBACK or TG3_PHY_LOOPBACK
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if the test
 * skb cannot be allocated, or -EIO if the frame is not received intact.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		/* Loop the MAC back on itself in GMII mode. */
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
			   MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		/* Put the PHY in loopback at 1000/full. */
		tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
					   BMCR_SPEED1000);
		udelay(40);
		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
		if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
			/* BCM5401 needs inverted polarity and the LNK3 LED
			 * mode for this test to pass.
			 */
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	tx_len = 1514;
	skb = dev_alloc_skb(tx_len);
	if (!skb)
		return -ENOMEM;

	/* Frame: our own MAC as destination, 8 zero bytes, then a
	 * deterministic byte pattern we can verify on receive.
	 */
	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	/* Force a coalescing pass so the status block indices are fresh. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	udelay(10);

	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	/* Queue the single test frame and ring the TX doorbell. */
	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* Poll (up to 10 tries) for the frame to be sent and looped back. */
	for (i = 0; i < 10; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the RX completion descriptor: must come from the
	 * standard ring and carry no (real) error bits.
	 */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	/* Hardware length includes the 4-byte FCS; strip it. */
	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Verify the payload byte pattern survived the loop. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
8645
9f40dead
MC
8646#define TG3_MAC_LOOPBACK_FAILED 1
8647#define TG3_PHY_LOOPBACK_FAILED 2
8648#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
8649 TG3_PHY_LOOPBACK_FAILED)
8650
8651static int tg3_test_loopback(struct tg3 *tp)
8652{
8653 int err = 0;
8654
8655 if (!netif_running(tp->dev))
8656 return TG3_LOOPBACK_FAILED;
8657
8e7a22e3 8658 tg3_reset_hw(tp, 1);
9f40dead
MC
8659
8660 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8661 err |= TG3_MAC_LOOPBACK_FAILED;
8662 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8663 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8664 err |= TG3_PHY_LOOPBACK_FAILED;
8665 }
8666
8667 return err;
8668}
8669
4cafd3f5
MC
/* ethtool self-test entry point.
 *
 * Fills data[0..5] with per-test pass(0)/fail(non-zero) results:
 *   [0] NVRAM, [1] link, [2] registers, [3] memory,
 *   [4] loopback (bitmask from tg3_test_loopback), [5] interrupt.
 * Tests 2-5 run only in offline mode (ETH_TEST_FL_OFFLINE) because they
 * halt and re-initialize the chip.  Sets ETH_TEST_FL_FAILED in
 * etest->flags if any test fails.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Temporarily wake the chip if it is in a low-power state. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		/* Quiesce the device before halting it. */
		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		/* Hold the NVRAM lock across the CPU halts so firmware
		 * cannot interfere; unlock only if we obtained it.
		 */
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* data[4] keeps the raw loopback failure bitmask. */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* The interrupt test must run without the full lock held. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Restore the chip to its normal operating state. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			tg3_init_hw(tp, 1);
			tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
8742
1da177e4
LT
/* Network device ioctl handler: MII register access via SIOC[GS]MIIREG.
 *
 * Returns the result of the PHY read/write, -EAGAIN while the PHY is in
 * low-power state, -EPERM for unprivileged writes, and -EOPNOTSUPP for
 * unsupported commands or SERDES devices (which have no MII PHY).
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		/* Serialize MDIO access against the rest of the driver. */
		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
8794
8795#if TG3_VLAN_TAG_USED
/* Attach a VLAN group to the device and update the hardware RX mode
 * (RX_MODE_KEEP_VLAN_TAG) accordingly.  The device is quiesced around
 * the update when running.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	tg3_full_unlock(tp);

	if (netif_running(dev))
		tg3_netif_start(tp);
}
8815
/* Remove a VLAN id from the attached VLAN group, quiescing the device
 * while the group table is modified.
 */
static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);
	if (tp->vlgrp)
		tp->vlgrp->vlan_devices[vid] = NULL;
	tg3_full_unlock(tp);

	if (netif_running(dev))
		tg3_netif_start(tp);
}
8831#endif
8832
15f9850d
DM
8833static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8834{
8835 struct tg3 *tp = netdev_priv(dev);
8836
8837 memcpy(ec, &tp->coal, sizeof(*ec));
8838 return 0;
8839}
8840
d244c892
MC
/* ethtool set_coalesce: validate and apply interrupt-coalescing parameters.
 *
 * On 5705-plus chips the IRQ tick and statistics-block parameters are not
 * supported, so their allowed maxima stay at 0 and any non-zero request is
 * rejected with -EINVAL.  At least one of {usecs, max_frames} must be
 * non-zero for each of RX and TX or no interrupts would ever be generated.
 * Only the fields this hardware supports are copied into tp->coal; the new
 * values are written to the chip immediately if the interface is up.
 */
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
8894
1da177e4
LT
/* ethtool operations table for the tg3 driver.  TSO handlers are only
 * registered when the kernel was built with NETIF_F_TSO support
 * (TG3_TSO_SUPPORT, see top of file).
 */
static struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#if TG3_TSO_SUPPORT != 0
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= tg3_set_tso,
#endif
	.self_test_count	= tg3_get_test_count,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_stats_count	= tg3_get_stats_count,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
8934
/* Determine the size of a serial EEPROM by probing for address wrap-around.
 *
 * Leaves tp->nvram_size at the EEPROM_CHIP_SIZE default if the magic word
 * cannot be read or is not recognized (TG3_EEPROM_MAGIC or the 0xa5 selfboot
 * signature in the top byte).
 */
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
8966
/* Determine NVRAM size and store it in tp->nvram_size.
 *
 * Regular images record their size (in KB) in the upper 16 bits of the
 * word at offset 0xf0; selfboot images (no TG3_EEPROM_MAGIC at offset 0)
 * are sized by probing via tg3_get_eeprom_size().  Falls back to 128 KB
 * (0x20000) when no size is recorded.
 */
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_nvram_read_swab(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			tp->nvram_size = (val >> 16) * 1024;
			return;
		}
	}
	tp->nvram_size = 0x20000;
}
8988
/* Decode NVRAM_CFG1 to identify the attached flash/EEPROM part and set
 * tp->nvram_jedecnum, tp->nvram_pagesize and the NVRAM-related tg3_flags.
 * The per-vendor decode only applies to 5750/5780-class chips; everything
 * else defaults to a buffered Atmel AT45DB0X1B part.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		/* No flash interface: force non-bypass EEPROM access. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	}
	else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
9041
361b4ac2
MC
/* 5752-specific NVRAM identification: decode vendor and page size from
 * NVRAM_CFG1 into tp->nvram_jedecnum / tp->nvram_pagesize / flags.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		/* Flash parts report their page size in NVRAM_CFG1. */
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
		case FLASH_5752PAGE_SIZE_256:
			tp->nvram_pagesize = 256;
			break;
		case FLASH_5752PAGE_SIZE_512:
			tp->nvram_pagesize = 512;
			break;
		case FLASH_5752PAGE_SIZE_1K:
			tp->nvram_pagesize = 1024;
			break;
		case FLASH_5752PAGE_SIZE_2K:
			tp->nvram_pagesize = 2048;
			break;
		case FLASH_5752PAGE_SIZE_4K:
			tp->nvram_pagesize = 4096;
			break;
		case FLASH_5752PAGE_SIZE_264:
			tp->nvram_pagesize = 264;
			break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
9102
d3c7b886
MC
/* 5755-specific NVRAM identification: decode vendor from NVRAM_CFG1 and
 * set jedecnum, page size, and buffered/flash flags accordingly.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM part: disable compatibility bypass. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_4:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		break;
	}
}
9143
1b27777a
MC
/* 5787-specific NVRAM identification: decode vendor from NVRAM_CFG1 and
 * set jedecnum, page size, and buffered/flash flags accordingly.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM part: disable compatibility bypass. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		break;
	}
}
9181
1da177e4
LT
9182/* Chips other than 5700/5701 use the NVRAM for fetching info. */
9183static void __devinit tg3_nvram_init(struct tg3 *tp)
9184{
9185 int j;
9186
1da177e4
LT
9187 tw32_f(GRC_EEPROM_ADDR,
9188 (EEPROM_ADDR_FSM_RESET |
9189 (EEPROM_DEFAULT_CLOCK_PERIOD <<
9190 EEPROM_ADDR_CLKPERD_SHIFT)));
9191
9192 /* XXX schedule_timeout() ... */
9193 for (j = 0; j < 100; j++)
9194 udelay(10);
9195
9196 /* Enable seeprom accesses. */
9197 tw32_f(GRC_LOCAL_CTRL,
9198 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9199 udelay(100);
9200
9201 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9202 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9203 tp->tg3_flags |= TG3_FLAG_NVRAM;
9204
ec41c7df
MC
9205 if (tg3_nvram_lock(tp)) {
9206 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9207 "tg3_nvram_init failed.\n", tp->dev->name);
9208 return;
9209 }
e6af301b 9210 tg3_enable_nvram_access(tp);
1da177e4 9211
361b4ac2
MC
9212 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9213 tg3_get_5752_nvram_info(tp);
d3c7b886
MC
9214 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9215 tg3_get_5755_nvram_info(tp);
1b27777a
MC
9216 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9217 tg3_get_5787_nvram_info(tp);
361b4ac2
MC
9218 else
9219 tg3_get_nvram_info(tp);
9220
1da177e4
LT
9221 tg3_get_nvram_size(tp);
9222
e6af301b 9223 tg3_disable_nvram_access(tp);
381291b7 9224 tg3_nvram_unlock(tp);
1da177e4
LT
9225
9226 } else {
9227 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9228
9229 tg3_get_eeprom_size(tp);
9230 }
9231}
9232
/* Read one 32-bit word from the serial EEPROM via the GRC EEPROM engine.
 *
 * @offset: byte offset, must be 4-byte aligned and within the address mask
 * @val:    receives the word read
 *
 * Polls up to 10000 * 100us for the EEPROM state machine to complete.
 * Returns 0 on success, -EINVAL for a bad offset, -EBUSY on timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 10000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		udelay(100);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
9266
9267#define NVRAM_CMD_TIMEOUT 10000
9268
9269static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9270{
9271 int i;
9272
9273 tw32(NVRAM_CMD, nvram_cmd);
9274 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9275 udelay(10);
9276 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9277 udelay(10);
9278 break;
9279 }
9280 }
9281 if (i == NVRAM_CMD_TIMEOUT) {
9282 return -EBUSY;
9283 }
9284 return 0;
9285}
9286
1820180b
MC
9287static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9288{
9289 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9290 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9291 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9292 (tp->nvram_jedecnum == JEDEC_ATMEL))
9293
9294 addr = ((addr / tp->nvram_pagesize) <<
9295 ATMEL_AT45DB0X1B_PAGE_POS) +
9296 (addr % tp->nvram_pagesize);
9297
9298 return addr;
9299}
9300
c4e6575c
MC
9301static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9302{
9303 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9304 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9305 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9306 (tp->nvram_jedecnum == JEDEC_ATMEL))
9307
9308 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9309 tp->nvram_pagesize) +
9310 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9311
9312 return addr;
9313}
9314
1da177e4
LT
/* Read one 32-bit word from NVRAM at the given linear byte offset.
 *
 * Falls back to the GRC EEPROM engine when the chip has no NVRAM
 * interface.  Otherwise takes the firmware NVRAM lock, enables access,
 * issues a single-word read, and byte-swaps the result into *val.
 * Returns 0 on success or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Convert to the part's physical (paged) addressing first. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
9346
1820180b
MC
9347static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9348{
9349 int err;
9350 u32 tmp;
9351
9352 err = tg3_nvram_read(tp, offset, &tmp);
9353 *val = swab32(tmp);
9354 return err;
9355}
9356
1da177e4
LT
/* Write a buffer to the serial EEPROM via the GRC EEPROM engine,
 * one 32-bit word at a time.
 *
 * @offset: starting byte offset (dword aligned, per caller contract)
 * @len:    number of bytes to write (dword aligned)
 * @buf:    source data
 *
 * Polls up to 10000 * 100us per word; returns 0 on success or -EBUSY
 * if a word fails to complete.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				   u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr, data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

		/* Clear any pending COMPLETE status before starting. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 10000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			udelay(100);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
9398
/* offset and length are dword aligned */
/* Write to an unbuffered flash part using read-modify-write of whole
 * pages: read the page into a scratch buffer, merge in the new data,
 * erase the page, then program it back word by word.
 *
 * Returns 0 on success, -ENOMEM if the scratch page cannot be allocated,
 * or the first error from a read/command step.  Always issues a final
 * write-disable (WRDI) command before returning.
 *
 * NOTE(review): `buf` is never advanced between loop iterations, so a
 * write spanning more than one page appears to re-merge the same source
 * bytes into each page — verify against callers' maximum write size.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start of the page containing `offset`. */
		phy_addr = offset & ~pagemask;

		/* Read the whole existing page into the scratch buffer. */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read(tp, phy_addr + j,
						(u32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Merge the new data into the page image. */
		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Program the merged page back, word by word. */
		for (j = 0; j < pagesize; j += 4) {
			u32 data;

			data = *((u32 *) (tmp + j));
			tw32(NVRAM_WRDATA, cpu_to_be32(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Always leave the part write-disabled. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
9494
/* offset and length are dword aligned */
/* Write to a buffered flash part (or EEPROM) one 32-bit word at a time.
 *
 * NVRAM_CMD_FIRST/LAST bracket each flash page (and the overall
 * transfer); non-flash EEPROMs always get FIRST|LAST so every word is a
 * complete transaction.  Older ST parts additionally need an explicit
 * write-enable before each FIRST word.  Returns 0 or the first command
 * error.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* Pre-5752-class ST parts require a write-enable command
		 * before the first word of each page.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
9545
/* offset and length are dword aligned */
/* Top-level NVRAM write: handles hardware write-protect, the firmware
 * NVRAM lock, and write-enable mode, then dispatches to the EEPROM,
 * buffered, or unbuffered write path.
 *
 * NOTE(review): if tg3_nvram_lock() fails while TG3_FLAG_EEPROM_WRITE_PROT
 * is set, the early return leaves GRC_LOCAL_CTRL with GPIO_OUTPUT1
 * deasserted (write-protect released) — confirm whether that is intended.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	/* Temporarily release the hardware write-protect GPIO. */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	}
	else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
			!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {

			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		}
		else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	/* Restore the write-protect GPIO state. */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
9600
/* Maps a board's PCI subsystem vendor/device id to the PHY id expected
 * on that board (0 where the PHY id is unknown/not applicable).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },		    /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },		    /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },		/* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },		  /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
9643
9644static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9645{
9646 int i;
9647
9648 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9649 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9650 tp->pdev->subsystem_vendor) &&
9651 (subsys_id_to_phy_id[i].subsys_devid ==
9652 tp->pdev->subsystem_device))
9653 return &subsys_id_to_phy_id[i];
9654 }
9655 return NULL;
9656}
9657
7d0c41ef 9658static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
1da177e4 9659{
1da177e4 9660 u32 val;
caf636c7
MC
9661 u16 pmcsr;
9662
9663 /* On some early chips the SRAM cannot be accessed in D3hot state,
9664 * so need make sure we're in D0.
9665 */
9666 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9667 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9668 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9669 msleep(1);
7d0c41ef
MC
9670
9671 /* Make sure register accesses (indirect or otherwise)
9672 * will function correctly.
9673 */
9674 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9675 tp->misc_host_ctrl);
1da177e4 9676
f49639e6
DM
9677 /* The memory arbiter has to be enabled in order for SRAM accesses
9678 * to succeed. Normally on powerup the tg3 chip firmware will make
9679 * sure it is enabled, but other entities such as system netboot
9680 * code might disable it.
9681 */
9682 val = tr32(MEMARB_MODE);
9683 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9684
1da177e4 9685 tp->phy_id = PHY_ID_INVALID;
7d0c41ef
MC
9686 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9687
f49639e6
DM
9688 /* Assume an onboard device by default. */
9689 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
72b845e0 9690
1da177e4
LT
9691 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9692 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9693 u32 nic_cfg, led_cfg;
7d0c41ef
MC
9694 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9695 int eeprom_phy_serdes = 0;
1da177e4
LT
9696
9697 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9698 tp->nic_sram_data_cfg = nic_cfg;
9699
9700 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9701 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9702 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9703 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9704 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9705 (ver > 0) && (ver < 0x100))
9706 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9707
1da177e4
LT
9708 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9709 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9710 eeprom_phy_serdes = 1;
9711
9712 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9713 if (nic_phy_id != 0) {
9714 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9715 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9716
9717 eeprom_phy_id = (id1 >> 16) << 10;
9718 eeprom_phy_id |= (id2 & 0xfc00) << 16;
9719 eeprom_phy_id |= (id2 & 0x03ff) << 0;
9720 } else
9721 eeprom_phy_id = 0;
9722
7d0c41ef 9723 tp->phy_id = eeprom_phy_id;
747e8f8b 9724 if (eeprom_phy_serdes) {
a4e2b347 9725 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
747e8f8b
MC
9726 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9727 else
9728 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9729 }
7d0c41ef 9730
cbf46853 9731 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
9732 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9733 SHASTA_EXT_LED_MODE_MASK);
cbf46853 9734 else
1da177e4
LT
9735 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9736
9737 switch (led_cfg) {
9738 default:
9739 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9740 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9741 break;
9742
9743 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9744 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9745 break;
9746
9747 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9748 tp->led_ctrl = LED_CTRL_MODE_MAC;
9ba27794
MC
9749
9750 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9751 * read on some older 5700/5701 bootcode.
9752 */
9753 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9754 ASIC_REV_5700 ||
9755 GET_ASIC_REV(tp->pci_chip_rev_id) ==
9756 ASIC_REV_5701)
9757 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9758
1da177e4
LT
9759 break;
9760
9761 case SHASTA_EXT_LED_SHARED:
9762 tp->led_ctrl = LED_CTRL_MODE_SHARED;
9763 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9764 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9765 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9766 LED_CTRL_MODE_PHY_2);
9767 break;
9768
9769 case SHASTA_EXT_LED_MAC:
9770 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9771 break;
9772
9773 case SHASTA_EXT_LED_COMBO:
9774 tp->led_ctrl = LED_CTRL_MODE_COMBO;
9775 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9776 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9777 LED_CTRL_MODE_PHY_2);
9778 break;
9779
9780 };
9781
9782 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9783 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9784 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9785 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9786
bbadf503 9787 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
1da177e4 9788 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
f49639e6
DM
9789 else
9790 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
1da177e4
LT
9791
9792 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9793 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
cbf46853 9794 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
9795 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9796 }
9797 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9798 tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9799
9800 if (cfg2 & (1 << 17))
9801 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9802
9803 /* serdes signal pre-emphasis in register 0x590 set by */
9804 /* bootcode if bit 18 is set */
9805 if (cfg2 & (1 << 18))
9806 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9807 }
7d0c41ef
MC
9808}
9809
9810static int __devinit tg3_phy_probe(struct tg3 *tp)
9811{
9812 u32 hw_phy_id_1, hw_phy_id_2;
9813 u32 hw_phy_id, hw_phy_id_masked;
9814 int err;
1da177e4
LT
9815
9816 /* Reading the PHY ID register can conflict with ASF
9817 * firwmare access to the PHY hardware.
9818 */
9819 err = 0;
9820 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9821 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9822 } else {
9823 /* Now read the physical PHY_ID from the chip and verify
9824 * that it is sane. If it doesn't look good, we fall back
9825 * to either the hard-coded table based PHY_ID and failing
9826 * that the value found in the eeprom area.
9827 */
9828 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9829 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9830
9831 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
9832 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9833 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
9834
9835 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9836 }
9837
9838 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9839 tp->phy_id = hw_phy_id;
9840 if (hw_phy_id_masked == PHY_ID_BCM8002)
9841 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
da6b2d01
MC
9842 else
9843 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
1da177e4 9844 } else {
7d0c41ef
MC
9845 if (tp->phy_id != PHY_ID_INVALID) {
9846 /* Do nothing, phy ID already set up in
9847 * tg3_get_eeprom_hw_cfg().
9848 */
1da177e4
LT
9849 } else {
9850 struct subsys_tbl_ent *p;
9851
9852 /* No eeprom signature? Try the hardcoded
9853 * subsys device table.
9854 */
9855 p = lookup_by_subsys(tp);
9856 if (!p)
9857 return -ENODEV;
9858
9859 tp->phy_id = p->phy_id;
9860 if (!tp->phy_id ||
9861 tp->phy_id == PHY_ID_BCM8002)
9862 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9863 }
9864 }
9865
747e8f8b 9866 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
1da177e4
LT
9867 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9868 u32 bmsr, adv_reg, tg3_ctrl;
9869
9870 tg3_readphy(tp, MII_BMSR, &bmsr);
9871 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9872 (bmsr & BMSR_LSTATUS))
9873 goto skip_phy_reset;
9874
9875 err = tg3_phy_reset(tp);
9876 if (err)
9877 return err;
9878
9879 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9880 ADVERTISE_100HALF | ADVERTISE_100FULL |
9881 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9882 tg3_ctrl = 0;
9883 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9884 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9885 MII_TG3_CTRL_ADV_1000_FULL);
9886 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9887 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9888 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9889 MII_TG3_CTRL_ENABLE_AS_MASTER);
9890 }
9891
9892 if (!tg3_copper_is_advertising_all(tp)) {
9893 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9894
9895 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9896 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9897
9898 tg3_writephy(tp, MII_BMCR,
9899 BMCR_ANENABLE | BMCR_ANRESTART);
9900 }
9901 tg3_phy_set_wirespeed(tp);
9902
9903 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9904 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9905 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9906 }
9907
9908skip_phy_reset:
9909 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9910 err = tg3_init_5401phy_dsp(tp);
9911 if (err)
9912 return err;
9913 }
9914
9915 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9916 err = tg3_init_5401phy_dsp(tp);
9917 }
9918
747e8f8b 9919 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1da177e4
LT
9920 tp->link_config.advertising =
9921 (ADVERTISED_1000baseT_Half |
9922 ADVERTISED_1000baseT_Full |
9923 ADVERTISED_Autoneg |
9924 ADVERTISED_FIBRE);
9925 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9926 tp->link_config.advertising &=
9927 ~(ADVERTISED_1000baseT_Half |
9928 ADVERTISED_1000baseT_Full);
9929
9930 return err;
9931}
9932
9933static void __devinit tg3_read_partno(struct tg3 *tp)
9934{
9935 unsigned char vpd_data[256];
9936 int i;
1b27777a 9937 u32 magic;
1da177e4 9938
1820180b 9939 if (tg3_nvram_read_swab(tp, 0x0, &magic))
f49639e6 9940 goto out_not_found;
1da177e4 9941
1820180b 9942 if (magic == TG3_EEPROM_MAGIC) {
1b27777a
MC
9943 for (i = 0; i < 256; i += 4) {
9944 u32 tmp;
1da177e4 9945
1b27777a
MC
9946 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9947 goto out_not_found;
9948
9949 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
9950 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
9951 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9952 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9953 }
9954 } else {
9955 int vpd_cap;
9956
9957 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
9958 for (i = 0; i < 256; i += 4) {
9959 u32 tmp, j = 0;
9960 u16 tmp16;
9961
9962 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
9963 i);
9964 while (j++ < 100) {
9965 pci_read_config_word(tp->pdev, vpd_cap +
9966 PCI_VPD_ADDR, &tmp16);
9967 if (tmp16 & 0x8000)
9968 break;
9969 msleep(1);
9970 }
f49639e6
DM
9971 if (!(tmp16 & 0x8000))
9972 goto out_not_found;
9973
1b27777a
MC
9974 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9975 &tmp);
9976 tmp = cpu_to_le32(tmp);
9977 memcpy(&vpd_data[i], &tmp, 4);
9978 }
1da177e4
LT
9979 }
9980
9981 /* Now parse and find the part number. */
9982 for (i = 0; i < 256; ) {
9983 unsigned char val = vpd_data[i];
9984 int block_end;
9985
9986 if (val == 0x82 || val == 0x91) {
9987 i = (i + 3 +
9988 (vpd_data[i + 1] +
9989 (vpd_data[i + 2] << 8)));
9990 continue;
9991 }
9992
9993 if (val != 0x90)
9994 goto out_not_found;
9995
9996 block_end = (i + 3 +
9997 (vpd_data[i + 1] +
9998 (vpd_data[i + 2] << 8)));
9999 i += 3;
10000 while (i < block_end) {
10001 if (vpd_data[i + 0] == 'P' &&
10002 vpd_data[i + 1] == 'N') {
10003 int partno_len = vpd_data[i + 2];
10004
10005 if (partno_len > 24)
10006 goto out_not_found;
10007
10008 memcpy(tp->board_part_number,
10009 &vpd_data[i + 3],
10010 partno_len);
10011
10012 /* Success. */
10013 return;
10014 }
10015 }
10016
10017 /* Part number not found. */
10018 goto out_not_found;
10019 }
10020
10021out_not_found:
10022 strcpy(tp->board_part_number, "none");
10023}
10024
c4e6575c
MC
10025static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10026{
10027 u32 val, offset, start;
10028
10029 if (tg3_nvram_read_swab(tp, 0, &val))
10030 return;
10031
10032 if (val != TG3_EEPROM_MAGIC)
10033 return;
10034
10035 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10036 tg3_nvram_read_swab(tp, 0x4, &start))
10037 return;
10038
10039 offset = tg3_nvram_logical_addr(tp, offset);
10040 if (tg3_nvram_read_swab(tp, offset, &val))
10041 return;
10042
10043 if ((val & 0xfc000000) == 0x0c000000) {
10044 u32 ver_offset, addr;
10045 int i;
10046
10047 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
10048 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10049 return;
10050
10051 if (val != 0)
10052 return;
10053
10054 addr = offset + ver_offset - start;
10055 for (i = 0; i < 16; i += 4) {
10056 if (tg3_nvram_read(tp, addr + i, &val))
10057 return;
10058
10059 val = cpu_to_le32(val);
10060 memcpy(tp->fw_ver + i, &val, 4);
10061 }
10062 }
10063}
10064
1da177e4
LT
10065static int __devinit tg3_get_invariants(struct tg3 *tp)
10066{
10067 static struct pci_device_id write_reorder_chipsets[] = {
1da177e4
LT
10068 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10069 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
399de50b
MC
10070 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10071 PCI_DEVICE_ID_VIA_8385_0) },
1da177e4
LT
10072 { },
10073 };
10074 u32 misc_ctrl_reg;
10075 u32 cacheline_sz_reg;
10076 u32 pci_state_reg, grc_misc_cfg;
10077 u32 val;
10078 u16 pci_cmd;
10079 int err;
10080
1da177e4
LT
10081 /* Force memory write invalidate off. If we leave it on,
10082 * then on 5700_BX chips we have to enable a workaround.
10083 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10084 * to match the cacheline size. The Broadcom driver have this
10085 * workaround but turns MWI off all the times so never uses
10086 * it. This seems to suggest that the workaround is insufficient.
10087 */
10088 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10089 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10090 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10091
10092 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10093 * has the register indirect write enable bit set before
10094 * we try to access any of the MMIO registers. It is also
10095 * critical that the PCI-X hw workaround situation is decided
10096 * before that as well.
10097 */
10098 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10099 &misc_ctrl_reg);
10100
10101 tp->pci_chip_rev_id = (misc_ctrl_reg >>
10102 MISC_HOST_CTRL_CHIPREV_SHIFT);
10103
ff645bec
MC
10104 /* Wrong chip ID in 5752 A0. This code can be removed later
10105 * as A0 is not in production.
10106 */
10107 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10108 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10109
6892914f
MC
10110 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10111 * we need to disable memory and use config. cycles
10112 * only to access all registers. The 5702/03 chips
10113 * can mistakenly decode the special cycles from the
10114 * ICH chipsets as memory write cycles, causing corruption
10115 * of register and memory space. Only certain ICH bridges
10116 * will drive special cycles with non-zero data during the
10117 * address phase which can fall within the 5703's address
10118 * range. This is not an ICH bug as the PCI spec allows
10119 * non-zero address during special cycles. However, only
10120 * these ICH bridges are known to drive non-zero addresses
10121 * during special cycles.
10122 *
10123 * Since special cycles do not cross PCI bridges, we only
10124 * enable this workaround if the 5703 is on the secondary
10125 * bus of these ICH bridges.
10126 */
10127 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10128 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10129 static struct tg3_dev_id {
10130 u32 vendor;
10131 u32 device;
10132 u32 rev;
10133 } ich_chipsets[] = {
10134 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10135 PCI_ANY_ID },
10136 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10137 PCI_ANY_ID },
10138 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10139 0xa },
10140 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10141 PCI_ANY_ID },
10142 { },
10143 };
10144 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10145 struct pci_dev *bridge = NULL;
10146
10147 while (pci_id->vendor != 0) {
10148 bridge = pci_get_device(pci_id->vendor, pci_id->device,
10149 bridge);
10150 if (!bridge) {
10151 pci_id++;
10152 continue;
10153 }
10154 if (pci_id->rev != PCI_ANY_ID) {
10155 u8 rev;
10156
10157 pci_read_config_byte(bridge, PCI_REVISION_ID,
10158 &rev);
10159 if (rev > pci_id->rev)
10160 continue;
10161 }
10162 if (bridge->subordinate &&
10163 (bridge->subordinate->number ==
10164 tp->pdev->bus->number)) {
10165
10166 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10167 pci_dev_put(bridge);
10168 break;
10169 }
10170 }
10171 }
10172
4a29cc2e
MC
10173 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10174 * DMA addresses > 40-bit. This bridge may have other additional
10175 * 57xx devices behind it in some 4-port NIC designs for example.
10176 * Any tg3 device found behind the bridge will also need the 40-bit
10177 * DMA workaround.
10178 */
a4e2b347
MC
10179 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10180 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10181 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
4a29cc2e 10182 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
4cf78e4f 10183 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
a4e2b347 10184 }
4a29cc2e
MC
10185 else {
10186 struct pci_dev *bridge = NULL;
10187
10188 do {
10189 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10190 PCI_DEVICE_ID_SERVERWORKS_EPB,
10191 bridge);
10192 if (bridge && bridge->subordinate &&
10193 (bridge->subordinate->number <=
10194 tp->pdev->bus->number) &&
10195 (bridge->subordinate->subordinate >=
10196 tp->pdev->bus->number)) {
10197 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10198 pci_dev_put(bridge);
10199 break;
10200 }
10201 } while (bridge);
10202 }
4cf78e4f 10203
1da177e4
LT
10204 /* Initialize misc host control in PCI block. */
10205 tp->misc_host_ctrl |= (misc_ctrl_reg &
10206 MISC_HOST_CTRL_CHIPREV);
10207 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10208 tp->misc_host_ctrl);
10209
10210 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10211 &cacheline_sz_reg);
10212
10213 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
10214 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
10215 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
10216 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
10217
6708e5cc 10218 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
4cf78e4f 10219 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
af36e6b6 10220 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d9ab5ad1 10221 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
a4e2b347 10222 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6708e5cc
JL
10223 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10224
1b440c56
JL
10225 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10226 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10227 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10228
5a6f3074 10229 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
af36e6b6
MC
10230 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10231 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
5a6f3074 10232 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
fcfa0a32 10233 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
52c0fd83
MC
10234 } else {
10235 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
10236 TG3_FLG2_HW_TSO_1_BUG;
10237 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10238 ASIC_REV_5750 &&
10239 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10240 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
10241 }
5a6f3074 10242 }
1da177e4 10243
0f893dc6
MC
10244 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10245 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
d9ab5ad1 10246 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
af36e6b6 10247 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
d9ab5ad1 10248 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
0f893dc6
MC
10249 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10250
1da177e4
LT
10251 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10252 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10253
399de50b
MC
10254 /* If we have an AMD 762 or VIA K8T800 chipset, write
10255 * reordering to the mailbox registers done by the host
10256 * controller can cause major troubles. We read back from
10257 * every mailbox register write to force the writes to be
10258 * posted to the chip in order.
10259 */
10260 if (pci_dev_present(write_reorder_chipsets) &&
10261 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10262 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10263
1da177e4
LT
10264 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10265 tp->pci_lat_timer < 64) {
10266 tp->pci_lat_timer = 64;
10267
10268 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
10269 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
10270 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
10271 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
10272
10273 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10274 cacheline_sz_reg);
10275 }
10276
10277 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10278 &pci_state_reg);
10279
10280 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10281 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10282
10283 /* If this is a 5700 BX chipset, and we are in PCI-X
10284 * mode, enable register write workaround.
10285 *
10286 * The workaround is to use indirect register accesses
10287 * for all chip writes not to mailbox registers.
10288 */
10289 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10290 u32 pm_reg;
10291 u16 pci_cmd;
10292
10293 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10294
10295 /* The chip can have it's power management PCI config
10296 * space registers clobbered due to this bug.
10297 * So explicitly force the chip into D0 here.
10298 */
10299 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10300 &pm_reg);
10301 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10302 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10303 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10304 pm_reg);
10305
10306 /* Also, force SERR#/PERR# in PCI command. */
10307 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10308 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10309 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10310 }
10311 }
10312
087fe256
MC
10313 /* 5700 BX chips need to have their TX producer index mailboxes
10314 * written twice to workaround a bug.
10315 */
10316 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10317 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10318
1da177e4
LT
10319 /* Back to back register writes can cause problems on this chip,
10320 * the workaround is to read back all reg writes except those to
10321 * mailbox regs. See tg3_write_indirect_reg32().
10322 *
10323 * PCI Express 5750_A0 rev chips need this workaround too.
10324 */
10325 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10326 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10327 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10328 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10329
10330 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10331 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10332 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10333 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10334
10335 /* Chip-specific fixup from Broadcom driver */
10336 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10337 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10338 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10339 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10340 }
10341
1ee582d8 10342 /* Default fast path register access methods */
20094930 10343 tp->read32 = tg3_read32;
1ee582d8 10344 tp->write32 = tg3_write32;
09ee929c 10345 tp->read32_mbox = tg3_read32;
20094930 10346 tp->write32_mbox = tg3_write32;
1ee582d8
MC
10347 tp->write32_tx_mbox = tg3_write32;
10348 tp->write32_rx_mbox = tg3_write32;
10349
10350 /* Various workaround register access methods */
10351 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10352 tp->write32 = tg3_write_indirect_reg32;
10353 else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10354 tp->write32 = tg3_write_flush_reg32;
10355
10356 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10357 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10358 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10359 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10360 tp->write32_rx_mbox = tg3_write_flush_reg32;
10361 }
20094930 10362
6892914f
MC
10363 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10364 tp->read32 = tg3_read_indirect_reg32;
10365 tp->write32 = tg3_write_indirect_reg32;
10366 tp->read32_mbox = tg3_read_indirect_mbox;
10367 tp->write32_mbox = tg3_write_indirect_mbox;
10368 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10369 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10370
10371 iounmap(tp->regs);
22abe310 10372 tp->regs = NULL;
6892914f
MC
10373
10374 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10375 pci_cmd &= ~PCI_COMMAND_MEMORY;
10376 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10377 }
10378
bbadf503
MC
10379 if (tp->write32 == tg3_write_indirect_reg32 ||
10380 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10381 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
f49639e6 10382 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
bbadf503
MC
10383 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10384
7d0c41ef
MC
10385 /* Get eeprom hw config before calling tg3_set_power_state().
10386 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10387 * determined before calling tg3_set_power_state() so that
10388 * we know whether or not to switch out of Vaux power.
10389 * When the flag is set, it means that GPIO1 is used for eeprom
10390 * write protect and also implies that it is a LOM where GPIOs
10391 * are not used to switch power.
10392 */
10393 tg3_get_eeprom_hw_cfg(tp);
10394
314fba34
MC
10395 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10396 * GPIO1 driven high will bring 5700's external PHY out of reset.
10397 * It is also used as eeprom write protect on LOMs.
10398 */
10399 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10400 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10401 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10402 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10403 GRC_LCLCTRL_GPIO_OUTPUT1);
3e7d83bc
MC
10404 /* Unused GPIO3 must be driven as output on 5752 because there
10405 * are no pull-up resistors on unused GPIO pins.
10406 */
10407 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10408 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
314fba34 10409
af36e6b6
MC
10410 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10411 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10412
1da177e4 10413 /* Force the chip into D0. */
bc1c7567 10414 err = tg3_set_power_state(tp, PCI_D0);
1da177e4
LT
10415 if (err) {
10416 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10417 pci_name(tp->pdev));
10418 return err;
10419 }
10420
10421 /* 5700 B0 chips do not support checksumming correctly due
10422 * to hardware bugs.
10423 */
10424 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10425 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10426
1da177e4
LT
10427 /* Derive initial jumbo mode from MTU assigned in
10428 * ether_setup() via the alloc_etherdev() call
10429 */
0f893dc6 10430 if (tp->dev->mtu > ETH_DATA_LEN &&
a4e2b347 10431 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
0f893dc6 10432 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
1da177e4
LT
10433
10434 /* Determine WakeOnLan speed to use. */
10435 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10436 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10437 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10438 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10439 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10440 } else {
10441 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10442 }
10443
10444 /* A few boards don't want Ethernet@WireSpeed phy feature */
10445 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10446 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10447 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
747e8f8b
MC
10448 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10449 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1da177e4
LT
10450 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10451
10452 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10453 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10454 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10455 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10456 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10457
c424cb24
MC
10458 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10459 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10460 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10461 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10462 else
10463 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10464 }
1da177e4 10465
1da177e4 10466 tp->coalesce_mode = 0;
1da177e4
LT
10467 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10468 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10469 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10470
10471 /* Initialize MAC MI mode, polling disabled. */
10472 tw32_f(MAC_MI_MODE, tp->mi_mode);
10473 udelay(80);
10474
10475 /* Initialize data/descriptor byte/word swapping. */
10476 val = tr32(GRC_MODE);
10477 val &= GRC_MODE_HOST_STACKUP;
10478 tw32(GRC_MODE, val | tp->grc_mode);
10479
10480 tg3_switch_clocks(tp);
10481
10482 /* Clear this out for sanity. */
10483 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10484
10485 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10486 &pci_state_reg);
10487 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10488 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10489 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10490
10491 if (chiprevid == CHIPREV_ID_5701_A0 ||
10492 chiprevid == CHIPREV_ID_5701_B0 ||
10493 chiprevid == CHIPREV_ID_5701_B2 ||
10494 chiprevid == CHIPREV_ID_5701_B5) {
10495 void __iomem *sram_base;
10496
10497 /* Write some dummy words into the SRAM status block
10498 * area, see if it reads back correctly. If the return
10499 * value is bad, force enable the PCIX workaround.
10500 */
10501 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10502
10503 writel(0x00000000, sram_base);
10504 writel(0x00000000, sram_base + 4);
10505 writel(0xffffffff, sram_base + 4);
10506 if (readl(sram_base) != 0x00000000)
10507 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10508 }
10509 }
10510
10511 udelay(50);
10512 tg3_nvram_init(tp);
10513
10514 grc_misc_cfg = tr32(GRC_MISC_CFG);
10515 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10516
10517 /* Broadcom's driver says that CIOBE multisplit has a bug */
10518#if 0
10519 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10520 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10521 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10522 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10523 }
10524#endif
10525 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10526 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10527 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10528 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10529
fac9b83e
DM
10530 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10531 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10532 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10533 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10534 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10535 HOSTCC_MODE_CLRTICK_TXBD);
10536
10537 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10538 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10539 tp->misc_host_ctrl);
10540 }
10541
1da177e4
LT
10542 /* these are limited to 10/100 only */
10543 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10544 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10545 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10546 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10547 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10548 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10549 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10550 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10551 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10552 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10553 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10554
10555 err = tg3_phy_probe(tp);
10556 if (err) {
10557 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10558 pci_name(tp->pdev), err);
10559 /* ... but do not return immediately ... */
10560 }
10561
10562 tg3_read_partno(tp);
c4e6575c 10563 tg3_read_fw_ver(tp);
1da177e4
LT
10564
10565 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10566 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10567 } else {
10568 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10569 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10570 else
10571 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10572 }
10573
10574 /* 5700 {AX,BX} chips have a broken status block link
10575 * change bit implementation, so we must use the
10576 * status register in those cases.
10577 */
10578 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10579 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10580 else
10581 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10582
10583 /* The led_ctrl is set during tg3_phy_probe, here we might
10584 * have to force the link status polling mechanism based
10585 * upon subsystem IDs.
10586 */
10587 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10588 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10589 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10590 TG3_FLAG_USE_LINKCHG_REG);
10591 }
10592
10593 /* For all SERDES we poll the MAC status register. */
10594 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10595 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10596 else
10597 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10598
5a6f3074 10599 /* All chips before 5787 can get confused if TX buffers
1da177e4
LT
10600 * straddle the 4GB address boundary in some cases.
10601 */
af36e6b6
MC
10602 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10603 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
5a6f3074
MC
10604 tp->dev->hard_start_xmit = tg3_start_xmit;
10605 else
10606 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
1da177e4
LT
10607
10608 tp->rx_offset = 2;
10609 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10610 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10611 tp->rx_offset = 0;
10612
f92905de
MC
10613 tp->rx_std_max_post = TG3_RX_RING_SIZE;
10614
10615 /* Increment the rx prod index on the rx std ring by at most
10616 * 8 for these chips to workaround hw errata.
10617 */
10618 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10619 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10620 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10621 tp->rx_std_max_post = 8;
10622
1da177e4
LT
10623 /* By default, disable wake-on-lan. User can change this
10624 * using ETHTOOL_SWOL.
10625 */
10626 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10627
10628 return err;
10629}
10630
10631#ifdef CONFIG_SPARC64
10632static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10633{
10634 struct net_device *dev = tp->dev;
10635 struct pci_dev *pdev = tp->pdev;
10636 struct pcidev_cookie *pcp = pdev->sysdata;
10637
10638 if (pcp != NULL) {
de8d28b1
DM
10639 unsigned char *addr;
10640 int len;
1da177e4 10641
de8d28b1
DM
10642 addr = of_get_property(pcp->prom_node, "local-mac-address",
10643 &len);
10644 if (addr && len == 6) {
10645 memcpy(dev->dev_addr, addr, 6);
2ff43697 10646 memcpy(dev->perm_addr, dev->dev_addr, 6);
1da177e4
LT
10647 return 0;
10648 }
10649 }
10650 return -ENODEV;
10651}
10652
10653static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10654{
10655 struct net_device *dev = tp->dev;
10656
10657 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
2ff43697 10658 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
1da177e4
LT
10659 return 0;
10660}
10661#endif
10662
/* Determine the device's MAC address.
 *
 * Probe order:
 *   1. sparc64 only: the OpenFirmware "local-mac-address" property.
 *   2. The MAC address mailbox in NIC SRAM (a 0x484b magic in the
 *      upper half of the high word marks a valid bootcode entry).
 *   3. NVRAM at mac_offset (0x7c, or 0xcc for the second port of a
 *      dual-MAC 5704/5780-class device).
 *   4. The MAC_ADDR_0_{HIGH,LOW} registers as a last resort.
 *   5. sparc64 only: the IDPROM address if everything else failed.
 *
 * Returns 0 on success (dev->dev_addr and dev->perm_addr filled in),
 * -EINVAL when no valid address could be found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC64
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		/* The second MAC of a dual-port chip keeps its address
		 * at NVRAM offset 0xcc instead of 0x7c.
		 */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* If the NVRAM lock cannot be obtained, reset the NVRAM
		 * state machine so the reads below still work.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >> 8) & 0xff;
		dev->dev_addr[1] = (hi >> 0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >> 8) & 0xff;
		dev->dev_addr[5] = (lo >> 0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM.  Note the byte ordering differs from
		 * the SRAM mailbox layout above.
		 */
		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
			dev->dev_addr[0] = ((hi >> 16) & 0xff);
			dev->dev_addr[1] = ((hi >> 24) & 0xff);
			dev->dev_addr[2] = ((lo >> 0) & 0xff);
			dev->dev_addr[3] = ((lo >> 8) & 0xff);
			dev->dev_addr[4] = ((lo >> 16) & 0xff);
			dev->dev_addr[5] = ((lo >> 24) & 0xff);
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC64
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
10735
59e6b434
DM
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

/* Fold DMA read/write boundary settings into 'val' (the value being
 * built for TG3PCI_DMA_RW_CTRL) based on the host cacheline size, the
 * bus type (PCI / PCI-X / PCI Express) and a per-architecture policy
 * (disconnect at every cacheline vs. every few cachelines).  Returns
 * 'val' with the boundary fields OR'ed in (possibly unchanged).
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* A zero PCI cacheline-size register means "not configured";
	 * treat it as the largest boundary.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		goto out;

	/* Per-architecture boundary policy. */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		};
	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* PCI Express: only write-side boundary control exists. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		};
	} else {
		/* Conventional PCI: pick the boundary matching the host
		 * cacheline size when the single-cacheline policy is in
		 * effect; otherwise fall through to the next larger one.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		};
	}

out:
	return val;
}
10875
1da177e4
LT
/* Drive one host<->NIC DMA transfer of 'size' bytes through the chip's
 * internal DMA engines, using a descriptor written directly into NIC
 * SRAM.  'to_device' selects the read-DMA engine (host memory -> NIC)
 * versus the write-DMA engine (NIC -> host memory).
 *
 * Returns 0 when the completion FIFO reports the descriptor back
 * within the 40 x 100us polling window, -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the completion FIFOs and DMA status registers before
	 * starting the test transfer.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Describe the host-side test buffer. */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one word at a time through
	 * the PCI memory window, then restore the window base to 0.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Queue the SRAM descriptor to the selected DMA engine. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the completion FIFO until our descriptor shows up. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
10956
ded7340d 10957#define TEST_BUFFER_SIZE 0x2000
1da177e4
LT
10958
10959static int __devinit tg3_test_dma(struct tg3 *tp)
10960{
10961 dma_addr_t buf_dma;
59e6b434 10962 u32 *buf, saved_dma_rwctrl;
1da177e4
LT
10963 int ret;
10964
10965 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10966 if (!buf) {
10967 ret = -ENOMEM;
10968 goto out_nofree;
10969 }
10970
10971 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10972 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10973
59e6b434 10974 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
1da177e4
LT
10975
10976 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10977 /* DMA read watermark not used on PCIE */
10978 tp->dma_rwctrl |= 0x00180000;
10979 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
85e94ced
MC
10980 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10981 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
1da177e4
LT
10982 tp->dma_rwctrl |= 0x003f0000;
10983 else
10984 tp->dma_rwctrl |= 0x003f000f;
10985 } else {
10986 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10987 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10988 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10989
4a29cc2e
MC
10990 /* If the 5704 is behind the EPB bridge, we can
10991 * do the less restrictive ONE_DMA workaround for
10992 * better performance.
10993 */
10994 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
10995 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10996 tp->dma_rwctrl |= 0x8000;
10997 else if (ccval == 0x6 || ccval == 0x7)
1da177e4
LT
10998 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10999
59e6b434 11000 /* Set bit 23 to enable PCIX hw bug fix */
1da177e4 11001 tp->dma_rwctrl |= 0x009f0000;
4cf78e4f
MC
11002 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
11003 /* 5780 always in PCIX mode */
11004 tp->dma_rwctrl |= 0x00144000;
a4e2b347
MC
11005 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11006 /* 5714 always in PCIX mode */
11007 tp->dma_rwctrl |= 0x00148000;
1da177e4
LT
11008 } else {
11009 tp->dma_rwctrl |= 0x001b000f;
11010 }
11011 }
11012
11013 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11014 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11015 tp->dma_rwctrl &= 0xfffffff0;
11016
11017 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11018 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
11019 /* Remove this if it causes problems for some boards. */
11020 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
11021
11022 /* On 5700/5701 chips, we need to set this bit.
11023 * Otherwise the chip will issue cacheline transactions
11024 * to streamable DMA memory with not all the byte
11025 * enables turned on. This is an error on several
11026 * RISC PCI controllers, in particular sparc64.
11027 *
11028 * On 5703/5704 chips, this bit has been reassigned
11029 * a different meaning. In particular, it is used
11030 * on those chips to enable a PCI-X workaround.
11031 */
11032 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
11033 }
11034
11035 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11036
11037#if 0
11038 /* Unneeded, already done by tg3_get_invariants. */
11039 tg3_switch_clocks(tp);
11040#endif
11041
11042 ret = 0;
11043 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11044 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
11045 goto out;
11046
59e6b434
DM
11047 /* It is best to perform DMA test with maximum write burst size
11048 * to expose the 5700/5701 write DMA bug.
11049 */
11050 saved_dma_rwctrl = tp->dma_rwctrl;
11051 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11052 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11053
1da177e4
LT
11054 while (1) {
11055 u32 *p = buf, i;
11056
11057 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
11058 p[i] = i;
11059
11060 /* Send the buffer to the chip. */
11061 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
11062 if (ret) {
11063 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
11064 break;
11065 }
11066
11067#if 0
11068 /* validate data reached card RAM correctly. */
11069 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11070 u32 val;
11071 tg3_read_mem(tp, 0x2100 + (i*4), &val);
11072 if (le32_to_cpu(val) != p[i]) {
11073 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
11074 /* ret = -ENODEV here? */
11075 }
11076 p[i] = 0;
11077 }
11078#endif
11079 /* Now read it back. */
11080 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
11081 if (ret) {
11082 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
11083
11084 break;
11085 }
11086
11087 /* Verify it. */
11088 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11089 if (p[i] == i)
11090 continue;
11091
59e6b434
DM
11092 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11093 DMA_RWCTRL_WRITE_BNDRY_16) {
11094 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
1da177e4
LT
11095 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11096 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11097 break;
11098 } else {
11099 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
11100 ret = -ENODEV;
11101 goto out;
11102 }
11103 }
11104
11105 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
11106 /* Success. */
11107 ret = 0;
11108 break;
11109 }
11110 }
59e6b434
DM
11111 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11112 DMA_RWCTRL_WRITE_BNDRY_16) {
6d1cfbab
MC
11113 static struct pci_device_id dma_wait_state_chipsets[] = {
11114 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
11115 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
11116 { },
11117 };
11118
59e6b434 11119 /* DMA test passed without adjusting DMA boundary,
6d1cfbab
MC
11120 * now look for chipsets that are known to expose the
11121 * DMA bug without failing the test.
59e6b434 11122 */
6d1cfbab
MC
11123 if (pci_dev_present(dma_wait_state_chipsets)) {
11124 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11125 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11126 }
11127 else
11128 /* Safe to use the calculated DMA boundary. */
11129 tp->dma_rwctrl = saved_dma_rwctrl;
11130
59e6b434
DM
11131 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11132 }
1da177e4
LT
11133
11134out:
11135 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11136out_nofree:
11137 return ret;
11138}
11139
11140static void __devinit tg3_init_link_config(struct tg3 *tp)
11141{
11142 tp->link_config.advertising =
11143 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11144 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11145 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11146 ADVERTISED_Autoneg | ADVERTISED_MII);
11147 tp->link_config.speed = SPEED_INVALID;
11148 tp->link_config.duplex = DUPLEX_INVALID;
11149 tp->link_config.autoneg = AUTONEG_ENABLE;
1da177e4
LT
11150 tp->link_config.active_speed = SPEED_INVALID;
11151 tp->link_config.active_duplex = DUPLEX_INVALID;
11152 tp->link_config.phy_is_low_power = 0;
11153 tp->link_config.orig_speed = SPEED_INVALID;
11154 tp->link_config.orig_duplex = DUPLEX_INVALID;
11155 tp->link_config.orig_autoneg = AUTONEG_INVALID;
11156}
11157
11158static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11159{
fdfec172
MC
11160 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11161 tp->bufmgr_config.mbuf_read_dma_low_water =
11162 DEFAULT_MB_RDMA_LOW_WATER_5705;
11163 tp->bufmgr_config.mbuf_mac_rx_low_water =
11164 DEFAULT_MB_MACRX_LOW_WATER_5705;
11165 tp->bufmgr_config.mbuf_high_water =
11166 DEFAULT_MB_HIGH_WATER_5705;
11167
11168 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11169 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11170 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11171 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11172 tp->bufmgr_config.mbuf_high_water_jumbo =
11173 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11174 } else {
11175 tp->bufmgr_config.mbuf_read_dma_low_water =
11176 DEFAULT_MB_RDMA_LOW_WATER;
11177 tp->bufmgr_config.mbuf_mac_rx_low_water =
11178 DEFAULT_MB_MACRX_LOW_WATER;
11179 tp->bufmgr_config.mbuf_high_water =
11180 DEFAULT_MB_HIGH_WATER;
11181
11182 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11183 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11184 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11185 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11186 tp->bufmgr_config.mbuf_high_water_jumbo =
11187 DEFAULT_MB_HIGH_WATER_JUMBO;
11188 }
1da177e4
LT
11189
11190 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11191 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11192}
11193
11194static char * __devinit tg3_phy_string(struct tg3 *tp)
11195{
11196 switch (tp->phy_id & PHY_ID_MASK) {
11197 case PHY_ID_BCM5400: return "5400";
11198 case PHY_ID_BCM5401: return "5401";
11199 case PHY_ID_BCM5411: return "5411";
11200 case PHY_ID_BCM5701: return "5701";
11201 case PHY_ID_BCM5703: return "5703";
11202 case PHY_ID_BCM5704: return "5704";
11203 case PHY_ID_BCM5705: return "5705";
11204 case PHY_ID_BCM5750: return "5750";
85e94ced 11205 case PHY_ID_BCM5752: return "5752";
a4e2b347 11206 case PHY_ID_BCM5714: return "5714";
4cf78e4f 11207 case PHY_ID_BCM5780: return "5780";
af36e6b6 11208 case PHY_ID_BCM5755: return "5755";
d9ab5ad1 11209 case PHY_ID_BCM5787: return "5787";
1da177e4
LT
11210 case PHY_ID_BCM8002: return "8002/serdes";
11211 case 0: return "serdes";
11212 default: return "unknown";
11213 };
11214}
11215
f9804ddb
MC
11216static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11217{
11218 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11219 strcpy(str, "PCI Express");
11220 return str;
11221 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11222 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11223
11224 strcpy(str, "PCIX:");
11225
11226 if ((clock_ctrl == 7) ||
11227 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11228 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11229 strcat(str, "133MHz");
11230 else if (clock_ctrl == 0)
11231 strcat(str, "33MHz");
11232 else if (clock_ctrl == 2)
11233 strcat(str, "50MHz");
11234 else if (clock_ctrl == 4)
11235 strcat(str, "66MHz");
11236 else if (clock_ctrl == 6)
11237 strcat(str, "100MHz");
f9804ddb
MC
11238 } else {
11239 strcpy(str, "PCI:");
11240 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11241 strcat(str, "66MHz");
11242 else
11243 strcat(str, "33MHz");
11244 }
11245 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11246 strcat(str, ":32-bit");
11247 else
11248 strcat(str, ":64-bit");
11249 return str;
11250}
11251
/* Locate the sibling PCI function of a dual-port device (same slot,
 * different function number) so the driver can track its peer port.
 * Falls back to tp->pdev itself when no sibling function exists
 * (e.g. a 5704 configured in single-port mode).
 *
 * The returned pci_dev deliberately carries no extra reference; see
 * the comment before the final pci_dev_put() below.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 *
	 * NOTE(review): if the loop above exhausts all 8 functions,
	 * 'peer' is left holding the (already-released) result of the
	 * final pci_get_slot() call rather than being guaranteed NULL;
	 * presumably that last slot never resolves to a device other
	 * than tp->pdev in practice -- verify.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
11279
15f9850d
DM
11280static void __devinit tg3_init_coal(struct tg3 *tp)
11281{
11282 struct ethtool_coalesce *ec = &tp->coal;
11283
11284 memset(ec, 0, sizeof(*ec));
11285 ec->cmd = ETHTOOL_GCOALESCE;
11286 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11287 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11288 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11289 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11290 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11291 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11292 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11293 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11294 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11295
11296 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11297 HOSTCC_MODE_CLRTICK_TXBD)) {
11298 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11299 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11300 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11301 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11302 }
d244c892
MC
11303
11304 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11305 ec->rx_coalesce_usecs_irq = 0;
11306 ec->tx_coalesce_usecs_irq = 0;
11307 ec->stats_block_coalesce_usecs = 0;
11308 }
15f9850d
DM
11309}
11310
1da177e4
LT
11311static int __devinit tg3_init_one(struct pci_dev *pdev,
11312 const struct pci_device_id *ent)
11313{
11314 static int tg3_version_printed = 0;
11315 unsigned long tg3reg_base, tg3reg_len;
11316 struct net_device *dev;
11317 struct tg3 *tp;
72f2afb8 11318 int i, err, pm_cap;
f9804ddb 11319 char str[40];
72f2afb8 11320 u64 dma_mask, persist_dma_mask;
1da177e4
LT
11321
11322 if (tg3_version_printed++ == 0)
11323 printk(KERN_INFO "%s", version);
11324
11325 err = pci_enable_device(pdev);
11326 if (err) {
11327 printk(KERN_ERR PFX "Cannot enable PCI device, "
11328 "aborting.\n");
11329 return err;
11330 }
11331
11332 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11333 printk(KERN_ERR PFX "Cannot find proper PCI device "
11334 "base address, aborting.\n");
11335 err = -ENODEV;
11336 goto err_out_disable_pdev;
11337 }
11338
11339 err = pci_request_regions(pdev, DRV_MODULE_NAME);
11340 if (err) {
11341 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11342 "aborting.\n");
11343 goto err_out_disable_pdev;
11344 }
11345
11346 pci_set_master(pdev);
11347
11348 /* Find power-management capability. */
11349 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11350 if (pm_cap == 0) {
11351 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11352 "aborting.\n");
11353 err = -EIO;
11354 goto err_out_free_res;
11355 }
11356
1da177e4
LT
11357 tg3reg_base = pci_resource_start(pdev, 0);
11358 tg3reg_len = pci_resource_len(pdev, 0);
11359
11360 dev = alloc_etherdev(sizeof(*tp));
11361 if (!dev) {
11362 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11363 err = -ENOMEM;
11364 goto err_out_free_res;
11365 }
11366
11367 SET_MODULE_OWNER(dev);
11368 SET_NETDEV_DEV(dev, &pdev->dev);
11369
1da177e4
LT
11370#if TG3_VLAN_TAG_USED
11371 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11372 dev->vlan_rx_register = tg3_vlan_rx_register;
11373 dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
11374#endif
11375
11376 tp = netdev_priv(dev);
11377 tp->pdev = pdev;
11378 tp->dev = dev;
11379 tp->pm_cap = pm_cap;
11380 tp->mac_mode = TG3_DEF_MAC_MODE;
11381 tp->rx_mode = TG3_DEF_RX_MODE;
11382 tp->tx_mode = TG3_DEF_TX_MODE;
11383 tp->mi_mode = MAC_MI_MODE_BASE;
11384 if (tg3_debug > 0)
11385 tp->msg_enable = tg3_debug;
11386 else
11387 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11388
11389 /* The word/byte swap controls here control register access byte
11390 * swapping. DMA data byte swapping is controlled in the GRC_MODE
11391 * setting below.
11392 */
11393 tp->misc_host_ctrl =
11394 MISC_HOST_CTRL_MASK_PCI_INT |
11395 MISC_HOST_CTRL_WORD_SWAP |
11396 MISC_HOST_CTRL_INDIR_ACCESS |
11397 MISC_HOST_CTRL_PCISTATE_RW;
11398
11399 /* The NONFRM (non-frame) byte/word swap controls take effect
11400 * on descriptor entries, anything which isn't packet data.
11401 *
11402 * The StrongARM chips on the board (one for tx, one for rx)
11403 * are running in big-endian mode.
11404 */
11405 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11406 GRC_MODE_WSWAP_NONFRM_DATA);
11407#ifdef __BIG_ENDIAN
11408 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11409#endif
11410 spin_lock_init(&tp->lock);
11411 spin_lock_init(&tp->tx_lock);
11412 spin_lock_init(&tp->indirect_lock);
11413 INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
11414
11415 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11416 if (tp->regs == 0UL) {
11417 printk(KERN_ERR PFX "Cannot map device registers, "
11418 "aborting.\n");
11419 err = -ENOMEM;
11420 goto err_out_free_dev;
11421 }
11422
11423 tg3_init_link_config(tp);
11424
1da177e4
LT
11425 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11426 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11427 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11428
11429 dev->open = tg3_open;
11430 dev->stop = tg3_close;
11431 dev->get_stats = tg3_get_stats;
11432 dev->set_multicast_list = tg3_set_rx_mode;
11433 dev->set_mac_address = tg3_set_mac_addr;
11434 dev->do_ioctl = tg3_ioctl;
11435 dev->tx_timeout = tg3_tx_timeout;
11436 dev->poll = tg3_poll;
11437 dev->ethtool_ops = &tg3_ethtool_ops;
11438 dev->weight = 64;
11439 dev->watchdog_timeo = TG3_TX_TIMEOUT;
11440 dev->change_mtu = tg3_change_mtu;
11441 dev->irq = pdev->irq;
11442#ifdef CONFIG_NET_POLL_CONTROLLER
11443 dev->poll_controller = tg3_poll_controller;
11444#endif
11445
11446 err = tg3_get_invariants(tp);
11447 if (err) {
11448 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11449 "aborting.\n");
11450 goto err_out_iounmap;
11451 }
11452
4a29cc2e
MC
11453 /* The EPB bridge inside 5714, 5715, and 5780 and any
11454 * device behind the EPB cannot support DMA addresses > 40-bit.
72f2afb8
MC
11455 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11456 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11457 * do DMA address check in tg3_start_xmit().
11458 */
4a29cc2e
MC
11459 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11460 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11461 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
72f2afb8
MC
11462 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11463#ifdef CONFIG_HIGHMEM
11464 dma_mask = DMA_64BIT_MASK;
11465#endif
4a29cc2e 11466 } else
72f2afb8
MC
11467 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11468
11469 /* Configure DMA attributes. */
11470 if (dma_mask > DMA_32BIT_MASK) {
11471 err = pci_set_dma_mask(pdev, dma_mask);
11472 if (!err) {
11473 dev->features |= NETIF_F_HIGHDMA;
11474 err = pci_set_consistent_dma_mask(pdev,
11475 persist_dma_mask);
11476 if (err < 0) {
11477 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11478 "DMA for consistent allocations\n");
11479 goto err_out_iounmap;
11480 }
11481 }
11482 }
11483 if (err || dma_mask == DMA_32BIT_MASK) {
11484 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11485 if (err) {
11486 printk(KERN_ERR PFX "No usable DMA configuration, "
11487 "aborting.\n");
11488 goto err_out_iounmap;
11489 }
11490 }
11491
fdfec172 11492 tg3_init_bufmgr_config(tp);
1da177e4
LT
11493
11494#if TG3_TSO_SUPPORT != 0
11495 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11496 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11497 }
11498 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11499 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11500 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11501 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11502 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11503 } else {
11504 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11505 }
11506
4e3a7aaa
MC
11507 /* TSO is on by default on chips that support hardware TSO.
11508 * Firmware TSO on older chips gives lower performance, so it
11509 * is off by default, but can be enabled using ethtool.
11510 */
11511 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
1da177e4 11512 dev->features |= NETIF_F_TSO;
1da177e4
LT
11513
11514#endif
11515
11516 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11517 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11518 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11519 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11520 tp->rx_pending = 63;
11521 }
11522
8c2dc7e1
MC
11523 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11524 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11525 tp->pdev_peer = tg3_find_peer(tp);
1da177e4
LT
11526
11527 err = tg3_get_device_address(tp);
11528 if (err) {
11529 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11530 "aborting.\n");
11531 goto err_out_iounmap;
11532 }
11533
11534 /*
11535 * Reset chip in case UNDI or EFI driver did not shutdown
11536 * DMA self test will enable WDMAC and we'll see (spurious)
11537 * pending DMA on the PCI bus at that point.
11538 */
11539 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11540 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11541 pci_save_state(tp->pdev);
11542 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
944d980e 11543 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
11544 }
11545
11546 err = tg3_test_dma(tp);
11547 if (err) {
11548 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11549 goto err_out_iounmap;
11550 }
11551
11552 /* Tigon3 can do ipv4 only... and some chips have buggy
11553 * checksumming.
11554 */
11555 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
af36e6b6
MC
11556 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11557 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9c27dbdf
MC
11558 dev->features |= NETIF_F_HW_CSUM;
11559 else
11560 dev->features |= NETIF_F_IP_CSUM;
11561 dev->features |= NETIF_F_SG;
1da177e4
LT
11562 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11563 } else
11564 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11565
1da177e4
LT
11566 /* flow control autonegotiation is default behavior */
11567 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11568
15f9850d
DM
11569 tg3_init_coal(tp);
11570
7d3f4c97
DM
11571 /* Now that we have fully setup the chip, save away a snapshot
11572 * of the PCI config space. We need to restore this after
11573 * GRC_MISC_CFG core clock resets and some resume events.
11574 */
11575 pci_save_state(tp->pdev);
11576
1da177e4
LT
11577 err = register_netdev(dev);
11578 if (err) {
11579 printk(KERN_ERR PFX "Cannot register net device, "
11580 "aborting.\n");
11581 goto err_out_iounmap;
11582 }
11583
11584 pci_set_drvdata(pdev, dev);
11585
f9804ddb 11586 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
1da177e4
LT
11587 dev->name,
11588 tp->board_part_number,
11589 tp->pci_chip_rev_id,
11590 tg3_phy_string(tp),
f9804ddb 11591 tg3_bus_string(tp, str),
1da177e4
LT
11592 (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11593
11594 for (i = 0; i < 6; i++)
11595 printk("%2.2x%c", dev->dev_addr[i],
11596 i == 5 ? '\n' : ':');
11597
11598 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11599 "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11600 "TSOcap[%d] \n",
11601 dev->name,
11602 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11603 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11604 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11605 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11606 (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11607 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11608 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
4a29cc2e
MC
11609 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11610 dev->name, tp->dma_rwctrl,
11611 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11612 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
1da177e4 11613
59f1741e
JM
11614 netif_carrier_off(tp->dev);
11615
1da177e4
LT
11616 return 0;
11617
11618err_out_iounmap:
6892914f
MC
11619 if (tp->regs) {
11620 iounmap(tp->regs);
22abe310 11621 tp->regs = NULL;
6892914f 11622 }
1da177e4
LT
11623
11624err_out_free_dev:
11625 free_netdev(dev);
11626
11627err_out_free_res:
11628 pci_release_regions(pdev);
11629
11630err_out_disable_pdev:
11631 pci_disable_device(pdev);
11632 pci_set_drvdata(pdev, NULL);
11633 return err;
11634}
11635
11636static void __devexit tg3_remove_one(struct pci_dev *pdev)
11637{
11638 struct net_device *dev = pci_get_drvdata(pdev);
11639
11640 if (dev) {
11641 struct tg3 *tp = netdev_priv(dev);
11642
7faa006f 11643 flush_scheduled_work();
1da177e4 11644 unregister_netdev(dev);
6892914f
MC
11645 if (tp->regs) {
11646 iounmap(tp->regs);
22abe310 11647 tp->regs = NULL;
6892914f 11648 }
1da177e4
LT
11649 free_netdev(dev);
11650 pci_release_regions(pdev);
11651 pci_disable_device(pdev);
11652 pci_set_drvdata(pdev, NULL);
11653 }
11654}
11655
/* PM suspend callback: quiesce the chip and drop it into the power
 * state chosen for @state.  Returns 0 on success or a negative errno;
 * on failure the device is brought fully back up before returning.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Interface never opened: nothing to quiesce. */
	if (!netif_running(dev))
		return 0;

	/* Let pending deferred work finish before stopping the NIC. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Halt the chip and mark init as torn down under the full lock. */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: re-initialize the hardware and
		 * restart the interface so the system remains usable, then
		 * propagate the error to the PM core.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		tg3_init_hw(tp, 1);

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

		tg3_full_unlock(tp);
	}

	return err;
}
11699
/* PM resume callback: restore PCI state, return the chip to D0, and
 * re-initialize/restart the interface if it was running at suspend.
 * Returns 0 on success or a negative errno.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Interface was down at suspend time: nothing to restart. */
	if (!netif_running(dev))
		return 0;

	/* Restore the config space snapshot taken before suspend. */
	pci_restore_state(tp->pdev);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	/* Re-init hardware and restart the periodic timer under lock. */
	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_init_hw(tp, 1);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return 0;
}
11731
/* PCI driver descriptor: hot-plug probe/remove plus power-management
 * entry points, matched against tg3_pci_tbl device IDs.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
11740
11741static int __init tg3_init(void)
11742{
11743 return pci_module_init(&tg3_driver);
11744}
11745
/* Module exit point: unregister the PCI driver, which triggers
 * tg3_remove_one() for every bound device.
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);
This page took 0.885262 seconds and 5 git commands to generate.