net: use netdev_mc_count and netdev_mc_empty when appropriate
drivers/net/bnx2.c
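The commit subject refers to the tree-wide conversion away from reading dev->mc_count directly. A minimal sketch of the pattern, assuming the netdev_mc_count()/netdev_mc_empty() helpers from <linux/netdevice.h>; the bnx2 hunk itself (in bnx2_set_rx_mode()) lies beyond this excerpt, so the function below is illustrative only:

	static void example_rx_mode(struct net_device *dev)
	{
		/* was: if (dev->mc_count == 0) */
		if (netdev_mc_empty(dev))
			return;

		/* was: dev->mc_count */
		printk(KERN_INFO "programming %d multicast filters\n",
		       netdev_mc_count(dev));
	}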
1 /* bnx2.c: Broadcom NX2 network driver.
2 *
3 * Copyright (c) 2004-2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/if_vlan.h>
39 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51
52 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
53 #define BCM_CNIC 1
54 #include "cnic_if.h"
55 #endif
56 #include "bnx2.h"
57 #include "bnx2_fw.h"
58
59 #define DRV_MODULE_NAME "bnx2"
60 #define PFX DRV_MODULE_NAME ": "
61 #define DRV_MODULE_VERSION "2.0.3"
62 #define DRV_MODULE_RELDATE "Dec 03, 2009"
63 #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j3.fw"
64 #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
65 #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j3.fw"
66 #define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-5.0.0.j3.fw"
67 #define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-5.0.0.j3.fw"
68
69 #define RUN_AT(x) (jiffies + (x))
70
71 /* Time in jiffies before concluding the transmitter is hung. */
72 #define TX_TIMEOUT (5*HZ)
73
74 static char version[] __devinitdata =
75 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76
77 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
78 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
79 MODULE_LICENSE("GPL");
80 MODULE_VERSION(DRV_MODULE_VERSION);
81 MODULE_FIRMWARE(FW_MIPS_FILE_06);
82 MODULE_FIRMWARE(FW_RV2P_FILE_06);
83 MODULE_FIRMWARE(FW_MIPS_FILE_09);
84 MODULE_FIRMWARE(FW_RV2P_FILE_09);
85 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
86
87 static int disable_msi = 0;
88
89 module_param(disable_msi, int, 0);
90 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91
92 typedef enum {
93 BCM5706 = 0,
94 NC370T,
95 NC370I,
96 BCM5706S,
97 NC370F,
98 BCM5708,
99 BCM5708S,
100 BCM5709,
101 BCM5709S,
102 BCM5716,
103 BCM5716S,
104 } board_t;
105
106 /* indexed by board_t, above */
107 static struct {
108 char *name;
109 } board_info[] __devinitdata = {
110 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
111 { "HP NC370T Multifunction Gigabit Server Adapter" },
112 { "HP NC370i Multifunction Gigabit Server Adapter" },
113 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
114 { "HP NC370F Multifunction Gigabit Server Adapter" },
115 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
116 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
117 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
118 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
119 { "Broadcom NetXtreme II BCM5716 1000Base-T" },
120 { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
121 };
122
123 static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
125 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
126 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
127 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
128 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
130 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
131 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
132 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
133 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
134 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
135 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
136 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
137 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
138 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
139 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
140 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
141 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
142 { PCI_VENDOR_ID_BROADCOM, 0x163b,
143 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
144 { PCI_VENDOR_ID_BROADCOM, 0x163c,
145 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
146 { 0, }
147 };
148
149 static const struct flash_spec flash_table[] =
150 {
151 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
152 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
153 /* Slow EEPROM */
154 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
155 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
156 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
157 "EEPROM - slow"},
158 /* Expansion entry 0001 */
159 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
160 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
161 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
162 "Entry 0001"},
163 /* Saifun SA25F010 (non-buffered flash) */
164 /* strap, cfg1, & write1 need updates */
165 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
166 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
167 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
168 "Non-buffered flash (128kB)"},
169 /* Saifun SA25F020 (non-buffered flash) */
170 /* strap, cfg1, & write1 need updates */
171 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
172 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
173 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
174 "Non-buffered flash (256kB)"},
175 /* Expansion entry 0100 */
176 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
177 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
178 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
179 "Entry 0100"},
180 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
181 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
182 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
183 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
184 	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
185 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
186 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
187 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
188 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
189 	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
190 /* Saifun SA25F005 (non-buffered flash) */
191 /* strap, cfg1, & write1 need updates */
192 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
193 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
194 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
195 "Non-buffered flash (64kB)"},
196 /* Fast EEPROM */
197 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
198 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
199 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
200 "EEPROM - fast"},
201 /* Expansion entry 1001 */
202 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205 "Entry 1001"},
206 /* Expansion entry 1010 */
207 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
208 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
209 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1010"},
211 /* ATMEL AT45DB011B (buffered flash) */
212 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
215 "Buffered flash (128kB)"},
216 /* Expansion entry 1100 */
217 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
218 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
219 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
220 "Entry 1100"},
221 /* Expansion entry 1101 */
222 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
223 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
224 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
225 "Entry 1101"},
226 	/* Atmel Expansion entry 1110 */
227 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
228 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
229 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
230 "Entry 1110 (Atmel)"},
231 /* ATMEL AT45DB021B (buffered flash) */
232 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
233 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
234 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
235 "Buffered flash (256kB)"},
236 };
237
238 static const struct flash_spec flash_5709 = {
239 .flags = BNX2_NV_BUFFERED,
240 .page_bits = BCM5709_FLASH_PAGE_BITS,
241 .page_size = BCM5709_FLASH_PAGE_SIZE,
242 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
243 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
244 .name = "5709 Buffered flash (256kB)",
245 };
246
247 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
248
249 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
250 {
251 u32 diff;
252
253 smp_mb();
254
255 /* The ring uses 256 indices for 255 entries, one of them
256 * needs to be skipped.
257 */
258 diff = txr->tx_prod - txr->tx_cons;
259 if (unlikely(diff >= TX_DESC_CNT)) {
260 diff &= 0xffff;
261 if (diff == TX_DESC_CNT)
262 diff = MAX_TX_DESC_CNT;
263 }
264 return (bp->tx_ring_size - diff);
265 }
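A worked example of the index arithmetic above, assuming 4K pages so that TX_DESC_CNT is 256 and MAX_TX_DESC_CNT is 255 (their values in bnx2.h):

	/*
	 * tx_prod and tx_cons are 16-bit indices.  With tx_prod == 10 and
	 * tx_cons == 65530 the subtraction promotes to int and wraps:
	 * diff == (u32)(10 - 65530) == 0xffff0010, which is >= TX_DESC_CNT,
	 * so masking with 0xffff leaves 16 descriptors in flight.  If the
	 * masked value comes out to exactly 256 (producer a full ring ahead
	 * of the consumer), it is clamped to 255 because the ring holds
	 * only 255 usable entries -- one index is skipped.
	 */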
266
267 static u32
268 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
269 {
270 u32 val;
271
272 spin_lock_bh(&bp->indirect_lock);
273 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
274 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
275 spin_unlock_bh(&bp->indirect_lock);
276 return val;
277 }
278
279 static void
280 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
281 {
282 spin_lock_bh(&bp->indirect_lock);
283 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
284 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
285 spin_unlock_bh(&bp->indirect_lock);
286 }
287
288 static void
289 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
290 {
291 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
292 }
293
294 static u32
295 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
296 {
297 return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
298 }
299
300 static void
301 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
302 {
303 offset += cid_addr;
304 spin_lock_bh(&bp->indirect_lock);
305 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
306 int i;
307
308 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
309 REG_WR(bp, BNX2_CTX_CTX_CTRL,
310 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
311 for (i = 0; i < 5; i++) {
312 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
313 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
314 break;
315 udelay(5);
316 }
317 } else {
318 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
319 REG_WR(bp, BNX2_CTX_DATA, val);
320 }
321 spin_unlock_bh(&bp->indirect_lock);
322 }
323
324 #ifdef BCM_CNIC
325 static int
326 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
327 {
328 struct bnx2 *bp = netdev_priv(dev);
329 struct drv_ctl_io *io = &info->data.io;
330
331 switch (info->cmd) {
332 case DRV_CTL_IO_WR_CMD:
333 bnx2_reg_wr_ind(bp, io->offset, io->data);
334 break;
335 case DRV_CTL_IO_RD_CMD:
336 io->data = bnx2_reg_rd_ind(bp, io->offset);
337 break;
338 case DRV_CTL_CTX_WR_CMD:
339 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
340 break;
341 default:
342 return -EINVAL;
343 }
344 return 0;
345 }
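For context, a hedged sketch of how the cnic side would invoke this hook, assuming the struct drv_ctl_info layout from cnic_if.h (a cmd field plus a data.io union carrying cid_addr/offset/data, as dereferenced above); the register offset is purely illustrative:

	/* ethdev was obtained earlier from bnx2_cnic_probe() */
	struct drv_ctl_info info;

	info.cmd = DRV_CTL_IO_WR_CMD;
	info.data.io.offset = BNX2_RPM_SORT_USER0;	/* illustrative */
	info.data.io.data = 0;
	ethdev->drv_ctl(dev, &info);	/* dispatches to bnx2_drv_ctl() */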
346
347 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
348 {
349 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
350 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
351 int sb_id;
352
353 if (bp->flags & BNX2_FLAG_USING_MSIX) {
354 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
355 bnapi->cnic_present = 0;
356 sb_id = bp->irq_nvecs;
357 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
358 } else {
359 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
360 bnapi->cnic_tag = bnapi->last_status_idx;
361 bnapi->cnic_present = 1;
362 sb_id = 0;
363 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
364 }
365
366 cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
367 cp->irq_arr[0].status_blk = (void *)
368 ((unsigned long) bnapi->status_blk.msi +
369 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
370 cp->irq_arr[0].status_blk_num = sb_id;
371 cp->num_irq = 1;
372 }
373
374 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
375 void *data)
376 {
377 struct bnx2 *bp = netdev_priv(dev);
378 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
379
380 if (ops == NULL)
381 return -EINVAL;
382
383 if (cp->drv_state & CNIC_DRV_STATE_REGD)
384 return -EBUSY;
385
386 bp->cnic_data = data;
387 rcu_assign_pointer(bp->cnic_ops, ops);
388
389 cp->num_irq = 0;
390 cp->drv_state = CNIC_DRV_STATE_REGD;
391
392 bnx2_setup_cnic_irq_info(bp);
393
394 return 0;
395 }
396
397 static int bnx2_unregister_cnic(struct net_device *dev)
398 {
399 struct bnx2 *bp = netdev_priv(dev);
400 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
401 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
402
403 mutex_lock(&bp->cnic_lock);
404 cp->drv_state = 0;
405 bnapi->cnic_present = 0;
406 rcu_assign_pointer(bp->cnic_ops, NULL);
407 mutex_unlock(&bp->cnic_lock);
408 synchronize_rcu();
409 return 0;
410 }
411
412 struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
413 {
414 struct bnx2 *bp = netdev_priv(dev);
415 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
416
417 cp->drv_owner = THIS_MODULE;
418 cp->chip_id = bp->chip_id;
419 cp->pdev = bp->pdev;
420 cp->io_base = bp->regview;
421 cp->drv_ctl = bnx2_drv_ctl;
422 cp->drv_register_cnic = bnx2_register_cnic;
423 cp->drv_unregister_cnic = bnx2_unregister_cnic;
424
425 return cp;
426 }
427 EXPORT_SYMBOL(bnx2_cnic_probe);
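The cnic module binds to this export at runtime rather than linking against bnx2 directly; a hedged sketch of that lookup, assuming the symbol_get()/symbol_put() pattern used by cnic.c:

	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *);

	probe = symbol_get(bnx2_cnic_probe);
	if (probe) {
		ethdev = probe(dev);
		symbol_put(bnx2_cnic_probe);
	}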
428
429 static void
430 bnx2_cnic_stop(struct bnx2 *bp)
431 {
432 struct cnic_ops *c_ops;
433 struct cnic_ctl_info info;
434
435 mutex_lock(&bp->cnic_lock);
436 c_ops = bp->cnic_ops;
437 if (c_ops) {
438 info.cmd = CNIC_CTL_STOP_CMD;
439 c_ops->cnic_ctl(bp->cnic_data, &info);
440 }
441 mutex_unlock(&bp->cnic_lock);
442 }
443
444 static void
445 bnx2_cnic_start(struct bnx2 *bp)
446 {
447 struct cnic_ops *c_ops;
448 struct cnic_ctl_info info;
449
450 mutex_lock(&bp->cnic_lock);
451 c_ops = bp->cnic_ops;
452 if (c_ops) {
453 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
454 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
455
456 bnapi->cnic_tag = bnapi->last_status_idx;
457 }
458 info.cmd = CNIC_CTL_START_CMD;
459 c_ops->cnic_ctl(bp->cnic_data, &info);
460 }
461 mutex_unlock(&bp->cnic_lock);
462 }
463
464 #else
465
466 static void
467 bnx2_cnic_stop(struct bnx2 *bp)
468 {
469 }
470
471 static void
472 bnx2_cnic_start(struct bnx2 *bp)
473 {
474 }
475
476 #endif
477
478 static int
479 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
480 {
481 u32 val1;
482 int i, ret;
483
484 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
485 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
486 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
487
488 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
489 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
490
491 udelay(40);
492 }
493
494 val1 = (bp->phy_addr << 21) | (reg << 16) |
495 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
496 BNX2_EMAC_MDIO_COMM_START_BUSY;
497 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
498
499 for (i = 0; i < 50; i++) {
500 udelay(10);
501
502 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
503 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
504 udelay(5);
505
506 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
507 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
508
509 break;
510 }
511 }
512
513 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
514 *val = 0x0;
515 ret = -EBUSY;
516 }
517 else {
518 *val = val1;
519 ret = 0;
520 }
521
522 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
523 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
524 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
525
526 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
527 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
528
529 udelay(40);
530 }
531
532 return ret;
533 }
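As a reference for the command word assembled above, a worked example assuming PHY address 1 and MII_BMSR (register 1):

	/*
	 * val1 == (1 << 21) | (1 << 16) | COMMAND_READ | DISEXT | START_BUSY:
	 * the PHY address lives in bits 25:21, the register number in bits
	 * 20:16, and once START_BUSY clears, the 16-bit read result sits in
	 * bits 15:0 (hence the BNX2_EMAC_MDIO_COMM_DATA mask above).
	 */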
534
535 static int
536 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
537 {
538 u32 val1;
539 int i, ret;
540
541 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
542 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
543 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
544
545 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
546 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
547
548 udelay(40);
549 }
550
551 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
552 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
553 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
554 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
555
556 for (i = 0; i < 50; i++) {
557 udelay(10);
558
559 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
560 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
561 udelay(5);
562 break;
563 }
564 }
565
566 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
567 ret = -EBUSY;
568 else
569 ret = 0;
570
571 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
572 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
573 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
574
575 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
576 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
577
578 udelay(40);
579 }
580
581 return ret;
582 }
583
584 static void
585 bnx2_disable_int(struct bnx2 *bp)
586 {
587 int i;
588 struct bnx2_napi *bnapi;
589
590 for (i = 0; i < bp->irq_nvecs; i++) {
591 bnapi = &bp->bnx2_napi[i];
592 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
593 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
594 }
595 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
596 }
597
598 static void
599 bnx2_enable_int(struct bnx2 *bp)
600 {
601 int i;
602 struct bnx2_napi *bnapi;
603
604 for (i = 0; i < bp->irq_nvecs; i++) {
605 bnapi = &bp->bnx2_napi[i];
606
607 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
608 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
609 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
610 bnapi->last_status_idx);
611
612 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
613 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
614 bnapi->last_status_idx);
615 }
616 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
617 }
618
619 static void
620 bnx2_disable_int_sync(struct bnx2 *bp)
621 {
622 int i;
623
624 atomic_inc(&bp->intr_sem);
625 if (!netif_running(bp->dev))
626 return;
627
628 bnx2_disable_int(bp);
629 for (i = 0; i < bp->irq_nvecs; i++)
630 synchronize_irq(bp->irq_tbl[i].vector);
631 }
632
633 static void
634 bnx2_napi_disable(struct bnx2 *bp)
635 {
636 int i;
637
638 for (i = 0; i < bp->irq_nvecs; i++)
639 napi_disable(&bp->bnx2_napi[i].napi);
640 }
641
642 static void
643 bnx2_napi_enable(struct bnx2 *bp)
644 {
645 int i;
646
647 for (i = 0; i < bp->irq_nvecs; i++)
648 napi_enable(&bp->bnx2_napi[i].napi);
649 }
650
651 static void
652 bnx2_netif_stop(struct bnx2 *bp)
653 {
654 bnx2_cnic_stop(bp);
655 if (netif_running(bp->dev)) {
656 int i;
657
658 bnx2_napi_disable(bp);
659 netif_tx_disable(bp->dev);
660 /* prevent tx timeout */
661 for (i = 0; i < bp->dev->num_tx_queues; i++) {
662 struct netdev_queue *txq;
663
664 txq = netdev_get_tx_queue(bp->dev, i);
665 txq->trans_start = jiffies;
666 }
667 }
668 bnx2_disable_int_sync(bp);
669 }
670
671 static void
672 bnx2_netif_start(struct bnx2 *bp)
673 {
674 if (atomic_dec_and_test(&bp->intr_sem)) {
675 if (netif_running(bp->dev)) {
676 netif_tx_wake_all_queues(bp->dev);
677 bnx2_napi_enable(bp);
678 bnx2_enable_int(bp);
679 bnx2_cnic_start(bp);
680 }
681 }
682 }
683
684 static void
685 bnx2_free_tx_mem(struct bnx2 *bp)
686 {
687 int i;
688
689 for (i = 0; i < bp->num_tx_rings; i++) {
690 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
691 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
692
693 if (txr->tx_desc_ring) {
694 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
695 txr->tx_desc_ring,
696 txr->tx_desc_mapping);
697 txr->tx_desc_ring = NULL;
698 }
699 kfree(txr->tx_buf_ring);
700 txr->tx_buf_ring = NULL;
701 }
702 }
703
704 static void
705 bnx2_free_rx_mem(struct bnx2 *bp)
706 {
707 int i;
708
709 for (i = 0; i < bp->num_rx_rings; i++) {
710 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
711 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
712 int j;
713
714 for (j = 0; j < bp->rx_max_ring; j++) {
715 if (rxr->rx_desc_ring[j])
716 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
717 rxr->rx_desc_ring[j],
718 rxr->rx_desc_mapping[j]);
719 rxr->rx_desc_ring[j] = NULL;
720 }
721 vfree(rxr->rx_buf_ring);
722 rxr->rx_buf_ring = NULL;
723
724 for (j = 0; j < bp->rx_max_pg_ring; j++) {
725 if (rxr->rx_pg_desc_ring[j])
726 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
727 rxr->rx_pg_desc_ring[j],
728 rxr->rx_pg_desc_mapping[j]);
729 rxr->rx_pg_desc_ring[j] = NULL;
730 }
731 vfree(rxr->rx_pg_ring);
732 rxr->rx_pg_ring = NULL;
733 }
734 }
735
736 static int
737 bnx2_alloc_tx_mem(struct bnx2 *bp)
738 {
739 int i;
740
741 for (i = 0; i < bp->num_tx_rings; i++) {
742 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
743 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
744
745 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
746 if (txr->tx_buf_ring == NULL)
747 return -ENOMEM;
748
749 txr->tx_desc_ring =
750 pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
751 &txr->tx_desc_mapping);
752 if (txr->tx_desc_ring == NULL)
753 return -ENOMEM;
754 }
755 return 0;
756 }
757
758 static int
759 bnx2_alloc_rx_mem(struct bnx2 *bp)
760 {
761 int i;
762
763 for (i = 0; i < bp->num_rx_rings; i++) {
764 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
765 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
766 int j;
767
768 rxr->rx_buf_ring =
769 vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
770 if (rxr->rx_buf_ring == NULL)
771 return -ENOMEM;
772
773 memset(rxr->rx_buf_ring, 0,
774 SW_RXBD_RING_SIZE * bp->rx_max_ring);
775
776 for (j = 0; j < bp->rx_max_ring; j++) {
777 rxr->rx_desc_ring[j] =
778 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
779 &rxr->rx_desc_mapping[j]);
780 if (rxr->rx_desc_ring[j] == NULL)
781 return -ENOMEM;
782
783 }
784
785 if (bp->rx_pg_ring_size) {
786 rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
787 bp->rx_max_pg_ring);
788 if (rxr->rx_pg_ring == NULL)
789 return -ENOMEM;
790
791 memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
792 bp->rx_max_pg_ring);
793 }
794
795 for (j = 0; j < bp->rx_max_pg_ring; j++) {
796 rxr->rx_pg_desc_ring[j] =
797 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
798 &rxr->rx_pg_desc_mapping[j]);
799 if (rxr->rx_pg_desc_ring[j] == NULL)
800 return -ENOMEM;
801
802 }
803 }
804 return 0;
805 }
806
807 static void
808 bnx2_free_mem(struct bnx2 *bp)
809 {
810 int i;
811 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
812
813 bnx2_free_tx_mem(bp);
814 bnx2_free_rx_mem(bp);
815
816 for (i = 0; i < bp->ctx_pages; i++) {
817 if (bp->ctx_blk[i]) {
818 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
819 bp->ctx_blk[i],
820 bp->ctx_blk_mapping[i]);
821 bp->ctx_blk[i] = NULL;
822 }
823 }
824 if (bnapi->status_blk.msi) {
825 pci_free_consistent(bp->pdev, bp->status_stats_size,
826 bnapi->status_blk.msi,
827 bp->status_blk_mapping);
828 bnapi->status_blk.msi = NULL;
829 bp->stats_blk = NULL;
830 }
831 }
832
833 static int
834 bnx2_alloc_mem(struct bnx2 *bp)
835 {
836 int i, status_blk_size, err;
837 struct bnx2_napi *bnapi;
838 void *status_blk;
839
840 /* Combine status and statistics blocks into one allocation. */
841 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
842 if (bp->flags & BNX2_FLAG_MSIX_CAP)
843 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
844 BNX2_SBLK_MSIX_ALIGN_SIZE);
845 bp->status_stats_size = status_blk_size +
846 sizeof(struct statistics_block);
847
848 status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
849 &bp->status_blk_mapping);
850 if (status_blk == NULL)
851 goto alloc_mem_err;
852
853 memset(status_blk, 0, bp->status_stats_size);
854
855 bnapi = &bp->bnx2_napi[0];
856 bnapi->status_blk.msi = status_blk;
857 bnapi->hw_tx_cons_ptr =
858 &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
859 bnapi->hw_rx_cons_ptr =
860 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
861 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
862 for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
863 struct status_block_msix *sblk;
864
865 bnapi = &bp->bnx2_napi[i];
866
867 sblk = (void *) (status_blk +
868 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
869 bnapi->status_blk.msix = sblk;
870 bnapi->hw_tx_cons_ptr =
871 &sblk->status_tx_quick_consumer_index;
872 bnapi->hw_rx_cons_ptr =
873 &sblk->status_rx_quick_consumer_index;
874 bnapi->int_num = i << 24;
875 }
876 }
877
878 bp->stats_blk = status_blk + status_blk_size;
879
880 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
881
882 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
883 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
884 if (bp->ctx_pages == 0)
885 bp->ctx_pages = 1;
886 for (i = 0; i < bp->ctx_pages; i++) {
887 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
888 BCM_PAGE_SIZE,
889 &bp->ctx_blk_mapping[i]);
890 if (bp->ctx_blk[i] == NULL)
891 goto alloc_mem_err;
892 }
893 }
894
895 err = bnx2_alloc_rx_mem(bp);
896 if (err)
897 goto alloc_mem_err;
898
899 err = bnx2_alloc_tx_mem(bp);
900 if (err)
901 goto alloc_mem_err;
902
903 return 0;
904
905 alloc_mem_err:
906 bnx2_free_mem(bp);
907 return -ENOMEM;
908 }
909
910 static void
911 bnx2_report_fw_link(struct bnx2 *bp)
912 {
913 u32 fw_link_status = 0;
914
915 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
916 return;
917
918 if (bp->link_up) {
919 u32 bmsr;
920
921 switch (bp->line_speed) {
922 case SPEED_10:
923 if (bp->duplex == DUPLEX_HALF)
924 fw_link_status = BNX2_LINK_STATUS_10HALF;
925 else
926 fw_link_status = BNX2_LINK_STATUS_10FULL;
927 break;
928 case SPEED_100:
929 if (bp->duplex == DUPLEX_HALF)
930 fw_link_status = BNX2_LINK_STATUS_100HALF;
931 else
932 fw_link_status = BNX2_LINK_STATUS_100FULL;
933 break;
934 case SPEED_1000:
935 if (bp->duplex == DUPLEX_HALF)
936 fw_link_status = BNX2_LINK_STATUS_1000HALF;
937 else
938 fw_link_status = BNX2_LINK_STATUS_1000FULL;
939 break;
940 case SPEED_2500:
941 if (bp->duplex == DUPLEX_HALF)
942 fw_link_status = BNX2_LINK_STATUS_2500HALF;
943 else
944 fw_link_status = BNX2_LINK_STATUS_2500FULL;
945 break;
946 }
947
948 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
949
950 if (bp->autoneg) {
951 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
952
953 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
954 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
955
956 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
957 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
958 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
959 else
960 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
961 }
962 }
963 else
964 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
965
966 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
967 }
968
969 static char *
970 bnx2_xceiver_str(struct bnx2 *bp)
971 {
972 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
973 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
974 "Copper"));
975 }
976
977 static void
978 bnx2_report_link(struct bnx2 *bp)
979 {
980 if (bp->link_up) {
981 netif_carrier_on(bp->dev);
982 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
983 bnx2_xceiver_str(bp));
984
985 printk("%d Mbps ", bp->line_speed);
986
987 if (bp->duplex == DUPLEX_FULL)
988 printk("full duplex");
989 else
990 printk("half duplex");
991
992 if (bp->flow_ctrl) {
993 if (bp->flow_ctrl & FLOW_CTRL_RX) {
994 printk(", receive ");
995 if (bp->flow_ctrl & FLOW_CTRL_TX)
996 printk("& transmit ");
997 }
998 else {
999 printk(", transmit ");
1000 }
1001 printk("flow control ON");
1002 }
1003 printk("\n");
1004 }
1005 else {
1006 netif_carrier_off(bp->dev);
1007 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
1008 bnx2_xceiver_str(bp));
1009 }
1010
1011 bnx2_report_fw_link(bp);
1012 }
1013
1014 static void
1015 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1016 {
1017 u32 local_adv, remote_adv;
1018
1019 bp->flow_ctrl = 0;
1020 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1021 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1022
1023 if (bp->duplex == DUPLEX_FULL) {
1024 bp->flow_ctrl = bp->req_flow_ctrl;
1025 }
1026 return;
1027 }
1028
1029 if (bp->duplex != DUPLEX_FULL) {
1030 return;
1031 }
1032
1033 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1034 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
1035 u32 val;
1036
1037 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1038 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1039 bp->flow_ctrl |= FLOW_CTRL_TX;
1040 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1041 bp->flow_ctrl |= FLOW_CTRL_RX;
1042 return;
1043 }
1044
1045 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1046 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1047
1048 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1049 u32 new_local_adv = 0;
1050 u32 new_remote_adv = 0;
1051
1052 if (local_adv & ADVERTISE_1000XPAUSE)
1053 new_local_adv |= ADVERTISE_PAUSE_CAP;
1054 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1055 new_local_adv |= ADVERTISE_PAUSE_ASYM;
1056 if (remote_adv & ADVERTISE_1000XPAUSE)
1057 new_remote_adv |= ADVERTISE_PAUSE_CAP;
1058 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1059 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1060
1061 local_adv = new_local_adv;
1062 remote_adv = new_remote_adv;
1063 }
1064
1065 /* See Table 28B-3 of 802.3ab-1999 spec. */
1066 if (local_adv & ADVERTISE_PAUSE_CAP) {
1067 if(local_adv & ADVERTISE_PAUSE_ASYM) {
1068 if (remote_adv & ADVERTISE_PAUSE_CAP) {
1069 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1070 }
1071 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1072 bp->flow_ctrl = FLOW_CTRL_RX;
1073 }
1074 }
1075 else {
1076 if (remote_adv & ADVERTISE_PAUSE_CAP) {
1077 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1078 }
1079 }
1080 }
1081 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1082 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1083 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
1084
1085 bp->flow_ctrl = FLOW_CTRL_TX;
1086 }
1087 }
1088 }
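The decision tree above implements the pause resolution of IEEE 802.3 Annex 28B; condensed into a table (Sym = PAUSE capable, Asym = asymmetric pause, x = don't care):

	/*
	 *  local Sym/Asym | remote Sym/Asym | resolved flow control
	 *  ---------------+-----------------+----------------------
	 *       1 / x     |      1 / x      | TX and RX pause
	 *       1 / 1     |      0 / 1      | RX pause only
	 *       0 / 1     |      1 / 1      | TX pause only
	 *       otherwise |                 | none
	 */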
1089
1090 static int
1091 bnx2_5709s_linkup(struct bnx2 *bp)
1092 {
1093 u32 val, speed;
1094
1095 bp->link_up = 1;
1096
1097 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1098 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1099 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1100
1101 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1102 bp->line_speed = bp->req_line_speed;
1103 bp->duplex = bp->req_duplex;
1104 return 0;
1105 }
1106 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1107 switch (speed) {
1108 case MII_BNX2_GP_TOP_AN_SPEED_10:
1109 bp->line_speed = SPEED_10;
1110 break;
1111 case MII_BNX2_GP_TOP_AN_SPEED_100:
1112 bp->line_speed = SPEED_100;
1113 break;
1114 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1115 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1116 bp->line_speed = SPEED_1000;
1117 break;
1118 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1119 bp->line_speed = SPEED_2500;
1120 break;
1121 }
1122 if (val & MII_BNX2_GP_TOP_AN_FD)
1123 bp->duplex = DUPLEX_FULL;
1124 else
1125 bp->duplex = DUPLEX_HALF;
1126 return 0;
1127 }
1128
1129 static int
1130 bnx2_5708s_linkup(struct bnx2 *bp)
1131 {
1132 u32 val;
1133
1134 bp->link_up = 1;
1135 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1136 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1137 case BCM5708S_1000X_STAT1_SPEED_10:
1138 bp->line_speed = SPEED_10;
1139 break;
1140 case BCM5708S_1000X_STAT1_SPEED_100:
1141 bp->line_speed = SPEED_100;
1142 break;
1143 case BCM5708S_1000X_STAT1_SPEED_1G:
1144 bp->line_speed = SPEED_1000;
1145 break;
1146 case BCM5708S_1000X_STAT1_SPEED_2G5:
1147 bp->line_speed = SPEED_2500;
1148 break;
1149 }
1150 if (val & BCM5708S_1000X_STAT1_FD)
1151 bp->duplex = DUPLEX_FULL;
1152 else
1153 bp->duplex = DUPLEX_HALF;
1154
1155 return 0;
1156 }
1157
1158 static int
1159 bnx2_5706s_linkup(struct bnx2 *bp)
1160 {
1161 u32 bmcr, local_adv, remote_adv, common;
1162
1163 bp->link_up = 1;
1164 bp->line_speed = SPEED_1000;
1165
1166 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1167 if (bmcr & BMCR_FULLDPLX) {
1168 bp->duplex = DUPLEX_FULL;
1169 }
1170 else {
1171 bp->duplex = DUPLEX_HALF;
1172 }
1173
1174 if (!(bmcr & BMCR_ANENABLE)) {
1175 return 0;
1176 }
1177
1178 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1179 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1180
1181 common = local_adv & remote_adv;
1182 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1183
1184 if (common & ADVERTISE_1000XFULL) {
1185 bp->duplex = DUPLEX_FULL;
1186 }
1187 else {
1188 bp->duplex = DUPLEX_HALF;
1189 }
1190 }
1191
1192 return 0;
1193 }
1194
1195 static int
1196 bnx2_copper_linkup(struct bnx2 *bp)
1197 {
1198 u32 bmcr;
1199
1200 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1201 if (bmcr & BMCR_ANENABLE) {
1202 u32 local_adv, remote_adv, common;
1203
1204 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1205 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1206
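		/* The link partner's 1000BASE-T ability bits in MII_STAT1000
		 * sit two bit positions above the corresponding ADVERTISE_1000*
		 * bits in MII_CTRL1000 (LPA_1000FULL is 0x0800 vs.
		 * ADVERTISE_1000FULL 0x0200), hence the shift by 2.
		 */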
1207 common = local_adv & (remote_adv >> 2);
1208 if (common & ADVERTISE_1000FULL) {
1209 bp->line_speed = SPEED_1000;
1210 bp->duplex = DUPLEX_FULL;
1211 }
1212 else if (common & ADVERTISE_1000HALF) {
1213 bp->line_speed = SPEED_1000;
1214 bp->duplex = DUPLEX_HALF;
1215 }
1216 else {
1217 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1218 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1219
1220 common = local_adv & remote_adv;
1221 if (common & ADVERTISE_100FULL) {
1222 bp->line_speed = SPEED_100;
1223 bp->duplex = DUPLEX_FULL;
1224 }
1225 else if (common & ADVERTISE_100HALF) {
1226 bp->line_speed = SPEED_100;
1227 bp->duplex = DUPLEX_HALF;
1228 }
1229 else if (common & ADVERTISE_10FULL) {
1230 bp->line_speed = SPEED_10;
1231 bp->duplex = DUPLEX_FULL;
1232 }
1233 else if (common & ADVERTISE_10HALF) {
1234 bp->line_speed = SPEED_10;
1235 bp->duplex = DUPLEX_HALF;
1236 }
1237 else {
1238 bp->line_speed = 0;
1239 bp->link_up = 0;
1240 }
1241 }
1242 }
1243 else {
1244 if (bmcr & BMCR_SPEED100) {
1245 bp->line_speed = SPEED_100;
1246 }
1247 else {
1248 bp->line_speed = SPEED_10;
1249 }
1250 if (bmcr & BMCR_FULLDPLX) {
1251 bp->duplex = DUPLEX_FULL;
1252 }
1253 else {
1254 bp->duplex = DUPLEX_HALF;
1255 }
1256 }
1257
1258 return 0;
1259 }
1260
1261 static void
1262 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1263 {
1264 u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1265
1266 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1267 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1268 val |= 0x02 << 8;
1269
1270 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1271 u32 lo_water, hi_water;
1272
1273 if (bp->flow_ctrl & FLOW_CTRL_TX)
1274 lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
1275 else
1276 lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
1277 if (lo_water >= bp->rx_ring_size)
1278 lo_water = 0;
1279
1280 hi_water = bp->rx_ring_size / 4;
1281
1282 if (hi_water <= lo_water)
1283 lo_water = 0;
1284
1285 hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
1286 lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;
1287
1288 if (hi_water > 0xf)
1289 hi_water = 0xf;
1290 else if (hi_water == 0)
1291 lo_water = 0;
1292 val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
1293 }
1294 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1295 }
1296
1297 static void
1298 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1299 {
1300 int i;
1301 u32 cid;
1302
1303 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1304 if (i == 1)
1305 cid = RX_RSS_CID;
1306 bnx2_init_rx_context(bp, cid);
1307 }
1308 }
1309
1310 static void
1311 bnx2_set_mac_link(struct bnx2 *bp)
1312 {
1313 u32 val;
1314
1315 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1316 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1317 (bp->duplex == DUPLEX_HALF)) {
1318 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1319 }
1320
1321 /* Configure the EMAC mode register. */
1322 val = REG_RD(bp, BNX2_EMAC_MODE);
1323
1324 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1325 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1326 BNX2_EMAC_MODE_25G_MODE);
1327
1328 if (bp->link_up) {
1329 switch (bp->line_speed) {
1330 case SPEED_10:
1331 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1332 val |= BNX2_EMAC_MODE_PORT_MII_10M;
1333 break;
1334 }
1335 /* fall through */
1336 case SPEED_100:
1337 val |= BNX2_EMAC_MODE_PORT_MII;
1338 break;
1339 case SPEED_2500:
1340 val |= BNX2_EMAC_MODE_25G_MODE;
1341 /* fall through */
1342 case SPEED_1000:
1343 val |= BNX2_EMAC_MODE_PORT_GMII;
1344 break;
1345 }
1346 }
1347 else {
1348 val |= BNX2_EMAC_MODE_PORT_GMII;
1349 }
1350
1351 /* Set the MAC to operate in the appropriate duplex mode. */
1352 if (bp->duplex == DUPLEX_HALF)
1353 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1354 REG_WR(bp, BNX2_EMAC_MODE, val);
1355
1356 /* Enable/disable rx PAUSE. */
1357 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1358
1359 if (bp->flow_ctrl & FLOW_CTRL_RX)
1360 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1361 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1362
1363 /* Enable/disable tx PAUSE. */
1364 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1365 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1366
1367 if (bp->flow_ctrl & FLOW_CTRL_TX)
1368 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1369 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1370
1371 /* Acknowledge the interrupt. */
1372 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1373
1374 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1375 bnx2_init_all_rx_contexts(bp);
1376 }
1377
1378 static void
1379 bnx2_enable_bmsr1(struct bnx2 *bp)
1380 {
1381 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1382 (CHIP_NUM(bp) == CHIP_NUM_5709))
1383 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1384 MII_BNX2_BLK_ADDR_GP_STATUS);
1385 }
1386
1387 static void
1388 bnx2_disable_bmsr1(struct bnx2 *bp)
1389 {
1390 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1391 (CHIP_NUM(bp) == CHIP_NUM_5709))
1392 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1393 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1394 }
1395
1396 static int
1397 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1398 {
1399 u32 up1;
1400 int ret = 1;
1401
1402 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1403 return 0;
1404
1405 if (bp->autoneg & AUTONEG_SPEED)
1406 bp->advertising |= ADVERTISED_2500baseX_Full;
1407
1408 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1409 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1410
1411 bnx2_read_phy(bp, bp->mii_up1, &up1);
1412 if (!(up1 & BCM5708S_UP1_2G5)) {
1413 up1 |= BCM5708S_UP1_2G5;
1414 bnx2_write_phy(bp, bp->mii_up1, up1);
1415 ret = 0;
1416 }
1417
1418 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1419 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1420 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1421
1422 return ret;
1423 }
1424
1425 static int
1426 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1427 {
1428 u32 up1;
1429 int ret = 0;
1430
1431 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1432 return 0;
1433
1434 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1435 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1436
1437 bnx2_read_phy(bp, bp->mii_up1, &up1);
1438 if (up1 & BCM5708S_UP1_2G5) {
1439 up1 &= ~BCM5708S_UP1_2G5;
1440 bnx2_write_phy(bp, bp->mii_up1, up1);
1441 ret = 1;
1442 }
1443
1444 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1445 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1446 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1447
1448 return ret;
1449 }
1450
1451 static void
1452 bnx2_enable_forced_2g5(struct bnx2 *bp)
1453 {
1454 u32 bmcr;
1455
1456 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1457 return;
1458
1459 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1460 u32 val;
1461
1462 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1463 MII_BNX2_BLK_ADDR_SERDES_DIG);
1464 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1465 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1466 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1467 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1468
1469 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1470 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1471 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1472
1473 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1474 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1475 bmcr |= BCM5708S_BMCR_FORCE_2500;
1476 } else {
1477 return;
1478 }
1479
1480 if (bp->autoneg & AUTONEG_SPEED) {
1481 bmcr &= ~BMCR_ANENABLE;
1482 if (bp->req_duplex == DUPLEX_FULL)
1483 bmcr |= BMCR_FULLDPLX;
1484 }
1485 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1486 }
1487
1488 static void
1489 bnx2_disable_forced_2g5(struct bnx2 *bp)
1490 {
1491 u32 bmcr;
1492
1493 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1494 return;
1495
1496 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1497 u32 val;
1498
1499 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1500 MII_BNX2_BLK_ADDR_SERDES_DIG);
1501 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1502 val &= ~MII_BNX2_SD_MISC1_FORCE;
1503 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1504
1505 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1506 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1507 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1508
1509 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1510 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1511 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1512 } else {
1513 return;
1514 }
1515
1516 if (bp->autoneg & AUTONEG_SPEED)
1517 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1518 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1519 }
1520
1521 static void
1522 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1523 {
1524 u32 val;
1525
1526 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1527 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1528 if (start)
1529 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1530 else
1531 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1532 }
1533
1534 static int
1535 bnx2_set_link(struct bnx2 *bp)
1536 {
1537 u32 bmsr;
1538 u8 link_up;
1539
1540 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1541 bp->link_up = 1;
1542 return 0;
1543 }
1544
1545 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1546 return 0;
1547
1548 link_up = bp->link_up;
1549
1550 bnx2_enable_bmsr1(bp);
1551 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1552 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1553 bnx2_disable_bmsr1(bp);
1554
1555 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1556 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1557 u32 val, an_dbg;
1558
1559 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1560 bnx2_5706s_force_link_dn(bp, 0);
1561 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1562 }
1563 val = REG_RD(bp, BNX2_EMAC_STATUS);
1564
1565 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1566 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1567 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1568
1569 if ((val & BNX2_EMAC_STATUS_LINK) &&
1570 !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1571 bmsr |= BMSR_LSTATUS;
1572 else
1573 bmsr &= ~BMSR_LSTATUS;
1574 }
1575
1576 if (bmsr & BMSR_LSTATUS) {
1577 bp->link_up = 1;
1578
1579 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1580 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1581 bnx2_5706s_linkup(bp);
1582 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1583 bnx2_5708s_linkup(bp);
1584 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1585 bnx2_5709s_linkup(bp);
1586 }
1587 else {
1588 bnx2_copper_linkup(bp);
1589 }
1590 bnx2_resolve_flow_ctrl(bp);
1591 }
1592 else {
1593 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1594 (bp->autoneg & AUTONEG_SPEED))
1595 bnx2_disable_forced_2g5(bp);
1596
1597 if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1598 u32 bmcr;
1599
1600 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1601 bmcr |= BMCR_ANENABLE;
1602 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1603
1604 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1605 }
1606 bp->link_up = 0;
1607 }
1608
1609 if (bp->link_up != link_up) {
1610 bnx2_report_link(bp);
1611 }
1612
1613 bnx2_set_mac_link(bp);
1614
1615 return 0;
1616 }
1617
1618 static int
1619 bnx2_reset_phy(struct bnx2 *bp)
1620 {
1621 int i;
1622 u32 reg;
1623
1624 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1625
1626 #define PHY_RESET_MAX_WAIT 100
1627 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1628 udelay(10);
1629
1630 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1631 if (!(reg & BMCR_RESET)) {
1632 udelay(20);
1633 break;
1634 }
1635 }
1636 if (i == PHY_RESET_MAX_WAIT) {
1637 return -EBUSY;
1638 }
1639 return 0;
1640 }
1641
1642 static u32
1643 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1644 {
1645 u32 adv = 0;
1646
1647 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1648 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1649
1650 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1651 adv = ADVERTISE_1000XPAUSE;
1652 }
1653 else {
1654 adv = ADVERTISE_PAUSE_CAP;
1655 }
1656 }
1657 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1658 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1659 adv = ADVERTISE_1000XPSE_ASYM;
1660 }
1661 else {
1662 adv = ADVERTISE_PAUSE_ASYM;
1663 }
1664 }
1665 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1666 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1667 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1668 }
1669 else {
1670 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1671 }
1672 }
1673 return adv;
1674 }
1675
1676 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1677
1678 static int
1679 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1680 __releases(&bp->phy_lock)
1681 __acquires(&bp->phy_lock)
1682 {
1683 u32 speed_arg = 0, pause_adv;
1684
1685 pause_adv = bnx2_phy_get_pause_adv(bp);
1686
1687 if (bp->autoneg & AUTONEG_SPEED) {
1688 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1689 if (bp->advertising & ADVERTISED_10baseT_Half)
1690 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1691 if (bp->advertising & ADVERTISED_10baseT_Full)
1692 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1693 if (bp->advertising & ADVERTISED_100baseT_Half)
1694 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1695 if (bp->advertising & ADVERTISED_100baseT_Full)
1696 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1697 if (bp->advertising & ADVERTISED_1000baseT_Full)
1698 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1699 if (bp->advertising & ADVERTISED_2500baseX_Full)
1700 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1701 } else {
1702 if (bp->req_line_speed == SPEED_2500)
1703 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1704 else if (bp->req_line_speed == SPEED_1000)
1705 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1706 else if (bp->req_line_speed == SPEED_100) {
1707 if (bp->req_duplex == DUPLEX_FULL)
1708 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1709 else
1710 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1711 } else if (bp->req_line_speed == SPEED_10) {
1712 if (bp->req_duplex == DUPLEX_FULL)
1713 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1714 else
1715 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1716 }
1717 }
1718
1719 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1720 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1721 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1722 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1723
1724 if (port == PORT_TP)
1725 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1726 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1727
1728 bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1729
1730 spin_unlock_bh(&bp->phy_lock);
1731 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1732 spin_lock_bh(&bp->phy_lock);
1733
1734 return 0;
1735 }
1736
1737 static int
1738 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1739 __releases(&bp->phy_lock)
1740 __acquires(&bp->phy_lock)
1741 {
1742 u32 adv, bmcr;
1743 u32 new_adv = 0;
1744
1745 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1746 return (bnx2_setup_remote_phy(bp, port));
1747
1748 if (!(bp->autoneg & AUTONEG_SPEED)) {
1749 u32 new_bmcr;
1750 int force_link_down = 0;
1751
1752 if (bp->req_line_speed == SPEED_2500) {
1753 if (!bnx2_test_and_enable_2g5(bp))
1754 force_link_down = 1;
1755 } else if (bp->req_line_speed == SPEED_1000) {
1756 if (bnx2_test_and_disable_2g5(bp))
1757 force_link_down = 1;
1758 }
1759 bnx2_read_phy(bp, bp->mii_adv, &adv);
1760 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1761
1762 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1763 new_bmcr = bmcr & ~BMCR_ANENABLE;
1764 new_bmcr |= BMCR_SPEED1000;
1765
1766 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1767 if (bp->req_line_speed == SPEED_2500)
1768 bnx2_enable_forced_2g5(bp);
1769 else if (bp->req_line_speed == SPEED_1000) {
1770 bnx2_disable_forced_2g5(bp);
1771 new_bmcr &= ~0x2000;
1772 }
1773
1774 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1775 if (bp->req_line_speed == SPEED_2500)
1776 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1777 else
1778 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1779 }
1780
1781 if (bp->req_duplex == DUPLEX_FULL) {
1782 adv |= ADVERTISE_1000XFULL;
1783 new_bmcr |= BMCR_FULLDPLX;
1784 }
1785 else {
1786 adv |= ADVERTISE_1000XHALF;
1787 new_bmcr &= ~BMCR_FULLDPLX;
1788 }
1789 if ((new_bmcr != bmcr) || (force_link_down)) {
1790 			/* Force a link-down event that is visible to the other side */
1791 if (bp->link_up) {
1792 bnx2_write_phy(bp, bp->mii_adv, adv &
1793 ~(ADVERTISE_1000XFULL |
1794 ADVERTISE_1000XHALF));
1795 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1796 BMCR_ANRESTART | BMCR_ANENABLE);
1797
1798 bp->link_up = 0;
1799 netif_carrier_off(bp->dev);
1800 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1801 bnx2_report_link(bp);
1802 }
1803 bnx2_write_phy(bp, bp->mii_adv, adv);
1804 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1805 } else {
1806 bnx2_resolve_flow_ctrl(bp);
1807 bnx2_set_mac_link(bp);
1808 }
1809 return 0;
1810 }
1811
1812 bnx2_test_and_enable_2g5(bp);
1813
1814 if (bp->advertising & ADVERTISED_1000baseT_Full)
1815 new_adv |= ADVERTISE_1000XFULL;
1816
1817 new_adv |= bnx2_phy_get_pause_adv(bp);
1818
1819 bnx2_read_phy(bp, bp->mii_adv, &adv);
1820 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1821
1822 bp->serdes_an_pending = 0;
1823 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1824 		/* Force a link-down event that is visible to the other side */
1825 if (bp->link_up) {
1826 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1827 spin_unlock_bh(&bp->phy_lock);
1828 msleep(20);
1829 spin_lock_bh(&bp->phy_lock);
1830 }
1831
1832 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1833 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1834 BMCR_ANENABLE);
1835 /* Speed up link-up time when the link partner
1836 		 * does not autonegotiate, which is very common
1837 		 * in blade servers. Some blade servers use
1838 		 * IPMI for keyboard input and it's important
1839 		 * to minimize link disruptions. Autonegotiation involves
1840 * exchanging base pages plus 3 next pages and
1841 * normally completes in about 120 msec.
1842 */
1843 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1844 bp->serdes_an_pending = 1;
1845 mod_timer(&bp->timer, jiffies + bp->current_interval);
1846 } else {
1847 bnx2_resolve_flow_ctrl(bp);
1848 bnx2_set_mac_link(bp);
1849 }
1850
1851 return 0;
1852 }
1853
1854 #define ETHTOOL_ALL_FIBRE_SPEED \
1855 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
1856 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1857 (ADVERTISED_1000baseT_Full)
1858
1859 #define ETHTOOL_ALL_COPPER_SPEED \
1860 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1861 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1862 ADVERTISED_1000baseT_Full)
1863
1864 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1865 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1866
1867 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1868
1869 static void
1870 bnx2_set_default_remote_link(struct bnx2 *bp)
1871 {
1872 u32 link;
1873
1874 if (bp->phy_port == PORT_TP)
1875 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1876 else
1877 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1878
1879 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1880 bp->req_line_speed = 0;
1881 bp->autoneg |= AUTONEG_SPEED;
1882 bp->advertising = ADVERTISED_Autoneg;
1883 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1884 bp->advertising |= ADVERTISED_10baseT_Half;
1885 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1886 bp->advertising |= ADVERTISED_10baseT_Full;
1887 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1888 bp->advertising |= ADVERTISED_100baseT_Half;
1889 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1890 bp->advertising |= ADVERTISED_100baseT_Full;
1891 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1892 bp->advertising |= ADVERTISED_1000baseT_Full;
1893 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1894 bp->advertising |= ADVERTISED_2500baseX_Full;
1895 } else {
1896 bp->autoneg = 0;
1897 bp->advertising = 0;
1898 bp->req_duplex = DUPLEX_FULL;
1899 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1900 bp->req_line_speed = SPEED_10;
1901 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1902 bp->req_duplex = DUPLEX_HALF;
1903 }
1904 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1905 bp->req_line_speed = SPEED_100;
1906 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1907 bp->req_duplex = DUPLEX_HALF;
1908 }
1909 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1910 bp->req_line_speed = SPEED_1000;
1911 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1912 bp->req_line_speed = SPEED_2500;
1913 }
1914 }
1915
1916 static void
1917 bnx2_set_default_link(struct bnx2 *bp)
1918 {
1919 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1920 bnx2_set_default_remote_link(bp);
1921 return;
1922 }
1923
1924 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1925 bp->req_line_speed = 0;
1926 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1927 u32 reg;
1928
1929 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1930
1931 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1932 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1933 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1934 bp->autoneg = 0;
1935 bp->req_line_speed = bp->line_speed = SPEED_1000;
1936 bp->req_duplex = DUPLEX_FULL;
1937 }
1938 } else
1939 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1940 }
1941
1942 static void
1943 bnx2_send_heart_beat(struct bnx2 *bp)
1944 {
1945 u32 msg;
1946 u32 addr;
1947
1948 spin_lock(&bp->indirect_lock);
1949 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1950 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1951 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1952 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1953 spin_unlock(&bp->indirect_lock);
1954 }
1955
1956 static void
1957 bnx2_remote_phy_event(struct bnx2 *bp)
1958 {
1959 u32 msg;
1960 u8 link_up = bp->link_up;
1961 u8 old_port;
1962
1963 msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1964
1965 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1966 bnx2_send_heart_beat(bp);
1967
1968 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1969
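	/* BNX2_LINK_STATUS_LINK_DOWN is 0, so this tests the LINK_UP bit. */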
1970 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1971 bp->link_up = 0;
1972 else {
1973 u32 speed;
1974
1975 bp->link_up = 1;
1976 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1977 bp->duplex = DUPLEX_FULL;
1978 switch (speed) {
1979 case BNX2_LINK_STATUS_10HALF:
1980 bp->duplex = DUPLEX_HALF;
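			/* fall through */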
1981 case BNX2_LINK_STATUS_10FULL:
1982 bp->line_speed = SPEED_10;
1983 break;
1984 case BNX2_LINK_STATUS_100HALF:
1985 bp->duplex = DUPLEX_HALF;
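			/* fall through */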
1986 case BNX2_LINK_STATUS_100BASE_T4:
1987 case BNX2_LINK_STATUS_100FULL:
1988 bp->line_speed = SPEED_100;
1989 break;
1990 case BNX2_LINK_STATUS_1000HALF:
1991 bp->duplex = DUPLEX_HALF;
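			/* fall through */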
1992 case BNX2_LINK_STATUS_1000FULL:
1993 bp->line_speed = SPEED_1000;
1994 break;
1995 case BNX2_LINK_STATUS_2500HALF:
1996 bp->duplex = DUPLEX_HALF;
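			/* fall through */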
1997 case BNX2_LINK_STATUS_2500FULL:
1998 bp->line_speed = SPEED_2500;
1999 break;
2000 default:
2001 bp->line_speed = 0;
2002 break;
2003 }
2004
2005 bp->flow_ctrl = 0;
2006 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2007 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2008 if (bp->duplex == DUPLEX_FULL)
2009 bp->flow_ctrl = bp->req_flow_ctrl;
2010 } else {
2011 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2012 bp->flow_ctrl |= FLOW_CTRL_TX;
2013 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2014 bp->flow_ctrl |= FLOW_CTRL_RX;
2015 }
2016
2017 old_port = bp->phy_port;
2018 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2019 bp->phy_port = PORT_FIBRE;
2020 else
2021 bp->phy_port = PORT_TP;
2022
2023 if (old_port != bp->phy_port)
2024 bnx2_set_default_link(bp);
2025
2026 }
2027 if (bp->link_up != link_up)
2028 bnx2_report_link(bp);
2029
2030 bnx2_set_mac_link(bp);
2031 }
2032
2033 static int
2034 bnx2_set_remote_link(struct bnx2 *bp)
2035 {
2036 u32 evt_code;
2037
2038 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2039 switch (evt_code) {
2040 case BNX2_FW_EVT_CODE_LINK_EVENT:
2041 bnx2_remote_phy_event(bp);
2042 break;
2043 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2044 default:
2045 bnx2_send_heart_beat(bp);
2046 break;
2047 }
2048 return 0;
2049 }
2050
2051 static int
2052 bnx2_setup_copper_phy(struct bnx2 *bp)
2053 __releases(&bp->phy_lock)
2054 __acquires(&bp->phy_lock)
2055 {
2056 u32 bmcr;
2057 u32 new_bmcr;
2058
2059 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2060
2061 if (bp->autoneg & AUTONEG_SPEED) {
2062 u32 adv_reg, adv1000_reg;
2063 u32 new_adv_reg = 0;
2064 u32 new_adv1000_reg = 0;
2065
2066 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2067 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2068 ADVERTISE_PAUSE_ASYM);
2069
2070 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2071 adv1000_reg &= PHY_ALL_1000_SPEED;
2072
2073 if (bp->advertising & ADVERTISED_10baseT_Half)
2074 new_adv_reg |= ADVERTISE_10HALF;
2075 if (bp->advertising & ADVERTISED_10baseT_Full)
2076 new_adv_reg |= ADVERTISE_10FULL;
2077 if (bp->advertising & ADVERTISED_100baseT_Half)
2078 new_adv_reg |= ADVERTISE_100HALF;
2079 if (bp->advertising & ADVERTISED_100baseT_Full)
2080 new_adv_reg |= ADVERTISE_100FULL;
2081 if (bp->advertising & ADVERTISED_1000baseT_Full)
2082 new_adv1000_reg |= ADVERTISE_1000FULL;
2083
2084 new_adv_reg |= ADVERTISE_CSMA;
2085
2086 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
2087
2088 if ((adv1000_reg != new_adv1000_reg) ||
2089 (adv_reg != new_adv_reg) ||
2090 ((bmcr & BMCR_ANENABLE) == 0)) {
2091
2092 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
2093 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
2094 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2095 BMCR_ANENABLE);
2096 }
2097 else if (bp->link_up) {
2098 /* Flow ctrl may have changed from auto to forced
2099 * or vice-versa. */
2100
2101 bnx2_resolve_flow_ctrl(bp);
2102 bnx2_set_mac_link(bp);
2103 }
2104 return 0;
2105 }
2106
2107 new_bmcr = 0;
2108 if (bp->req_line_speed == SPEED_100) {
2109 new_bmcr |= BMCR_SPEED100;
2110 }
2111 if (bp->req_duplex == DUPLEX_FULL) {
2112 new_bmcr |= BMCR_FULLDPLX;
2113 }
2114 if (new_bmcr != bmcr) {
2115 u32 bmsr;
2116
2117 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2118 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2119
2120 if (bmsr & BMSR_LSTATUS) {
2121 /* Force link down */
2122 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2123 spin_unlock_bh(&bp->phy_lock);
2124 msleep(50);
2125 spin_lock_bh(&bp->phy_lock);
2126
2127 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2128 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2129 }
2130
2131 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2132
2133 /* Normally, the new speed is set up after the link has
2134 * gone down and up again. In some cases, link will not go
2135 * down so we need to set up the new speed here.
2136 */
2137 if (bmsr & BMSR_LSTATUS) {
2138 bp->line_speed = bp->req_line_speed;
2139 bp->duplex = bp->req_duplex;
2140 bnx2_resolve_flow_ctrl(bp);
2141 bnx2_set_mac_link(bp);
2142 }
2143 } else {
2144 bnx2_resolve_flow_ctrl(bp);
2145 bnx2_set_mac_link(bp);
2146 }
2147 return 0;
2148 }
2149
2150 static int
2151 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2152 __releases(&bp->phy_lock)
2153 __acquires(&bp->phy_lock)
2154 {
2155 if (bp->loopback == MAC_LOOPBACK)
2156 return 0;
2157
2158 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2159 return (bnx2_setup_serdes_phy(bp, port));
2160 }
2161 else {
2162 return (bnx2_setup_copper_phy(bp));
2163 }
2164 }
2165
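/* The 5709S SerDes PHY below exposes its registers in banks: a write to
 * MII_BNX2_BLK_ADDR selects the active bank, and subsequent
 * bnx2_read_phy()/bnx2_write_phy() calls address registers within that
 * bank.  A sketch of the access pattern (names as used in this file):
 *
 *	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
 *	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
 */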
2166 static int
2167 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2168 {
2169 u32 val;
2170
2171 bp->mii_bmcr = MII_BMCR + 0x10;
2172 bp->mii_bmsr = MII_BMSR + 0x10;
2173 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2174 bp->mii_adv = MII_ADVERTISE + 0x10;
2175 bp->mii_lpa = MII_LPA + 0x10;
2176 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2177
2178 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2179 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2180
2181 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2182 if (reset_phy)
2183 bnx2_reset_phy(bp);
2184
2185 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2186
2187 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2188 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2189 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2190 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2191
2192 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2193 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2194 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2195 val |= BCM5708S_UP1_2G5;
2196 else
2197 val &= ~BCM5708S_UP1_2G5;
2198 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2199
2200 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2201 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2202 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2203 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2204
2205 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2206
2207 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2208 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2209 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2210
2211 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2212
2213 return 0;
2214 }
2215
2216 static int
2217 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2218 {
2219 u32 val;
2220
2221 if (reset_phy)
2222 bnx2_reset_phy(bp);
2223
2224 bp->mii_up1 = BCM5708S_UP1;
2225
2226 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2227 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2228 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2229
2230 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2231 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2232 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2233
2234 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2235 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2236 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2237
2238 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2239 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2240 val |= BCM5708S_UP1_2G5;
2241 bnx2_write_phy(bp, BCM5708S_UP1, val);
2242 }
2243
2244 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2245 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2246 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2247 /* increase tx signal amplitude */
2248 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2249 BCM5708S_BLK_ADDR_TX_MISC);
2250 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2251 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2252 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2253 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2254 }
2255
2256 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2257 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2258
2259 if (val) {
2260 u32 is_backplane;
2261
2262 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2263 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2264 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2265 BCM5708S_BLK_ADDR_TX_MISC);
2266 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2267 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2268 BCM5708S_BLK_ADDR_DIG);
2269 }
2270 }
2271 return 0;
2272 }
2273
2274 static int
2275 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2276 {
2277 if (reset_phy)
2278 bnx2_reset_phy(bp);
2279
2280 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2281
2282 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2283 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2284
2285 if (bp->dev->mtu > 1500) {
2286 u32 val;
2287
2288 /* Set extended packet length bit */
2289 bnx2_write_phy(bp, 0x18, 0x7);
2290 bnx2_read_phy(bp, 0x18, &val);
2291 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2292
2293 bnx2_write_phy(bp, 0x1c, 0x6c00);
2294 bnx2_read_phy(bp, 0x1c, &val);
2295 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2296 }
2297 else {
2298 u32 val;
2299
2300 bnx2_write_phy(bp, 0x18, 0x7);
2301 bnx2_read_phy(bp, 0x18, &val);
2302 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2303
2304 bnx2_write_phy(bp, 0x1c, 0x6c00);
2305 bnx2_read_phy(bp, 0x1c, &val);
2306 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2307 }
2308
2309 return 0;
2310 }
2311
2312 static int
2313 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2314 {
2315 u32 val;
2316
2317 if (reset_phy)
2318 bnx2_reset_phy(bp);
2319
2320 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2321 bnx2_write_phy(bp, 0x18, 0x0c00);
2322 bnx2_write_phy(bp, 0x17, 0x000a);
2323 bnx2_write_phy(bp, 0x15, 0x310b);
2324 bnx2_write_phy(bp, 0x17, 0x201f);
2325 bnx2_write_phy(bp, 0x15, 0x9506);
2326 bnx2_write_phy(bp, 0x17, 0x401f);
2327 bnx2_write_phy(bp, 0x15, 0x14e2);
2328 bnx2_write_phy(bp, 0x18, 0x0400);
2329 }
2330
2331 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2332 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2333 MII_BNX2_DSP_EXPAND_REG | 0x8);
2334 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2335 val &= ~(1 << 8);
2336 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2337 }
2338
2339 if (bp->dev->mtu > 1500) {
2340 /* Set extended packet length bit */
2341 bnx2_write_phy(bp, 0x18, 0x7);
2342 bnx2_read_phy(bp, 0x18, &val);
2343 bnx2_write_phy(bp, 0x18, val | 0x4000);
2344
2345 bnx2_read_phy(bp, 0x10, &val);
2346 bnx2_write_phy(bp, 0x10, val | 0x1);
2347 }
2348 else {
2349 bnx2_write_phy(bp, 0x18, 0x7);
2350 bnx2_read_phy(bp, 0x18, &val);
2351 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2352
2353 bnx2_read_phy(bp, 0x10, &val);
2354 bnx2_write_phy(bp, 0x10, val & ~0x1);
2355 }
2356
2357 /* ethernet@wirespeed */
2358 bnx2_write_phy(bp, 0x18, 0x7007);
2359 bnx2_read_phy(bp, 0x18, &val);
2360 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2361 return 0;
2362 }
2363
2364
2365 static int
2366 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2367 __releases(&bp->phy_lock)
2368 __acquires(&bp->phy_lock)
2369 {
2370 u32 val;
2371 int rc = 0;
2372
2373 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2374 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2375
2376 bp->mii_bmcr = MII_BMCR;
2377 bp->mii_bmsr = MII_BMSR;
2378 bp->mii_bmsr1 = MII_BMSR;
2379 bp->mii_adv = MII_ADVERTISE;
2380 bp->mii_lpa = MII_LPA;
2381
2382 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2383
2384 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2385 goto setup_phy;
2386
2387 bnx2_read_phy(bp, MII_PHYSID1, &val);
2388 bp->phy_id = val << 16;
2389 bnx2_read_phy(bp, MII_PHYSID2, &val);
2390 bp->phy_id |= val & 0xffff;
2391
2392 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2393 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2394 rc = bnx2_init_5706s_phy(bp, reset_phy);
2395 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2396 rc = bnx2_init_5708s_phy(bp, reset_phy);
2397 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2398 rc = bnx2_init_5709s_phy(bp, reset_phy);
2399 }
2400 else {
2401 rc = bnx2_init_copper_phy(bp, reset_phy);
2402 }
2403
2404 setup_phy:
2405 if (!rc)
2406 rc = bnx2_setup_phy(bp, bp->phy_port);
2407
2408 return rc;
2409 }
2410
2411 static int
2412 bnx2_set_mac_loopback(struct bnx2 *bp)
2413 {
2414 u32 mac_mode;
2415
2416 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2417 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2418 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2419 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2420 bp->link_up = 1;
2421 return 0;
2422 }
2423
2424 static int bnx2_test_link(struct bnx2 *);
2425
2426 static int
2427 bnx2_set_phy_loopback(struct bnx2 *bp)
2428 {
2429 u32 mac_mode;
2430 int rc, i;
2431
2432 spin_lock_bh(&bp->phy_lock);
2433 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2434 BMCR_SPEED1000);
2435 spin_unlock_bh(&bp->phy_lock);
2436 if (rc)
2437 return rc;
2438
2439 for (i = 0; i < 10; i++) {
2440 if (bnx2_test_link(bp) == 0)
2441 break;
2442 msleep(100);
2443 }
2444
2445 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2446 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2447 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2448 BNX2_EMAC_MODE_25G_MODE);
2449
2450 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2451 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2452 bp->link_up = 1;
2453 return 0;
2454 }
2455
2456 static int
2457 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2458 {
2459 int i;
2460 u32 val;
2461
2462 bp->fw_wr_seq++;
2463 msg_data |= bp->fw_wr_seq;
2464
2465 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2466
2467 if (!ack)
2468 return 0;
2469
2470 /* wait for an acknowledgement. */
2471 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2472 msleep(10);
2473
2474 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2475
2476 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2477 break;
2478 }
2479 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2480 return 0;
2481
2482 /* If we timed out, inform the firmware that this is the case. */
2483 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2484 if (!silent)
2485 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2486 "%x\n", msg_data);
2487
2488 msg_data &= ~BNX2_DRV_MSG_CODE;
2489 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2490
2491 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2492
2493 return -EBUSY;
2494 }
2495
2496 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2497 return -EIO;
2498
2499 return 0;
2500 }
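/* A minimal usage sketch of the driver/firmware handshake above,
 * mirroring the WOL path later in this file (wol_msg is illustrative):
 *
 *	u32 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
 *
 *	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);
 *
 * The sequence number is folded into msg_data by bnx2_fw_sync() itself;
 * with ack == 1 the call polls BNX2_FW_MB until the firmware echoes the
 * sequence back or BNX2_FW_ACK_TIME_OUT_MS elapses.
 */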
2501
2502 static int
2503 bnx2_init_5709_context(struct bnx2 *bp)
2504 {
2505 int i, ret = 0;
2506 u32 val;
2507
2508 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2509 val |= (BCM_PAGE_BITS - 8) << 16;
2510 REG_WR(bp, BNX2_CTX_COMMAND, val);
2511 for (i = 0; i < 10; i++) {
2512 val = REG_RD(bp, BNX2_CTX_COMMAND);
2513 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2514 break;
2515 udelay(2);
2516 }
2517 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2518 return -EBUSY;
2519
2520 for (i = 0; i < bp->ctx_pages; i++) {
2521 int j;
2522
2523 if (bp->ctx_blk[i])
2524 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2525 else
2526 return -ENOMEM;
2527
2528 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2529 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2530 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2531 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2532 (u64) bp->ctx_blk_mapping[i] >> 32);
2533 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2534 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2535 for (j = 0; j < 10; j++) {
2536
2537 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2538 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2539 break;
2540 udelay(5);
2541 }
2542 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2543 ret = -EBUSY;
2544 break;
2545 }
2546 }
2547 return ret;
2548 }
2549
2550 static void
2551 bnx2_init_context(struct bnx2 *bp)
2552 {
2553 u32 vcid;
2554
2555 vcid = 96;
2556 while (vcid) {
2557 u32 vcid_addr, pcid_addr, offset;
2558 int i;
2559
2560 vcid--;
2561
2562 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2563 u32 new_vcid;
2564
2565 vcid_addr = GET_PCID_ADDR(vcid);
2566 if (vcid & 0x8) {
2567 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2568 }
2569 else {
2570 new_vcid = vcid;
2571 }
2572 pcid_addr = GET_PCID_ADDR(new_vcid);
2573 }
2574 else {
2575 vcid_addr = GET_CID_ADDR(vcid);
2576 pcid_addr = vcid_addr;
2577 }
2578
2579 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2580 vcid_addr += (i << PHY_CTX_SHIFT);
2581 pcid_addr += (i << PHY_CTX_SHIFT);
2582
2583 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2584 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2585
2586 /* Zero out the context. */
2587 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2588 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2589 }
2590 }
2591 }
2592
2593 static int
2594 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2595 {
2596 u16 *good_mbuf;
2597 u32 good_mbuf_cnt;
2598 u32 val;
2599
2600 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2601 if (good_mbuf == NULL) {
2602 printk(KERN_ERR PFX "Failed to allocate memory in "
2603 "bnx2_alloc_bad_rbuf\n");
2604 return -ENOMEM;
2605 }
2606
2607 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2608 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2609
2610 good_mbuf_cnt = 0;
2611
2612 /* Allocate a bunch of mbufs and save the good ones in an array. */
2613 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2614 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2615 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2616 BNX2_RBUF_COMMAND_ALLOC_REQ);
2617
2618 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2619
2620 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2621
2622 /* The addresses with Bit 9 set are bad memory blocks. */
2623 if (!(val & (1 << 9))) {
2624 good_mbuf[good_mbuf_cnt] = (u16) val;
2625 good_mbuf_cnt++;
2626 }
2627
2628 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2629 }
2630
2631 /* Free the good ones back to the mbuf pool, thus discarding
2632 * all the bad ones. */
2633 while (good_mbuf_cnt) {
2634 good_mbuf_cnt--;
2635
2636 val = good_mbuf[good_mbuf_cnt];
2637 val = (val << 9) | val | 1;
2638
2639 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2640 }
2641 kfree(good_mbuf);
2642 return 0;
2643 }
2644
2645 static void
2646 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2647 {
2648 u32 val;
2649
2650 val = (mac_addr[0] << 8) | mac_addr[1];
2651
2652 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2653
2654 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2655 (mac_addr[4] << 8) | mac_addr[5];
2656
2657 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2658 }
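/* Worked example of the register packing above: for the address
 * 00:10:18:aa:bb:cc at pos 0, BNX2_EMAC_MAC_MATCH0 receives 0x00000010
 * (bytes 0-1) and BNX2_EMAC_MAC_MATCH1 receives 0x18aabbcc (bytes 2-5).
 */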
2659
2660 static inline int
2661 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2662 {
2663 dma_addr_t mapping;
2664 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2665 struct rx_bd *rxbd =
2666 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2667 struct page *page = alloc_page(GFP_ATOMIC);
2668
2669 if (!page)
2670 return -ENOMEM;
2671 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2672 PCI_DMA_FROMDEVICE);
2673 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2674 __free_page(page);
2675 return -EIO;
2676 }
2677
2678 rx_pg->page = page;
2679 pci_unmap_addr_set(rx_pg, mapping, mapping);
2680 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2681 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2682 return 0;
2683 }
2684
2685 static void
2686 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2687 {
2688 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2689 struct page *page = rx_pg->page;
2690
2691 if (!page)
2692 return;
2693
2694 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2695 PCI_DMA_FROMDEVICE);
2696
2697 __free_page(page);
2698 rx_pg->page = NULL;
2699 }
2700
2701 static inline int
2702 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2703 {
2704 struct sk_buff *skb;
2705 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2706 dma_addr_t mapping;
2707 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2708 unsigned long align;
2709
2710 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2711 if (skb == NULL) {
2712 return -ENOMEM;
2713 }
2714
2715 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2716 skb_reserve(skb, BNX2_RX_ALIGN - align);
2717
2718 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2719 PCI_DMA_FROMDEVICE);
2720 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2721 dev_kfree_skb(skb);
2722 return -EIO;
2723 }
2724
2725 rx_buf->skb = skb;
2726 pci_unmap_addr_set(rx_buf, mapping, mapping);
2727
2728 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2729 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2730
2731 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2732
2733 return 0;
2734 }
2735
2736 static int
2737 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2738 {
2739 struct status_block *sblk = bnapi->status_blk.msi;
2740 u32 new_link_state, old_link_state;
2741 int is_set = 1;
2742
2743 new_link_state = sblk->status_attn_bits & event;
2744 old_link_state = sblk->status_attn_bits_ack & event;
2745 if (new_link_state != old_link_state) {
2746 if (new_link_state)
2747 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2748 else
2749 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2750 } else
2751 is_set = 0;
2752
2753 return is_set;
2754 }
2755
2756 static void
2757 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2758 {
2759 spin_lock(&bp->phy_lock);
2760
2761 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2762 bnx2_set_link(bp);
2763 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2764 bnx2_set_remote_link(bp);
2765
2766 spin_unlock(&bp->phy_lock);
2767
2768 }
2769
2770 static inline u16
2771 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2772 {
2773 u16 cons;
2774
2775 /* Tell compiler that status block fields can change. */
2776 barrier();
2777 cons = *bnapi->hw_tx_cons_ptr;
2778 barrier();
2779 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2780 cons++;
2781 return cons;
2782 }
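/* The ring is presumed to use the last descriptor of each page for
 * chaining to the next page, so a hardware consumer index that lands on
 * MAX_TX_DESC_CNT is bumped past that chain entry to keep the software
 * index aligned with real packet descriptors.  bnx2_get_hw_rx_cons()
 * below applies the same adjustment for the RX ring.
 */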
2783
2784 static int
2785 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2786 {
2787 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2788 u16 hw_cons, sw_cons, sw_ring_cons;
2789 int tx_pkt = 0, index;
2790 struct netdev_queue *txq;
2791
2792 index = (bnapi - bp->bnx2_napi);
2793 txq = netdev_get_tx_queue(bp->dev, index);
2794
2795 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2796 sw_cons = txr->tx_cons;
2797
2798 while (sw_cons != hw_cons) {
2799 struct sw_tx_bd *tx_buf;
2800 struct sk_buff *skb;
2801 int i, last;
2802
2803 sw_ring_cons = TX_RING_IDX(sw_cons);
2804
2805 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2806 skb = tx_buf->skb;
2807
2808 /* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2809 prefetch(&skb->end);
2810
2811 /* partial BD completions possible with TSO packets */
2812 if (tx_buf->is_gso) {
2813 u16 last_idx, last_ring_idx;
2814
2815 last_idx = sw_cons + tx_buf->nr_frags + 1;
2816 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2817 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2818 last_idx++;
2819 }
2820 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2821 break;
2822 }
2823 }
2824
2825 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2826 skb_headlen(skb), PCI_DMA_TODEVICE);
2827
2828 tx_buf->skb = NULL;
2829 last = tx_buf->nr_frags;
2830
2831 for (i = 0; i < last; i++) {
2832 sw_cons = NEXT_TX_BD(sw_cons);
2833
2834 pci_unmap_page(bp->pdev,
2835 pci_unmap_addr(
2836 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2837 mapping),
2838 skb_shinfo(skb)->frags[i].size,
2839 PCI_DMA_TODEVICE);
2840 }
2841
2842 sw_cons = NEXT_TX_BD(sw_cons);
2843
2844 dev_kfree_skb(skb);
2845 tx_pkt++;
2846 if (tx_pkt == budget)
2847 break;
2848
2849 if (hw_cons == sw_cons)
2850 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2851 }
2852
2853 txr->hw_tx_cons = hw_cons;
2854 txr->tx_cons = sw_cons;
2855
2856 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2857 * before checking for netif_tx_queue_stopped(). Without the
2858 * memory barrier, there is a small possibility that bnx2_start_xmit()
2859 * will miss it and cause the queue to be stopped forever.
2860 */
2861 smp_mb();
2862
2863 if (unlikely(netif_tx_queue_stopped(txq)) &&
2864 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2865 __netif_tx_lock(txq, smp_processor_id());
2866 if ((netif_tx_queue_stopped(txq)) &&
2867 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2868 netif_tx_wake_queue(txq);
2869 __netif_tx_unlock(txq);
2870 }
2871
2872 return tx_pkt;
2873 }
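/* The smp_mb() above pairs with a corresponding barrier on the producer
 * side.  A sketch of the assumed shape of the check in bnx2_start_xmit()
 * (not a verbatim excerpt):
 *
 *	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
 *		netif_tx_stop_queue(txq);
 *		smp_mb();
 *		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
 *			netif_tx_wake_queue(txq);
 *	}
 *
 * The stop/recheck on the producer side and the update/barrier/recheck
 * here together close the race where a wakeup could be missed.
 */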
2874
2875 static void
2876 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2877 struct sk_buff *skb, int count)
2878 {
2879 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2880 struct rx_bd *cons_bd, *prod_bd;
2881 int i;
2882 u16 hw_prod, prod;
2883 u16 cons = rxr->rx_pg_cons;
2884
2885 cons_rx_pg = &rxr->rx_pg_ring[cons];
2886
2887 /* The caller was unable to allocate a new page to replace the
2888 * last one in the frags array, so we need to recycle that page
2889 * and then free the skb.
2890 */
2891 if (skb) {
2892 struct page *page;
2893 struct skb_shared_info *shinfo;
2894
2895 shinfo = skb_shinfo(skb);
2896 shinfo->nr_frags--;
2897 page = shinfo->frags[shinfo->nr_frags].page;
2898 shinfo->frags[shinfo->nr_frags].page = NULL;
2899
2900 cons_rx_pg->page = page;
2901 dev_kfree_skb(skb);
2902 }
2903
2904 hw_prod = rxr->rx_pg_prod;
2905
2906 for (i = 0; i < count; i++) {
2907 prod = RX_PG_RING_IDX(hw_prod);
2908
2909 prod_rx_pg = &rxr->rx_pg_ring[prod];
2910 cons_rx_pg = &rxr->rx_pg_ring[cons];
2911 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2912 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2913
2914 if (prod != cons) {
2915 prod_rx_pg->page = cons_rx_pg->page;
2916 cons_rx_pg->page = NULL;
2917 pci_unmap_addr_set(prod_rx_pg, mapping,
2918 pci_unmap_addr(cons_rx_pg, mapping));
2919
2920 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2921 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2922
2923 }
2924 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2925 hw_prod = NEXT_RX_BD(hw_prod);
2926 }
2927 rxr->rx_pg_prod = hw_prod;
2928 rxr->rx_pg_cons = cons;
2929 }
2930
2931 static inline void
2932 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2933 struct sk_buff *skb, u16 cons, u16 prod)
2934 {
2935 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2936 struct rx_bd *cons_bd, *prod_bd;
2937
2938 cons_rx_buf = &rxr->rx_buf_ring[cons];
2939 prod_rx_buf = &rxr->rx_buf_ring[prod];
2940
2941 pci_dma_sync_single_for_device(bp->pdev,
2942 pci_unmap_addr(cons_rx_buf, mapping),
2943 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2944
2945 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2946
2947 prod_rx_buf->skb = skb;
2948
2949 if (cons == prod)
2950 return;
2951
2952 pci_unmap_addr_set(prod_rx_buf, mapping,
2953 pci_unmap_addr(cons_rx_buf, mapping));
2954
2955 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2956 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2957 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2958 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2959 }
2960
2961 static int
2962 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2963 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2964 u32 ring_idx)
2965 {
2966 int err;
2967 u16 prod = ring_idx & 0xffff;
2968
2969 err = bnx2_alloc_rx_skb(bp, rxr, prod);
2970 if (unlikely(err)) {
2971 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
2972 if (hdr_len) {
2973 unsigned int raw_len = len + 4; /* add back the 4-byte CRC */
2974 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2975
2976 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2977 }
2978 return err;
2979 }
2980
2981 skb_reserve(skb, BNX2_RX_OFFSET);
2982 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2983 PCI_DMA_FROMDEVICE);
2984
2985 if (hdr_len == 0) {
2986 skb_put(skb, len);
2987 return 0;
2988 } else {
2989 unsigned int i, frag_len, frag_size, pages;
2990 struct sw_pg *rx_pg;
2991 u16 pg_cons = rxr->rx_pg_cons;
2992 u16 pg_prod = rxr->rx_pg_prod;
2993
2994 frag_size = len + 4 - hdr_len;
2995 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2996 skb_put(skb, hdr_len);
2997
2998 for (i = 0; i < pages; i++) {
2999 dma_addr_t mapping_old;
3000
3001 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3002 if (unlikely(frag_len <= 4)) {
3003 unsigned int tail = 4 - frag_len;
3004
3005 rxr->rx_pg_cons = pg_cons;
3006 rxr->rx_pg_prod = pg_prod;
3007 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3008 pages - i);
3009 skb->len -= tail;
3010 if (i == 0) {
3011 skb->tail -= tail;
3012 } else {
3013 skb_frag_t *frag =
3014 &skb_shinfo(skb)->frags[i - 1];
3015 frag->size -= tail;
3016 skb->data_len -= tail;
3017 skb->truesize -= tail;
3018 }
3019 return 0;
3020 }
3021 rx_pg = &rxr->rx_pg_ring[pg_cons];
3022
3023 /* Don't unmap yet. If we're unable to allocate a new
3024 * page, we need to recycle the page and the DMA addr.
3025 */
3026 mapping_old = pci_unmap_addr(rx_pg, mapping);
3027 if (i == pages - 1)
3028 frag_len -= 4;
3029
3030 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3031 rx_pg->page = NULL;
3032
3033 err = bnx2_alloc_rx_page(bp, rxr,
3034 RX_PG_RING_IDX(pg_prod));
3035 if (unlikely(err)) {
3036 rxr->rx_pg_cons = pg_cons;
3037 rxr->rx_pg_prod = pg_prod;
3038 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3039 pages - i);
3040 return err;
3041 }
3042
3043 pci_unmap_page(bp->pdev, mapping_old,
3044 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3045
3046 frag_size -= frag_len;
3047 skb->data_len += frag_len;
3048 skb->truesize += frag_len;
3049 skb->len += frag_len;
3050
3051 pg_prod = NEXT_RX_BD(pg_prod);
3052 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3053 }
3054 rxr->rx_pg_prod = pg_prod;
3055 rxr->rx_pg_cons = pg_cons;
3056 }
3057 return 0;
3058 }
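/* Worked example of the page accounting above, with illustrative numbers
 * (4 KiB pages, hdr_len == 256, len == 9000 after the caller stripped
 * the CRC): frag_size = 9000 + 4 - 256 = 8748, so pages = 3.  The loop
 * fills frags of 4096, 4096 and 556 bytes, and the last frag is trimmed
 * by 4 to drop the CRC the hardware wrote into the page, leaving
 * 256 + 4096 + 4096 + 552 = 9000 bytes in the skb.
 */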
3059
3060 static inline u16
3061 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3062 {
3063 u16 cons;
3064
3065 /* Tell compiler that status block fields can change. */
3066 barrier();
3067 cons = *bnapi->hw_rx_cons_ptr;
3068 barrier();
3069 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3070 cons++;
3071 return cons;
3072 }
3073
3074 static int
3075 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3076 {
3077 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3078 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3079 struct l2_fhdr *rx_hdr;
3080 int rx_pkt = 0, pg_ring_used = 0;
3081
3082 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3083 sw_cons = rxr->rx_cons;
3084 sw_prod = rxr->rx_prod;
3085
3086 /* Memory barrier necessary as speculative reads of the rx
3087 * buffer can be ahead of the index in the status block.
3088 */
3089 rmb();
3090 while (sw_cons != hw_cons) {
3091 unsigned int len, hdr_len;
3092 u32 status;
3093 struct sw_bd *rx_buf;
3094 struct sk_buff *skb;
3095 dma_addr_t dma_addr;
3096 u16 vtag = 0;
3097 int hw_vlan __maybe_unused = 0;
3098
3099 sw_ring_cons = RX_RING_IDX(sw_cons);
3100 sw_ring_prod = RX_RING_IDX(sw_prod);
3101
3102 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3103 skb = rx_buf->skb;
3104
3105 rx_buf->skb = NULL;
3106
3107 dma_addr = pci_unmap_addr(rx_buf, mapping);
3108
3109 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
3110 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3111 PCI_DMA_FROMDEVICE);
3112
3113 rx_hdr = (struct l2_fhdr *) skb->data;
3114 len = rx_hdr->l2_fhdr_pkt_len;
3115 status = rx_hdr->l2_fhdr_status;
3116
3117 hdr_len = 0;
3118 if (status & L2_FHDR_STATUS_SPLIT) {
3119 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3120 pg_ring_used = 1;
3121 } else if (len > bp->rx_jumbo_thresh) {
3122 hdr_len = bp->rx_jumbo_thresh;
3123 pg_ring_used = 1;
3124 }
3125
3126 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3127 L2_FHDR_ERRORS_PHY_DECODE |
3128 L2_FHDR_ERRORS_ALIGNMENT |
3129 L2_FHDR_ERRORS_TOO_SHORT |
3130 L2_FHDR_ERRORS_GIANT_FRAME))) {
3131
3132 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3133 sw_ring_prod);
3134 if (pg_ring_used) {
3135 int pages;
3136
3137 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3138
3139 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3140 }
3141 goto next_rx;
3142 }
3143
3144 len -= 4; /* strip the trailing 4-byte CRC */
3145
3146 if (len <= bp->rx_copy_thresh) {
3147 struct sk_buff *new_skb;
3148
3149 new_skb = netdev_alloc_skb(bp->dev, len + 6);
3150 if (new_skb == NULL) {
3151 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3152 sw_ring_prod);
3153 goto next_rx;
3154 }
3155
3156 /* aligned copy */
3157 skb_copy_from_linear_data_offset(skb,
3158 BNX2_RX_OFFSET - 6,
3159 new_skb->data, len + 6);
3160 skb_reserve(new_skb, 6);
3161 skb_put(new_skb, len);
3162
3163 bnx2_reuse_rx_skb(bp, rxr, skb,
3164 sw_ring_cons, sw_ring_prod);
3165
3166 skb = new_skb;
3167 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
3168 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
3169 goto next_rx;
3170
3171 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3172 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
3173 vtag = rx_hdr->l2_fhdr_vlan_tag;
3174 #ifdef BCM_VLAN
3175 if (bp->vlgrp)
3176 hw_vlan = 1;
3177 else
3178 #endif
3179 {
3180 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
3181 __skb_push(skb, 4);
3182
3183 memmove(ve, skb->data + 4, ETH_ALEN * 2);
3184 ve->h_vlan_proto = htons(ETH_P_8021Q);
3185 ve->h_vlan_TCI = htons(vtag);
3186 len += 4;
3187 }
3188 }
3189
3190 skb->protocol = eth_type_trans(skb, bp->dev);
3191
3192 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3193 (ntohs(skb->protocol) != 0x8100)) {
3194
3195 dev_kfree_skb(skb);
3196 goto next_rx;
3197
3198 }
3199
3200 skb->ip_summed = CHECKSUM_NONE;
3201 if (bp->rx_csum &&
3202 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3203 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3204
3205 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3206 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3207 skb->ip_summed = CHECKSUM_UNNECESSARY;
3208 }
3209
3210 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3211
3212 #ifdef BCM_VLAN
3213 if (hw_vlan)
3214 vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
3215 else
3216 #endif
3217 netif_receive_skb(skb);
3218
3219 rx_pkt++;
3220
3221 next_rx:
3222 sw_cons = NEXT_RX_BD(sw_cons);
3223 sw_prod = NEXT_RX_BD(sw_prod);
3224
3225 if (rx_pkt == budget)
3226 break;
3227
3228 /* Refresh hw_cons to see if there is new work */
3229 if (sw_cons == hw_cons) {
3230 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3231 rmb();
3232 }
3233 }
3234 rxr->rx_cons = sw_cons;
3235 rxr->rx_prod = sw_prod;
3236
3237 if (pg_ring_used)
3238 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3239
3240 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3241
3242 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3243
3244 mmiowb();
3245
3246 return rx_pkt;
3247
3248 }
3249
3250 /* MSI ISR - The only difference between this and the INTx ISR
3251 * is that the MSI interrupt is always serviced.
3252 */
3253 static irqreturn_t
3254 bnx2_msi(int irq, void *dev_instance)
3255 {
3256 struct bnx2_napi *bnapi = dev_instance;
3257 struct bnx2 *bp = bnapi->bp;
3258
3259 prefetch(bnapi->status_blk.msi);
3260 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3261 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3262 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3263
3264 /* Return here if interrupt is disabled. */
3265 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3266 return IRQ_HANDLED;
3267
3268 napi_schedule(&bnapi->napi);
3269
3270 return IRQ_HANDLED;
3271 }
3272
3273 static irqreturn_t
3274 bnx2_msi_1shot(int irq, void *dev_instance)
3275 {
3276 struct bnx2_napi *bnapi = dev_instance;
3277 struct bnx2 *bp = bnapi->bp;
3278
3279 prefetch(bnapi->status_blk.msi);
3280
3281 /* Return here if interrupt is disabled. */
3282 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3283 return IRQ_HANDLED;
3284
3285 napi_schedule(&bnapi->napi);
3286
3287 return IRQ_HANDLED;
3288 }
3289
3290 static irqreturn_t
3291 bnx2_interrupt(int irq, void *dev_instance)
3292 {
3293 struct bnx2_napi *bnapi = dev_instance;
3294 struct bnx2 *bp = bnapi->bp;
3295 struct status_block *sblk = bnapi->status_blk.msi;
3296
3297 /* When using INTx, it is possible for the interrupt to arrive
3298 * at the CPU before the status block write that preceded it is
3299 * visible. Reading a register will flush the status block write.
3300 * When using MSI, the MSI message will always complete after
3301 * the status block write.
3302 */
3303 if ((sblk->status_idx == bnapi->last_status_idx) &&
3304 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3305 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3306 return IRQ_NONE;
3307
3308 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3309 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3310 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3311
3312 /* Read back to deassert IRQ immediately to avoid too many
3313 * spurious interrupts.
3314 */
3315 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3316
3317 /* Return here if interrupt is shared and is disabled. */
3318 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3319 return IRQ_HANDLED;
3320
3321 if (napi_schedule_prep(&bnapi->napi)) {
3322 bnapi->last_status_idx = sblk->status_idx;
3323 __napi_schedule(&bnapi->napi);
3324 }
3325
3326 return IRQ_HANDLED;
3327 }
3328
3329 static inline int
3330 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3331 {
3332 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3333 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3334
3335 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3336 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3337 return 1;
3338 return 0;
3339 }
3340
3341 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3342 STATUS_ATTN_BITS_TIMER_ABORT)
3343
3344 static inline int
3345 bnx2_has_work(struct bnx2_napi *bnapi)
3346 {
3347 struct status_block *sblk = bnapi->status_blk.msi;
3348
3349 if (bnx2_has_fast_work(bnapi))
3350 return 1;
3351
3352 #ifdef BCM_CNIC
3353 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3354 return 1;
3355 #endif
3356
3357 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3358 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3359 return 1;
3360
3361 return 0;
3362 }
3363
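/* Workaround for a missed MSI: if work is pending but the status index
 * has not advanced since the previous idle check, the interrupt message
 * may have been lost.  Toggling BNX2_PCICFG_MSI_CONTROL_ENABLE off and
 * back on rearms MSI generation, and calling bnx2_msi() directly makes
 * up for the missed interrupt by scheduling NAPI by hand.
 */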
3364 static void
3365 bnx2_chk_missed_msi(struct bnx2 *bp)
3366 {
3367 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3368 u32 msi_ctrl;
3369
3370 if (bnx2_has_work(bnapi)) {
3371 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3372 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3373 return;
3374
3375 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3376 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3377 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3378 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3379 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3380 }
3381 }
3382
3383 bp->idle_chk_status_idx = bnapi->last_status_idx;
3384 }
3385
3386 #ifdef BCM_CNIC
3387 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3388 {
3389 struct cnic_ops *c_ops;
3390
3391 if (!bnapi->cnic_present)
3392 return;
3393
3394 rcu_read_lock();
3395 c_ops = rcu_dereference(bp->cnic_ops);
3396 if (c_ops)
3397 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3398 bnapi->status_blk.msi);
3399 rcu_read_unlock();
3400 }
3401 #endif
3402
3403 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3404 {
3405 struct status_block *sblk = bnapi->status_blk.msi;
3406 u32 status_attn_bits = sblk->status_attn_bits;
3407 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3408
3409 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3410 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3411
3412 bnx2_phy_int(bp, bnapi);
3413
3414 /* This is needed to take care of transient status
3415 * during link changes.
3416 */
3417 REG_WR(bp, BNX2_HC_COMMAND,
3418 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3419 REG_RD(bp, BNX2_HC_COMMAND);
3420 }
3421 }
3422
3423 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3424 int work_done, int budget)
3425 {
3426 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3427 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3428
3429 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3430 bnx2_tx_int(bp, bnapi, 0);
3431
3432 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3433 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3434
3435 return work_done;
3436 }
3437
3438 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3439 {
3440 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3441 struct bnx2 *bp = bnapi->bp;
3442 int work_done = 0;
3443 struct status_block_msix *sblk = bnapi->status_blk.msix;
3444
3445 while (1) {
3446 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3447 if (unlikely(work_done >= budget))
3448 break;
3449
3450 bnapi->last_status_idx = sblk->status_idx;
3451 /* status idx must be read before checking for more work. */
3452 rmb();
3453 if (likely(!bnx2_has_fast_work(bnapi))) {
3454
3455 napi_complete(napi);
3456 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3457 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3458 bnapi->last_status_idx);
3459 break;
3460 }
3461 }
3462 return work_done;
3463 }
3464
3465 static int bnx2_poll(struct napi_struct *napi, int budget)
3466 {
3467 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3468 struct bnx2 *bp = bnapi->bp;
3469 int work_done = 0;
3470 struct status_block *sblk = bnapi->status_blk.msi;
3471
3472 while (1) {
3473 bnx2_poll_link(bp, bnapi);
3474
3475 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3476
3477 #ifdef BCM_CNIC
3478 bnx2_poll_cnic(bp, bnapi);
3479 #endif
3480
3481 /* bnapi->last_status_idx is used below to tell the hw how
3482 * much work has been processed, so we must read it before
3483 * checking for more work.
3484 */
3485 bnapi->last_status_idx = sblk->status_idx;
3486
3487 if (unlikely(work_done >= budget))
3488 break;
3489
3490 rmb();
3491 if (likely(!bnx2_has_work(bnapi))) {
3492 napi_complete(napi);
3493 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3494 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3495 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3496 bnapi->last_status_idx);
3497 break;
3498 }
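/* Without MSI/MSI-X, completing in two writes is assumed to be
 * deliberate: the first write below reports the last processed status
 * index while keeping the interrupt masked, and the second unmasks it.
 */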
3499 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3500 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3501 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3502 bnapi->last_status_idx);
3503
3504 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3505 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3506 bnapi->last_status_idx);
3507 break;
3508 }
3509 }
3510
3511 return work_done;
3512 }
3513
3514 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3515 * from set_multicast.
3516 */
3517 static void
3518 bnx2_set_rx_mode(struct net_device *dev)
3519 {
3520 struct bnx2 *bp = netdev_priv(dev);
3521 u32 rx_mode, sort_mode;
3522 struct netdev_hw_addr *ha;
3523 int i;
3524
3525 if (!netif_running(dev))
3526 return;
3527
3528 spin_lock_bh(&bp->phy_lock);
3529
3530 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3531 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3532 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3533 #ifdef BCM_VLAN
3534 if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3535 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3536 #else
3537 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
3538 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3539 #endif
3540 if (dev->flags & IFF_PROMISC) {
3541 /* Promiscuous mode. */
3542 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3543 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3544 BNX2_RPM_SORT_USER0_PROM_VLAN;
3545 }
3546 else if (dev->flags & IFF_ALLMULTI) {
3547 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3548 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3549 0xffffffff);
3550 }
3551 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3552 }
3553 else {
3554 /* Accept one or more multicast addresses. */
3555 struct dev_mc_list *mclist;
3556 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3557 u32 regidx;
3558 u32 bit;
3559 u32 crc;
3560
3561 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3562
3563 for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
3564 i++, mclist = mclist->next) {
3565
3566 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3567 bit = crc & 0xff;
3568 regidx = (bit & 0xe0) >> 5;
3569 bit &= 0x1f;
3570 mc_filter[regidx] |= (1 << bit);
3571 }
3572
3573 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3574 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3575 mc_filter[i]);
3576 }
3577
3578 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3579 }
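/* Worked example of the hash above: if ether_crc_le() yields a CRC whose
 * low byte is 0x6d, then bit = 0x6d, regidx = (0x6d & 0xe0) >> 5 = 3 and
 * bit & 0x1f = 13, so bit 13 of the fourth hash register
 * (BNX2_EMAC_MULTICAST_HASH0 + 12) is set.
 */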
3580
3581 if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3582 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3583 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3584 BNX2_RPM_SORT_USER0_PROM_VLAN;
3585 } else if (!(dev->flags & IFF_PROMISC)) {
3586 /* Add all entries to the match filter list */
3587 i = 0;
3588 netdev_for_each_uc_addr(ha, dev) {
3589 bnx2_set_mac_addr(bp, ha->addr,
3590 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3591 sort_mode |= (1 <<
3592 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3593 i++;
3594 }
3595
3596 }
3597
3598 if (rx_mode != bp->rx_mode) {
3599 bp->rx_mode = rx_mode;
3600 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3601 }
3602
3603 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3604 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3605 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3606
3607 spin_unlock_bh(&bp->phy_lock);
3608 }
3609
3610 static int __devinit
3611 check_fw_section(const struct firmware *fw,
3612 const struct bnx2_fw_file_section *section,
3613 u32 alignment, bool non_empty)
3614 {
3615 u32 offset = be32_to_cpu(section->offset);
3616 u32 len = be32_to_cpu(section->len);
3617
3618 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3619 return -EINVAL;
3620 if ((non_empty && len == 0) || len > fw->size - offset ||
3621 len & (alignment - 1))
3622 return -EINVAL;
3623 return 0;
3624 }
3625
3626 static int __devinit
3627 check_mips_fw_entry(const struct firmware *fw,
3628 const struct bnx2_mips_fw_file_entry *entry)
3629 {
3630 if (check_fw_section(fw, &entry->text, 4, true) ||
3631 check_fw_section(fw, &entry->data, 4, false) ||
3632 check_fw_section(fw, &entry->rodata, 4, false))
3633 return -EINVAL;
3634 return 0;
3635 }
3636
3637 static int __devinit
3638 bnx2_request_firmware(struct bnx2 *bp)
3639 {
3640 const char *mips_fw_file, *rv2p_fw_file;
3641 const struct bnx2_mips_fw_file *mips_fw;
3642 const struct bnx2_rv2p_fw_file *rv2p_fw;
3643 int rc;
3644
3645 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3646 mips_fw_file = FW_MIPS_FILE_09;
3647 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3648 (CHIP_ID(bp) == CHIP_ID_5709_A1))
3649 rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3650 else
3651 rv2p_fw_file = FW_RV2P_FILE_09;
3652 } else {
3653 mips_fw_file = FW_MIPS_FILE_06;
3654 rv2p_fw_file = FW_RV2P_FILE_06;
3655 }
3656
3657 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3658 if (rc) {
3659 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3660 mips_fw_file);
3661 return rc;
3662 }
3663
3664 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3665 if (rc) {
3666 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3667 rv2p_fw_file);
3668 return rc;
3669 }
3670 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3671 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3672 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3673 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3674 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3675 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3676 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3677 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3678 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3679 mips_fw_file);
3680 return -EINVAL;
3681 }
3682 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3683 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3684 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3685 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3686 rv2p_fw_file);
3687 return -EINVAL;
3688 }
3689
3690 return 0;
3691 }
3692
3693 static u32
3694 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3695 {
3696 switch (idx) {
3697 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3698 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3699 rv2p_code |= RV2P_BD_PAGE_SIZE;
3700 break;
3701 }
3702 return rv2p_code;
3703 }
3704
3705 static int
3706 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3707 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3708 {
3709 u32 rv2p_code_len, file_offset;
3710 __be32 *rv2p_code;
3711 int i;
3712 u32 val, cmd, addr;
3713
3714 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3715 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3716
3717 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3718
3719 if (rv2p_proc == RV2P_PROC1) {
3720 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3721 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3722 } else {
3723 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3724 addr = BNX2_RV2P_PROC2_ADDR_CMD;
3725 }
3726
3727 for (i = 0; i < rv2p_code_len; i += 8) {
3728 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3729 rv2p_code++;
3730 REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3731 rv2p_code++;
3732
3733 val = (i / 8) | cmd;
3734 REG_WR(bp, addr, val);
3735 }
3736
3737 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3738 for (i = 0; i < 8; i++) {
3739 u32 loc, code;
3740
3741 loc = be32_to_cpu(fw_entry->fixup[i]);
3742 if (loc && ((loc * 4) < rv2p_code_len)) {
3743 code = be32_to_cpu(*(rv2p_code + loc - 1));
3744 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3745 code = be32_to_cpu(*(rv2p_code + loc));
3746 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3747 REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3748
3749 val = (loc / 2) | cmd;
3750 REG_WR(bp, addr, val);
3751 }
3752 }
3753
3754 /* Reset the processor, un-stall is done later. */
3755 if (rv2p_proc == RV2P_PROC1) {
3756 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3757 }
3758 else {
3759 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3760 }
3761
3762 return 0;
3763 }
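/* How the fixup table above is interpreted: each loc is an index into
 * the firmware image measured in 32-bit words.  Since every rv2p
 * instruction is 64 bits wide (written as an INSTR_HIGH/INSTR_LOW pair),
 * the patched instruction's high word sits at loc - 1, its low word at
 * loc, and loc / 2 is the instruction slot written back through the
 * address/command register.
 */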
3764
3765 static int
3766 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3767 const struct bnx2_mips_fw_file_entry *fw_entry)
3768 {
3769 u32 addr, len, file_offset;
3770 __be32 *data;
3771 u32 offset;
3772 u32 val;
3773
3774 /* Halt the CPU. */
3775 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3776 val |= cpu_reg->mode_value_halt;
3777 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3778 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3779
3780 /* Load the Text area. */
3781 addr = be32_to_cpu(fw_entry->text.addr);
3782 len = be32_to_cpu(fw_entry->text.len);
3783 file_offset = be32_to_cpu(fw_entry->text.offset);
3784 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3785
3786 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3787 if (len) {
3788 int j;
3789
3790 for (j = 0; j < (len / 4); j++, offset += 4)
3791 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3792 }
3793
3794 /* Load the Data area. */
3795 addr = be32_to_cpu(fw_entry->data.addr);
3796 len = be32_to_cpu(fw_entry->data.len);
3797 file_offset = be32_to_cpu(fw_entry->data.offset);
3798 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3799
3800 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3801 if (len) {
3802 int j;
3803
3804 for (j = 0; j < (len / 4); j++, offset += 4)
3805 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3806 }
3807
3808 /* Load the Read-Only area. */
3809 addr = be32_to_cpu(fw_entry->rodata.addr);
3810 len = be32_to_cpu(fw_entry->rodata.len);
3811 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3812 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3813
3814 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3815 if (len) {
3816 int j;
3817
3818 for (j = 0; j < (len / 4); j++, offset += 4)
3819 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3820 }
3821
3822 /* Clear the pre-fetch instruction. */
3823 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3824
3825 val = be32_to_cpu(fw_entry->start_addr);
3826 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3827
3828 /* Start the CPU. */
3829 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3830 val &= ~cpu_reg->mode_value_halt;
3831 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3832 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3833
3834 return 0;
3835 }
3836
3837 static int
3838 bnx2_init_cpus(struct bnx2 *bp)
3839 {
3840 const struct bnx2_mips_fw_file *mips_fw =
3841 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3842 const struct bnx2_rv2p_fw_file *rv2p_fw =
3843 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3844 int rc;
3845
3846 /* Initialize the RV2P processor. */
3847 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3848 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3849
3850 /* Initialize the RX Processor. */
3851 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3852 if (rc)
3853 goto init_cpu_err;
3854
3855 /* Initialize the TX Processor. */
3856 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3857 if (rc)
3858 goto init_cpu_err;
3859
3860 /* Initialize the TX Patch-up Processor. */
3861 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3862 if (rc)
3863 goto init_cpu_err;
3864
3865 /* Initialize the Completion Processor. */
3866 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3867 if (rc)
3868 goto init_cpu_err;
3869
3870 /* Initialize the Command Processor. */
3871 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3872
3873 init_cpu_err:
3874 return rc;
3875 }
3876
3877 static int
3878 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3879 {
3880 u16 pmcsr;
3881
3882 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3883
3884 switch (state) {
3885 case PCI_D0: {
3886 u32 val;
3887
3888 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3889 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3890 PCI_PM_CTRL_PME_STATUS);
3891
3892 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3893 /* delay required during transition out of D3hot */
3894 msleep(20);
3895
3896 val = REG_RD(bp, BNX2_EMAC_MODE);
3897 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3898 val &= ~BNX2_EMAC_MODE_MPKT;
3899 REG_WR(bp, BNX2_EMAC_MODE, val);
3900
3901 val = REG_RD(bp, BNX2_RPM_CONFIG);
3902 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3903 REG_WR(bp, BNX2_RPM_CONFIG, val);
3904 break;
3905 }
3906 case PCI_D3hot: {
3907 int i;
3908 u32 val, wol_msg;
3909
3910 if (bp->wol) {
3911 u32 advertising;
3912 u8 autoneg;
3913
3914 autoneg = bp->autoneg;
3915 advertising = bp->advertising;
3916
3917 if (bp->phy_port == PORT_TP) {
3918 bp->autoneg = AUTONEG_SPEED;
3919 bp->advertising = ADVERTISED_10baseT_Half |
3920 ADVERTISED_10baseT_Full |
3921 ADVERTISED_100baseT_Half |
3922 ADVERTISED_100baseT_Full |
3923 ADVERTISED_Autoneg;
3924 }
3925
3926 spin_lock_bh(&bp->phy_lock);
3927 bnx2_setup_phy(bp, bp->phy_port);
3928 spin_unlock_bh(&bp->phy_lock);
3929
3930 bp->autoneg = autoneg;
3931 bp->advertising = advertising;
3932
3933 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3934
3935 val = REG_RD(bp, BNX2_EMAC_MODE);
3936
3937 /* Enable port mode. */
3938 val &= ~BNX2_EMAC_MODE_PORT;
3939 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3940 BNX2_EMAC_MODE_ACPI_RCVD |
3941 BNX2_EMAC_MODE_MPKT;
3942 if (bp->phy_port == PORT_TP)
3943 val |= BNX2_EMAC_MODE_PORT_MII;
3944 else {
3945 val |= BNX2_EMAC_MODE_PORT_GMII;
3946 if (bp->line_speed == SPEED_2500)
3947 val |= BNX2_EMAC_MODE_25G_MODE;
3948 }
3949
3950 REG_WR(bp, BNX2_EMAC_MODE, val);
3951
3952 /* receive all multicast */
3953 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3954 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3955 0xffffffff);
3956 }
3957 REG_WR(bp, BNX2_EMAC_RX_MODE,
3958 BNX2_EMAC_RX_MODE_SORT_MODE);
3959
3960 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3961 BNX2_RPM_SORT_USER0_MC_EN;
3962 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3963 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3964 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3965 BNX2_RPM_SORT_USER0_ENA);
3966
3967 /* Need to enable EMAC and RPM for WOL. */
3968 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3969 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3970 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3971 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3972
3973 val = REG_RD(bp, BNX2_RPM_CONFIG);
3974 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3975 REG_WR(bp, BNX2_RPM_CONFIG, val);
3976
3977 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3978 }
3979 else {
3980 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3981 }
3982
3983 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3984 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
3985 1, 0);
3986
3987 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3988 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3989 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3990
3991 if (bp->wol)
3992 pmcsr |= 3; /* D3hot */
3993 }
3994 else {
3995 pmcsr |= 3; /* D3hot */
3996 }
3997 if (bp->wol) {
3998 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3999 }
4000 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
4001 pmcsr);
4002
4003 /* No more memory access after this point until
4004 * device is brought back to D0.
4005 */
4006 udelay(50);
4007 break;
4008 }
4009 default:
4010 return -EINVAL;
4011 }
4012 return 0;
4013 }
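/* A sketch of how a PM hook is expected to use the helper above
 * (illustrative only; the actual suspend/resume paths live elsewhere in
 * this file).  On suspend:
 *
 *	bnx2_set_power_state(bp, PCI_D3hot);
 *
 * arms WOL (when bp->wol is set) and drops the device to D3hot, while
 *
 *	bnx2_set_power_state(bp, PCI_D0);
 *
 * restores full power on resume before any register access is made.
 */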
4014
4015 static int
4016 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4017 {
4018 u32 val;
4019 int j;
4020
4021 /* Request access to the flash interface. */
4022 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4023 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4024 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4025 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4026 break;
4027
4028 udelay(5);
4029 }
4030
4031 if (j >= NVRAM_TIMEOUT_COUNT)
4032 return -EBUSY;
4033
4034 return 0;
4035 }
4036
4037 static int
4038 bnx2_release_nvram_lock(struct bnx2 *bp)
4039 {
4040 int j;
4041 u32 val;
4042
4043 /* Relinquish nvram interface. */
4044 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4045
4046 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4047 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4048 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4049 break;
4050
4051 udelay(5);
4052 }
4053
4054 if (j >= NVRAM_TIMEOUT_COUNT)
4055 return -EBUSY;
4056
4057 return 0;
4058 }
4059
4060
4061 static int
4062 bnx2_enable_nvram_write(struct bnx2 *bp)
4063 {
4064 u32 val;
4065
4066 val = REG_RD(bp, BNX2_MISC_CFG);
4067 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4068
4069 if (bp->flash_info->flags & BNX2_NV_WREN) {
4070 int j;
4071
4072 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4073 REG_WR(bp, BNX2_NVM_COMMAND,
4074 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4075
4076 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4077 udelay(5);
4078
4079 val = REG_RD(bp, BNX2_NVM_COMMAND);
4080 if (val & BNX2_NVM_COMMAND_DONE)
4081 break;
4082 }
4083
4084 if (j >= NVRAM_TIMEOUT_COUNT)
4085 return -EBUSY;
4086 }
4087 return 0;
4088 }
4089
4090 static void
4091 bnx2_disable_nvram_write(struct bnx2 *bp)
4092 {
4093 u32 val;
4094
4095 val = REG_RD(bp, BNX2_MISC_CFG);
4096 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4097 }
4098
4099
4100 static void
4101 bnx2_enable_nvram_access(struct bnx2 *bp)
4102 {
4103 u32 val;
4104
4105 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4106 /* Enable both bits, even on read. */
4107 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4108 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4109 }
4110
4111 static void
4112 bnx2_disable_nvram_access(struct bnx2 *bp)
4113 {
4114 u32 val;
4115
4116 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4117 /* Disable both bits, even after read. */
4118 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4119 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4120 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4121 }
4122
4123 static int
4124 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4125 {
4126 u32 cmd;
4127 int j;
4128
4129 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4130 /* Buffered flash, no erase needed */
4131 return 0;
4132
4133 /* Build an erase command */
4134 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4135 BNX2_NVM_COMMAND_DOIT;
4136
4137 /* Need to clear DONE bit separately. */
4138 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4139
4140 /* Address of the NVRAM sector to erase. */
4141 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4142
4143 /* Issue an erase command. */
4144 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4145
4146 /* Wait for completion. */
4147 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4148 u32 val;
4149
4150 udelay(5);
4151
4152 val = REG_RD(bp, BNX2_NVM_COMMAND);
4153 if (val & BNX2_NVM_COMMAND_DONE)
4154 break;
4155 }
4156
4157 if (j >= NVRAM_TIMEOUT_COUNT)
4158 return -EBUSY;
4159
4160 return 0;
4161 }
4162
4163 static int
4164 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4165 {
4166 u32 cmd;
4167 int j;
4168
4169 /* Build the command word. */
4170 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4171
4172 /* Calculate the offset within a buffered flash; not needed for the 5709. */
4173 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4174 offset = ((offset / bp->flash_info->page_size) <<
4175 bp->flash_info->page_bits) +
4176 (offset % bp->flash_info->page_size);
4177 }
4178
4179 /* Need to clear DONE bit separately. */
4180 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4181
4182 /* Address of the NVRAM to read from. */
4183 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4184
4185 /* Issue a read command. */
4186 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4187
4188 /* Wait for completion. */
4189 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4190 u32 val;
4191
4192 udelay(5);
4193
4194 val = REG_RD(bp, BNX2_NVM_COMMAND);
4195 if (val & BNX2_NVM_COMMAND_DONE) {
4196 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4197 memcpy(ret_val, &v, 4);
4198 break;
4199 }
4200 }
4201 if (j >= NVRAM_TIMEOUT_COUNT)
4202 return -EBUSY;
4203
4204 return 0;
4205 }
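
/* The BNX2_NV_TRANSLATE step above converts a linear byte offset into the
 * page/byte addressing that buffered flash parts expect: the page index
 * moves into the high bits and the byte offset within the page stays in
 * the low bits.  Worked example using illustrative values page_size = 264
 * and page_bits = 9 (a 264-byte-page part addressed on 512-byte, i.e.
 * 1 << 9, page boundaries):
 *
 *   offset = 1000
 *   page   = 1000 / 264 = 3,   byte = 1000 % 264 = 208
 *   translated = (3 << 9) + 208 = 1536 + 208 = 1744
 */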
4206
4207
4208 static int
4209 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4210 {
4211 u32 cmd;
4212 __be32 val32;
4213 int j;
4214
4215 /* Build the command word. */
4216 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4217
4218 /* Calculate the offset within a buffered flash; not needed for the 5709. */
4219 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4220 offset = ((offset / bp->flash_info->page_size) <<
4221 bp->flash_info->page_bits) +
4222 (offset % bp->flash_info->page_size);
4223 }
4224
4225 /* Need to clear DONE bit separately. */
4226 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4227
4228 memcpy(&val32, val, 4);
4229
4230 /* Write the data. */
4231 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4232
4233 /* Address of the NVRAM to write to. */
4234 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4235
4236 /* Issue the write command. */
4237 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4238
4239 /* Wait for completion. */
4240 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4241 udelay(5);
4242
4243 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4244 break;
4245 }
4246 if (j >= NVRAM_TIMEOUT_COUNT)
4247 return -EBUSY;
4248
4249 return 0;
4250 }
4251
4252 static int
4253 bnx2_init_nvram(struct bnx2 *bp)
4254 {
4255 u32 val;
4256 int j, entry_count, rc = 0;
4257 const struct flash_spec *flash;
4258
4259 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4260 bp->flash_info = &flash_5709;
4261 goto get_flash_size;
4262 }
4263
4264 /* Determine the selected interface. */
4265 val = REG_RD(bp, BNX2_NVM_CFG1);
4266
4267 entry_count = ARRAY_SIZE(flash_table);
4268
4269 if (val & 0x40000000) {
4270
4271 /* Flash interface has been reconfigured */
4272 for (j = 0, flash = &flash_table[0]; j < entry_count;
4273 j++, flash++) {
4274 if ((val & FLASH_BACKUP_STRAP_MASK) ==
4275 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4276 bp->flash_info = flash;
4277 break;
4278 }
4279 }
4280 }
4281 else {
4282 u32 mask;
4283 /* Not yet reconfigured */
4284
4285 if (val & (1 << 23))
4286 mask = FLASH_BACKUP_STRAP_MASK;
4287 else
4288 mask = FLASH_STRAP_MASK;
4289
4290 for (j = 0, flash = &flash_table[0]; j < entry_count;
4291 j++, flash++) {
4292
4293 if ((val & mask) == (flash->strapping & mask)) {
4294 bp->flash_info = flash;
4295
4296 /* Request access to the flash interface. */
4297 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4298 return rc;
4299
4300 /* Enable access to flash interface */
4301 bnx2_enable_nvram_access(bp);
4302
4303 /* Reconfigure the flash interface */
4304 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4305 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4306 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4307 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4308
4309 /* Disable access to flash interface */
4310 bnx2_disable_nvram_access(bp);
4311 bnx2_release_nvram_lock(bp);
4312
4313 break;
4314 }
4315 }
4316 } /* if (val & 0x40000000) */
4317
4318 if (j == entry_count) {
4319 bp->flash_info = NULL;
4320 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
4321 return -ENODEV;
4322 }
4323
4324 get_flash_size:
4325 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4326 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4327 if (val)
4328 bp->flash_size = val;
4329 else
4330 bp->flash_size = bp->flash_info->total_size;
4331
4332 return rc;
4333 }
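
/* Summary of the selection logic above: bit 30 of BNX2_NVM_CFG1 says
 * whether the flash interface has already been reconfigured.  If it has,
 * the table is matched on the backup strap field of each entry's config1;
 * if not, the raw strapping is matched (using the backup strap field when
 * bit 23 is set), and only in that case are NVM_CFG1..CFG3 and WRITE1
 * programmed from the matching flash_table entry.
 */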
4334
4335 static int
4336 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4337 int buf_size)
4338 {
4339 int rc = 0;
4340 u32 cmd_flags, offset32, len32, extra;
4341
4342 if (buf_size == 0)
4343 return 0;
4344
4345 /* Request access to the flash interface. */
4346 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4347 return rc;
4348
4349 /* Enable access to flash interface */
4350 bnx2_enable_nvram_access(bp);
4351
4352 len32 = buf_size;
4353 offset32 = offset;
4354 extra = 0;
4355
4356 cmd_flags = 0;
4357
4358 if (offset32 & 3) {
4359 u8 buf[4];
4360 u32 pre_len;
4361
4362 offset32 &= ~3;
4363 pre_len = 4 - (offset & 3);
4364
4365 if (pre_len >= len32) {
4366 pre_len = len32;
4367 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4368 BNX2_NVM_COMMAND_LAST;
4369 }
4370 else {
4371 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4372 }
4373
4374 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4375
4376 if (rc)
4377 return rc;
4378
4379 memcpy(ret_buf, buf + (offset & 3), pre_len);
4380
4381 offset32 += 4;
4382 ret_buf += pre_len;
4383 len32 -= pre_len;
4384 }
4385 if (len32 & 3) {
4386 extra = 4 - (len32 & 3);
4387 len32 = (len32 + 4) & ~3;
4388 }
4389
4390 if (len32 == 4) {
4391 u8 buf[4];
4392
4393 if (cmd_flags)
4394 cmd_flags = BNX2_NVM_COMMAND_LAST;
4395 else
4396 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4397 BNX2_NVM_COMMAND_LAST;
4398
4399 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4400
4401 memcpy(ret_buf, buf, 4 - extra);
4402 }
4403 else if (len32 > 0) {
4404 u8 buf[4];
4405
4406 /* Read the first word. */
4407 if (cmd_flags)
4408 cmd_flags = 0;
4409 else
4410 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4411
4412 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4413
4414 /* Advance to the next dword. */
4415 offset32 += 4;
4416 ret_buf += 4;
4417 len32 -= 4;
4418
4419 while (len32 > 4 && rc == 0) {
4420 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4421
4422 /* Advance to the next dword. */
4423 offset32 += 4;
4424 ret_buf += 4;
4425 len32 -= 4;
4426 }
4427
4428 if (rc)
4429 return rc;
4430
4431 cmd_flags = BNX2_NVM_COMMAND_LAST;
4432 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4433
4434 memcpy(ret_buf, buf, 4 - extra);
4435 }
4436
4437 /* Disable access to flash interface */
4438 bnx2_disable_nvram_access(bp);
4439
4440 bnx2_release_nvram_lock(bp);
4441
4442 return rc;
4443 }
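
/* bnx2_nvram_read() reduces an arbitrary (offset, buf_size) request to
 * whole 32-bit NVRAM words: an unaligned head is served by one dword read
 * with the leading bytes skipped, and a ragged tail is rounded up with
 * the extra bytes discarded.  A minimal sketch of that bookkeeping,
 * using hypothetical names chosen for this example only:
 */
static void __maybe_unused
bnx2_nvram_span(u32 offset, u32 len, u32 *pre_len, u32 *extra)
{
	*pre_len = 0;
	*extra = 0;

	/* Bytes taken from the first dword when the offset is unaligned. */
	if (offset & 3)
		*pre_len = min_t(u32, 4 - (offset & 3), len);

	len -= *pre_len;

	/* Bytes read past the end when the remaining length is ragged. */
	if (len & 3)
		*extra = 4 - (len & 3);
}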
4444
4445 static int
4446 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4447 int buf_size)
4448 {
4449 u32 written, offset32, len32;
4450 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4451 int rc = 0;
4452 int align_start, align_end;
4453
4454 buf = data_buf;
4455 offset32 = offset;
4456 len32 = buf_size;
4457 align_start = align_end = 0;
4458
4459 if ((align_start = (offset32 & 3))) {
4460 offset32 &= ~3;
4461 len32 += align_start;
4462 if (len32 < 4)
4463 len32 = 4;
4464 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4465 return rc;
4466 }
4467
4468 if (len32 & 3) {
4469 align_end = 4 - (len32 & 3);
4470 len32 += align_end;
4471 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4472 return rc;
4473 }
4474
4475 if (align_start || align_end) {
4476 align_buf = kmalloc(len32, GFP_KERNEL);
4477 if (align_buf == NULL)
4478 return -ENOMEM;
4479 if (align_start) {
4480 memcpy(align_buf, start, 4);
4481 }
4482 if (align_end) {
4483 memcpy(align_buf + len32 - 4, end, 4);
4484 }
4485 memcpy(align_buf + align_start, data_buf, buf_size);
4486 buf = align_buf;
4487 }
4488
4489 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4490 flash_buffer = kmalloc(264, GFP_KERNEL);
4491 if (flash_buffer == NULL) {
4492 rc = -ENOMEM;
4493 goto nvram_write_end;
4494 }
4495 }
4496
4497 written = 0;
4498 while ((written < len32) && (rc == 0)) {
4499 u32 page_start, page_end, data_start, data_end;
4500 u32 addr, cmd_flags;
4501 int i;
4502
4503 /* Find the page_start addr */
4504 page_start = offset32 + written;
4505 page_start -= (page_start % bp->flash_info->page_size);
4506 /* Find the page_end addr */
4507 page_end = page_start + bp->flash_info->page_size;
4508 /* Find the data_start addr */
4509 data_start = (written == 0) ? offset32 : page_start;
4510 /* Find the data_end addr */
4511 data_end = (page_end > offset32 + len32) ?
4512 (offset32 + len32) : page_end;
4513
4514 /* Request access to the flash interface. */
4515 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4516 goto nvram_write_end;
4517
4518 /* Enable access to flash interface */
4519 bnx2_enable_nvram_access(bp);
4520
4521 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4522 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4523 int j;
4524
4525 /* Read the whole page into the buffer
4526 * (non-buffered flash only) */
4527 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4528 if (j == (bp->flash_info->page_size - 4)) {
4529 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4530 }
4531 rc = bnx2_nvram_read_dword(bp,
4532 page_start + j,
4533 &flash_buffer[j],
4534 cmd_flags);
4535
4536 if (rc)
4537 goto nvram_write_end;
4538
4539 cmd_flags = 0;
4540 }
4541 }
4542
4543 /* Enable writes to flash interface (unlock write-protect) */
4544 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4545 goto nvram_write_end;
4546
4547 /* Loop to write back the buffer data from page_start to
4548 * data_start */
4549 i = 0;
4550 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4551 /* Erase the page */
4552 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4553 goto nvram_write_end;
4554
4555 /* Re-enable the write again for the actual write */
4556 bnx2_enable_nvram_write(bp);
4557
4558 for (addr = page_start; addr < data_start;
4559 addr += 4, i += 4) {
4560
4561 rc = bnx2_nvram_write_dword(bp, addr,
4562 &flash_buffer[i], cmd_flags);
4563
4564 if (rc != 0)
4565 goto nvram_write_end;
4566
4567 cmd_flags = 0;
4568 }
4569 }
4570
4571 /* Loop to write the new data from data_start to data_end */
4572 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4573 if ((addr == page_end - 4) ||
4574 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4575 (addr == data_end - 4))) {
4576
4577 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4578 }
4579 rc = bnx2_nvram_write_dword(bp, addr, buf,
4580 cmd_flags);
4581
4582 if (rc != 0)
4583 goto nvram_write_end;
4584
4585 cmd_flags = 0;
4586 buf += 4;
4587 }
4588
4589 /* Loop to write back the buffer data from data_end
4590 * to page_end */
4591 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4592 for (addr = data_end; addr < page_end;
4593 addr += 4, i += 4) {
4594
4595 if (addr == page_end-4) {
4596 cmd_flags = BNX2_NVM_COMMAND_LAST;
4597 }
4598 rc = bnx2_nvram_write_dword(bp, addr,
4599 &flash_buffer[i], cmd_flags);
4600
4601 if (rc != 0)
4602 goto nvram_write_end;
4603
4604 cmd_flags = 0;
4605 }
4606 }
4607
4608 /* Disable writes to flash interface (lock write-protect) */
4609 bnx2_disable_nvram_write(bp);
4610
4611 /* Disable access to flash interface */
4612 bnx2_disable_nvram_access(bp);
4613 bnx2_release_nvram_lock(bp);
4614
4615 /* Increment written */
4616 written += data_end - data_start;
4617 }
4618
4619 nvram_write_end:
4620 kfree(flash_buffer);
4621 kfree(align_buf);
4622 return rc;
4623 }
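
/* For non-buffered parts the loop above is a per-page read-modify-write:
 * the page is read into flash_buffer, erased, and rewritten as three
 * spans: [page_start, data_start) from the saved copy, [data_start,
 * data_end) from the caller's data, and [data_end, page_end) from the
 * saved copy again.  Worked example with a 264-byte page, offset32 = 300
 * and len32 = 64 (illustrative values only):
 *
 *   page_start = 300 - (300 % 264) = 264
 *   page_end   = 264 + 264 = 528
 *   data_start = 300                      (first pass, written == 0)
 *   data_end   = min(300 + 64, 528) = 364
 */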
4624
4625 static void
4626 bnx2_init_fw_cap(struct bnx2 *bp)
4627 {
4628 u32 val, sig = 0;
4629
4630 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4631 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4632
4633 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4634 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4635
4636 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4637 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4638 return;
4639
4640 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4641 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4642 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4643 }
4644
4645 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4646 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4647 u32 link;
4648
4649 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4650
4651 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4652 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4653 bp->phy_port = PORT_FIBRE;
4654 else
4655 bp->phy_port = PORT_TP;
4656
4657 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4658 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4659 }
4660
4661 if (netif_running(bp->dev) && sig)
4662 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4663 }
4664
4665 static void
4666 bnx2_setup_msix_tbl(struct bnx2 *bp)
4667 {
4668 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4669
4670 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4671 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4672 }
4673
4674 static int
4675 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4676 {
4677 u32 val;
4678 int i, rc = 0;
4679 u8 old_port;
4680
4681 /* Wait for the current PCI transaction to complete before
4682 * issuing a reset. */
4683 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4684 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4685 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4686 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4687 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4688 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4689 udelay(5);
4690
4691 /* Wait for the firmware to tell us it is ok to issue a reset. */
4692 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4693
4694 /* Deposit a driver reset signature so the firmware knows that
4695 * this is a soft reset. */
4696 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4697 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4698
4699 /* Do a dummy read to force the chip to complete all current transactions
4700 * before we issue a reset. */
4701 val = REG_RD(bp, BNX2_MISC_ID);
4702
4703 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4704 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4705 REG_RD(bp, BNX2_MISC_COMMAND);
4706 udelay(5);
4707
4708 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4709 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4710
4711 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4712
4713 } else {
4714 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4715 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4716 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4717
4718 /* Chip reset. */
4719 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4720
4721 /* Reading back any register after chip reset will hang the
4722 * bus on 5706 A0 and A1. The msleep below provides plenty
4723 * of margin for write posting.
4724 */
4725 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4726 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4727 msleep(20);
4728
4729 /* Reset takes approximately 30 usec */
4730 for (i = 0; i < 10; i++) {
4731 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4732 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4733 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4734 break;
4735 udelay(10);
4736 }
4737
4738 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4739 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4740 printk(KERN_ERR PFX "Chip reset did not complete\n");
4741 return -EBUSY;
4742 }
4743 }
4744
4745 /* Make sure byte swapping is properly configured. */
4746 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4747 if (val != 0x01020304) {
4748 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4749 return -ENODEV;
4750 }
4751
4752 /* Wait for the firmware to finish its initialization. */
4753 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4754 if (rc)
4755 return rc;
4756
4757 spin_lock_bh(&bp->phy_lock);
4758 old_port = bp->phy_port;
4759 bnx2_init_fw_cap(bp);
4760 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4761 old_port != bp->phy_port)
4762 bnx2_set_default_remote_link(bp);
4763 spin_unlock_bh(&bp->phy_lock);
4764
4765 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4766 /* Adjust the voltage regulator two steps lower. The default
4767 * of this register is 0x0000000e. */
4768 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4769
4770 /* Remove bad rbuf memory from the free pool. */
4771 rc = bnx2_alloc_bad_rbuf(bp);
4772 }
4773
4774 if (bp->flags & BNX2_FLAG_USING_MSIX)
4775 bnx2_setup_msix_tbl(bp);
4776
4777 return rc;
4778 }
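
/* The 0x01020304 comparison above works because BNX2_PCI_SWAP_DIAG0 holds
 * a fixed diagnostic pattern: it reads back as 0x01020304 only when the
 * byte/word swap bits programmed during the reset sequence match the
 * host's endianness, so a single read validates the whole swap
 * configuration.
 */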
4779
4780 static int
4781 bnx2_init_chip(struct bnx2 *bp)
4782 {
4783 u32 val, mtu;
4784 int rc, i;
4785
4786 /* Make sure the interrupt is not active. */
4787 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4788
4789 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4790 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4791 #ifdef __BIG_ENDIAN
4792 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4793 #endif
4794 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4795 DMA_READ_CHANS << 12 |
4796 DMA_WRITE_CHANS << 16;
4797
4798 val |= (0x2 << 20) | (1 << 11);
4799
4800 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4801 val |= (1 << 23);
4802
4803 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4804 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4805 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4806
4807 REG_WR(bp, BNX2_DMA_CONFIG, val);
4808
4809 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4810 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4811 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4812 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4813 }
4814
4815 if (bp->flags & BNX2_FLAG_PCIX) {
4816 u16 val16;
4817
4818 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4819 &val16);
4820 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4821 val16 & ~PCI_X_CMD_ERO);
4822 }
4823
4824 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4825 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4826 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4827 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4828
4829 /* Initialize context mapping and zero out the quick contexts. The
4830 * context block must have already been enabled. */
4831 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4832 rc = bnx2_init_5709_context(bp);
4833 if (rc)
4834 return rc;
4835 } else
4836 bnx2_init_context(bp);
4837
4838 if ((rc = bnx2_init_cpus(bp)) != 0)
4839 return rc;
4840
4841 bnx2_init_nvram(bp);
4842
4843 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4844
4845 val = REG_RD(bp, BNX2_MQ_CONFIG);
4846 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4847 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4848 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4849 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4850 if (CHIP_REV(bp) == CHIP_REV_Ax)
4851 val |= BNX2_MQ_CONFIG_HALT_DIS;
4852 }
4853
4854 REG_WR(bp, BNX2_MQ_CONFIG, val);
4855
4856 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4857 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4858 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4859
4860 val = (BCM_PAGE_BITS - 8) << 24;
4861 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4862
4863 /* Configure page size. */
4864 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4865 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4866 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4867 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4868
4869 val = bp->mac_addr[0] +
4870 (bp->mac_addr[1] << 8) +
4871 (bp->mac_addr[2] << 16) +
4872 bp->mac_addr[3] +
4873 (bp->mac_addr[4] << 8) +
4874 (bp->mac_addr[5] << 16);
4875 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4876
4877 /* Program the MTU. Also include 4 bytes for CRC32. */
4878 mtu = bp->dev->mtu;
4879 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4880 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4881 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4882 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4883
4884 if (mtu < 1500)
4885 mtu = 1500;
4886
4887 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4888 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4889 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4890
4891 memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4892 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4893 bp->bnx2_napi[i].last_status_idx = 0;
4894
4895 bp->idle_chk_status_idx = 0xffff;
4896
4897 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4898
4899 /* Set up how to generate a link change interrupt. */
4900 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4901
4902 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4903 (u64) bp->status_blk_mapping & 0xffffffff);
4904 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4905
4906 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4907 (u64) bp->stats_blk_mapping & 0xffffffff);
4908 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4909 (u64) bp->stats_blk_mapping >> 32);
4910
4911 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4912 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4913
4914 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4915 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4916
4917 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4918 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4919
4920 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4921
4922 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4923
4924 REG_WR(bp, BNX2_HC_COM_TICKS,
4925 (bp->com_ticks_int << 16) | bp->com_ticks);
4926
4927 REG_WR(bp, BNX2_HC_CMD_TICKS,
4928 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4929
4930 if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4931 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4932 else
4933 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4934 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4935
4936 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4937 val = BNX2_HC_CONFIG_COLLECT_STATS;
4938 else {
4939 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4940 BNX2_HC_CONFIG_COLLECT_STATS;
4941 }
4942
4943 if (bp->irq_nvecs > 1) {
4944 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4945 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4946
4947 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4948 }
4949
4950 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4951 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4952
4953 REG_WR(bp, BNX2_HC_CONFIG, val);
4954
4955 for (i = 1; i < bp->irq_nvecs; i++) {
4956 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4957 BNX2_HC_SB_CONFIG_1;
4958
4959 REG_WR(bp, base,
4960 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4961 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4962 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4963
4964 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4965 (bp->tx_quick_cons_trip_int << 16) |
4966 bp->tx_quick_cons_trip);
4967
4968 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4969 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4970
4971 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4972 (bp->rx_quick_cons_trip_int << 16) |
4973 bp->rx_quick_cons_trip);
4974
4975 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4976 (bp->rx_ticks_int << 16) | bp->rx_ticks);
4977 }
4978
4979 /* Clear internal stats counters. */
4980 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4981
4982 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4983
4984 /* Initialize the receive filter. */
4985 bnx2_set_rx_mode(bp->dev);
4986
4987 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4988 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4989 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4990 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4991 }
4992 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4993 1, 0);
4994
4995 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4996 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4997
4998 udelay(20);
4999
5000 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
5001
5002 return rc;
5003 }
5004
5005 static void
5006 bnx2_clear_ring_states(struct bnx2 *bp)
5007 {
5008 struct bnx2_napi *bnapi;
5009 struct bnx2_tx_ring_info *txr;
5010 struct bnx2_rx_ring_info *rxr;
5011 int i;
5012
5013 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5014 bnapi = &bp->bnx2_napi[i];
5015 txr = &bnapi->tx_ring;
5016 rxr = &bnapi->rx_ring;
5017
5018 txr->tx_cons = 0;
5019 txr->hw_tx_cons = 0;
5020 rxr->rx_prod_bseq = 0;
5021 rxr->rx_prod = 0;
5022 rxr->rx_cons = 0;
5023 rxr->rx_pg_prod = 0;
5024 rxr->rx_pg_cons = 0;
5025 }
5026 }
5027
5028 static void
5029 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5030 {
5031 u32 val, offset0, offset1, offset2, offset3;
5032 u32 cid_addr = GET_CID_ADDR(cid);
5033
5034 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5035 offset0 = BNX2_L2CTX_TYPE_XI;
5036 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5037 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5038 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5039 } else {
5040 offset0 = BNX2_L2CTX_TYPE;
5041 offset1 = BNX2_L2CTX_CMD_TYPE;
5042 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5043 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5044 }
5045 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5046 bnx2_ctx_wr(bp, cid_addr, offset0, val);
5047
5048 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5049 bnx2_ctx_wr(bp, cid_addr, offset1, val);
5050
5051 val = (u64) txr->tx_desc_mapping >> 32;
5052 bnx2_ctx_wr(bp, cid_addr, offset2, val);
5053
5054 val = (u64) txr->tx_desc_mapping & 0xffffffff;
5055 bnx2_ctx_wr(bp, cid_addr, offset3, val);
5056 }
5057
5058 static void
5059 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5060 {
5061 struct tx_bd *txbd;
5062 u32 cid = TX_CID;
5063 struct bnx2_napi *bnapi;
5064 struct bnx2_tx_ring_info *txr;
5065
5066 bnapi = &bp->bnx2_napi[ring_num];
5067 txr = &bnapi->tx_ring;
5068
5069 if (ring_num == 0)
5070 cid = TX_CID;
5071 else
5072 cid = TX_TSS_CID + ring_num - 1;
5073
5074 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5075
5076 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5077
5078 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5079 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5080
5081 txr->tx_prod = 0;
5082 txr->tx_prod_bseq = 0;
5083
5084 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5085 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5086
5087 bnx2_init_tx_context(bp, cid, txr);
5088 }
5089
5090 static void
5091 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5092 int num_rings)
5093 {
5094 int i;
5095 struct rx_bd *rxbd;
5096
5097 for (i = 0; i < num_rings; i++) {
5098 int j;
5099
5100 rxbd = &rx_ring[i][0];
5101 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5102 rxbd->rx_bd_len = buf_size;
5103 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5104 }
5105 if (i == (num_rings - 1))
5106 j = 0;
5107 else
5108 j = i + 1;
5109 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5110 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5111 }
5112 }
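
/* Each ring page holds MAX_RX_DESC_CNT usable descriptors; the final BD
 * of page i is used as a link that points at the DMA address of page
 * i + 1, and the last page links back to page 0, so the hardware sees a
 * single circular ring spanning num_rings pages.  Condensed, the chaining
 * above amounts to (illustrative pseudocode):
 *
 *   next = (i == num_rings - 1) ? 0 : i + 1;
 *   last_bd(i)->haddr = dma[next];
 */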
5113
5114 static void
5115 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5116 {
5117 int i;
5118 u16 prod, ring_prod;
5119 u32 cid, rx_cid_addr, val;
5120 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5121 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5122
5123 if (ring_num == 0)
5124 cid = RX_CID;
5125 else
5126 cid = RX_RSS_CID + ring_num - 1;
5127
5128 rx_cid_addr = GET_CID_ADDR(cid);
5129
5130 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5131 bp->rx_buf_use_size, bp->rx_max_ring);
5132
5133 bnx2_init_rx_context(bp, cid);
5134
5135 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5136 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
5137 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5138 }
5139
5140 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5141 if (bp->rx_pg_ring_size) {
5142 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5143 rxr->rx_pg_desc_mapping,
5144 PAGE_SIZE, bp->rx_max_pg_ring);
5145 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5146 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5147 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5148 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5149
5150 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5151 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5152
5153 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5154 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5155
5156 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5157 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5158 }
5159
5160 val = (u64) rxr->rx_desc_mapping[0] >> 32;
5161 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5162
5163 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5164 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5165
5166 ring_prod = prod = rxr->rx_pg_prod;
5167 for (i = 0; i < bp->rx_pg_ring_size; i++) {
5168 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
5169 printk(KERN_WARNING PFX "%s: init'ed rx page ring %d "
5170 "with %d/%d pages only\n",
5171 bp->dev->name, ring_num, i, bp->rx_pg_ring_size);
5172 break;
5173 }
5174 prod = NEXT_RX_BD(prod);
5175 ring_prod = RX_PG_RING_IDX(prod);
5176 }
5177 rxr->rx_pg_prod = prod;
5178
5179 ring_prod = prod = rxr->rx_prod;
5180 for (i = 0; i < bp->rx_ring_size; i++) {
5181 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
5182 printk(KERN_WARNING PFX "%s: init'ed rx ring %d with "
5183 "%d/%d skbs only\n",
5184 bp->dev->name, ring_num, i, bp->rx_ring_size);
5185 break;
5186 }
5187 prod = NEXT_RX_BD(prod);
5188 ring_prod = RX_RING_IDX(prod);
5189 }
5190 rxr->rx_prod = prod;
5191
5192 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5193 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5194 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5195
5196 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5197 REG_WR16(bp, rxr->rx_bidx_addr, prod);
5198
5199 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5200 }
5201
5202 static void
5203 bnx2_init_all_rings(struct bnx2 *bp)
5204 {
5205 int i;
5206 u32 val;
5207
5208 bnx2_clear_ring_states(bp);
5209
5210 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5211 for (i = 0; i < bp->num_tx_rings; i++)
5212 bnx2_init_tx_ring(bp, i);
5213
5214 if (bp->num_tx_rings > 1)
5215 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5216 (TX_TSS_CID << 7));
5217
5218 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5219 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5220
5221 for (i = 0; i < bp->num_rx_rings; i++)
5222 bnx2_init_rx_ring(bp, i);
5223
5224 if (bp->num_rx_rings > 1) {
5225 u32 tbl_32;
5226 u8 *tbl = (u8 *) &tbl_32;
5227
5228 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
5229 BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
5230
5231 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5232 tbl[i % 4] = i % (bp->num_rx_rings - 1);
5233 if ((i % 4) == 3)
5234 bnx2_reg_wr_ind(bp,
5235 BNX2_RXP_SCRATCH_RSS_TBL + i,
5236 cpu_to_be32(tbl_32));
5237 }
5238
5239 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5240 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5241
5242 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5243
5244 }
5245 }
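
/* The RSS block above fills a byte-wide indirection table: each entry
 * selects one of the num_rx_rings - 1 non-default rings round-robin, and
 * the bytes are staged four at a time in tbl_32, flushed to the RXP
 * scratch area whenever the fourth byte lands ((i % 4) == 3).  A sketch
 * of the same fill over a plain array; bnx2_fill_rss_tbl() is a
 * hypothetical helper shown only for illustration.
 */
static void __maybe_unused
bnx2_fill_rss_tbl(u8 *tbl, int entries, int nr_rings)
{
	int i;

	for (i = 0; i < entries; i++)
		tbl[i] = i % nr_rings;	/* round-robin ring selection */
}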
5246
5247 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5248 {
5249 u32 max, num_rings = 1;
5250
5251 while (ring_size > MAX_RX_DESC_CNT) {
5252 ring_size -= MAX_RX_DESC_CNT;
5253 num_rings++;
5254 }
5255 /* round to next power of 2 */
5256 max = max_size;
5257 while ((max & num_rings) == 0)
5258 max >>= 1;
5259
5260 if (num_rings != max)
5261 max <<= 1;
5262
5263 return max;
5264 }
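
/* bnx2_find_max_ring() counts how many descriptor pages the request
 * needs, then rounds that count up to the next power of two (capped at
 * max_size) because the chip requires a power-of-two number of ring
 * pages.  Worked example with MAX_RX_DESC_CNT = 255 and illustrative
 * inputs:
 *
 *   ring_size = 600  ->  num_rings = 3      (255 subtracted twice)
 *   max_size  = 16   ->  16, 8, 4, 2; stops at 2 since (2 & 3) != 0
 *   num_rings != 2, so max <<= 1  ->  returns 4
 */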
5265
5266 static void
5267 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5268 {
5269 u32 rx_size, rx_space, jumbo_size;
5270
5271 /* 8 for CRC and VLAN */
5272 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5273
5274 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5275 sizeof(struct skb_shared_info);
5276
5277 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5278 bp->rx_pg_ring_size = 0;
5279 bp->rx_max_pg_ring = 0;
5280 bp->rx_max_pg_ring_idx = 0;
5281 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5282 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5283
5284 jumbo_size = size * pages;
5285 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5286 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5287
5288 bp->rx_pg_ring_size = jumbo_size;
5289 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5290 MAX_RX_PG_RINGS);
5291 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5292 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5293 bp->rx_copy_thresh = 0;
5294 }
5295
5296 bp->rx_buf_use_size = rx_size;
5297 /* hw alignment */
5298 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5299 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5300 bp->rx_ring_size = size;
5301 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5302 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5303 }
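
/* When one buffer would not fit in a page, the function above enables
 * paged ("jumbo") receive: the skb keeps only the first
 * BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET bytes and the rest lands in page
 * buffers.  Worked example for a 9000-byte MTU with 4 KB pages
 * (illustrative):
 *
 *   pages      = PAGE_ALIGN(9000 - 40) >> PAGE_SHIFT
 *              = 12288 >> 12 = 3 pages per packet
 *   jumbo_size = size * 3, capped at MAX_TOTAL_RX_PG_DESC_CNT
 */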
5304
5305 static void
5306 bnx2_free_tx_skbs(struct bnx2 *bp)
5307 {
5308 int i;
5309
5310 for (i = 0; i < bp->num_tx_rings; i++) {
5311 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5312 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5313 int j;
5314
5315 if (txr->tx_buf_ring == NULL)
5316 continue;
5317
5318 for (j = 0; j < TX_DESC_CNT; ) {
5319 struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5320 struct sk_buff *skb = tx_buf->skb;
5321 int k, last;
5322
5323 if (skb == NULL) {
5324 j++;
5325 continue;
5326 }
5327
5328 pci_unmap_single(bp->pdev,
5329 pci_unmap_addr(tx_buf, mapping),
5330 skb_headlen(skb),
5331 PCI_DMA_TODEVICE);
5332
5333 tx_buf->skb = NULL;
5334
5335 last = tx_buf->nr_frags;
5336 j++;
5337 for (k = 0; k < last; k++, j++) {
5338 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5339 pci_unmap_page(bp->pdev,
5340 pci_unmap_addr(tx_buf, mapping),
5341 skb_shinfo(skb)->frags[k].size,
5342 PCI_DMA_TODEVICE);
5343 }
5344 dev_kfree_skb(skb);
5345 }
5346 }
5347 }
5348
5349 static void
5350 bnx2_free_rx_skbs(struct bnx2 *bp)
5351 {
5352 int i;
5353
5354 for (i = 0; i < bp->num_rx_rings; i++) {
5355 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5356 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5357 int j;
5358
5359 if (rxr->rx_buf_ring == NULL)
5360 return;
5361
5362 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5363 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5364 struct sk_buff *skb = rx_buf->skb;
5365
5366 if (skb == NULL)
5367 continue;
5368
5369 pci_unmap_single(bp->pdev,
5370 pci_unmap_addr(rx_buf, mapping),
5371 bp->rx_buf_use_size,
5372 PCI_DMA_FROMDEVICE);
5373
5374 rx_buf->skb = NULL;
5375
5376 dev_kfree_skb(skb);
5377 }
5378 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5379 bnx2_free_rx_page(bp, rxr, j);
5380 }
5381 }
5382
5383 static void
5384 bnx2_free_skbs(struct bnx2 *bp)
5385 {
5386 bnx2_free_tx_skbs(bp);
5387 bnx2_free_rx_skbs(bp);
5388 }
5389
5390 static int
5391 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5392 {
5393 int rc;
5394
5395 rc = bnx2_reset_chip(bp, reset_code);
5396 bnx2_free_skbs(bp);
5397 if (rc)
5398 return rc;
5399
5400 if ((rc = bnx2_init_chip(bp)) != 0)
5401 return rc;
5402
5403 bnx2_init_all_rings(bp);
5404 return 0;
5405 }
5406
5407 static int
5408 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5409 {
5410 int rc;
5411
5412 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5413 return rc;
5414
5415 spin_lock_bh(&bp->phy_lock);
5416 bnx2_init_phy(bp, reset_phy);
5417 bnx2_set_link(bp);
5418 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5419 bnx2_remote_phy_event(bp);
5420 spin_unlock_bh(&bp->phy_lock);
5421 return 0;
5422 }
5423
5424 static int
5425 bnx2_shutdown_chip(struct bnx2 *bp)
5426 {
5427 u32 reset_code;
5428
5429 if (bp->flags & BNX2_FLAG_NO_WOL)
5430 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5431 else if (bp->wol)
5432 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5433 else
5434 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5435
5436 return bnx2_reset_chip(bp, reset_code);
5437 }
5438
5439 static int
5440 bnx2_test_registers(struct bnx2 *bp)
5441 {
5442 int ret;
5443 int i, is_5709;
5444 static const struct {
5445 u16 offset;
5446 u16 flags;
5447 #define BNX2_FL_NOT_5709 1
5448 u32 rw_mask;
5449 u32 ro_mask;
5450 } reg_tbl[] = {
5451 { 0x006c, 0, 0x00000000, 0x0000003f },
5452 { 0x0090, 0, 0xffffffff, 0x00000000 },
5453 { 0x0094, 0, 0x00000000, 0x00000000 },
5454
5455 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5456 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5457 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5458 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5459 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5460 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5461 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5462 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5463 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5464
5465 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5466 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5467 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5468 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5469 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5470 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5471
5472 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5473 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5474 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5475
5476 { 0x1000, 0, 0x00000000, 0x00000001 },
5477 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5478
5479 { 0x1408, 0, 0x01c00800, 0x00000000 },
5480 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5481 { 0x14a8, 0, 0x00000000, 0x000001ff },
5482 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5483 { 0x14b0, 0, 0x00000002, 0x00000001 },
5484 { 0x14b8, 0, 0x00000000, 0x00000000 },
5485 { 0x14c0, 0, 0x00000000, 0x00000009 },
5486 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5487 { 0x14cc, 0, 0x00000000, 0x00000001 },
5488 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5489
5490 { 0x1800, 0, 0x00000000, 0x00000001 },
5491 { 0x1804, 0, 0x00000000, 0x00000003 },
5492
5493 { 0x2800, 0, 0x00000000, 0x00000001 },
5494 { 0x2804, 0, 0x00000000, 0x00003f01 },
5495 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5496 { 0x2810, 0, 0xffff0000, 0x00000000 },
5497 { 0x2814, 0, 0xffff0000, 0x00000000 },
5498 { 0x2818, 0, 0xffff0000, 0x00000000 },
5499 { 0x281c, 0, 0xffff0000, 0x00000000 },
5500 { 0x2834, 0, 0xffffffff, 0x00000000 },
5501 { 0x2840, 0, 0x00000000, 0xffffffff },
5502 { 0x2844, 0, 0x00000000, 0xffffffff },
5503 { 0x2848, 0, 0xffffffff, 0x00000000 },
5504 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5505
5506 { 0x2c00, 0, 0x00000000, 0x00000011 },
5507 { 0x2c04, 0, 0x00000000, 0x00030007 },
5508
5509 { 0x3c00, 0, 0x00000000, 0x00000001 },
5510 { 0x3c04, 0, 0x00000000, 0x00070000 },
5511 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5512 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5513 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5514 { 0x3c14, 0, 0x00000000, 0xffffffff },
5515 { 0x3c18, 0, 0x00000000, 0xffffffff },
5516 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5517 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5518
5519 { 0x5004, 0, 0x00000000, 0x0000007f },
5520 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5521
5522 { 0x5c00, 0, 0x00000000, 0x00000001 },
5523 { 0x5c04, 0, 0x00000000, 0x0003000f },
5524 { 0x5c08, 0, 0x00000003, 0x00000000 },
5525 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5526 { 0x5c10, 0, 0x00000000, 0xffffffff },
5527 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5528 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5529 { 0x5c88, 0, 0x00000000, 0x00077373 },
5530 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5531
5532 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5533 { 0x680c, 0, 0xffffffff, 0x00000000 },
5534 { 0x6810, 0, 0xffffffff, 0x00000000 },
5535 { 0x6814, 0, 0xffffffff, 0x00000000 },
5536 { 0x6818, 0, 0xffffffff, 0x00000000 },
5537 { 0x681c, 0, 0xffffffff, 0x00000000 },
5538 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5539 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5540 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5541 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5542 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5543 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5544 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5545 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5546 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5547 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5548 { 0x684c, 0, 0xffffffff, 0x00000000 },
5549 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5550 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5551 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5552 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5553 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5554 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5555
5556 { 0xffff, 0, 0x00000000, 0x00000000 },
5557 };
5558
5559 ret = 0;
5560 is_5709 = 0;
5561 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5562 is_5709 = 1;
5563
5564 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5565 u32 offset, rw_mask, ro_mask, save_val, val;
5566 u16 flags = reg_tbl[i].flags;
5567
5568 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5569 continue;
5570
5571 offset = (u32) reg_tbl[i].offset;
5572 rw_mask = reg_tbl[i].rw_mask;
5573 ro_mask = reg_tbl[i].ro_mask;
5574
5575 save_val = readl(bp->regview + offset);
5576
5577 writel(0, bp->regview + offset);
5578
5579 val = readl(bp->regview + offset);
5580 if ((val & rw_mask) != 0) {
5581 goto reg_test_err;
5582 }
5583
5584 if ((val & ro_mask) != (save_val & ro_mask)) {
5585 goto reg_test_err;
5586 }
5587
5588 writel(0xffffffff, bp->regview + offset);
5589
5590 val = readl(bp->regview + offset);
5591 if ((val & rw_mask) != rw_mask) {
5592 goto reg_test_err;
5593 }
5594
5595 if ((val & ro_mask) != (save_val & ro_mask)) {
5596 goto reg_test_err;
5597 }
5598
5599 writel(save_val, bp->regview + offset);
5600 continue;
5601
5602 reg_test_err:
5603 writel(save_val, bp->regview + offset);
5604 ret = -ENODEV;
5605 break;
5606 }
5607 return ret;
5608 }
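
/* Each table entry is probed twice, non-destructively: writing 0 must
 * leave all read/write bits clear ((val & rw_mask) == 0) while read-only
 * bits keep their saved value, and writing 0xffffffff must set every
 * read/write bit ((val & rw_mask) == rw_mask), again without disturbing
 * the read-only bits.  The saved value is restored on both the pass and
 * the failure paths.
 */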
5609
5610 static int
5611 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5612 {
5613 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5614 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5615 int i;
5616
5617 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5618 u32 offset;
5619
5620 for (offset = 0; offset < size; offset += 4) {
5621
5622 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5623
5624 if (bnx2_reg_rd_ind(bp, start + offset) !=
5625 test_pattern[i]) {
5626 return -ENODEV;
5627 }
5628 }
5629 }
5630 return 0;
5631 }
5632
5633 static int
5634 bnx2_test_memory(struct bnx2 *bp)
5635 {
5636 int ret = 0;
5637 int i;
5638 static struct mem_entry {
5639 u32 offset;
5640 u32 len;
5641 } mem_tbl_5706[] = {
5642 { 0x60000, 0x4000 },
5643 { 0xa0000, 0x3000 },
5644 { 0xe0000, 0x4000 },
5645 { 0x120000, 0x4000 },
5646 { 0x1a0000, 0x4000 },
5647 { 0x160000, 0x4000 },
5648 { 0xffffffff, 0 },
5649 },
5650 mem_tbl_5709[] = {
5651 { 0x60000, 0x4000 },
5652 { 0xa0000, 0x3000 },
5653 { 0xe0000, 0x4000 },
5654 { 0x120000, 0x4000 },
5655 { 0x1a0000, 0x4000 },
5656 { 0xffffffff, 0 },
5657 };
5658 struct mem_entry *mem_tbl;
5659
5660 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5661 mem_tbl = mem_tbl_5709;
5662 else
5663 mem_tbl = mem_tbl_5706;
5664
5665 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5666 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5667 mem_tbl[i].len)) != 0) {
5668 return ret;
5669 }
5670 }
5671
5672 return ret;
5673 }
5674
5675 #define BNX2_MAC_LOOPBACK 0
5676 #define BNX2_PHY_LOOPBACK 1
5677
5678 static int
5679 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5680 {
5681 unsigned int pkt_size, num_pkts, i;
5682 struct sk_buff *skb, *rx_skb;
5683 unsigned char *packet;
5684 u16 rx_start_idx, rx_idx;
5685 dma_addr_t map;
5686 struct tx_bd *txbd;
5687 struct sw_bd *rx_buf;
5688 struct l2_fhdr *rx_hdr;
5689 int ret = -ENODEV;
5690 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5691 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5692 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5693
5694 tx_napi = bnapi;
5695
5696 txr = &tx_napi->tx_ring;
5697 rxr = &bnapi->rx_ring;
5698 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5699 bp->loopback = MAC_LOOPBACK;
5700 bnx2_set_mac_loopback(bp);
5701 }
5702 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5703 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5704 return 0;
5705
5706 bp->loopback = PHY_LOOPBACK;
5707 bnx2_set_phy_loopback(bp);
5708 }
5709 else
5710 return -EINVAL;
5711
5712 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5713 skb = netdev_alloc_skb(bp->dev, pkt_size);
5714 if (!skb)
5715 return -ENOMEM;
5716 packet = skb_put(skb, pkt_size);
5717 memcpy(packet, bp->dev->dev_addr, 6);
5718 memset(packet + 6, 0x0, 8);
5719 for (i = 14; i < pkt_size; i++)
5720 packet[i] = (unsigned char) (i & 0xff);
5721
5722 map = pci_map_single(bp->pdev, skb->data, pkt_size,
5723 PCI_DMA_TODEVICE);
5724 if (pci_dma_mapping_error(bp->pdev, map)) {
5725 dev_kfree_skb(skb);
5726 return -EIO;
5727 }
5728
5729 REG_WR(bp, BNX2_HC_COMMAND,
5730 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5731
5732 REG_RD(bp, BNX2_HC_COMMAND);
5733
5734 udelay(5);
5735 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5736
5737 num_pkts = 0;
5738
5739 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5740
5741 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5742 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5743 txbd->tx_bd_mss_nbytes = pkt_size;
5744 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5745
5746 num_pkts++;
5747 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5748 txr->tx_prod_bseq += pkt_size;
5749
5750 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5751 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5752
5753 udelay(100);
5754
5755 REG_WR(bp, BNX2_HC_COMMAND,
5756 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5757
5758 REG_RD(bp, BNX2_HC_COMMAND);
5759
5760 udelay(5);
5761
5762 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5763 dev_kfree_skb(skb);
5764
5765 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5766 goto loopback_test_done;
5767
5768 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5769 if (rx_idx != rx_start_idx + num_pkts) {
5770 goto loopback_test_done;
5771 }
5772
5773 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5774 rx_skb = rx_buf->skb;
5775
5776 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5777 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5778
5779 pci_dma_sync_single_for_cpu(bp->pdev,
5780 pci_unmap_addr(rx_buf, mapping),
5781 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5782
5783 if (rx_hdr->l2_fhdr_status &
5784 (L2_FHDR_ERRORS_BAD_CRC |
5785 L2_FHDR_ERRORS_PHY_DECODE |
5786 L2_FHDR_ERRORS_ALIGNMENT |
5787 L2_FHDR_ERRORS_TOO_SHORT |
5788 L2_FHDR_ERRORS_GIANT_FRAME)) {
5789
5790 goto loopback_test_done;
5791 }
5792
5793 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5794 goto loopback_test_done;
5795 }
5796
5797 for (i = 14; i < pkt_size; i++) {
5798 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5799 goto loopback_test_done;
5800 }
5801 }
5802
5803 ret = 0;
5804
5805 loopback_test_done:
5806 bp->loopback = 0;
5807 return ret;
5808 }
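
/* The COAL_NOW_WO_INT commands above make the host coalescing block
 * update the status block immediately without raising an interrupt, so
 * the test can poll the consumer indices synchronously: one frame is
 * sent, and it must come back with a clean l2_fhdr status, a length of
 * pkt_size plus the 4-byte CRC, and the exact byte pattern written at
 * transmit time.
 */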
5809
5810 #define BNX2_MAC_LOOPBACK_FAILED 1
5811 #define BNX2_PHY_LOOPBACK_FAILED 2
5812 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5813 BNX2_PHY_LOOPBACK_FAILED)
5814
5815 static int
5816 bnx2_test_loopback(struct bnx2 *bp)
5817 {
5818 int rc = 0;
5819
5820 if (!netif_running(bp->dev))
5821 return BNX2_LOOPBACK_FAILED;
5822
5823 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5824 spin_lock_bh(&bp->phy_lock);
5825 bnx2_init_phy(bp, 1);
5826 spin_unlock_bh(&bp->phy_lock);
5827 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5828 rc |= BNX2_MAC_LOOPBACK_FAILED;
5829 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5830 rc |= BNX2_PHY_LOOPBACK_FAILED;
5831 return rc;
5832 }
5833
5834 #define NVRAM_SIZE 0x200
5835 #define CRC32_RESIDUAL 0xdebb20e3
5836
5837 static int
5838 bnx2_test_nvram(struct bnx2 *bp)
5839 {
5840 __be32 buf[NVRAM_SIZE / 4];
5841 u8 *data = (u8 *) buf;
5842 int rc = 0;
5843 u32 magic, csum;
5844
5845 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5846 goto test_nvram_done;
5847
5848 magic = be32_to_cpu(buf[0]);
5849 if (magic != 0x669955aa) {
5850 rc = -ENODEV;
5851 goto test_nvram_done;
5852 }
5853
5854 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5855 goto test_nvram_done;
5856
5857 csum = ether_crc_le(0x100, data);
5858 if (csum != CRC32_RESIDUAL) {
5859 rc = -ENODEV;
5860 goto test_nvram_done;
5861 }
5862
5863 csum = ether_crc_le(0x100, data + 0x100);
5864 if (csum != CRC32_RESIDUAL) {
5865 rc = -ENODEV;
5866 }
5867
5868 test_nvram_done:
5869 return rc;
5870 }
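
/* The checksum test relies on a CRC32 property: running the little-endian
 * CRC over a block that already contains its own CRC at the end yields
 * the constant residual 0xdebb20e3 regardless of the data, so each
 * 0x100-byte region can be validated without locating the stored
 * checksum field.
 */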
5871
5872 static int
5873 bnx2_test_link(struct bnx2 *bp)
5874 {
5875 u32 bmsr;
5876
5877 if (!netif_running(bp->dev))
5878 return -ENODEV;
5879
5880 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5881 if (bp->link_up)
5882 return 0;
5883 return -ENODEV;
5884 }
5885 spin_lock_bh(&bp->phy_lock);
5886 bnx2_enable_bmsr1(bp);
5887 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5888 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5889 bnx2_disable_bmsr1(bp);
5890 spin_unlock_bh(&bp->phy_lock);
5891
5892 if (bmsr & BMSR_LSTATUS) {
5893 return 0;
5894 }
5895 return -ENODEV;
5896 }
5897
5898 static int
5899 bnx2_test_intr(struct bnx2 *bp)
5900 {
5901 int i;
5902 u16 status_idx;
5903
5904 if (!netif_running(bp->dev))
5905 return -ENODEV;
5906
5907 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5908
5909 /* This register is not touched during run-time. */
5910 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5911 REG_RD(bp, BNX2_HC_COMMAND);
5912
5913 for (i = 0; i < 10; i++) {
5914 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5915 status_idx) {
5916
5917 break;
5918 }
5919
5920 msleep_interruptible(10);
5921 }
5922 if (i < 10)
5923 return 0;
5924
5925 return -ENODEV;
5926 }
5927
5928 /* Determine link for parallel detection. */
5929 static int
5930 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5931 {
5932 u32 mode_ctl, an_dbg, exp;
5933
5934 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5935 return 0;
5936
5937 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5938 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5939
5940 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5941 return 0;
5942
5943 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5944 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5945 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5946
5947 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5948 return 0;
5949
5950 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5951 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5952 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5953
5954 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5955 return 0;
5956
5957 return 1;
5958 }
5959
5960 static void
5961 bnx2_5706_serdes_timer(struct bnx2 *bp)
5962 {
5963 int check_link = 1;
5964
5965 spin_lock(&bp->phy_lock);
5966 if (bp->serdes_an_pending) {
5967 bp->serdes_an_pending--;
5968 check_link = 0;
5969 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5970 u32 bmcr;
5971
5972 bp->current_interval = BNX2_TIMER_INTERVAL;
5973
5974 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5975
5976 if (bmcr & BMCR_ANENABLE) {
5977 if (bnx2_5706_serdes_has_link(bp)) {
5978 bmcr &= ~BMCR_ANENABLE;
5979 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5980 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5981 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5982 }
5983 }
5984 }
5985 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5986 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5987 u32 phy2;
5988
5989 bnx2_write_phy(bp, 0x17, 0x0f01);
5990 bnx2_read_phy(bp, 0x15, &phy2);
5991 if (phy2 & 0x20) {
5992 u32 bmcr;
5993
5994 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5995 bmcr |= BMCR_ANENABLE;
5996 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5997
5998 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5999 }
6000 } else
6001 bp->current_interval = BNX2_TIMER_INTERVAL;
6002
6003 if (check_link) {
6004 u32 val;
6005
6006 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6007 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6008 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6009
6010 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6011 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6012 bnx2_5706s_force_link_dn(bp, 1);
6013 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6014 } else
6015 bnx2_set_link(bp);
6016 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6017 bnx2_set_link(bp);
6018 }
6019 spin_unlock(&bp->phy_lock);
6020 }
6021
6022 static void
6023 bnx2_5708_serdes_timer(struct bnx2 *bp)
6024 {
6025 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6026 return;
6027
6028 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6029 bp->serdes_an_pending = 0;
6030 return;
6031 }
6032
6033 spin_lock(&bp->phy_lock);
6034 if (bp->serdes_an_pending)
6035 bp->serdes_an_pending--;
6036 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6037 u32 bmcr;
6038
6039 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6040 if (bmcr & BMCR_ANENABLE) {
6041 bnx2_enable_forced_2g5(bp);
6042 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6043 } else {
6044 bnx2_disable_forced_2g5(bp);
6045 bp->serdes_an_pending = 2;
6046 bp->current_interval = BNX2_TIMER_INTERVAL;
6047 }
6048
6049 } else
6050 bp->current_interval = BNX2_TIMER_INTERVAL;
6051
6052 spin_unlock(&bp->phy_lock);
6053 }
6054
6055 static void
6056 bnx2_timer(unsigned long data)
6057 {
6058 struct bnx2 *bp = (struct bnx2 *) data;
6059
6060 if (!netif_running(bp->dev))
6061 return;
6062
6063 if (atomic_read(&bp->intr_sem) != 0)
6064 goto bnx2_restart_timer;
6065
6066 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6067 BNX2_FLAG_USING_MSI)
6068 bnx2_chk_missed_msi(bp);
6069
6070 bnx2_send_heart_beat(bp);
6071
6072 bp->stats_blk->stat_FwRxDrop =
6073 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6074
6075 /* Work around occasionally corrupted counters */
6076 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6077 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6078 BNX2_HC_COMMAND_STATS_NOW);
6079
6080 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6081 if (CHIP_NUM(bp) == CHIP_NUM_5706)
6082 bnx2_5706_serdes_timer(bp);
6083 else
6084 bnx2_5708_serdes_timer(bp);
6085 }
6086
6087 bnx2_restart_timer:
6088 mod_timer(&bp->timer, jiffies + bp->current_interval);
6089 }
6090
6091 static int
6092 bnx2_request_irq(struct bnx2 *bp)
6093 {
6094 unsigned long flags;
6095 struct bnx2_irq *irq;
6096 int rc = 0, i;
6097
6098 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6099 flags = 0;
6100 else
6101 flags = IRQF_SHARED;
6102
6103 for (i = 0; i < bp->irq_nvecs; i++) {
6104 irq = &bp->irq_tbl[i];
6105 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6106 &bp->bnx2_napi[i]);
6107 if (rc)
6108 break;
6109 irq->requested = 1;
6110 }
6111 return rc;
6112 }
6113
6114 static void
6115 bnx2_free_irq(struct bnx2 *bp)
6116 {
6117 struct bnx2_irq *irq;
6118 int i;
6119
6120 for (i = 0; i < bp->irq_nvecs; i++) {
6121 irq = &bp->irq_tbl[i];
6122 if (irq->requested)
6123 free_irq(irq->vector, &bp->bnx2_napi[i]);
6124 irq->requested = 0;
6125 }
6126 if (bp->flags & BNX2_FLAG_USING_MSI)
6127 pci_disable_msi(bp->pdev);
6128 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6129 pci_disable_msix(bp->pdev);
6130
6131 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6132 }
6133
6134 static void
6135 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6136 {
6137 int i, rc;
6138 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6139 struct net_device *dev = bp->dev;
6140 const int len = sizeof(bp->irq_tbl[0].name);
6141
6142 bnx2_setup_msix_tbl(bp);
6143 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6144 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6145 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6146
6147 /* Need to flush the previous three writes to ensure MSI-X
6148 	 * is set up properly */
6149 REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6150
6151 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6152 msix_ent[i].entry = i;
6153 msix_ent[i].vector = 0;
6154 }
6155
6156 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
6157 if (rc != 0)
6158 return;
6159
6160 bp->irq_nvecs = msix_vecs;
6161 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6162 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6163 bp->irq_tbl[i].vector = msix_ent[i].vector;
6164 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6165 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6166 }
6167 }
6168
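/*
 * pci_enable_msix() is used above in all-or-nothing fashion: any
 * nonzero return (including a positive "this many vectors would fit"
 * value) makes the driver fall back to MSI or INTx.  A hedged sketch
 * of the retry idiom some drivers use instead (not what bnx2 does):
 *
 *	rc = pci_enable_msix(pdev, ent, nvec);
 *	while (rc > 0) {
 *		nvec = rc;	// retry with what the system offered
 *		rc = pci_enable_msix(pdev, ent, nvec);
 *	}
 */
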
6169 static void
6170 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6171 {
6172 int cpus = num_online_cpus();
6173 int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
6174
6175 bp->irq_tbl[0].handler = bnx2_interrupt;
6176 strcpy(bp->irq_tbl[0].name, bp->dev->name);
6177 bp->irq_nvecs = 1;
6178 bp->irq_tbl[0].vector = bp->pdev->irq;
6179
6180 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
6181 bnx2_enable_msix(bp, msix_vecs);
6182
6183 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6184 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6185 if (pci_enable_msi(bp->pdev) == 0) {
6186 bp->flags |= BNX2_FLAG_USING_MSI;
6187 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6188 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6189 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6190 } else
6191 bp->irq_tbl[0].handler = bnx2_msi;
6192
6193 bp->irq_tbl[0].vector = bp->pdev->irq;
6194 }
6195 }
6196
6197 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6198 bp->dev->real_num_tx_queues = bp->num_tx_rings;
6199
6200 bp->num_rx_rings = bp->irq_nvecs;
6201 }
6202
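/*
 * Sizing recap: with irq_nvecs == N the driver runs N rx rings but
 * only rounddown_pow_of_two(N) tx rings, e.g. 5 vectors yield 5 rx
 * rings and 4 tx rings, presumably so tx queue selection stays a
 * cheap power-of-two operation.
 */
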
6203 /* Called with rtnl_lock */
6204 static int
6205 bnx2_open(struct net_device *dev)
6206 {
6207 struct bnx2 *bp = netdev_priv(dev);
6208 int rc;
6209
6210 netif_carrier_off(dev);
6211
6212 bnx2_set_power_state(bp, PCI_D0);
6213 bnx2_disable_int(bp);
6214
6215 bnx2_setup_int_mode(bp, disable_msi);
6216 bnx2_napi_enable(bp);
6217 rc = bnx2_alloc_mem(bp);
6218 if (rc)
6219 goto open_err;
6220
6221 rc = bnx2_request_irq(bp);
6222 if (rc)
6223 goto open_err;
6224
6225 rc = bnx2_init_nic(bp, 1);
6226 if (rc)
6227 goto open_err;
6228
6229 mod_timer(&bp->timer, jiffies + bp->current_interval);
6230
6231 atomic_set(&bp->intr_sem, 0);
6232
6233 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6234
6235 bnx2_enable_int(bp);
6236
6237 if (bp->flags & BNX2_FLAG_USING_MSI) {
6238 		/* Test MSI to make sure it is working.
6239 		 * If the MSI test fails, go back to INTx mode.
6240 */
6241 if (bnx2_test_intr(bp) != 0) {
6242 printk(KERN_WARNING PFX "%s: No interrupt was generated"
6243 " using MSI, switching to INTx mode. Please"
6244 " report this failure to the PCI maintainer"
6245 " and include system chipset information.\n",
6246 bp->dev->name);
6247
6248 bnx2_disable_int(bp);
6249 bnx2_free_irq(bp);
6250
6251 bnx2_setup_int_mode(bp, 1);
6252
6253 rc = bnx2_init_nic(bp, 0);
6254
6255 if (!rc)
6256 rc = bnx2_request_irq(bp);
6257
6258 if (rc) {
6259 del_timer_sync(&bp->timer);
6260 goto open_err;
6261 }
6262 bnx2_enable_int(bp);
6263 }
6264 }
6265 if (bp->flags & BNX2_FLAG_USING_MSI)
6266 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
6267 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6268 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
6269
6270 netif_tx_start_all_queues(dev);
6271
6272 return 0;
6273
6274 open_err:
6275 bnx2_napi_disable(bp);
6276 bnx2_free_skbs(bp);
6277 bnx2_free_irq(bp);
6278 bnx2_free_mem(bp);
6279 return rc;
6280 }
6281
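/*
 * If MSI is known to be broken on a platform, the runtime fallback
 * above can also be pre-empted entirely at module load time:
 *
 *	modprobe bnx2 disable_msi=1
 */
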
6282 static void
6283 bnx2_reset_task(struct work_struct *work)
6284 {
6285 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6286
6287 rtnl_lock();
6288 if (!netif_running(bp->dev)) {
6289 rtnl_unlock();
6290 return;
6291 }
6292
6293 bnx2_netif_stop(bp);
6294
6295 bnx2_init_nic(bp, 1);
6296
6297 atomic_set(&bp->intr_sem, 1);
6298 bnx2_netif_start(bp);
6299 rtnl_unlock();
6300 }
6301
6302 static void
6303 bnx2_dump_state(struct bnx2 *bp)
6304 {
6305 struct net_device *dev = bp->dev;
6306
6307 printk(KERN_ERR PFX "%s DEBUG: intr_sem[%x]\n", dev->name,
6308 atomic_read(&bp->intr_sem));
6309 printk(KERN_ERR PFX "%s DEBUG: EMAC_TX_STATUS[%08x] "
6310 "RPM_MGMT_PKT_CTRL[%08x]\n", dev->name,
6311 REG_RD(bp, BNX2_EMAC_TX_STATUS),
6312 REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6313 printk(KERN_ERR PFX "%s DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
6314 dev->name, bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
6315 bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
6316 printk(KERN_ERR PFX "%s DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6317 dev->name, REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6318 if (bp->flags & BNX2_FLAG_USING_MSIX)
6319 printk(KERN_ERR PFX "%s DEBUG: PBA[%08x]\n", dev->name,
6320 REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6321 }
6322
6323 static void
6324 bnx2_tx_timeout(struct net_device *dev)
6325 {
6326 struct bnx2 *bp = netdev_priv(dev);
6327
6328 bnx2_dump_state(bp);
6329
6330 	/* This allows the netif to be shut down gracefully before resetting */
6331 schedule_work(&bp->reset_task);
6332 }
6333
6334 #ifdef BCM_VLAN
6335 /* Called with rtnl_lock */
6336 static void
6337 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6338 {
6339 struct bnx2 *bp = netdev_priv(dev);
6340
6341 if (netif_running(dev))
6342 bnx2_netif_stop(bp);
6343
6344 bp->vlgrp = vlgrp;
6345
6346 if (!netif_running(dev))
6347 return;
6348
6349 bnx2_set_rx_mode(dev);
6350 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6351 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
6352
6353 bnx2_netif_start(bp);
6354 }
6355 #endif
6356
6357 /* Called with netif_tx_lock.
6358 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6359 * netif_wake_queue().
6360 */
6361 static netdev_tx_t
6362 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6363 {
6364 struct bnx2 *bp = netdev_priv(dev);
6365 dma_addr_t mapping;
6366 struct tx_bd *txbd;
6367 struct sw_tx_bd *tx_buf;
6368 u32 len, vlan_tag_flags, last_frag, mss;
6369 u16 prod, ring_prod;
6370 int i;
6371 struct bnx2_napi *bnapi;
6372 struct bnx2_tx_ring_info *txr;
6373 struct netdev_queue *txq;
6374
6375 	/* Determine which tx ring this skb will be placed on */
6376 i = skb_get_queue_mapping(skb);
6377 bnapi = &bp->bnx2_napi[i];
6378 txr = &bnapi->tx_ring;
6379 txq = netdev_get_tx_queue(dev, i);
6380
6381 if (unlikely(bnx2_tx_avail(bp, txr) <
6382 (skb_shinfo(skb)->nr_frags + 1))) {
6383 netif_tx_stop_queue(txq);
6384 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
6385 dev->name);
6386
6387 return NETDEV_TX_BUSY;
6388 }
6389 len = skb_headlen(skb);
6390 prod = txr->tx_prod;
6391 ring_prod = TX_RING_IDX(prod);
6392
6393 vlan_tag_flags = 0;
6394 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6395 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6396 }
6397
6398 #ifdef BCM_VLAN
6399 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
6400 vlan_tag_flags |=
6401 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6402 }
6403 #endif
6404 if ((mss = skb_shinfo(skb)->gso_size)) {
6405 u32 tcp_opt_len;
6406 struct iphdr *iph;
6407
6408 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6409
6410 tcp_opt_len = tcp_optlen(skb);
6411
6412 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6413 u32 tcp_off = skb_transport_offset(skb) -
6414 sizeof(struct ipv6hdr) - ETH_HLEN;
6415
6416 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6417 TX_BD_FLAGS_SW_FLAGS;
6418 if (likely(tcp_off == 0))
6419 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6420 else {
6421 tcp_off >>= 3;
6422 vlan_tag_flags |= ((tcp_off & 0x3) <<
6423 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6424 ((tcp_off & 0x10) <<
6425 TX_BD_FLAGS_TCP6_OFF4_SHL);
6426 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6427 }
6428 } else {
6429 iph = ip_hdr(skb);
6430 if (tcp_opt_len || (iph->ihl > 5)) {
6431 vlan_tag_flags |= ((iph->ihl - 5) +
6432 (tcp_opt_len >> 2)) << 8;
6433 }
6434 }
6435 } else
6436 mss = 0;
6437
6438 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6439 if (pci_dma_mapping_error(bp->pdev, mapping)) {
6440 dev_kfree_skb(skb);
6441 return NETDEV_TX_OK;
6442 }
6443
6444 tx_buf = &txr->tx_buf_ring[ring_prod];
6445 tx_buf->skb = skb;
6446 pci_unmap_addr_set(tx_buf, mapping, mapping);
6447
6448 txbd = &txr->tx_desc_ring[ring_prod];
6449
6450 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6451 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6452 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6453 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6454
6455 last_frag = skb_shinfo(skb)->nr_frags;
6456 tx_buf->nr_frags = last_frag;
6457 tx_buf->is_gso = skb_is_gso(skb);
6458
6459 for (i = 0; i < last_frag; i++) {
6460 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6461
6462 prod = NEXT_TX_BD(prod);
6463 ring_prod = TX_RING_IDX(prod);
6464 txbd = &txr->tx_desc_ring[ring_prod];
6465
6466 len = frag->size;
6467 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
6468 len, PCI_DMA_TODEVICE);
6469 if (pci_dma_mapping_error(bp->pdev, mapping))
6470 goto dma_error;
6471 pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6472 mapping);
6473
6474 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6475 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6476 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6477 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6478
6479 }
6480 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6481
6482 prod = NEXT_TX_BD(prod);
6483 txr->tx_prod_bseq += skb->len;
6484
6485 REG_WR16(bp, txr->tx_bidx_addr, prod);
6486 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6487
6488 mmiowb();
6489
6490 txr->tx_prod = prod;
6491
6492 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6493 netif_tx_stop_queue(txq);
6494 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6495 netif_tx_wake_queue(txq);
6496 }
6497
6498 return NETDEV_TX_OK;
6499 dma_error:
6500 	/* save the index of the frag that failed */
6501 last_frag = i;
6502
6503 	/* start back at the beginning and unmap the skb head */
6504 prod = txr->tx_prod;
6505 ring_prod = TX_RING_IDX(prod);
6506 tx_buf = &txr->tx_buf_ring[ring_prod];
6507 tx_buf->skb = NULL;
6508 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
6509 skb_headlen(skb), PCI_DMA_TODEVICE);
6510
6511 /* unmap remaining mapped pages */
6512 for (i = 0; i < last_frag; i++) {
6513 prod = NEXT_TX_BD(prod);
6514 ring_prod = TX_RING_IDX(prod);
6515 tx_buf = &txr->tx_buf_ring[ring_prod];
6516 pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
6517 skb_shinfo(skb)->frags[i].size,
6518 PCI_DMA_TODEVICE);
6519 }
6520
6521 dev_kfree_skb(skb);
6522 return NETDEV_TX_OK;
6523 }
6524
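/*
 * The stop-then-recheck sequence at the end of bnx2_start_xmit()
 * closes the classic stop/wake race: the queue is stopped first, and
 * it is re-woken only if bnx2_tx_int() freed enough descriptors in
 * the meantime to push bnx2_tx_avail() past tx_wake_thresh.  Stopping
 * whenever avail <= MAX_SKB_FRAGS guarantees that any skb accepted
 * while the queue is awake (at most MAX_SKB_FRAGS + 1 descriptors)
 * will fit in the ring.
 */
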
6525 /* Called with rtnl_lock */
6526 static int
6527 bnx2_close(struct net_device *dev)
6528 {
6529 struct bnx2 *bp = netdev_priv(dev);
6530
6531 cancel_work_sync(&bp->reset_task);
6532
6533 bnx2_disable_int_sync(bp);
6534 bnx2_napi_disable(bp);
6535 del_timer_sync(&bp->timer);
6536 bnx2_shutdown_chip(bp);
6537 bnx2_free_irq(bp);
6538 bnx2_free_skbs(bp);
6539 bnx2_free_mem(bp);
6540 bp->link_up = 0;
6541 netif_carrier_off(bp->dev);
6542 bnx2_set_power_state(bp, PCI_D3hot);
6543 return 0;
6544 }
6545
6546 static void
6547 bnx2_save_stats(struct bnx2 *bp)
6548 {
6549 u32 *hw_stats = (u32 *) bp->stats_blk;
6550 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6551 int i;
6552
6553 	/* The first 10 counters are 64-bit counters, stored as hi/lo 32-bit word pairs */
6554 for (i = 0; i < 20; i += 2) {
6555 u32 hi;
6556 u64 lo;
6557
6558 hi = *(temp_stats + i) + *(hw_stats + i);
6559 lo = *(temp_stats + i + 1) + *(hw_stats + i + 1);
6560 if (lo > 0xffffffff)
6561 hi++;
6562 *(temp_stats + i) = hi;
6563 *(temp_stats + i + 1) = lo & 0xffffffff;
6564 }
6565
6566 for ( ; i < sizeof(struct statistics_block) / 4; i++)
6567 *(temp_stats + i) = *(temp_stats + i) + *(hw_stats + i);
6568 }
6569
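/*
 * Worked example of the carry handling above: temp = {hi 1, lo
 * 0xffffffff} plus hw = {hi 0, lo 1} gives lo = 0x100000000 in the
 * u64, the carry bumps hi to 2, and lo is truncated back to 0 --
 * i.e. 0x1ffffffff + 1 == 0x200000000, as expected.
 */
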
6570 #define GET_64BIT_NET_STATS64(ctr) \
6571 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
6572 (unsigned long) (ctr##_lo)
6573
6574 #define GET_64BIT_NET_STATS32(ctr) \
6575 (ctr##_lo)
6576
6577 #if (BITS_PER_LONG == 64)
6578 #define GET_64BIT_NET_STATS(ctr) \
6579 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6580 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6581 #else
6582 #define GET_64BIT_NET_STATS(ctr) \
6583 GET_64BIT_NET_STATS32(bp->stats_blk->ctr) + \
6584 GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
6585 #endif
6586
6587 #define GET_32BIT_NET_STATS(ctr) \
6588 (unsigned long) (bp->stats_blk->ctr + \
6589 bp->temp_stats_blk->ctr)
6590
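/*
 * Usage example: GET_64BIT_NET_STATS(stat_IfHCInOctets) below sums the
 * live hardware block with the temp block saved by bnx2_save_stats(),
 * so counters survive the chip resets done in bnx2_change_ring_size().
 * On 32-bit builds only the low words are summed, matching the 32-bit
 * width of the struct net_device_stats fields there.
 */
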
6591 static struct net_device_stats *
6592 bnx2_get_stats(struct net_device *dev)
6593 {
6594 struct bnx2 *bp = netdev_priv(dev);
6595 struct net_device_stats *net_stats = &dev->stats;
6596
6597 if (bp->stats_blk == NULL) {
6598 return net_stats;
6599 }
6600 net_stats->rx_packets =
6601 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6602 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6603 GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6604
6605 net_stats->tx_packets =
6606 GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6607 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6608 GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6609
6610 net_stats->rx_bytes =
6611 GET_64BIT_NET_STATS(stat_IfHCInOctets);
6612
6613 net_stats->tx_bytes =
6614 GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6615
6616 net_stats->multicast =
6617 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);
6618
6619 net_stats->collisions =
6620 GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6621
6622 net_stats->rx_length_errors =
6623 GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6624 GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6625
6626 net_stats->rx_over_errors =
6627 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6628 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6629
6630 net_stats->rx_frame_errors =
6631 GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6632
6633 net_stats->rx_crc_errors =
6634 GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6635
6636 net_stats->rx_errors = net_stats->rx_length_errors +
6637 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6638 net_stats->rx_crc_errors;
6639
6640 net_stats->tx_aborted_errors =
6641 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6642 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6643
6644 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6645 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6646 net_stats->tx_carrier_errors = 0;
6647 else {
6648 net_stats->tx_carrier_errors =
6649 GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6650 }
6651
6652 net_stats->tx_errors =
6653 GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6654 net_stats->tx_aborted_errors +
6655 net_stats->tx_carrier_errors;
6656
6657 net_stats->rx_missed_errors =
6658 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6659 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6660 GET_32BIT_NET_STATS(stat_FwRxDrop);
6661
6662 return net_stats;
6663 }
6664
6665 /* All ethtool functions called with rtnl_lock */
6666
6667 static int
6668 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6669 {
6670 struct bnx2 *bp = netdev_priv(dev);
6671 int support_serdes = 0, support_copper = 0;
6672
6673 cmd->supported = SUPPORTED_Autoneg;
6674 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6675 support_serdes = 1;
6676 support_copper = 1;
6677 } else if (bp->phy_port == PORT_FIBRE)
6678 support_serdes = 1;
6679 else
6680 support_copper = 1;
6681
6682 if (support_serdes) {
6683 cmd->supported |= SUPPORTED_1000baseT_Full |
6684 SUPPORTED_FIBRE;
6685 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6686 cmd->supported |= SUPPORTED_2500baseX_Full;
6687
6688 }
6689 if (support_copper) {
6690 cmd->supported |= SUPPORTED_10baseT_Half |
6691 SUPPORTED_10baseT_Full |
6692 SUPPORTED_100baseT_Half |
6693 SUPPORTED_100baseT_Full |
6694 SUPPORTED_1000baseT_Full |
6695 SUPPORTED_TP;
6696
6697 }
6698
6699 spin_lock_bh(&bp->phy_lock);
6700 cmd->port = bp->phy_port;
6701 cmd->advertising = bp->advertising;
6702
6703 if (bp->autoneg & AUTONEG_SPEED) {
6704 cmd->autoneg = AUTONEG_ENABLE;
6705 }
6706 else {
6707 cmd->autoneg = AUTONEG_DISABLE;
6708 }
6709
6710 if (netif_carrier_ok(dev)) {
6711 cmd->speed = bp->line_speed;
6712 cmd->duplex = bp->duplex;
6713 }
6714 else {
6715 cmd->speed = -1;
6716 cmd->duplex = -1;
6717 }
6718 spin_unlock_bh(&bp->phy_lock);
6719
6720 cmd->transceiver = XCVR_INTERNAL;
6721 cmd->phy_address = bp->phy_addr;
6722
6723 return 0;
6724 }
6725
6726 static int
6727 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6728 {
6729 struct bnx2 *bp = netdev_priv(dev);
6730 u8 autoneg = bp->autoneg;
6731 u8 req_duplex = bp->req_duplex;
6732 u16 req_line_speed = bp->req_line_speed;
6733 u32 advertising = bp->advertising;
6734 int err = -EINVAL;
6735
6736 spin_lock_bh(&bp->phy_lock);
6737
6738 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6739 goto err_out_unlock;
6740
6741 if (cmd->port != bp->phy_port &&
6742 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6743 goto err_out_unlock;
6744
6745 /* If device is down, we can store the settings only if the user
6746 * is setting the currently active port.
6747 */
6748 if (!netif_running(dev) && cmd->port != bp->phy_port)
6749 goto err_out_unlock;
6750
6751 if (cmd->autoneg == AUTONEG_ENABLE) {
6752 autoneg |= AUTONEG_SPEED;
6753
6754 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
6755
6756 /* allow advertising 1 speed */
6757 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
6758 (cmd->advertising == ADVERTISED_10baseT_Full) ||
6759 (cmd->advertising == ADVERTISED_100baseT_Half) ||
6760 (cmd->advertising == ADVERTISED_100baseT_Full)) {
6761
6762 if (cmd->port == PORT_FIBRE)
6763 goto err_out_unlock;
6764
6765 advertising = cmd->advertising;
6766
6767 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
6768 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
6769 (cmd->port == PORT_TP))
6770 goto err_out_unlock;
6771 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
6772 advertising = cmd->advertising;
6773 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
6774 goto err_out_unlock;
6775 else {
6776 if (cmd->port == PORT_FIBRE)
6777 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6778 else
6779 advertising = ETHTOOL_ALL_COPPER_SPEED;
6780 }
6781 advertising |= ADVERTISED_Autoneg;
6782 }
6783 else {
6784 if (cmd->port == PORT_FIBRE) {
6785 if ((cmd->speed != SPEED_1000 &&
6786 cmd->speed != SPEED_2500) ||
6787 (cmd->duplex != DUPLEX_FULL))
6788 goto err_out_unlock;
6789
6790 if (cmd->speed == SPEED_2500 &&
6791 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6792 goto err_out_unlock;
6793 }
6794 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6795 goto err_out_unlock;
6796
6797 autoneg &= ~AUTONEG_SPEED;
6798 req_line_speed = cmd->speed;
6799 req_duplex = cmd->duplex;
6800 advertising = 0;
6801 }
6802
6803 bp->autoneg = autoneg;
6804 bp->advertising = advertising;
6805 bp->req_line_speed = req_line_speed;
6806 bp->req_duplex = req_duplex;
6807
6808 err = 0;
6809 /* If device is down, the new settings will be picked up when it is
6810 * brought up.
6811 */
6812 if (netif_running(dev))
6813 err = bnx2_setup_phy(bp, cmd->port);
6814
6815 err_out_unlock:
6816 spin_unlock_bh(&bp->phy_lock);
6817
6818 return err;
6819 }
6820
6821 static void
6822 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6823 {
6824 struct bnx2 *bp = netdev_priv(dev);
6825
6826 strcpy(info->driver, DRV_MODULE_NAME);
6827 strcpy(info->version, DRV_MODULE_VERSION);
6828 strcpy(info->bus_info, pci_name(bp->pdev));
6829 strcpy(info->fw_version, bp->fw_version);
6830 }
6831
6832 #define BNX2_REGDUMP_LEN (32 * 1024)
6833
6834 static int
6835 bnx2_get_regs_len(struct net_device *dev)
6836 {
6837 return BNX2_REGDUMP_LEN;
6838 }
6839
6840 static void
6841 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6842 {
6843 u32 *p = _p, i, offset;
6844 u8 *orig_p = _p;
6845 struct bnx2 *bp = netdev_priv(dev);
6846 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6847 0x0800, 0x0880, 0x0c00, 0x0c10,
6848 0x0c30, 0x0d08, 0x1000, 0x101c,
6849 0x1040, 0x1048, 0x1080, 0x10a4,
6850 0x1400, 0x1490, 0x1498, 0x14f0,
6851 0x1500, 0x155c, 0x1580, 0x15dc,
6852 0x1600, 0x1658, 0x1680, 0x16d8,
6853 0x1800, 0x1820, 0x1840, 0x1854,
6854 0x1880, 0x1894, 0x1900, 0x1984,
6855 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6856 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6857 0x2000, 0x2030, 0x23c0, 0x2400,
6858 0x2800, 0x2820, 0x2830, 0x2850,
6859 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6860 0x3c00, 0x3c94, 0x4000, 0x4010,
6861 0x4080, 0x4090, 0x43c0, 0x4458,
6862 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6863 0x4fc0, 0x5010, 0x53c0, 0x5444,
6864 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6865 0x5fc0, 0x6000, 0x6400, 0x6428,
6866 0x6800, 0x6848, 0x684c, 0x6860,
6867 0x6888, 0x6910, 0x8000 };
6868
6869 regs->version = 0;
6870
6871 memset(p, 0, BNX2_REGDUMP_LEN);
6872
6873 if (!netif_running(bp->dev))
6874 return;
6875
6876 i = 0;
6877 offset = reg_boundaries[0];
6878 p += offset;
6879 while (offset < BNX2_REGDUMP_LEN) {
6880 *p++ = REG_RD(bp, offset);
6881 offset += 4;
6882 if (offset == reg_boundaries[i + 1]) {
6883 offset = reg_boundaries[i + 2];
6884 p = (u32 *) (orig_p + offset);
6885 i += 2;
6886 }
6887 }
6888 }
6889
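/*
 * reg_boundaries[] is consumed as {start, end} pairs: registers in
 * [start, end) are read, then the walk jumps to the next pair's
 * start, leaving the skipped holes zero-filled by the memset() above.
 * The final 0x8000 entry equals BNX2_REGDUMP_LEN and terminates the
 * loop.
 */
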
6890 static void
6891 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6892 {
6893 struct bnx2 *bp = netdev_priv(dev);
6894
6895 if (bp->flags & BNX2_FLAG_NO_WOL) {
6896 wol->supported = 0;
6897 wol->wolopts = 0;
6898 }
6899 else {
6900 wol->supported = WAKE_MAGIC;
6901 if (bp->wol)
6902 wol->wolopts = WAKE_MAGIC;
6903 else
6904 wol->wolopts = 0;
6905 }
6906 memset(&wol->sopass, 0, sizeof(wol->sopass));
6907 }
6908
6909 static int
6910 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6911 {
6912 struct bnx2 *bp = netdev_priv(dev);
6913
6914 if (wol->wolopts & ~WAKE_MAGIC)
6915 return -EINVAL;
6916
6917 if (wol->wolopts & WAKE_MAGIC) {
6918 if (bp->flags & BNX2_FLAG_NO_WOL)
6919 return -EINVAL;
6920
6921 bp->wol = 1;
6922 }
6923 else {
6924 bp->wol = 0;
6925 }
6926 return 0;
6927 }
6928
6929 static int
6930 bnx2_nway_reset(struct net_device *dev)
6931 {
6932 struct bnx2 *bp = netdev_priv(dev);
6933 u32 bmcr;
6934
6935 if (!netif_running(dev))
6936 return -EAGAIN;
6937
6938 if (!(bp->autoneg & AUTONEG_SPEED)) {
6939 return -EINVAL;
6940 }
6941
6942 spin_lock_bh(&bp->phy_lock);
6943
6944 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6945 int rc;
6946
6947 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6948 spin_unlock_bh(&bp->phy_lock);
6949 return rc;
6950 }
6951
6952 /* Force a link down visible on the other side */
6953 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6954 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6955 spin_unlock_bh(&bp->phy_lock);
6956
6957 msleep(20);
6958
6959 spin_lock_bh(&bp->phy_lock);
6960
6961 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
6962 bp->serdes_an_pending = 1;
6963 mod_timer(&bp->timer, jiffies + bp->current_interval);
6964 }
6965
6966 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6967 bmcr &= ~BMCR_LOOPBACK;
6968 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6969
6970 spin_unlock_bh(&bp->phy_lock);
6971
6972 return 0;
6973 }
6974
6975 static u32
6976 bnx2_get_link(struct net_device *dev)
6977 {
6978 struct bnx2 *bp = netdev_priv(dev);
6979
6980 return bp->link_up;
6981 }
6982
6983 static int
6984 bnx2_get_eeprom_len(struct net_device *dev)
6985 {
6986 struct bnx2 *bp = netdev_priv(dev);
6987
6988 if (bp->flash_info == NULL)
6989 return 0;
6990
6991 return (int) bp->flash_size;
6992 }
6993
6994 static int
6995 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6996 u8 *eebuf)
6997 {
6998 struct bnx2 *bp = netdev_priv(dev);
6999 int rc;
7000
7001 if (!netif_running(dev))
7002 return -EAGAIN;
7003
7004 /* parameters already validated in ethtool_get_eeprom */
7005
7006 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7007
7008 return rc;
7009 }
7010
7011 static int
7012 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7013 u8 *eebuf)
7014 {
7015 struct bnx2 *bp = netdev_priv(dev);
7016 int rc;
7017
7018 if (!netif_running(dev))
7019 return -EAGAIN;
7020
7021 /* parameters already validated in ethtool_set_eeprom */
7022
7023 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7024
7025 return rc;
7026 }
7027
7028 static int
7029 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7030 {
7031 struct bnx2 *bp = netdev_priv(dev);
7032
7033 memset(coal, 0, sizeof(struct ethtool_coalesce));
7034
7035 coal->rx_coalesce_usecs = bp->rx_ticks;
7036 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7037 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7038 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7039
7040 coal->tx_coalesce_usecs = bp->tx_ticks;
7041 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7042 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7043 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7044
7045 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7046
7047 return 0;
7048 }
7049
7050 static int
7051 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7052 {
7053 struct bnx2 *bp = netdev_priv(dev);
7054
7055 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7056 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7057
7058 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7059 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7060
7061 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7062 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7063
7064 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7065 if (bp->rx_quick_cons_trip_int > 0xff)
7066 bp->rx_quick_cons_trip_int = 0xff;
7067
7068 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7069 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7070
7071 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7072 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7073
7074 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7075 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7076
7077 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7078 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7079 0xff;
7080
7081 bp->stats_ticks = coal->stats_block_coalesce_usecs;
7082 if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7083 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7084 bp->stats_ticks = USEC_PER_SEC;
7085 }
7086 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7087 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7088 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7089
7090 if (netif_running(bp->dev)) {
7091 bnx2_netif_stop(bp);
7092 bnx2_init_nic(bp, 0);
7093 bnx2_netif_start(bp);
7094 }
7095
7096 return 0;
7097 }
7098
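/*
 * Userspace counterpart via the standard ethtool utility (interface
 * name illustrative):
 *
 *	ethtool -C eth0 rx-usecs 18 rx-frames 12 tx-usecs 80 tx-frames 20
 *
 * Note that values beyond the hardware maxima (0x3ff ticks, 0xff
 * frames) are silently clamped above rather than rejected.
 */
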
7099 static void
7100 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7101 {
7102 struct bnx2 *bp = netdev_priv(dev);
7103
7104 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7105 ering->rx_mini_max_pending = 0;
7106 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7107
7108 ering->rx_pending = bp->rx_ring_size;
7109 ering->rx_mini_pending = 0;
7110 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7111
7112 ering->tx_max_pending = MAX_TX_DESC_CNT;
7113 ering->tx_pending = bp->tx_ring_size;
7114 }
7115
7116 static int
7117 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7118 {
7119 if (netif_running(bp->dev)) {
7120 /* Reset will erase chipset stats; save them */
7121 bnx2_save_stats(bp);
7122
7123 bnx2_netif_stop(bp);
7124 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7125 bnx2_free_skbs(bp);
7126 bnx2_free_mem(bp);
7127 }
7128
7129 bnx2_set_rx_ring_size(bp, rx);
7130 bp->tx_ring_size = tx;
7131
7132 if (netif_running(bp->dev)) {
7133 int rc;
7134
7135 rc = bnx2_alloc_mem(bp);
7136 if (!rc)
7137 rc = bnx2_init_nic(bp, 0);
7138
7139 if (rc) {
7140 bnx2_napi_enable(bp);
7141 dev_close(bp->dev);
7142 return rc;
7143 }
7144 bnx2_netif_start(bp);
7145 }
7146 return 0;
7147 }
7148
7149 static int
7150 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7151 {
7152 struct bnx2 *bp = netdev_priv(dev);
7153 int rc;
7154
7155 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7156 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7157 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7158
7159 return -EINVAL;
7160 }
7161 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7162 return rc;
7163 }
7164
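/*
 * Userspace counterpart (interface name illustrative):
 *
 *	ethtool -G eth0 rx 255 tx 255
 *
 * Resizing a running interface goes through a full chip reset in
 * bnx2_change_ring_size(), which is why the counters are saved first.
 */
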
7165 static void
7166 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7167 {
7168 struct bnx2 *bp = netdev_priv(dev);
7169
7170 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7171 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7172 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7173 }
7174
7175 static int
7176 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7177 {
7178 struct bnx2 *bp = netdev_priv(dev);
7179
7180 bp->req_flow_ctrl = 0;
7181 if (epause->rx_pause)
7182 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7183 if (epause->tx_pause)
7184 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7185
7186 if (epause->autoneg) {
7187 bp->autoneg |= AUTONEG_FLOW_CTRL;
7188 }
7189 else {
7190 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7191 }
7192
7193 if (netif_running(dev)) {
7194 spin_lock_bh(&bp->phy_lock);
7195 bnx2_setup_phy(bp, bp->phy_port);
7196 spin_unlock_bh(&bp->phy_lock);
7197 }
7198
7199 return 0;
7200 }
7201
7202 static u32
7203 bnx2_get_rx_csum(struct net_device *dev)
7204 {
7205 struct bnx2 *bp = netdev_priv(dev);
7206
7207 return bp->rx_csum;
7208 }
7209
7210 static int
7211 bnx2_set_rx_csum(struct net_device *dev, u32 data)
7212 {
7213 struct bnx2 *bp = netdev_priv(dev);
7214
7215 bp->rx_csum = data;
7216 return 0;
7217 }
7218
7219 static int
7220 bnx2_set_tso(struct net_device *dev, u32 data)
7221 {
7222 struct bnx2 *bp = netdev_priv(dev);
7223
7224 if (data) {
7225 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7226 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7227 dev->features |= NETIF_F_TSO6;
7228 } else
7229 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7230 NETIF_F_TSO_ECN);
7231 return 0;
7232 }
7233
7234 static struct {
7235 char string[ETH_GSTRING_LEN];
7236 } bnx2_stats_str_arr[] = {
7237 { "rx_bytes" },
7238 { "rx_error_bytes" },
7239 { "tx_bytes" },
7240 { "tx_error_bytes" },
7241 { "rx_ucast_packets" },
7242 { "rx_mcast_packets" },
7243 { "rx_bcast_packets" },
7244 { "tx_ucast_packets" },
7245 { "tx_mcast_packets" },
7246 { "tx_bcast_packets" },
7247 { "tx_mac_errors" },
7248 { "tx_carrier_errors" },
7249 { "rx_crc_errors" },
7250 { "rx_align_errors" },
7251 { "tx_single_collisions" },
7252 { "tx_multi_collisions" },
7253 { "tx_deferred" },
7254 { "tx_excess_collisions" },
7255 { "tx_late_collisions" },
7256 { "tx_total_collisions" },
7257 { "rx_fragments" },
7258 { "rx_jabbers" },
7259 { "rx_undersize_packets" },
7260 { "rx_oversize_packets" },
7261 { "rx_64_byte_packets" },
7262 { "rx_65_to_127_byte_packets" },
7263 { "rx_128_to_255_byte_packets" },
7264 { "rx_256_to_511_byte_packets" },
7265 { "rx_512_to_1023_byte_packets" },
7266 { "rx_1024_to_1522_byte_packets" },
7267 { "rx_1523_to_9022_byte_packets" },
7268 { "tx_64_byte_packets" },
7269 { "tx_65_to_127_byte_packets" },
7270 { "tx_128_to_255_byte_packets" },
7271 { "tx_256_to_511_byte_packets" },
7272 { "tx_512_to_1023_byte_packets" },
7273 { "tx_1024_to_1522_byte_packets" },
7274 { "tx_1523_to_9022_byte_packets" },
7275 { "rx_xon_frames" },
7276 { "rx_xoff_frames" },
7277 { "tx_xon_frames" },
7278 { "tx_xoff_frames" },
7279 { "rx_mac_ctrl_frames" },
7280 { "rx_filtered_packets" },
7281 { "rx_ftq_discards" },
7282 { "rx_discards" },
7283 { "rx_fw_discards" },
7284 };
7285
7286 #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7288
7289 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7290
7291 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7292 STATS_OFFSET32(stat_IfHCInOctets_hi),
7293 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7294 STATS_OFFSET32(stat_IfHCOutOctets_hi),
7295 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7296 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7297 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7298 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7299 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7300 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7301 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7302 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7303 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7304 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7305 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7306 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7307 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7308 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7309 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7310 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7311 STATS_OFFSET32(stat_EtherStatsCollisions),
7312 STATS_OFFSET32(stat_EtherStatsFragments),
7313 STATS_OFFSET32(stat_EtherStatsJabbers),
7314 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7315 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7316 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7317 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7318 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7319 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7320 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7321 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7322 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7323 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7324 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7325 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7326 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7327 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7328 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7329 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7330 STATS_OFFSET32(stat_XonPauseFramesReceived),
7331 STATS_OFFSET32(stat_XoffPauseFramesReceived),
7332 STATS_OFFSET32(stat_OutXonSent),
7333 STATS_OFFSET32(stat_OutXoffSent),
7334 STATS_OFFSET32(stat_MacControlFramesReceived),
7335 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7336 STATS_OFFSET32(stat_IfInFTQDiscards),
7337 STATS_OFFSET32(stat_IfInMBUFDiscards),
7338 STATS_OFFSET32(stat_FwRxDrop),
7339 };
7340
7341 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7342 * skipped because of errata.
7343 */
7344 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7345 8,0,8,8,8,8,8,8,8,8,
7346 4,0,4,4,4,4,4,4,4,4,
7347 4,4,4,4,4,4,4,4,4,4,
7348 4,4,4,4,4,4,4,4,4,4,
7349 4,4,4,4,4,4,4,
7350 };
7351
7352 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7353 8,0,8,8,8,8,8,8,8,8,
7354 4,4,4,4,4,4,4,4,4,4,
7355 4,4,4,4,4,4,4,4,4,4,
7356 4,4,4,4,4,4,4,4,4,4,
7357 4,4,4,4,4,4,4,
7358 };
7359
7360 #define BNX2_NUM_TESTS 6
7361
7362 static struct {
7363 char string[ETH_GSTRING_LEN];
7364 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7365 { "register_test (offline)" },
7366 { "memory_test (offline)" },
7367 { "loopback_test (offline)" },
7368 { "nvram_test (online)" },
7369 { "interrupt_test (online)" },
7370 { "link_test (online)" },
7371 };
7372
7373 static int
7374 bnx2_get_sset_count(struct net_device *dev, int sset)
7375 {
7376 switch (sset) {
7377 case ETH_SS_TEST:
7378 return BNX2_NUM_TESTS;
7379 case ETH_SS_STATS:
7380 return BNX2_NUM_STATS;
7381 default:
7382 return -EOPNOTSUPP;
7383 }
7384 }
7385
7386 static void
7387 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7388 {
7389 struct bnx2 *bp = netdev_priv(dev);
7390
7391 bnx2_set_power_state(bp, PCI_D0);
7392
7393 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7394 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7395 int i;
7396
7397 bnx2_netif_stop(bp);
7398 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7399 bnx2_free_skbs(bp);
7400
7401 if (bnx2_test_registers(bp) != 0) {
7402 buf[0] = 1;
7403 etest->flags |= ETH_TEST_FL_FAILED;
7404 }
7405 if (bnx2_test_memory(bp) != 0) {
7406 buf[1] = 1;
7407 etest->flags |= ETH_TEST_FL_FAILED;
7408 }
7409 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7410 etest->flags |= ETH_TEST_FL_FAILED;
7411
7412 if (!netif_running(bp->dev))
7413 bnx2_shutdown_chip(bp);
7414 else {
7415 bnx2_init_nic(bp, 1);
7416 bnx2_netif_start(bp);
7417 }
7418
7419 /* wait for link up */
7420 for (i = 0; i < 7; i++) {
7421 if (bp->link_up)
7422 break;
7423 msleep_interruptible(1000);
7424 }
7425 }
7426
7427 if (bnx2_test_nvram(bp) != 0) {
7428 buf[3] = 1;
7429 etest->flags |= ETH_TEST_FL_FAILED;
7430 }
7431 if (bnx2_test_intr(bp) != 0) {
7432 buf[4] = 1;
7433 etest->flags |= ETH_TEST_FL_FAILED;
7434 }
7435
7436 if (bnx2_test_link(bp) != 0) {
7437 buf[5] = 1;
7438 etest->flags |= ETH_TEST_FL_FAILED;
7439
7440 }
7441 if (!netif_running(bp->dev))
7442 bnx2_set_power_state(bp, PCI_D3hot);
7443 }
7444
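/*
 * Triggered from userspace with, e.g. (interface name illustrative):
 *
 *	ethtool -t eth0 offline		# all six tests; link goes down
 *	ethtool -t eth0 online		# nvram/interrupt/link tests only
 *
 * buf[0..5] maps one-to-one onto bnx2_tests_str_arr[].
 */
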
7445 static void
7446 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7447 {
7448 switch (stringset) {
7449 case ETH_SS_STATS:
7450 memcpy(buf, bnx2_stats_str_arr,
7451 sizeof(bnx2_stats_str_arr));
7452 break;
7453 case ETH_SS_TEST:
7454 memcpy(buf, bnx2_tests_str_arr,
7455 sizeof(bnx2_tests_str_arr));
7456 break;
7457 }
7458 }
7459
7460 static void
7461 bnx2_get_ethtool_stats(struct net_device *dev,
7462 struct ethtool_stats *stats, u64 *buf)
7463 {
7464 struct bnx2 *bp = netdev_priv(dev);
7465 int i;
7466 u32 *hw_stats = (u32 *) bp->stats_blk;
7467 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7468 u8 *stats_len_arr = NULL;
7469
7470 if (hw_stats == NULL) {
7471 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7472 return;
7473 }
7474
7475 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7476 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7477 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7478 (CHIP_ID(bp) == CHIP_ID_5708_A0))
7479 stats_len_arr = bnx2_5706_stats_len_arr;
7480 else
7481 stats_len_arr = bnx2_5708_stats_len_arr;
7482
7483 for (i = 0; i < BNX2_NUM_STATS; i++) {
7484 unsigned long offset;
7485
7486 if (stats_len_arr[i] == 0) {
7487 /* skip this counter */
7488 buf[i] = 0;
7489 continue;
7490 }
7491
7492 offset = bnx2_stats_offset_arr[i];
7493 if (stats_len_arr[i] == 4) {
7494 /* 4-byte counter */
7495 buf[i] = (u64) *(hw_stats + offset) +
7496 *(temp_stats + offset);
7497 continue;
7498 }
7499 /* 8-byte counter */
7500 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7501 *(hw_stats + offset + 1) +
7502 (((u64) *(temp_stats + offset)) << 32) +
7503 *(temp_stats + offset + 1);
7504 }
7505 }
7506
7507 static int
7508 bnx2_phys_id(struct net_device *dev, u32 data)
7509 {
7510 struct bnx2 *bp = netdev_priv(dev);
7511 int i;
7512 u32 save;
7513
7514 bnx2_set_power_state(bp, PCI_D0);
7515
7516 if (data == 0)
7517 data = 2;
7518
7519 save = REG_RD(bp, BNX2_MISC_CFG);
7520 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7521
7522 for (i = 0; i < (data * 2); i++) {
7523 if ((i % 2) == 0) {
7524 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7525 }
7526 else {
7527 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7528 BNX2_EMAC_LED_1000MB_OVERRIDE |
7529 BNX2_EMAC_LED_100MB_OVERRIDE |
7530 BNX2_EMAC_LED_10MB_OVERRIDE |
7531 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7532 BNX2_EMAC_LED_TRAFFIC);
7533 }
7534 msleep_interruptible(500);
7535 if (signal_pending(current))
7536 break;
7537 }
7538 REG_WR(bp, BNX2_EMAC_LED, 0);
7539 REG_WR(bp, BNX2_MISC_CFG, save);
7540
7541 if (!netif_running(dev))
7542 bnx2_set_power_state(bp, PCI_D3hot);
7543
7544 return 0;
7545 }
7546
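/*
 * "ethtool -p eth0 5" (interface name illustrative) blinks the port
 * LED for 5 seconds through this handler; data == 0 selects the
 * 2-second default above.
 */
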
7547 static int
7548 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7549 {
7550 struct bnx2 *bp = netdev_priv(dev);
7551
7552 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7553 		return ethtool_op_set_tx_ipv6_csum(dev, data);
7554 	else
7555 		return ethtool_op_set_tx_csum(dev, data);
7556 }
7557
7558 static const struct ethtool_ops bnx2_ethtool_ops = {
7559 .get_settings = bnx2_get_settings,
7560 .set_settings = bnx2_set_settings,
7561 .get_drvinfo = bnx2_get_drvinfo,
7562 .get_regs_len = bnx2_get_regs_len,
7563 .get_regs = bnx2_get_regs,
7564 .get_wol = bnx2_get_wol,
7565 .set_wol = bnx2_set_wol,
7566 .nway_reset = bnx2_nway_reset,
7567 .get_link = bnx2_get_link,
7568 .get_eeprom_len = bnx2_get_eeprom_len,
7569 .get_eeprom = bnx2_get_eeprom,
7570 .set_eeprom = bnx2_set_eeprom,
7571 .get_coalesce = bnx2_get_coalesce,
7572 .set_coalesce = bnx2_set_coalesce,
7573 .get_ringparam = bnx2_get_ringparam,
7574 .set_ringparam = bnx2_set_ringparam,
7575 .get_pauseparam = bnx2_get_pauseparam,
7576 .set_pauseparam = bnx2_set_pauseparam,
7577 .get_rx_csum = bnx2_get_rx_csum,
7578 .set_rx_csum = bnx2_set_rx_csum,
7579 .set_tx_csum = bnx2_set_tx_csum,
7580 .set_sg = ethtool_op_set_sg,
7581 .set_tso = bnx2_set_tso,
7582 .self_test = bnx2_self_test,
7583 .get_strings = bnx2_get_strings,
7584 .phys_id = bnx2_phys_id,
7585 .get_ethtool_stats = bnx2_get_ethtool_stats,
7586 .get_sset_count = bnx2_get_sset_count,
7587 };
7588
7589 /* Called with rtnl_lock */
7590 static int
7591 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7592 {
7593 struct mii_ioctl_data *data = if_mii(ifr);
7594 struct bnx2 *bp = netdev_priv(dev);
7595 int err;
7596
7597 switch(cmd) {
7598 case SIOCGMIIPHY:
7599 data->phy_id = bp->phy_addr;
7600
7601 /* fallthru */
7602 case SIOCGMIIREG: {
7603 u32 mii_regval;
7604
7605 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7606 return -EOPNOTSUPP;
7607
7608 if (!netif_running(dev))
7609 return -EAGAIN;
7610
7611 spin_lock_bh(&bp->phy_lock);
7612 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7613 spin_unlock_bh(&bp->phy_lock);
7614
7615 data->val_out = mii_regval;
7616
7617 return err;
7618 }
7619
7620 case SIOCSMIIREG:
7621 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7622 return -EOPNOTSUPP;
7623
7624 if (!netif_running(dev))
7625 return -EAGAIN;
7626
7627 spin_lock_bh(&bp->phy_lock);
7628 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7629 spin_unlock_bh(&bp->phy_lock);
7630
7631 return err;
7632
7633 default:
7634 /* do nothing */
7635 break;
7636 }
7637 return -EOPNOTSUPP;
7638 }
7639
7640 /* Called with rtnl_lock */
7641 static int
7642 bnx2_change_mac_addr(struct net_device *dev, void *p)
7643 {
7644 struct sockaddr *addr = p;
7645 struct bnx2 *bp = netdev_priv(dev);
7646
7647 if (!is_valid_ether_addr(addr->sa_data))
7648 return -EINVAL;
7649
7650 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7651 if (netif_running(dev))
7652 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7653
7654 return 0;
7655 }
7656
7657 /* Called with rtnl_lock */
7658 static int
7659 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7660 {
7661 struct bnx2 *bp = netdev_priv(dev);
7662
7663 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7664 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7665 return -EINVAL;
7666
7667 dev->mtu = new_mtu;
7668 	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size);
7669 }
7670
7671 #ifdef CONFIG_NET_POLL_CONTROLLER
7672 static void
7673 poll_bnx2(struct net_device *dev)
7674 {
7675 struct bnx2 *bp = netdev_priv(dev);
7676 int i;
7677
7678 for (i = 0; i < bp->irq_nvecs; i++) {
7679 disable_irq(bp->irq_tbl[i].vector);
7680 bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
7681 enable_irq(bp->irq_tbl[i].vector);
7682 }
7683 }
7684 #endif
7685
7686 static void __devinit
7687 bnx2_get_5709_media(struct bnx2 *bp)
7688 {
7689 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7690 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7691 u32 strap;
7692
7693 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7694 return;
7695 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7696 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7697 return;
7698 }
7699
7700 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7701 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7702 else
7703 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7704
7705 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7706 switch (strap) {
7707 case 0x4:
7708 case 0x5:
7709 case 0x6:
7710 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7711 return;
7712 }
7713 } else {
7714 switch (strap) {
7715 case 0x1:
7716 case 0x2:
7717 case 0x4:
7718 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7719 return;
7720 }
7721 }
7722 }
7723
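/*
 * Decoding notes for the above: the bond ID identifies pure copper
 * (_C) and pure serdes (_S) parts outright; otherwise the media is
 * inferred from the PHY strap value, with separate strap-to-serdes
 * mappings for PCI function 0 and function 1 of the dual-port device.
 */
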
7724 static void __devinit
7725 bnx2_get_pci_speed(struct bnx2 *bp)
7726 {
7727 u32 reg;
7728
7729 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7730 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7731 u32 clkreg;
7732
7733 bp->flags |= BNX2_FLAG_PCIX;
7734
7735 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7736
7737 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7738 switch (clkreg) {
7739 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7740 bp->bus_speed_mhz = 133;
7741 break;
7742
7743 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7744 bp->bus_speed_mhz = 100;
7745 break;
7746
7747 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7748 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7749 bp->bus_speed_mhz = 66;
7750 break;
7751
7752 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7753 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7754 bp->bus_speed_mhz = 50;
7755 break;
7756
7757 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7758 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7759 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7760 bp->bus_speed_mhz = 33;
7761 break;
7762 }
7763 }
7764 else {
7765 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7766 bp->bus_speed_mhz = 66;
7767 else
7768 bp->bus_speed_mhz = 33;
7769 }
7770
7771 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7772 bp->flags |= BNX2_FLAG_PCI_32BIT;
7773
7774 }
7775
7776 static void __devinit
7777 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7778 {
7779 int rc, i, v0_len = 0;
7780 u8 *data;
7781 u8 *v0_str = NULL;
7782 bool mn_match = false;
7783
7784 #define BNX2_VPD_NVRAM_OFFSET 0x300
7785 #define BNX2_VPD_LEN 128
7786 #define BNX2_MAX_VER_SLEN 30
7787
7788 data = kmalloc(256, GFP_KERNEL);
7789 if (!data)
7790 return;
7791
7792 rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
7793 BNX2_VPD_LEN);
7794 if (rc)
7795 goto vpd_done;
7796
7797 for (i = 0; i < BNX2_VPD_LEN; i += 4) {
7798 data[i] = data[i + BNX2_VPD_LEN + 3];
7799 data[i + 1] = data[i + BNX2_VPD_LEN + 2];
7800 data[i + 2] = data[i + BNX2_VPD_LEN + 1];
7801 data[i + 3] = data[i + BNX2_VPD_LEN];
7802 }
7803
7804 for (i = 0; i <= BNX2_VPD_LEN - 3; ) {
7805 unsigned char val = data[i];
7806 unsigned int block_end;
7807
7808 if (val == 0x82 || val == 0x91) {
7809 i = (i + 3 + (data[i + 1] + (data[i + 2] << 8)));
7810 continue;
7811 }
7812
7813 if (val != 0x90)
7814 goto vpd_done;
7815
7816 block_end = (i + 3 + (data[i + 1] + (data[i + 2] << 8)));
7817 i += 3;
7818
7819 if (block_end > BNX2_VPD_LEN)
7820 goto vpd_done;
7821
7822 while (i < (block_end - 2)) {
7823 int len = data[i + 2];
7824
7825 if (i + 3 + len > block_end)
7826 goto vpd_done;
7827
7828 if (data[i] == 'M' && data[i + 1] == 'N') {
7829 if (len != 4 ||
7830 memcmp(&data[i + 3], "1028", 4))
7831 goto vpd_done;
7832 mn_match = true;
7833
7834 } else if (data[i] == 'V' && data[i + 1] == '0') {
7835 if (len > BNX2_MAX_VER_SLEN)
7836 goto vpd_done;
7837
7838 v0_len = len;
7839 v0_str = &data[i + 3];
7840 }
7841 i += 3 + len;
7842
7843 if (mn_match && v0_str) {
7844 memcpy(bp->fw_version, v0_str, v0_len);
7845 bp->fw_version[v0_len] = ' ';
7846 goto vpd_done;
7847 }
7848 }
7849 goto vpd_done;
7850 }
7851
7852 vpd_done:
7853 kfree(data);
7854 }
7855
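/*
 * The 0x82/0x90/0x91 values tested above are PCI VPD resource tags
 * (Identifier String, read-only VPD-R, read-write VPD-W).  The parser
 * skips ahead to the read-only section and walks its keyword records,
 * matching "MN" against "1028" (Dell's PCI vendor ID in ASCII) and
 * capturing "V0" as the vendor version string.
 */
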
7856 static int __devinit
7857 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7858 {
7859 struct bnx2 *bp;
7860 unsigned long mem_len;
7861 int rc, i, j;
7862 u32 reg;
7863 u64 dma_mask, persist_dma_mask;
7864
7865 SET_NETDEV_DEV(dev, &pdev->dev);
7866 bp = netdev_priv(dev);
7867
7868 bp->flags = 0;
7869 bp->phy_flags = 0;
7870
7871 bp->temp_stats_blk =
7872 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7873
7874 if (bp->temp_stats_blk == NULL) {
7875 rc = -ENOMEM;
7876 goto err_out;
7877 }
7878
7879 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7880 rc = pci_enable_device(pdev);
7881 if (rc) {
7882 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7883 goto err_out;
7884 }
7885
7886 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7887 dev_err(&pdev->dev,
7888 "Cannot find PCI device base address, aborting.\n");
7889 rc = -ENODEV;
7890 goto err_out_disable;
7891 }
7892
7893 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7894 if (rc) {
7895 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7896 goto err_out_disable;
7897 }
7898
7899 pci_set_master(pdev);
7900 pci_save_state(pdev);
7901
7902 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7903 if (bp->pm_cap == 0) {
7904 dev_err(&pdev->dev,
7905 "Cannot find power management capability, aborting.\n");
7906 rc = -EIO;
7907 goto err_out_release;
7908 }
7909
7910 bp->dev = dev;
7911 bp->pdev = pdev;
7912
7913 spin_lock_init(&bp->phy_lock);
7914 spin_lock_init(&bp->indirect_lock);
7915 #ifdef BCM_CNIC
7916 mutex_init(&bp->cnic_lock);
7917 #endif
7918 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7919
7920 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7921 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7922 dev->mem_end = dev->mem_start + mem_len;
7923 dev->irq = pdev->irq;
7924
7925 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7926
7927 if (!bp->regview) {
7928 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7929 rc = -ENOMEM;
7930 goto err_out_release;
7931 }
7932
7933 	/* Configure byte swap and enable write to the reg_window registers.
7934 	 * Rely on the CPU to do target byte swapping on big endian systems;
7935 	 * the chip's target access swapping will not swap all accesses.
7936 	 */
7937 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7938 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7939 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7940
7941 bnx2_set_power_state(bp, PCI_D0);
7942
7943 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7944
7945 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7946 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7947 dev_err(&pdev->dev,
7948 "Cannot find PCIE capability, aborting.\n");
7949 rc = -EIO;
7950 goto err_out_unmap;
7951 }
7952 bp->flags |= BNX2_FLAG_PCIE;
7953 if (CHIP_REV(bp) == CHIP_REV_Ax)
7954 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7955 } else {
7956 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7957 if (bp->pcix_cap == 0) {
7958 dev_err(&pdev->dev,
7959 "Cannot find PCIX capability, aborting.\n");
7960 rc = -EIO;
7961 goto err_out_unmap;
7962 }
7963 bp->flags |= BNX2_FLAG_BROKEN_STATS;
7964 }
7965
7966 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7967 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7968 bp->flags |= BNX2_FLAG_MSIX_CAP;
7969 }
7970
7971 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7972 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7973 bp->flags |= BNX2_FLAG_MSI_CAP;
7974 }
7975
7976 /* 5708 cannot support DMA addresses > 40-bit. */
7977 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7978 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7979 else
7980 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7981
7982 /* Configure DMA attributes. */
7983 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7984 dev->features |= NETIF_F_HIGHDMA;
7985 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7986 if (rc) {
7987 dev_err(&pdev->dev,
7988 "pci_set_consistent_dma_mask failed, aborting.\n");
7989 goto err_out_unmap;
7990 }
7991 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7992 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7993 goto err_out_unmap;
7994 }
7995
7996 if (!(bp->flags & BNX2_FLAG_PCIE))
7997 bnx2_get_pci_speed(bp);
7998
7999 /* 5706A0 may falsely detect SERR and PERR. */
8000 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8001 reg = REG_RD(bp, PCI_COMMAND);
8002 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8003 REG_WR(bp, PCI_COMMAND, reg);
8004 }
8005 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
8006 !(bp->flags & BNX2_FLAG_PCIX)) {
8007
8008 dev_err(&pdev->dev,
8009 "5706 A1 can only be used in a PCIX bus, aborting.\n");
8010 goto err_out_unmap;
8011 }
8012
8013 bnx2_init_nvram(bp);
8014
8015 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8016
8017 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8018 BNX2_SHM_HDR_SIGNATURE_SIG) {
8019 u32 off = PCI_FUNC(pdev->devfn) << 2;
8020
8021 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8022 } else
8023 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8024
8025 /* Get the permanent MAC address. First we need to make sure the
8026 * firmware is actually running.
8027 */
8028 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8029
8030 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8031 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8032 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
8033 rc = -ENODEV;
8034 goto err_out_unmap;
8035 }
8036
8037 bnx2_read_vpd_fw_ver(bp);
8038
8039 j = strlen(bp->fw_version);
8040 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8041 for (i = 0; i < 3 && j < 24; i++) {
8042 u8 num, k, skip0;
8043
8044 if (i == 0) {
8045 bp->fw_version[j++] = 'b';
8046 bp->fw_version[j++] = 'c';
8047 bp->fw_version[j++] = ' ';
8048 }
8049 num = (u8) (reg >> (24 - (i * 8)));
8050 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8051 if (num >= k || !skip0 || k == 1) {
8052 bp->fw_version[j++] = (num / k) + '0';
8053 skip0 = 0;
8054 }
8055 }
8056 if (i != 2)
8057 bp->fw_version[j++] = '.';
8058 }
8059 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8060 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8061 bp->wol = 1;
8062
8063 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8064 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8065
8066 for (i = 0; i < 30; i++) {
8067 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8068 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8069 break;
8070 msleep(10);
8071 }
8072 }
8073 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8074 reg &= BNX2_CONDITION_MFW_RUN_MASK;
8075 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8076 reg != BNX2_CONDITION_MFW_RUN_NONE) {
8077 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8078
8079 if (j < 32)
8080 bp->fw_version[j++] = ' ';
8081 for (i = 0; i < 3 && j < 28; i++) {
8082 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8083 reg = swab32(reg);
8084 memcpy(&bp->fw_version[j], &reg, 4);
8085 j += 4;
8086 }
8087 }
8088
8089 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8090 bp->mac_addr[0] = (u8) (reg >> 8);
8091 bp->mac_addr[1] = (u8) reg;
8092
8093 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8094 bp->mac_addr[2] = (u8) (reg >> 24);
8095 bp->mac_addr[3] = (u8) (reg >> 16);
8096 bp->mac_addr[4] = (u8) (reg >> 8);
8097 bp->mac_addr[5] = (u8) reg;
8098
8099 bp->tx_ring_size = MAX_TX_DESC_CNT;
8100 bnx2_set_rx_ring_size(bp, 255);
8101
8102 bp->rx_csum = 1;
8103
8104 bp->tx_quick_cons_trip_int = 2;
8105 bp->tx_quick_cons_trip = 20;
8106 bp->tx_ticks_int = 18;
8107 bp->tx_ticks = 80;
8108
8109 bp->rx_quick_cons_trip_int = 2;
8110 bp->rx_quick_cons_trip = 12;
8111 bp->rx_ticks_int = 18;
8112 bp->rx_ticks = 18;
8113
8114 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8115
8116 bp->current_interval = BNX2_TIMER_INTERVAL;
8117
8118 bp->phy_addr = 1;
8119
8120 	/* Determine the media type.  WOL is disabled below on SERDES chips without GIG_LINK_ON_VAUX. */
8121 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8122 bnx2_get_5709_media(bp);
8123 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8124 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8125
8126 bp->phy_port = PORT_TP;
8127 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8128 bp->phy_port = PORT_FIBRE;
8129 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8130 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8131 bp->flags |= BNX2_FLAG_NO_WOL;
8132 bp->wol = 0;
8133 }
8134 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8135 			/* Don't do parallel detection on this board because of
8136 			 * board problems; the link will not go down
8137 			 * if parallel detection is used.
8138 			 */
8139 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8140 pdev->subsystem_device == 0x310c)
8141 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8142 } else {
8143 bp->phy_addr = 2;
8144 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8145 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8146 }
8147 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8148 CHIP_NUM(bp) == CHIP_NUM_5708)
8149 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8150 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8151 (CHIP_REV(bp) == CHIP_REV_Ax ||
8152 CHIP_REV(bp) == CHIP_REV_Bx))
8153 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8154
	bnx2_init_fw_cap(bp);

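	/* WOL is not possible on early 5708 steppings or when the VAUX
	 * preset bit is not set in PCI config space.
	 */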
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
	    !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
		bp->flags |= BNX2_FLAG_NO_WOL;
		bp->wol = 0;
	}

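	/* The 5706 A0 uses the same coalescing parameters in and out of
	 * interrupt context.
	 */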
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int = bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int = bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on the 5706 if an AMD 8132 bridge is found.
	 *
	 * MSI is defined to be a 32-bit write.  The 5706 does 64-bit
	 * MSI writes with byte enables disabled on the unused 32-bit
	 * word.  This is legal but causes problems on the AMD 8132,
	 * which will eventually stop responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling
	 * it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

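			/* pci_get_device() returns the match with a
			 * reference held; drop it before breaking out.
			 */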
			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

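	/* Set up (but do not yet arm) the periodic driver heartbeat timer. */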
	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

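/* Format a human-readable bus description into str, e.g. "PCI-X 64-bit 133MHz". */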
static char * __devinit
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & BNX2_FLAG_PCIE) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & BNX2_FLAG_PCIX)
			s += sprintf(s, "-X");
		if (bp->flags & BNX2_FLAG_PCI_32BIT)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}

static void __devinit
bnx2_init_napi(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		int (*poll)(struct napi_struct *, int);

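		/* Vector 0 (bnx2_poll) also handles link and other
		 * slow-path events; the remaining MSI-X vectors only
		 * poll their own ring.
		 */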
		if (i == 0)
			poll = bnx2_poll;
		else
			poll = bnx2_poll_msix;

		netif_napi_add(bp->dev, &bnapi->napi, poll, 64);
		bnapi->bp = bp;
	}
}

static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats		= bnx2_get_stats,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};

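/* Mirror the given feature flags into dev->vlan_features; this is a
 * no-op when VLAN support is not built in.
 */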
static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
{
#ifdef BCM_VLAN
	dev->vlan_features |= flags;
#endif
}

static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev is zeroed by alloc_etherdev_mq() */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

	pci_set_drvdata(pdev, dev);

	rc = bnx2_request_firmware(bp);
	if (rc)
		goto error;

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);

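	/* Advertise checksum, scatter-gather and TSO offloads; the IPv6
	 * checksum and TSO6 variants are only supported on the 5709.
	 */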
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_IPV6_CSUM;
		vlan_features_add(dev, NETIF_F_IPV6_CSUM);
	}
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_TSO6;
		vlan_features_add(dev, NETIF_F_TSO6);
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
	       "IRQ %d, node addr %pM\n",
	       dev->name,
	       board_info[ent->driver_data].name,
	       ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
	       ((CHIP_ID(bp) & 0x0ff0) >> 4),
	       bnx2_bus_string(bp, str),
	       dev->base_addr,
	       bp->pdev->irq, dev->dev_addr);

	return 0;

error:
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return rc;
}

static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	kfree(bp->temp_stats_blk);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or
	 * not.  MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}

static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}

/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		bnx2_init_nic(bp, 1);
	}

	rtnl_unlock();
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us
 * that it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp);

	netif_device_attach(dev);
	rtnl_unlock();
}

static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};

static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};

static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);