/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan (mchan@broadcom.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>

#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define DRV_MODULE_VERSION	"2.0.8"
#define DRV_MODULE_RELDATE	"Feb 15, 2010"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-5.0.0.j6.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-5.0.0.j9.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-5.0.0.j10.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
};

static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};

static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
184 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
189 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static void bnx2_init_napi(struct bnx2 *bp);

static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
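
/* Example of the wrap handling above (illustrative values, not from the
 * driver): with tx_prod = 0x0001 and tx_cons = 0xff01, the unsigned
 * subtraction underflows, the 0xffff mask recovers a difference of
 * 0x100 (TX_DESC_CNT), and the clamp to MAX_TX_DESC_CNT keeps the one
 * skipped index from being counted as a usable entry.
 */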

static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
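
/* The two helpers above implement indirect register access: the target
 * offset is first latched into the PCICFG window address register, then
 * the data moves through the window register itself.  indirect_lock
 * serializes this two-step sequence against other users of the window.
 */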

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
}

static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
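
/* On the 5709, context memory is written through a request/acknowledge
 * register pair: the loop above polls BNX2_CTX_CTX_CTRL_WRITE_REQ for
 * up to 5 * 5 usec before giving up silently.  Older chips expose the
 * context directly through BNX2_CTX_DATA_ADR/BNX2_CTX_DATA instead.
 */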

#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}

static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}

struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);
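
/* bnx2_cnic_probe() is the handoff point to the cnic offload module:
 * it publishes the PCI device, the register window and the three
 * driver callbacks (drv_ctl, drv_register_cnic, drv_unregister_cnic)
 * through a single cnic_eth_dev structure shared with that module.
 */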

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif

static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
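
/* MDIO access pattern shared by the two routines above: the PHY address
 * goes in bits 25:21 and the register number in bits 20:16 of
 * BNX2_EMAC_MDIO_COMM, and completion is detected by polling START_BUSY
 * for up to 50 * 10 usec.  Hardware auto-polling is parked around the
 * access so the MAC does not drive the MDIO bus at the same time.
 */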

static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		int i;

		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		/* prevent tx timeout */
		for (i = 0; i < bp->dev->num_tx_queues; i++) {
			struct netdev_queue *txq;

			txq = netdev_get_tx_queue(bp->dev, i);
			txq->trans_start = jiffies;
		}
	}
	bnx2_disable_int_sync(bp);
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			bnx2_cnic_start(bp);
		}
	}
}
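
/* intr_sem pairs bnx2_disable_int_sync() with bnx2_netif_start(): each
 * stop increments the count, and interrupts, NAPI and the tx queues are
 * only re-armed when the matching start brings it back to zero, so
 * nested stop/start sequences cannot re-enable the device early.
 */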

static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
					    txr->tx_desc_ring,
					    txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_desc_ring[j],
						    rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_pg_desc_ring[j],
						    rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}

static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
					     &txr->tx_desc_mapping);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						&rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
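
/* Layout of the combined allocation above: the status block(s) come
 * first, one BNX2_SBLK_MSIX_ALIGN_SIZE slot per vector on MSI-X capable
 * chips, and the statistics block follows at status_blk +
 * status_blk_size, sharing the same DMA mapping.
 */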

static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper"));
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
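
/* Worked example of the resolution above (illustrative): if both sides
 * advertise symmetric PAUSE (ADVERTISE_PAUSE_CAP), flow control
 * resolves to FLOW_CTRL_TX | FLOW_CTRL_RX.  If the local side
 * advertises only ASYM while the peer advertises CAP | ASYM, only
 * FLOW_CTRL_TX is enabled, matching Table 28B-3.
 */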

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
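
/* Example of the watermark math above (illustrative ring size): with
 * TX flow control enabled and a 255-entry RX ring, lo_water starts at
 * the default mark, hi_water = min(255 / 4, lo_water + 16), and both
 * are then scaled down, with hi_water clipped to its 4-bit field in
 * the context type word.
 */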

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}

static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}

static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}
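
/* Mapping used above: the requested FLOW_CTRL_RX/TX bits are translated
 * into 1000BASE-X pause advertisement bits on SerDes PHYs and into the
 * copper PAUSE_CAP/PAUSE_ASYM bits otherwise, feeding the resolution
 * rules applied in bnx2_resolve_flow_ctrl().
 */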

static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}

static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}
2000
2001 bp->flow_ctrl = 0;
2002 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2003 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2004 if (bp->duplex == DUPLEX_FULL)
2005 bp->flow_ctrl = bp->req_flow_ctrl;
2006 } else {
2007 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2008 bp->flow_ctrl |= FLOW_CTRL_TX;
2009 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2010 bp->flow_ctrl |= FLOW_CTRL_RX;
2011 }
2012
2013 old_port = bp->phy_port;
2014 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2015 bp->phy_port = PORT_FIBRE;
2016 else
2017 bp->phy_port = PORT_TP;
2018
2019 if (old_port != bp->phy_port)
2020 bnx2_set_default_link(bp);
2021
2022 }
2023 if (bp->link_up != link_up)
2024 bnx2_report_link(bp);
2025
2026 bnx2_set_mac_link(bp);
2027 }
2028
2029 static int
2030 bnx2_set_remote_link(struct bnx2 *bp)
2031 {
2032 u32 evt_code;
2033
2034 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2035 switch (evt_code) {
2036 case BNX2_FW_EVT_CODE_LINK_EVENT:
2037 bnx2_remote_phy_event(bp);
2038 break;
2039 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2040 default:
2041 bnx2_send_heart_beat(bp);
2042 break;
2043 }
2044 return 0;
2045 }
2046
2047 static int
2048 bnx2_setup_copper_phy(struct bnx2 *bp)
2049 __releases(&bp->phy_lock)
2050 __acquires(&bp->phy_lock)
2051 {
2052 u32 bmcr;
2053 u32 new_bmcr;
2054
2055 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2056
2057 if (bp->autoneg & AUTONEG_SPEED) {
2058 u32 adv_reg, adv1000_reg;
2059 u32 new_adv_reg = 0;
2060 u32 new_adv1000_reg = 0;
2061
2062 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2063 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2064 ADVERTISE_PAUSE_ASYM);
2065
2066 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2067 adv1000_reg &= PHY_ALL_1000_SPEED;
2068
2069 if (bp->advertising & ADVERTISED_10baseT_Half)
2070 new_adv_reg |= ADVERTISE_10HALF;
2071 if (bp->advertising & ADVERTISED_10baseT_Full)
2072 new_adv_reg |= ADVERTISE_10FULL;
2073 if (bp->advertising & ADVERTISED_100baseT_Half)
2074 new_adv_reg |= ADVERTISE_100HALF;
2075 if (bp->advertising & ADVERTISED_100baseT_Full)
2076 new_adv_reg |= ADVERTISE_100FULL;
2077 if (bp->advertising & ADVERTISED_1000baseT_Full)
2078 new_adv1000_reg |= ADVERTISE_1000FULL;
2079
2080 new_adv_reg |= ADVERTISE_CSMA;
2081
2082 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
2083
2084 if ((adv1000_reg != new_adv1000_reg) ||
2085 (adv_reg != new_adv_reg) ||
2086 ((bmcr & BMCR_ANENABLE) == 0)) {
2087
2088 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
2089 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
2090 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2091 BMCR_ANENABLE);
2092 }
2093 else if (bp->link_up) {
2094 /* Flow ctrl may have changed from auto to forced,
2095 * or vice-versa. */
2096
2097 bnx2_resolve_flow_ctrl(bp);
2098 bnx2_set_mac_link(bp);
2099 }
2100 return 0;
2101 }
2102
2103 new_bmcr = 0;
2104 if (bp->req_line_speed == SPEED_100) {
2105 new_bmcr |= BMCR_SPEED100;
2106 }
2107 if (bp->req_duplex == DUPLEX_FULL) {
2108 new_bmcr |= BMCR_FULLDPLX;
2109 }
2110 if (new_bmcr != bmcr) {
2111 u32 bmsr;
2112
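/* Read BMSR twice; the link-status bit is latched, so the
 * first read can return a stale value.
 */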
2113 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2114 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2115
2116 if (bmsr & BMSR_LSTATUS) {
2117 /* Force link down */
2118 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2119 spin_unlock_bh(&bp->phy_lock);
2120 msleep(50);
2121 spin_lock_bh(&bp->phy_lock);
2122
2123 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2124 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2125 }
2126
2127 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2128
2129 /* Normally, the new speed is set up after the link has
2130 * gone down and up again. In some cases, link will not go
2131 * down so we need to set up the new speed here.
2132 */
2133 if (bmsr & BMSR_LSTATUS) {
2134 bp->line_speed = bp->req_line_speed;
2135 bp->duplex = bp->req_duplex;
2136 bnx2_resolve_flow_ctrl(bp);
2137 bnx2_set_mac_link(bp);
2138 }
2139 } else {
2140 bnx2_resolve_flow_ctrl(bp);
2141 bnx2_set_mac_link(bp);
2142 }
2143 return 0;
2144 }
2145
2146 static int
2147 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2148 __releases(&bp->phy_lock)
2149 __acquires(&bp->phy_lock)
2150 {
2151 if (bp->loopback == MAC_LOOPBACK)
2152 return 0;
2153
2154 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2155 return bnx2_setup_serdes_phy(bp, port);
2156 }
2157 else {
2158 return bnx2_setup_copper_phy(bp);
2159 }
2160 }
2161
2162 static int
2163 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2164 {
2165 u32 val;
2166
2167 bp->mii_bmcr = MII_BMCR + 0x10;
2168 bp->mii_bmsr = MII_BMSR + 0x10;
2169 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2170 bp->mii_adv = MII_ADVERTISE + 0x10;
2171 bp->mii_lpa = MII_LPA + 0x10;
2172 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2173
2174 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2175 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2176
2177 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2178 if (reset_phy)
2179 bnx2_reset_phy(bp);
2180
2181 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2182
2183 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2184 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2185 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2186 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2187
2188 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2189 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2190 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2191 val |= BCM5708S_UP1_2G5;
2192 else
2193 val &= ~BCM5708S_UP1_2G5;
2194 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2195
2196 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2197 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2198 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2199 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2200
2201 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2202
2203 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2204 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2205 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2206
2207 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2208
2209 return 0;
2210 }
2211
2212 static int
2213 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2214 {
2215 u32 val;
2216
2217 if (reset_phy)
2218 bnx2_reset_phy(bp);
2219
2220 bp->mii_up1 = BCM5708S_UP1;
2221
2222 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2223 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2224 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2225
2226 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2227 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2228 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2229
2230 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2231 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2232 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2233
2234 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2235 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2236 val |= BCM5708S_UP1_2G5;
2237 bnx2_write_phy(bp, BCM5708S_UP1, val);
2238 }
2239
2240 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2241 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2242 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2243 /* increase tx signal amplitude */
2244 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2245 BCM5708S_BLK_ADDR_TX_MISC);
2246 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2247 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2248 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2249 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2250 }
2251
2252 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2253 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2254
2255 if (val) {
2256 u32 is_backplane;
2257
2258 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2259 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2260 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2261 BCM5708S_BLK_ADDR_TX_MISC);
2262 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2263 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2264 BCM5708S_BLK_ADDR_DIG);
2265 }
2266 }
2267 return 0;
2268 }
2269
2270 static int
2271 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2272 {
2273 if (reset_phy)
2274 bnx2_reset_phy(bp);
2275
2276 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2277
2278 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2279 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2280
2281 if (bp->dev->mtu > 1500) {
2282 u32 val;
2283
2284 /* Set extended packet length bit */
2285 bnx2_write_phy(bp, 0x18, 0x7);
2286 bnx2_read_phy(bp, 0x18, &val);
2287 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2288
2289 bnx2_write_phy(bp, 0x1c, 0x6c00);
2290 bnx2_read_phy(bp, 0x1c, &val);
2291 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2292 }
2293 else {
2294 u32 val;
2295
2296 bnx2_write_phy(bp, 0x18, 0x7);
2297 bnx2_read_phy(bp, 0x18, &val);
2298 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2299
2300 bnx2_write_phy(bp, 0x1c, 0x6c00);
2301 bnx2_read_phy(bp, 0x1c, &val);
2302 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2303 }
2304
2305 return 0;
2306 }
2307
2308 static int
2309 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2310 {
2311 u32 val;
2312
2313 if (reset_phy)
2314 bnx2_reset_phy(bp);
2315
2316 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2317 bnx2_write_phy(bp, 0x18, 0x0c00);
2318 bnx2_write_phy(bp, 0x17, 0x000a);
2319 bnx2_write_phy(bp, 0x15, 0x310b);
2320 bnx2_write_phy(bp, 0x17, 0x201f);
2321 bnx2_write_phy(bp, 0x15, 0x9506);
2322 bnx2_write_phy(bp, 0x17, 0x401f);
2323 bnx2_write_phy(bp, 0x15, 0x14e2);
2324 bnx2_write_phy(bp, 0x18, 0x0400);
2325 }
2326
2327 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2328 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2329 MII_BNX2_DSP_EXPAND_REG | 0x8);
2330 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2331 val &= ~(1 << 8);
2332 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2333 }
2334
2335 if (bp->dev->mtu > 1500) {
2336 /* Set extended packet length bit */
2337 bnx2_write_phy(bp, 0x18, 0x7);
2338 bnx2_read_phy(bp, 0x18, &val);
2339 bnx2_write_phy(bp, 0x18, val | 0x4000);
2340
2341 bnx2_read_phy(bp, 0x10, &val);
2342 bnx2_write_phy(bp, 0x10, val | 0x1);
2343 }
2344 else {
2345 bnx2_write_phy(bp, 0x18, 0x7);
2346 bnx2_read_phy(bp, 0x18, &val);
2347 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2348
2349 bnx2_read_phy(bp, 0x10, &val);
2350 bnx2_write_phy(bp, 0x10, val & ~0x1);
2351 }
2352
2353 /* ethernet@wirespeed */
2354 bnx2_write_phy(bp, 0x18, 0x7007);
2355 bnx2_read_phy(bp, 0x18, &val);
2356 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2357 return 0;
2358 }
2359
2360
2361 static int
2362 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2363 __releases(&bp->phy_lock)
2364 __acquires(&bp->phy_lock)
2365 {
2366 u32 val;
2367 int rc = 0;
2368
2369 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2370 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2371
2372 bp->mii_bmcr = MII_BMCR;
2373 bp->mii_bmsr = MII_BMSR;
2374 bp->mii_bmsr1 = MII_BMSR;
2375 bp->mii_adv = MII_ADVERTISE;
2376 bp->mii_lpa = MII_LPA;
2377
2378 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2379
2380 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2381 goto setup_phy;
2382
2383 bnx2_read_phy(bp, MII_PHYSID1, &val);
2384 bp->phy_id = val << 16;
2385 bnx2_read_phy(bp, MII_PHYSID2, &val);
2386 bp->phy_id |= val & 0xffff;
2387
2388 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2389 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2390 rc = bnx2_init_5706s_phy(bp, reset_phy);
2391 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2392 rc = bnx2_init_5708s_phy(bp, reset_phy);
2393 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2394 rc = bnx2_init_5709s_phy(bp, reset_phy);
2395 }
2396 else {
2397 rc = bnx2_init_copper_phy(bp, reset_phy);
2398 }
2399
2400 setup_phy:
2401 if (!rc)
2402 rc = bnx2_setup_phy(bp, bp->phy_port);
2403
2404 return rc;
2405 }
2406
2407 static int
2408 bnx2_set_mac_loopback(struct bnx2 *bp)
2409 {
2410 u32 mac_mode;
2411
2412 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2413 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2414 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2415 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2416 bp->link_up = 1;
2417 return 0;
2418 }
2419
2420 static int bnx2_test_link(struct bnx2 *);
2421
2422 static int
2423 bnx2_set_phy_loopback(struct bnx2 *bp)
2424 {
2425 u32 mac_mode;
2426 int rc, i;
2427
2428 spin_lock_bh(&bp->phy_lock);
2429 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2430 BMCR_SPEED1000);
2431 spin_unlock_bh(&bp->phy_lock);
2432 if (rc)
2433 return rc;
2434
2435 for (i = 0; i < 10; i++) {
2436 if (bnx2_test_link(bp) == 0)
2437 break;
2438 msleep(100);
2439 }
2440
2441 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2442 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2443 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2444 BNX2_EMAC_MODE_25G_MODE);
2445
2446 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2447 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2448 bp->link_up = 1;
2449 return 0;
2450 }
2451
2452 static int
2453 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2454 {
2455 int i;
2456 u32 val;
2457
2458 bp->fw_wr_seq++;
2459 msg_data |= bp->fw_wr_seq;
2460
2461 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2462
2463 if (!ack)
2464 return 0;
2465
2466 /* Wait for an acknowledgement. */
2467 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2468 msleep(10);
2469
2470 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2471
2472 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2473 break;
2474 }
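/* WAIT0 messages succeed whether or not the firmware acked in time. */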
2475 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2476 return 0;
2477
2478 /* If we timed out, inform the firmware that this is the case. */
2479 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2480 if (!silent)
2481 pr_err("fw sync timeout, reset code = %x\n", msg_data);
2482
2483 msg_data &= ~BNX2_DRV_MSG_CODE;
2484 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2485
2486 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2487
2488 return -EBUSY;
2489 }
2490
2491 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2492 return -EIO;
2493
2494 return 0;
2495 }
2496
2497 static int
2498 bnx2_init_5709_context(struct bnx2 *bp)
2499 {
2500 int i, ret = 0;
2501 u32 val;
2502
2503 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2504 val |= (BCM_PAGE_BITS - 8) << 16;
2505 REG_WR(bp, BNX2_CTX_COMMAND, val);
2506 for (i = 0; i < 10; i++) {
2507 val = REG_RD(bp, BNX2_CTX_COMMAND);
2508 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2509 break;
2510 udelay(2);
2511 }
2512 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2513 return -EBUSY;
2514
2515 for (i = 0; i < bp->ctx_pages; i++) {
2516 int j;
2517
2518 if (bp->ctx_blk[i])
2519 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2520 else
2521 return -ENOMEM;
2522
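/* Program this page's entry in the host page table: low
 * address with the valid bit, then the high address, then
 * trigger the write.
 */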
2523 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2524 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2525 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2526 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2527 (u64) bp->ctx_blk_mapping[i] >> 32);
2528 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2529 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2530 for (j = 0; j < 10; j++) {
2531
2532 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2533 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2534 break;
2535 udelay(5);
2536 }
2537 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2538 ret = -EBUSY;
2539 break;
2540 }
2541 }
2542 return ret;
2543 }
2544
2545 static void
2546 bnx2_init_context(struct bnx2 *bp)
2547 {
2548 u32 vcid;
2549
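/* Zero out all 96 contexts, one at a time. */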
2550 vcid = 96;
2551 while (vcid) {
2552 u32 vcid_addr, pcid_addr, offset;
2553 int i;
2554
2555 vcid--;
2556
2557 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2558 u32 new_vcid;
2559
2560 vcid_addr = GET_PCID_ADDR(vcid);
2561 if (vcid & 0x8) {
2562 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2563 }
2564 else {
2565 new_vcid = vcid;
2566 }
2567 pcid_addr = GET_PCID_ADDR(new_vcid);
2568 }
2569 else {
2570 vcid_addr = GET_CID_ADDR(vcid);
2571 pcid_addr = vcid_addr;
2572 }
2573
2574 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2575 vcid_addr += (i << PHY_CTX_SHIFT);
2576 pcid_addr += (i << PHY_CTX_SHIFT);
2577
2578 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2579 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2580
2581 /* Zero out the context. */
2582 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2583 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2584 }
2585 }
2586 }
2587
2588 static int
2589 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2590 {
2591 u16 *good_mbuf;
2592 u32 good_mbuf_cnt;
2593 u32 val;
2594
2595 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2596 if (good_mbuf == NULL) {
2597 pr_err("Failed to allocate memory in %s\n", __func__);
2598 return -ENOMEM;
2599 }
2600
2601 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2602 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2603
2604 good_mbuf_cnt = 0;
2605
2606 /* Allocate a bunch of mbufs and save the good ones in an array. */
2607 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2608 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2609 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2610 BNX2_RBUF_COMMAND_ALLOC_REQ);
2611
2612 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2613
2614 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2615
2616 /* The addresses with Bit 9 set are bad memory blocks. */
2617 if (!(val & (1 << 9))) {
2618 good_mbuf[good_mbuf_cnt] = (u16) val;
2619 good_mbuf_cnt++;
2620 }
2621
2622 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2623 }
2624
2625 /* Free the good ones back to the mbuf pool, thus discarding
2626 * all the bad ones. */
2627 while (good_mbuf_cnt) {
2628 good_mbuf_cnt--;
2629
2630 val = good_mbuf[good_mbuf_cnt];
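/* Duplicate the mbuf value into bits 9 and up and set the
 * low bit before handing it back to the firmware.
 */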
2631 val = (val << 9) | val | 1;
2632
2633 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2634 }
2635 kfree(good_mbuf);
2636 return 0;
2637 }
2638
2639 static void
2640 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2641 {
2642 u32 val;
2643
2644 val = (mac_addr[0] << 8) | mac_addr[1];
2645
2646 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2647
2648 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2649 (mac_addr[4] << 8) | mac_addr[5];
2650
2651 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2652 }
2653
2654 static inline int
2655 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2656 {
2657 dma_addr_t mapping;
2658 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2659 struct rx_bd *rxbd =
2660 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2661 struct page *page = alloc_page(GFP_ATOMIC);
2662
2663 if (!page)
2664 return -ENOMEM;
2665 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2666 PCI_DMA_FROMDEVICE);
2667 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2668 __free_page(page);
2669 return -EIO;
2670 }
2671
2672 rx_pg->page = page;
2673 pci_unmap_addr_set(rx_pg, mapping, mapping);
2674 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2675 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2676 return 0;
2677 }
2678
2679 static void
2680 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2681 {
2682 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2683 struct page *page = rx_pg->page;
2684
2685 if (!page)
2686 return;
2687
2688 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2689 PCI_DMA_FROMDEVICE);
2690
2691 __free_page(page);
2692 rx_pg->page = NULL;
2693 }
2694
2695 static inline int
2696 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2697 {
2698 struct sk_buff *skb;
2699 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2700 dma_addr_t mapping;
2701 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2702 unsigned long align;
2703
2704 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2705 if (skb == NULL) {
2706 return -ENOMEM;
2707 }
2708
2709 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2710 skb_reserve(skb, BNX2_RX_ALIGN - align);
2711
2712 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2713 PCI_DMA_FROMDEVICE);
2714 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2715 dev_kfree_skb(skb);
2716 return -EIO;
2717 }
2718
2719 rx_buf->skb = skb;
2720 pci_unmap_addr_set(rx_buf, mapping, mapping);
2721
2722 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2723 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2724
2725 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2726
2727 return 0;
2728 }
2729
2730 static int
2731 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2732 {
2733 struct status_block *sblk = bnapi->status_blk.msi;
2734 u32 new_link_state, old_link_state;
2735 int is_set = 1;
2736
2737 new_link_state = sblk->status_attn_bits & event;
2738 old_link_state = sblk->status_attn_bits_ack & event;
2739 if (new_link_state != old_link_state) {
2740 if (new_link_state)
2741 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2742 else
2743 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2744 } else
2745 is_set = 0;
2746
2747 return is_set;
2748 }
2749
2750 static void
2751 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2752 {
2753 spin_lock(&bp->phy_lock);
2754
2755 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2756 bnx2_set_link(bp);
2757 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2758 bnx2_set_remote_link(bp);
2759
2760 spin_unlock(&bp->phy_lock);
2761
2762 }
2763
2764 static inline u16
2765 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2766 {
2767 u16 cons;
2768
2769 /* Tell compiler that status block fields can change. */
2770 barrier();
2771 cons = *bnapi->hw_tx_cons_ptr;
2772 barrier();
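/* The last BD of each ring page links to the next page; skip it. */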
2773 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2774 cons++;
2775 return cons;
2776 }
2777
2778 static int
2779 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2780 {
2781 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2782 u16 hw_cons, sw_cons, sw_ring_cons;
2783 int tx_pkt = 0, index;
2784 struct netdev_queue *txq;
2785
2786 index = (bnapi - bp->bnx2_napi);
2787 txq = netdev_get_tx_queue(bp->dev, index);
2788
2789 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2790 sw_cons = txr->tx_cons;
2791
2792 while (sw_cons != hw_cons) {
2793 struct sw_tx_bd *tx_buf;
2794 struct sk_buff *skb;
2795 int i, last;
2796
2797 sw_ring_cons = TX_RING_IDX(sw_cons);
2798
2799 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2800 skb = tx_buf->skb;
2801
2802 /* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2803 prefetch(&skb->end);
2804
2805 /* partial BD completions possible with TSO packets */
2806 if (tx_buf->is_gso) {
2807 u16 last_idx, last_ring_idx;
2808
2809 last_idx = sw_cons + tx_buf->nr_frags + 1;
2810 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2811 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2812 last_idx++;
2813 }
2814 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2815 break;
2816 }
2817 }
2818
2819 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2820 skb_headlen(skb), PCI_DMA_TODEVICE);
2821
2822 tx_buf->skb = NULL;
2823 last = tx_buf->nr_frags;
2824
2825 for (i = 0; i < last; i++) {
2826 sw_cons = NEXT_TX_BD(sw_cons);
2827
2828 pci_unmap_page(bp->pdev,
2829 pci_unmap_addr(
2830 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2831 mapping),
2832 skb_shinfo(skb)->frags[i].size,
2833 PCI_DMA_TODEVICE);
2834 }
2835
2836 sw_cons = NEXT_TX_BD(sw_cons);
2837
2838 dev_kfree_skb(skb);
2839 tx_pkt++;
2840 if (tx_pkt == budget)
2841 break;
2842
2843 if (hw_cons == sw_cons)
2844 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2845 }
2846
2847 txr->hw_tx_cons = hw_cons;
2848 txr->tx_cons = sw_cons;
2849
2850 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2851 * before checking for netif_tx_queue_stopped(). Without the
2852 * memory barrier, there is a small possibility that bnx2_start_xmit()
2853 * will miss it and cause the queue to be stopped forever.
2854 */
2855 smp_mb();
2856
2857 if (unlikely(netif_tx_queue_stopped(txq)) &&
2858 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2859 __netif_tx_lock(txq, smp_processor_id());
2860 if ((netif_tx_queue_stopped(txq)) &&
2861 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2862 netif_tx_wake_queue(txq);
2863 __netif_tx_unlock(txq);
2864 }
2865
2866 return tx_pkt;
2867 }
2868
2869 static void
2870 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2871 struct sk_buff *skb, int count)
2872 {
2873 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2874 struct rx_bd *cons_bd, *prod_bd;
2875 int i;
2876 u16 hw_prod, prod;
2877 u16 cons = rxr->rx_pg_cons;
2878
2879 cons_rx_pg = &rxr->rx_pg_ring[cons];
2880
2881 /* The caller was unable to allocate a new page to replace the
2882 * last one in the frags array, so we need to recycle that page
2883 * and then free the skb.
2884 */
2885 if (skb) {
2886 struct page *page;
2887 struct skb_shared_info *shinfo;
2888
2889 shinfo = skb_shinfo(skb);
2890 shinfo->nr_frags--;
2891 page = shinfo->frags[shinfo->nr_frags].page;
2892 shinfo->frags[shinfo->nr_frags].page = NULL;
2893
2894 cons_rx_pg->page = page;
2895 dev_kfree_skb(skb);
2896 }
2897
2898 hw_prod = rxr->rx_pg_prod;
2899
2900 for (i = 0; i < count; i++) {
2901 prod = RX_PG_RING_IDX(hw_prod);
2902
2903 prod_rx_pg = &rxr->rx_pg_ring[prod];
2904 cons_rx_pg = &rxr->rx_pg_ring[cons];
2905 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2906 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2907
2908 if (prod != cons) {
2909 prod_rx_pg->page = cons_rx_pg->page;
2910 cons_rx_pg->page = NULL;
2911 pci_unmap_addr_set(prod_rx_pg, mapping,
2912 pci_unmap_addr(cons_rx_pg, mapping));
2913
2914 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2915 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2916
2917 }
2918 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2919 hw_prod = NEXT_RX_BD(hw_prod);
2920 }
2921 rxr->rx_pg_prod = hw_prod;
2922 rxr->rx_pg_cons = cons;
2923 }
2924
2925 static inline void
2926 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2927 struct sk_buff *skb, u16 cons, u16 prod)
2928 {
2929 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2930 struct rx_bd *cons_bd, *prod_bd;
2931
2932 cons_rx_buf = &rxr->rx_buf_ring[cons];
2933 prod_rx_buf = &rxr->rx_buf_ring[prod];
2934
2935 pci_dma_sync_single_for_device(bp->pdev,
2936 pci_unmap_addr(cons_rx_buf, mapping),
2937 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2938
2939 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2940
2941 prod_rx_buf->skb = skb;
2942
2943 if (cons == prod)
2944 return;
2945
2946 pci_unmap_addr_set(prod_rx_buf, mapping,
2947 pci_unmap_addr(cons_rx_buf, mapping));
2948
2949 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2950 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2951 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2952 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2953 }
2954
2955 static int
2956 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2957 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2958 u32 ring_idx)
2959 {
2960 int err;
2961 u16 prod = ring_idx & 0xffff;
2962
2963 err = bnx2_alloc_rx_skb(bp, rxr, prod);
2964 if (unlikely(err)) {
2965 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
2966 if (hdr_len) {
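/* len excludes the 4-byte FCS; add it back to count the pages in use. */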
2967 unsigned int raw_len = len + 4;
2968 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2969
2970 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2971 }
2972 return err;
2973 }
2974
2975 skb_reserve(skb, BNX2_RX_OFFSET);
2976 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2977 PCI_DMA_FROMDEVICE);
2978
2979 if (hdr_len == 0) {
2980 skb_put(skb, len);
2981 return 0;
2982 } else {
2983 unsigned int i, frag_len, frag_size, pages;
2984 struct sw_pg *rx_pg;
2985 u16 pg_cons = rxr->rx_pg_cons;
2986 u16 pg_prod = rxr->rx_pg_prod;
2987
2988 frag_size = len + 4 - hdr_len;
2989 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2990 skb_put(skb, hdr_len);
2991
2992 for (i = 0; i < pages; i++) {
2993 dma_addr_t mapping_old;
2994
2995 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
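/* What's left is only FCS that spilled across a page
 * boundary: recycle the remaining pages and trim the FCS
 * bytes already placed in the previous fragment.
 */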
2996 if (unlikely(frag_len <= 4)) {
2997 unsigned int tail = 4 - frag_len;
2998
2999 rxr->rx_pg_cons = pg_cons;
3000 rxr->rx_pg_prod = pg_prod;
3001 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3002 pages - i);
3003 skb->len -= tail;
3004 if (i == 0) {
3005 skb->tail -= tail;
3006 } else {
3007 skb_frag_t *frag =
3008 &skb_shinfo(skb)->frags[i - 1];
3009 frag->size -= tail;
3010 skb->data_len -= tail;
3011 skb->truesize -= tail;
3012 }
3013 return 0;
3014 }
3015 rx_pg = &rxr->rx_pg_ring[pg_cons];
3016
3017 /* Don't unmap yet. If we're unable to allocate a new
3018 * page, we need to recycle the page and the DMA addr.
3019 */
3020 mapping_old = pci_unmap_addr(rx_pg, mapping);
3021 if (i == pages - 1)
3022 frag_len -= 4;
3023
3024 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3025 rx_pg->page = NULL;
3026
3027 err = bnx2_alloc_rx_page(bp, rxr,
3028 RX_PG_RING_IDX(pg_prod));
3029 if (unlikely(err)) {
3030 rxr->rx_pg_cons = pg_cons;
3031 rxr->rx_pg_prod = pg_prod;
3032 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3033 pages - i);
3034 return err;
3035 }
3036
3037 pci_unmap_page(bp->pdev, mapping_old,
3038 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3039
3040 frag_size -= frag_len;
3041 skb->data_len += frag_len;
3042 skb->truesize += frag_len;
3043 skb->len += frag_len;
3044
3045 pg_prod = NEXT_RX_BD(pg_prod);
3046 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3047 }
3048 rxr->rx_pg_prod = pg_prod;
3049 rxr->rx_pg_cons = pg_cons;
3050 }
3051 return 0;
3052 }
3053
3054 static inline u16
3055 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3056 {
3057 u16 cons;
3058
3059 /* Tell compiler that status block fields can change. */
3060 barrier();
3061 cons = *bnapi->hw_rx_cons_ptr;
3062 barrier();
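/* The last BD of each ring page links to the next page; skip it. */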
3063 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3064 cons++;
3065 return cons;
3066 }
3067
3068 static int
3069 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3070 {
3071 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3072 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3073 struct l2_fhdr *rx_hdr;
3074 int rx_pkt = 0, pg_ring_used = 0;
3075
3076 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3077 sw_cons = rxr->rx_cons;
3078 sw_prod = rxr->rx_prod;
3079
3080 /* Memory barrier necessary as speculative reads of the rx
3081 * buffer can be ahead of the index in the status block
3082 */
3083 rmb();
3084 while (sw_cons != hw_cons) {
3085 unsigned int len, hdr_len;
3086 u32 status;
3087 struct sw_bd *rx_buf;
3088 struct sk_buff *skb;
3089 dma_addr_t dma_addr;
3090 u16 vtag = 0;
3091 int hw_vlan __maybe_unused = 0;
3092
3093 sw_ring_cons = RX_RING_IDX(sw_cons);
3094 sw_ring_prod = RX_RING_IDX(sw_prod);
3095
3096 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3097 skb = rx_buf->skb;
3098
3099 rx_buf->skb = NULL;
3100
3101 dma_addr = pci_unmap_addr(rx_buf, mapping);
3102
3103 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
3104 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3105 PCI_DMA_FROMDEVICE);
3106
3107 rx_hdr = (struct l2_fhdr *) skb->data;
3108 len = rx_hdr->l2_fhdr_pkt_len;
3109 status = rx_hdr->l2_fhdr_status;
3110
3111 hdr_len = 0;
3112 if (status & L2_FHDR_STATUS_SPLIT) {
3113 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3114 pg_ring_used = 1;
3115 } else if (len > bp->rx_jumbo_thresh) {
3116 hdr_len = bp->rx_jumbo_thresh;
3117 pg_ring_used = 1;
3118 }
3119
3120 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3121 L2_FHDR_ERRORS_PHY_DECODE |
3122 L2_FHDR_ERRORS_ALIGNMENT |
3123 L2_FHDR_ERRORS_TOO_SHORT |
3124 L2_FHDR_ERRORS_GIANT_FRAME))) {
3125
3126 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3127 sw_ring_prod);
3128 if (pg_ring_used) {
3129 int pages;
3130
3131 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3132
3133 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3134 }
3135 goto next_rx;
3136 }
3137
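/* Strip the 4-byte FCS. */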
3138 len -= 4;
3139
3140 if (len <= bp->rx_copy_thresh) {
3141 struct sk_buff *new_skb;
3142
3143 new_skb = netdev_alloc_skb(bp->dev, len + 6);
3144 if (new_skb == NULL) {
3145 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3146 sw_ring_prod);
3147 goto next_rx;
3148 }
3149
3150 /* aligned copy */
3151 skb_copy_from_linear_data_offset(skb,
3152 BNX2_RX_OFFSET - 6,
3153 new_skb->data, len + 6);
3154 skb_reserve(new_skb, 6);
3155 skb_put(new_skb, len);
3156
3157 bnx2_reuse_rx_skb(bp, rxr, skb,
3158 sw_ring_cons, sw_ring_prod);
3159
3160 skb = new_skb;
3161 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
3162 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
3163 goto next_rx;
3164
3165 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3166 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
3167 vtag = rx_hdr->l2_fhdr_vlan_tag;
3168 #ifdef BCM_VLAN
3169 if (bp->vlgrp)
3170 hw_vlan = 1;
3171 else
3172 #endif
3173 {
3174 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
3175 __skb_push(skb, 4);
3176
3177 memmove(ve, skb->data + 4, ETH_ALEN * 2);
3178 ve->h_vlan_proto = htons(ETH_P_8021Q);
3179 ve->h_vlan_TCI = htons(vtag);
3180 len += 4;
3181 }
3182 }
3183
3184 skb->protocol = eth_type_trans(skb, bp->dev);
3185
3186 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3187 (ntohs(skb->protocol) != ETH_P_8021Q)) {
3188
3189 dev_kfree_skb(skb);
3190 goto next_rx;
3191
3192 }
3193
3194 skb->ip_summed = CHECKSUM_NONE;
3195 if (bp->rx_csum &&
3196 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3197 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3198
3199 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3200 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3201 skb->ip_summed = CHECKSUM_UNNECESSARY;
3202 }
3203
3204 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3205
3206 #ifdef BCM_VLAN
3207 if (hw_vlan)
3208 vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
3209 else
3210 #endif
3211 netif_receive_skb(skb);
3212
3213 rx_pkt++;
3214
3215 next_rx:
3216 sw_cons = NEXT_RX_BD(sw_cons);
3217 sw_prod = NEXT_RX_BD(sw_prod);
3218
3219 if (rx_pkt == budget)
3220 break;
3221
3222 /* Refresh hw_cons to see if there is new work */
3223 if (sw_cons == hw_cons) {
3224 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3225 rmb();
3226 }
3227 }
3228 rxr->rx_cons = sw_cons;
3229 rxr->rx_prod = sw_prod;
3230
3231 if (pg_ring_used)
3232 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3233
3234 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3235
3236 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3237
3238 mmiowb();
3239
3240 return rx_pkt;
3241
3242 }
3243
3244 /* MSI ISR - The only difference between this and the INTx ISR
3245 * is that the MSI interrupt is always serviced.
3246 */
3247 static irqreturn_t
3248 bnx2_msi(int irq, void *dev_instance)
3249 {
3250 struct bnx2_napi *bnapi = dev_instance;
3251 struct bnx2 *bp = bnapi->bp;
3252
3253 prefetch(bnapi->status_blk.msi);
3254 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3255 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3256 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3257
3258 /* Return here if interrupt is disabled. */
3259 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3260 return IRQ_HANDLED;
3261
3262 napi_schedule(&bnapi->napi);
3263
3264 return IRQ_HANDLED;
3265 }
3266
3267 static irqreturn_t
3268 bnx2_msi_1shot(int irq, void *dev_instance)
3269 {
3270 struct bnx2_napi *bnapi = dev_instance;
3271 struct bnx2 *bp = bnapi->bp;
3272
3273 prefetch(bnapi->status_blk.msi);
3274
3275 /* Return here if interrupt is disabled. */
3276 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3277 return IRQ_HANDLED;
3278
3279 napi_schedule(&bnapi->napi);
3280
3281 return IRQ_HANDLED;
3282 }
3283
3284 static irqreturn_t
3285 bnx2_interrupt(int irq, void *dev_instance)
3286 {
3287 struct bnx2_napi *bnapi = dev_instance;
3288 struct bnx2 *bp = bnapi->bp;
3289 struct status_block *sblk = bnapi->status_blk.msi;
3290
3291 /* When using INTx, it is possible for the interrupt to arrive
3292 * at the CPU before the status block posted prior to the
3293 * interrupt. Reading a register will flush the status block.
3294 * When using MSI, the MSI message will always complete after
3295 * the status block write.
3296 */
3297 if ((sblk->status_idx == bnapi->last_status_idx) &&
3298 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3299 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3300 return IRQ_NONE;
3301
3302 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3303 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3304 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3305
3306 /* Read back to deassert IRQ immediately to avoid too many
3307 * spurious interrupts.
3308 */
3309 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3310
3311 /* Return here if interrupt is shared and is disabled. */
3312 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3313 return IRQ_HANDLED;
3314
3315 if (napi_schedule_prep(&bnapi->napi)) {
3316 bnapi->last_status_idx = sblk->status_idx;
3317 __napi_schedule(&bnapi->napi);
3318 }
3319
3320 return IRQ_HANDLED;
3321 }
3322
3323 static inline int
3324 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3325 {
3326 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3327 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3328
3329 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3330 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3331 return 1;
3332 return 0;
3333 }
3334
3335 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3336 STATUS_ATTN_BITS_TIMER_ABORT)
3337
3338 static inline int
3339 bnx2_has_work(struct bnx2_napi *bnapi)
3340 {
3341 struct status_block *sblk = bnapi->status_blk.msi;
3342
3343 if (bnx2_has_fast_work(bnapi))
3344 return 1;
3345
3346 #ifdef BCM_CNIC
3347 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3348 return 1;
3349 #endif
3350
3351 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3352 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3353 return 1;
3354
3355 return 0;
3356 }
3357
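/* Workaround for missed MSIs: if there is work pending but the
 * status index has not changed since the last check, briefly
 * disable and re-enable MSI and call the handler directly.
 */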
3358 static void
3359 bnx2_chk_missed_msi(struct bnx2 *bp)
3360 {
3361 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3362 u32 msi_ctrl;
3363
3364 if (bnx2_has_work(bnapi)) {
3365 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3366 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3367 return;
3368
3369 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3370 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3371 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3372 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3373 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3374 }
3375 }
3376
3377 bp->idle_chk_status_idx = bnapi->last_status_idx;
3378 }
3379
3380 #ifdef BCM_CNIC
3381 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3382 {
3383 struct cnic_ops *c_ops;
3384
3385 if (!bnapi->cnic_present)
3386 return;
3387
3388 rcu_read_lock();
3389 c_ops = rcu_dereference(bp->cnic_ops);
3390 if (c_ops)
3391 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3392 bnapi->status_blk.msi);
3393 rcu_read_unlock();
3394 }
3395 #endif
3396
3397 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3398 {
3399 struct status_block *sblk = bnapi->status_blk.msi;
3400 u32 status_attn_bits = sblk->status_attn_bits;
3401 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3402
3403 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3404 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3405
3406 bnx2_phy_int(bp, bnapi);
3407
3408 /* This is needed to take care of transient status
3409 * during link changes.
3410 */
3411 REG_WR(bp, BNX2_HC_COMMAND,
3412 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3413 REG_RD(bp, BNX2_HC_COMMAND);
3414 }
3415 }
3416
3417 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3418 int work_done, int budget)
3419 {
3420 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3421 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3422
3423 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3424 bnx2_tx_int(bp, bnapi, 0);
3425
3426 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3427 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3428
3429 return work_done;
3430 }
3431
3432 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3433 {
3434 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3435 struct bnx2 *bp = bnapi->bp;
3436 int work_done = 0;
3437 struct status_block_msix *sblk = bnapi->status_blk.msix;
3438
3439 while (1) {
3440 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3441 if (unlikely(work_done >= budget))
3442 break;
3443
3444 bnapi->last_status_idx = sblk->status_idx;
3445 /* status idx must be read before checking for more work. */
3446 rmb();
3447 if (likely(!bnx2_has_fast_work(bnapi))) {
3448
3449 napi_complete(napi);
3450 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3451 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3452 bnapi->last_status_idx);
3453 break;
3454 }
3455 }
3456 return work_done;
3457 }
3458
3459 static int bnx2_poll(struct napi_struct *napi, int budget)
3460 {
3461 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3462 struct bnx2 *bp = bnapi->bp;
3463 int work_done = 0;
3464 struct status_block *sblk = bnapi->status_blk.msi;
3465
3466 while (1) {
3467 bnx2_poll_link(bp, bnapi);
3468
3469 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3470
3471 #ifdef BCM_CNIC
3472 bnx2_poll_cnic(bp, bnapi);
3473 #endif
3474
3475 /* bnapi->last_status_idx is used below to tell the hw how
3476 * much work has been processed, so we must read it before
3477 * checking for more work.
3478 */
3479 bnapi->last_status_idx = sblk->status_idx;
3480
3481 if (unlikely(work_done >= budget))
3482 break;
3483
3484 rmb();
3485 if (likely(!bnx2_has_work(bnapi))) {
3486 napi_complete(napi);
3487 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3488 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3489 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3490 bnapi->last_status_idx);
3491 break;
3492 }
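/* INTx: write the new index with the interrupt still masked,
 * then rearm with a second write.
 */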
3493 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3494 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3495 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3496 bnapi->last_status_idx);
3497
3498 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3499 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3500 bnapi->last_status_idx);
3501 break;
3502 }
3503 }
3504
3505 return work_done;
3506 }
3507
3508 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3509 * from set_multicast.
3510 */
3511 static void
3512 bnx2_set_rx_mode(struct net_device *dev)
3513 {
3514 struct bnx2 *bp = netdev_priv(dev);
3515 u32 rx_mode, sort_mode;
3516 struct netdev_hw_addr *ha;
3517 int i;
3518
3519 if (!netif_running(dev))
3520 return;
3521
3522 spin_lock_bh(&bp->phy_lock);
3523
3524 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3525 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3526 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3527 #ifdef BCM_VLAN
3528 if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3529 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3530 #else
3531 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
3532 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3533 #endif
3534 if (dev->flags & IFF_PROMISC) {
3535 /* Promiscuous mode. */
3536 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3537 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3538 BNX2_RPM_SORT_USER0_PROM_VLAN;
3539 }
3540 else if (dev->flags & IFF_ALLMULTI) {
3541 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3542 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3543 0xffffffff);
3544 }
3545 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3546 }
3547 else {
3548 /* Accept one or more multicast addresses. */
3549 struct dev_mc_list *mclist;
3550 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3551 u32 regidx;
3552 u32 bit;
3553 u32 crc;
3554
3555 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3556
3557 netdev_for_each_mc_addr(mclist, dev) {
3558 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3559 bit = crc & 0xff;
3560 regidx = (bit & 0xe0) >> 5;
3561 bit &= 0x1f;
3562 mc_filter[regidx] |= (1 << bit);
3563 }
3564
3565 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3566 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3567 mc_filter[i]);
3568 }
3569
3570 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3571 }
3572
3573 if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3574 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3575 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3576 BNX2_RPM_SORT_USER0_PROM_VLAN;
3577 } else if (!(dev->flags & IFF_PROMISC)) {
3578 /* Add all entries to the match filter list */
3579 i = 0;
3580 netdev_for_each_uc_addr(ha, dev) {
3581 bnx2_set_mac_addr(bp, ha->addr,
3582 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3583 sort_mode |= (1 <<
3584 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3585 i++;
3586 }
3587
3588 }
3589
3590 if (rx_mode != bp->rx_mode) {
3591 bp->rx_mode = rx_mode;
3592 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3593 }
3594
3595 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3596 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3597 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3598
3599 spin_unlock_bh(&bp->phy_lock);
3600 }
3601
3602 static int __devinit
3603 check_fw_section(const struct firmware *fw,
3604 const struct bnx2_fw_file_section *section,
3605 u32 alignment, bool non_empty)
3606 {
3607 u32 offset = be32_to_cpu(section->offset);
3608 u32 len = be32_to_cpu(section->len);
3609
3610 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3611 return -EINVAL;
3612 if ((non_empty && len == 0) || len > fw->size - offset ||
3613 len & (alignment - 1))
3614 return -EINVAL;
3615 return 0;
3616 }
3617
3618 static int __devinit
3619 check_mips_fw_entry(const struct firmware *fw,
3620 const struct bnx2_mips_fw_file_entry *entry)
3621 {
3622 if (check_fw_section(fw, &entry->text, 4, true) ||
3623 check_fw_section(fw, &entry->data, 4, false) ||
3624 check_fw_section(fw, &entry->rodata, 4, false))
3625 return -EINVAL;
3626 return 0;
3627 }
3628
3629 static int __devinit
3630 bnx2_request_firmware(struct bnx2 *bp)
3631 {
3632 const char *mips_fw_file, *rv2p_fw_file;
3633 const struct bnx2_mips_fw_file *mips_fw;
3634 const struct bnx2_rv2p_fw_file *rv2p_fw;
3635 int rc;
3636
3637 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3638 mips_fw_file = FW_MIPS_FILE_09;
3639 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3640 (CHIP_ID(bp) == CHIP_ID_5709_A1))
3641 rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3642 else
3643 rv2p_fw_file = FW_RV2P_FILE_09;
3644 } else {
3645 mips_fw_file = FW_MIPS_FILE_06;
3646 rv2p_fw_file = FW_RV2P_FILE_06;
3647 }
3648
3649 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3650 if (rc) {
3651 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3652 return rc;
3653 }
3654
3655 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3656 if (rc) {
3657 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3658 return rc;
3659 }
3660 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3661 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3662 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3663 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3664 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3665 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3666 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3667 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3668 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3669 return -EINVAL;
3670 }
3671 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3672 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3673 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3674 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3675 return -EINVAL;
3676 }
3677
3678 return 0;
3679 }
3680
3681 static u32
3682 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3683 {
3684 switch (idx) {
3685 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3686 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3687 rv2p_code |= RV2P_BD_PAGE_SIZE;
3688 break;
3689 }
3690 return rv2p_code;
3691 }
3692
3693 static int
3694 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3695 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3696 {
3697 u32 rv2p_code_len, file_offset;
3698 __be32 *rv2p_code;
3699 int i;
3700 u32 val, cmd, addr;
3701
3702 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3703 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3704
3705 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3706
3707 if (rv2p_proc == RV2P_PROC1) {
3708 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3709 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3710 } else {
3711 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3712 addr = BNX2_RV2P_PROC2_ADDR_CMD;
3713 }
3714
3715 for (i = 0; i < rv2p_code_len; i += 8) {
3716 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3717 rv2p_code++;
3718 REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3719 rv2p_code++;
3720
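/* Each instruction is two 32-bit words; i counts bytes, so
 * i / 8 is the instruction index.
 */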
3721 val = (i / 8) | cmd;
3722 REG_WR(bp, addr, val);
3723 }
3724
3725 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3726 for (i = 0; i < 8; i++) {
3727 u32 loc, code;
3728
3729 loc = be32_to_cpu(fw_entry->fixup[i]);
3730 if (loc && ((loc * 4) < rv2p_code_len)) {
3731 code = be32_to_cpu(*(rv2p_code + loc - 1));
3732 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3733 code = be32_to_cpu(*(rv2p_code + loc));
3734 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3735 REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3736
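/* loc counts 32-bit words; divide by 2 for the 64-bit instruction index. */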
3737 val = (loc / 2) | cmd;
3738 REG_WR(bp, addr, val);
3739 }
3740 }
3741
3742 /* Reset the processor; the un-stall is done later. */
3743 if (rv2p_proc == RV2P_PROC1) {
3744 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3745 }
3746 else {
3747 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3748 }
3749
3750 return 0;
3751 }
3752
3753 static int
3754 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3755 const struct bnx2_mips_fw_file_entry *fw_entry)
3756 {
3757 u32 addr, len, file_offset;
3758 __be32 *data;
3759 u32 offset;
3760 u32 val;
3761
3762 /* Halt the CPU. */
3763 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3764 val |= cpu_reg->mode_value_halt;
3765 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3766 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3767
3768 /* Load the Text area. */
3769 addr = be32_to_cpu(fw_entry->text.addr);
3770 len = be32_to_cpu(fw_entry->text.len);
3771 file_offset = be32_to_cpu(fw_entry->text.offset);
3772 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3773
3774 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3775 if (len) {
3776 int j;
3777
3778 for (j = 0; j < (len / 4); j++, offset += 4)
3779 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3780 }
3781
3782 /* Load the Data area. */
3783 addr = be32_to_cpu(fw_entry->data.addr);
3784 len = be32_to_cpu(fw_entry->data.len);
3785 file_offset = be32_to_cpu(fw_entry->data.offset);
3786 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3787
3788 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3789 if (len) {
3790 int j;
3791
3792 for (j = 0; j < (len / 4); j++, offset += 4)
3793 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3794 }
3795
3796 /* Load the Read-Only area. */
3797 addr = be32_to_cpu(fw_entry->rodata.addr);
3798 len = be32_to_cpu(fw_entry->rodata.len);
3799 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3800 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3801
3802 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3803 if (len) {
3804 int j;
3805
3806 for (j = 0; j < (len / 4); j++, offset += 4)
3807 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3808 }
3809
3810 /* Clear the pre-fetch instruction. */
3811 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3812
3813 val = be32_to_cpu(fw_entry->start_addr);
3814 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3815
3816 /* Start the CPU. */
3817 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3818 val &= ~cpu_reg->mode_value_halt;
3819 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3820 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3821
3822 return 0;
3823 }
3824
3825 static int
3826 bnx2_init_cpus(struct bnx2 *bp)
3827 {
3828 const struct bnx2_mips_fw_file *mips_fw =
3829 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3830 const struct bnx2_rv2p_fw_file *rv2p_fw =
3831 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3832 int rc;
3833
3834 /* Initialize the RV2P processor. */
3835 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3836 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3837
3838 /* Initialize the RX Processor. */
3839 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3840 if (rc)
3841 goto init_cpu_err;
3842
3843 /* Initialize the TX Processor. */
3844 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3845 if (rc)
3846 goto init_cpu_err;
3847
3848 /* Initialize the TX Patch-up Processor. */
3849 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3850 if (rc)
3851 goto init_cpu_err;
3852
3853 /* Initialize the Completion Processor. */
3854 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3855 if (rc)
3856 goto init_cpu_err;
3857
3858 /* Initialize the Command Processor. */
3859 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3860
3861 init_cpu_err:
3862 return rc;
3863 }
3864
3865 static int
3866 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3867 {
3868 u16 pmcsr;
3869
3870 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3871
3872 switch (state) {
3873 case PCI_D0: {
3874 u32 val;
3875
3876 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3877 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3878 PCI_PM_CTRL_PME_STATUS);
3879
3880 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3881 /* delay required during transition out of D3hot */
3882 msleep(20);
3883
3884 val = REG_RD(bp, BNX2_EMAC_MODE);
3885 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3886 val &= ~BNX2_EMAC_MODE_MPKT;
3887 REG_WR(bp, BNX2_EMAC_MODE, val);
3888
3889 val = REG_RD(bp, BNX2_RPM_CONFIG);
3890 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3891 REG_WR(bp, BNX2_RPM_CONFIG, val);
3892 break;
3893 }
3894 case PCI_D3hot: {
3895 int i;
3896 u32 val, wol_msg;
3897
3898 if (bp->wol) {
3899 u32 advertising;
3900 u8 autoneg;
3901
3902 autoneg = bp->autoneg;
3903 advertising = bp->advertising;
3904
3905 if (bp->phy_port == PORT_TP) {
3906 bp->autoneg = AUTONEG_SPEED;
3907 bp->advertising = ADVERTISED_10baseT_Half |
3908 ADVERTISED_10baseT_Full |
3909 ADVERTISED_100baseT_Half |
3910 ADVERTISED_100baseT_Full |
3911 ADVERTISED_Autoneg;
3912 }
3913
3914 spin_lock_bh(&bp->phy_lock);
3915 bnx2_setup_phy(bp, bp->phy_port);
3916 spin_unlock_bh(&bp->phy_lock);
3917
3918 bp->autoneg = autoneg;
3919 bp->advertising = advertising;
3920
3921 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3922
3923 val = REG_RD(bp, BNX2_EMAC_MODE);
3924
3925 /* Enable port mode. */
3926 val &= ~BNX2_EMAC_MODE_PORT;
3927 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3928 BNX2_EMAC_MODE_ACPI_RCVD |
3929 BNX2_EMAC_MODE_MPKT;
3930 if (bp->phy_port == PORT_TP)
3931 val |= BNX2_EMAC_MODE_PORT_MII;
3932 else {
3933 val |= BNX2_EMAC_MODE_PORT_GMII;
3934 if (bp->line_speed == SPEED_2500)
3935 val |= BNX2_EMAC_MODE_25G_MODE;
3936 }
3937
3938 REG_WR(bp, BNX2_EMAC_MODE, val);
3939
3940 /* receive all multicast */
3941 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3942 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3943 0xffffffff);
3944 }
3945 REG_WR(bp, BNX2_EMAC_RX_MODE,
3946 BNX2_EMAC_RX_MODE_SORT_MODE);
3947
3948 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3949 BNX2_RPM_SORT_USER0_MC_EN;
3950 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3951 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3952 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3953 BNX2_RPM_SORT_USER0_ENA);
3954
3955 /* Need to enable EMAC and RPM for WOL. */
3956 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3957 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3958 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3959 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3960
3961 val = REG_RD(bp, BNX2_RPM_CONFIG);
3962 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3963 REG_WR(bp, BNX2_RPM_CONFIG, val);
3964
3965 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3966 }
3967 else {
3968 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3969 }
3970
3971 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3972 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
3973 1, 0);
3974
3975 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3976 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3977 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3978
3979 if (bp->wol)
3980 pmcsr |= 3;
3981 }
3982 else {
3983 pmcsr |= 3;
3984 }
3985 if (bp->wol) {
3986 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3987 }
3988 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3989 pmcsr);
3990
3991 /* No more memory access after this point until
3992 * device is brought back to D0.
3993 */
3994 udelay(50);
3995 break;
3996 }
3997 default:
3998 return -EINVAL;
3999 }
4000 return 0;
4001 }
4002
4003 static int
4004 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4005 {
4006 u32 val;
4007 int j;
4008
4009 /* Request access to the flash interface. */
4010 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4011 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4012 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4013 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4014 break;
4015
4016 udelay(5);
4017 }
4018
4019 if (j >= NVRAM_TIMEOUT_COUNT)
4020 return -EBUSY;
4021
4022 return 0;
4023 }
4024
4025 static int
4026 bnx2_release_nvram_lock(struct bnx2 *bp)
4027 {
4028 int j;
4029 u32 val;
4030
4031 /* Relinquish nvram interface. */
4032 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4033
4034 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4035 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4036 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4037 break;
4038
4039 udelay(5);
4040 }
4041
4042 if (j >= NVRAM_TIMEOUT_COUNT)
4043 return -EBUSY;
4044
4045 return 0;
4046 }
4047
4048
4049 static int
4050 bnx2_enable_nvram_write(struct bnx2 *bp)
4051 {
4052 u32 val;
4053
4054 val = REG_RD(bp, BNX2_MISC_CFG);
4055 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4056
4057 if (bp->flash_info->flags & BNX2_NV_WREN) {
4058 int j;
4059
4060 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4061 REG_WR(bp, BNX2_NVM_COMMAND,
4062 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4063
4064 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4065 udelay(5);
4066
4067 val = REG_RD(bp, BNX2_NVM_COMMAND);
4068 if (val & BNX2_NVM_COMMAND_DONE)
4069 break;
4070 }
4071
4072 if (j >= NVRAM_TIMEOUT_COUNT)
4073 return -EBUSY;
4074 }
4075 return 0;
4076 }
4077
4078 static void
4079 bnx2_disable_nvram_write(struct bnx2 *bp)
4080 {
4081 u32 val;
4082
4083 val = REG_RD(bp, BNX2_MISC_CFG);
4084 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4085 }
4086
4087
4088 static void
4089 bnx2_enable_nvram_access(struct bnx2 *bp)
4090 {
4091 u32 val;
4092
4093 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4094 /* Enable both bits, even on read. */
4095 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4096 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4097 }
4098
4099 static void
4100 bnx2_disable_nvram_access(struct bnx2 *bp)
4101 {
4102 u32 val;
4103
4104 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4105 /* Disable both bits, even after read. */
4106 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4107 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4108 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4109 }
4110
4111 static int
4112 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4113 {
4114 u32 cmd;
4115 int j;
4116
4117 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4118 /* Buffered flash, no erase needed */
4119 return 0;
4120
4121 /* Build an erase command */
4122 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4123 BNX2_NVM_COMMAND_DOIT;
4124
4125 /* Need to clear DONE bit separately. */
4126 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4127
4128 	/* Address of the NVRAM page to erase. */
4129 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4130
4131 /* Issue an erase command. */
4132 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4133
4134 /* Wait for completion. */
4135 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4136 u32 val;
4137
4138 udelay(5);
4139
4140 val = REG_RD(bp, BNX2_NVM_COMMAND);
4141 if (val & BNX2_NVM_COMMAND_DONE)
4142 break;
4143 }
4144
4145 if (j >= NVRAM_TIMEOUT_COUNT)
4146 return -EBUSY;
4147
4148 return 0;
4149 }
4150
4151 static int
4152 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4153 {
4154 u32 cmd;
4155 int j;
4156
4157 /* Build the command word. */
4158 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4159
4160 	/* Translate the offset for buffered flash; not needed on the 5709. */
4161 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4162 offset = ((offset / bp->flash_info->page_size) <<
4163 bp->flash_info->page_bits) +
4164 (offset % bp->flash_info->page_size);
4165 }
4166
4167 /* Need to clear DONE bit separately. */
4168 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4169
4170 /* Address of the NVRAM to read from. */
4171 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4172
4173 /* Issue a read command. */
4174 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4175
4176 /* Wait for completion. */
4177 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4178 u32 val;
4179
4180 udelay(5);
4181
4182 val = REG_RD(bp, BNX2_NVM_COMMAND);
4183 if (val & BNX2_NVM_COMMAND_DONE) {
4184 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4185 memcpy(ret_val, &v, 4);
4186 break;
4187 }
4188 }
4189 if (j >= NVRAM_TIMEOUT_COUNT)
4190 return -EBUSY;
4191
4192 return 0;
4193 }
4194
4195
4196 static int
4197 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4198 {
4199 u32 cmd;
4200 __be32 val32;
4201 int j;
4202
4203 /* Build the command word. */
4204 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4205
4206 	/* Translate the offset for buffered flash; not needed on the 5709. */
4207 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4208 offset = ((offset / bp->flash_info->page_size) <<
4209 bp->flash_info->page_bits) +
4210 (offset % bp->flash_info->page_size);
4211 }
4212
4213 /* Need to clear DONE bit separately. */
4214 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4215
4216 memcpy(&val32, val, 4);
4217
4218 /* Write the data. */
4219 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4220
4221 /* Address of the NVRAM to write to. */
4222 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4223
4224 /* Issue the write command. */
4225 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4226
4227 /* Wait for completion. */
4228 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4229 udelay(5);
4230
4231 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4232 break;
4233 }
4234 if (j >= NVRAM_TIMEOUT_COUNT)
4235 return -EBUSY;
4236
4237 return 0;
4238 }
4239
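/* Identify the attached flash/EEPROM.  The 5709 has a fixed flash type;
 * other chips are matched against flash_table by their strap bits, and
 * the interface is reconfigured on first use.  The usable size is taken
 * from shared memory when the firmware provides it.
 */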
4240 static int
4241 bnx2_init_nvram(struct bnx2 *bp)
4242 {
4243 u32 val;
4244 int j, entry_count, rc = 0;
4245 const struct flash_spec *flash;
4246
4247 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4248 bp->flash_info = &flash_5709;
4249 goto get_flash_size;
4250 }
4251
4252 /* Determine the selected interface. */
4253 val = REG_RD(bp, BNX2_NVM_CFG1);
4254
4255 entry_count = ARRAY_SIZE(flash_table);
4256
4257 if (val & 0x40000000) {
4258
4259 /* Flash interface has been reconfigured */
4260 for (j = 0, flash = &flash_table[0]; j < entry_count;
4261 j++, flash++) {
4262 if ((val & FLASH_BACKUP_STRAP_MASK) ==
4263 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4264 bp->flash_info = flash;
4265 break;
4266 }
4267 }
4268 }
4269 else {
4270 u32 mask;
4271 		/* Flash interface has not been reconfigured yet */
4272
4273 if (val & (1 << 23))
4274 mask = FLASH_BACKUP_STRAP_MASK;
4275 else
4276 mask = FLASH_STRAP_MASK;
4277
4278 for (j = 0, flash = &flash_table[0]; j < entry_count;
4279 j++, flash++) {
4280
4281 if ((val & mask) == (flash->strapping & mask)) {
4282 bp->flash_info = flash;
4283
4284 /* Request access to the flash interface. */
4285 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4286 return rc;
4287
4288 /* Enable access to flash interface */
4289 bnx2_enable_nvram_access(bp);
4290
4291 /* Reconfigure the flash interface */
4292 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4293 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4294 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4295 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4296
4297 /* Disable access to flash interface */
4298 bnx2_disable_nvram_access(bp);
4299 bnx2_release_nvram_lock(bp);
4300
4301 break;
4302 }
4303 }
4304 } /* if (val & 0x40000000) */
4305
4306 if (j == entry_count) {
4307 bp->flash_info = NULL;
4308 pr_alert("Unknown flash/EEPROM type\n");
4309 return -ENODEV;
4310 }
4311
4312 get_flash_size:
4313 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4314 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4315 if (val)
4316 bp->flash_size = val;
4317 else
4318 bp->flash_size = bp->flash_info->total_size;
4319
4320 return rc;
4321 }
4322
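/* Read an arbitrary byte range from NVRAM.  The hardware transfers whole
 * dwords, so unaligned leading and trailing bytes are read into a bounce
 * buffer and copied out.
 */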
4323 static int
4324 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4325 int buf_size)
4326 {
4327 int rc = 0;
4328 u32 cmd_flags, offset32, len32, extra;
4329
4330 if (buf_size == 0)
4331 return 0;
4332
4333 /* Request access to the flash interface. */
4334 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4335 return rc;
4336
4337 /* Enable access to flash interface */
4338 bnx2_enable_nvram_access(bp);
4339
4340 len32 = buf_size;
4341 offset32 = offset;
4342 extra = 0;
4343
4344 cmd_flags = 0;
4345
4346 if (offset32 & 3) {
4347 u8 buf[4];
4348 u32 pre_len;
4349
4350 offset32 &= ~3;
4351 pre_len = 4 - (offset & 3);
4352
4353 if (pre_len >= len32) {
4354 pre_len = len32;
4355 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4356 BNX2_NVM_COMMAND_LAST;
4357 }
4358 else {
4359 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4360 }
4361
4362 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4363
4364 if (rc)
4365 return rc;
4366
4367 memcpy(ret_buf, buf + (offset & 3), pre_len);
4368
4369 offset32 += 4;
4370 ret_buf += pre_len;
4371 len32 -= pre_len;
4372 }
4373 if (len32 & 3) {
4374 extra = 4 - (len32 & 3);
4375 len32 = (len32 + 4) & ~3;
4376 }
4377
4378 if (len32 == 4) {
4379 u8 buf[4];
4380
4381 if (cmd_flags)
4382 cmd_flags = BNX2_NVM_COMMAND_LAST;
4383 else
4384 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4385 BNX2_NVM_COMMAND_LAST;
4386
4387 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4388
4389 memcpy(ret_buf, buf, 4 - extra);
4390 }
4391 else if (len32 > 0) {
4392 u8 buf[4];
4393
4394 /* Read the first word. */
4395 if (cmd_flags)
4396 cmd_flags = 0;
4397 else
4398 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4399
4400 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4401
4402 /* Advance to the next dword. */
4403 offset32 += 4;
4404 ret_buf += 4;
4405 len32 -= 4;
4406
4407 while (len32 > 4 && rc == 0) {
4408 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4409
4410 /* Advance to the next dword. */
4411 offset32 += 4;
4412 ret_buf += 4;
4413 len32 -= 4;
4414 }
4415
4416 if (rc)
4417 return rc;
4418
4419 cmd_flags = BNX2_NVM_COMMAND_LAST;
4420 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4421
4422 memcpy(ret_buf, buf, 4 - extra);
4423 }
4424
4425 /* Disable access to flash interface */
4426 bnx2_disable_nvram_access(bp);
4427
4428 bnx2_release_nvram_lock(bp);
4429
4430 return rc;
4431 }
4432
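/* Write an arbitrary byte range to NVRAM using read-modify-write on
 * dword (and, for non-buffered flash, page) boundaries.  Non-buffered
 * parts are erased one page at a time, with the untouched portions of
 * each page restored from a temporary buffer.
 */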
4433 static int
4434 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4435 int buf_size)
4436 {
4437 u32 written, offset32, len32;
4438 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4439 int rc = 0;
4440 int align_start, align_end;
4441
4442 buf = data_buf;
4443 offset32 = offset;
4444 len32 = buf_size;
4445 align_start = align_end = 0;
4446
4447 if ((align_start = (offset32 & 3))) {
4448 offset32 &= ~3;
4449 len32 += align_start;
4450 if (len32 < 4)
4451 len32 = 4;
4452 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4453 return rc;
4454 }
4455
4456 if (len32 & 3) {
4457 align_end = 4 - (len32 & 3);
4458 len32 += align_end;
4459 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4460 return rc;
4461 }
4462
4463 if (align_start || align_end) {
4464 align_buf = kmalloc(len32, GFP_KERNEL);
4465 if (align_buf == NULL)
4466 return -ENOMEM;
4467 if (align_start) {
4468 memcpy(align_buf, start, 4);
4469 }
4470 if (align_end) {
4471 memcpy(align_buf + len32 - 4, end, 4);
4472 }
4473 memcpy(align_buf + align_start, data_buf, buf_size);
4474 buf = align_buf;
4475 }
4476
4477 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4478 flash_buffer = kmalloc(264, GFP_KERNEL);
4479 if (flash_buffer == NULL) {
4480 rc = -ENOMEM;
4481 goto nvram_write_end;
4482 }
4483 }
4484
4485 written = 0;
4486 while ((written < len32) && (rc == 0)) {
4487 u32 page_start, page_end, data_start, data_end;
4488 u32 addr, cmd_flags;
4489 int i;
4490
4491 /* Find the page_start addr */
4492 page_start = offset32 + written;
4493 page_start -= (page_start % bp->flash_info->page_size);
4494 /* Find the page_end addr */
4495 page_end = page_start + bp->flash_info->page_size;
4496 /* Find the data_start addr */
4497 data_start = (written == 0) ? offset32 : page_start;
4498 /* Find the data_end addr */
4499 data_end = (page_end > offset32 + len32) ?
4500 (offset32 + len32) : page_end;
4501
4502 /* Request access to the flash interface. */
4503 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4504 goto nvram_write_end;
4505
4506 /* Enable access to flash interface */
4507 bnx2_enable_nvram_access(bp);
4508
4509 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4510 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4511 int j;
4512
4513 			/* Read the whole page into the buffer
4514 			 * (non-buffered flash only) */
4515 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4516 if (j == (bp->flash_info->page_size - 4)) {
4517 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4518 }
4519 rc = bnx2_nvram_read_dword(bp,
4520 page_start + j,
4521 &flash_buffer[j],
4522 cmd_flags);
4523
4524 if (rc)
4525 goto nvram_write_end;
4526
4527 cmd_flags = 0;
4528 }
4529 }
4530
4531 /* Enable writes to flash interface (unlock write-protect) */
4532 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4533 goto nvram_write_end;
4534
4535 /* Loop to write back the buffer data from page_start to
4536 * data_start */
4537 i = 0;
4538 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4539 /* Erase the page */
4540 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4541 goto nvram_write_end;
4542
4543 			/* Re-enable writes for the actual data write */
4544 bnx2_enable_nvram_write(bp);
4545
4546 for (addr = page_start; addr < data_start;
4547 addr += 4, i += 4) {
4548
4549 rc = bnx2_nvram_write_dword(bp, addr,
4550 &flash_buffer[i], cmd_flags);
4551
4552 if (rc != 0)
4553 goto nvram_write_end;
4554
4555 cmd_flags = 0;
4556 }
4557 }
4558
4559 /* Loop to write the new data from data_start to data_end */
4560 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4561 if ((addr == page_end - 4) ||
4562 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4563 (addr == data_end - 4))) {
4564
4565 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4566 }
4567 rc = bnx2_nvram_write_dword(bp, addr, buf,
4568 cmd_flags);
4569
4570 if (rc != 0)
4571 goto nvram_write_end;
4572
4573 cmd_flags = 0;
4574 buf += 4;
4575 }
4576
4577 /* Loop to write back the buffer data from data_end
4578 * to page_end */
4579 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4580 for (addr = data_end; addr < page_end;
4581 addr += 4, i += 4) {
4582
4583 if (addr == page_end-4) {
4584 cmd_flags = BNX2_NVM_COMMAND_LAST;
4585 }
4586 rc = bnx2_nvram_write_dword(bp, addr,
4587 &flash_buffer[i], cmd_flags);
4588
4589 if (rc != 0)
4590 goto nvram_write_end;
4591
4592 cmd_flags = 0;
4593 }
4594 }
4595
4596 /* Disable writes to flash interface (lock write-protect) */
4597 bnx2_disable_nvram_write(bp);
4598
4599 /* Disable access to flash interface */
4600 bnx2_disable_nvram_access(bp);
4601 bnx2_release_nvram_lock(bp);
4602
4603 /* Increment written */
4604 written += data_end - data_start;
4605 }
4606
4607 nvram_write_end:
4608 kfree(flash_buffer);
4609 kfree(align_buf);
4610 return rc;
4611 }
4612
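/* Query optional firmware capabilities (keep-VLAN and remote PHY) from
 * shared memory and acknowledge the ones the driver will use.
 */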
4613 static void
4614 bnx2_init_fw_cap(struct bnx2 *bp)
4615 {
4616 u32 val, sig = 0;
4617
4618 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4619 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4620
4621 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4622 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4623
4624 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4625 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4626 return;
4627
4628 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4629 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4630 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4631 }
4632
4633 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4634 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4635 u32 link;
4636
4637 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4638
4639 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4640 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4641 bp->phy_port = PORT_FIBRE;
4642 else
4643 bp->phy_port = PORT_TP;
4644
4645 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4646 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4647 }
4648
4649 if (netif_running(bp->dev) && sig)
4650 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4651 }
4652
4653 static void
4654 bnx2_setup_msix_tbl(struct bnx2 *bp)
4655 {
4656 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4657
4658 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4659 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4660 }
4661
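/* Quiesce DMA, synchronize with the firmware, then issue a core reset
 * and wait for both the chip and the firmware to come back.
 */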
4662 static int
4663 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4664 {
4665 u32 val;
4666 int i, rc = 0;
4667 u8 old_port;
4668
4669 /* Wait for the current PCI transaction to complete before
4670 * issuing a reset. */
4671 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4672 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4673 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4674 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4675 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4676 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4677 udelay(5);
4678
4679 /* Wait for the firmware to tell us it is ok to issue a reset. */
4680 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4681
4682 /* Deposit a driver reset signature so the firmware knows that
4683 * this is a soft reset. */
4684 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4685 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4686
4687 	/* Do a dummy read to force the chip to complete all current transactions
4688 	 * before we issue a reset. */
4689 val = REG_RD(bp, BNX2_MISC_ID);
4690
4691 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4692 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4693 REG_RD(bp, BNX2_MISC_COMMAND);
4694 udelay(5);
4695
4696 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4697 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4698
4699 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4700
4701 } else {
4702 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4703 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4704 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4705
4706 /* Chip reset. */
4707 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4708
4709 /* Reading back any register after chip reset will hang the
4710 * bus on 5706 A0 and A1. The msleep below provides plenty
4711 * of margin for write posting.
4712 */
4713 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4714 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4715 msleep(20);
4716
4717 		/* Reset takes approximately 30 usec */
4718 for (i = 0; i < 10; i++) {
4719 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4720 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4721 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4722 break;
4723 udelay(10);
4724 }
4725
4726 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4727 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4728 pr_err("Chip reset did not complete\n");
4729 return -EBUSY;
4730 }
4731 }
4732
4733 /* Make sure byte swapping is properly configured. */
4734 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4735 if (val != 0x01020304) {
4736 pr_err("Chip not in correct endian mode\n");
4737 return -ENODEV;
4738 }
4739
4740 /* Wait for the firmware to finish its initialization. */
4741 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4742 if (rc)
4743 return rc;
4744
4745 spin_lock_bh(&bp->phy_lock);
4746 old_port = bp->phy_port;
4747 bnx2_init_fw_cap(bp);
4748 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4749 old_port != bp->phy_port)
4750 bnx2_set_default_remote_link(bp);
4751 spin_unlock_bh(&bp->phy_lock);
4752
4753 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4754 		/* Adjust the voltage regulator two steps lower.  The default
4755 		 * value of this register is 0x0000000e. */
4756 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4757
4758 /* Remove bad rbuf memory from the free pool. */
4759 rc = bnx2_alloc_bad_rbuf(bp);
4760 }
4761
4762 if (bp->flags & BNX2_FLAG_USING_MSIX)
4763 bnx2_setup_msix_tbl(bp);
4764
4765 return rc;
4766 }
4767
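/* One-time chip setup after reset: DMA and context configuration,
 * CPU firmware load, MAC address and MTU programming, host coalescing
 * parameters, and the final firmware handshake that enables the chip.
 */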
4768 static int
4769 bnx2_init_chip(struct bnx2 *bp)
4770 {
4771 u32 val, mtu;
4772 int rc, i;
4773
4774 /* Make sure the interrupt is not active. */
4775 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4776
4777 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4778 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4779 #ifdef __BIG_ENDIAN
4780 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4781 #endif
4782 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4783 DMA_READ_CHANS << 12 |
4784 DMA_WRITE_CHANS << 16;
4785
4786 val |= (0x2 << 20) | (1 << 11);
4787
4788 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4789 val |= (1 << 23);
4790
4791 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4792 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4793 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4794
4795 REG_WR(bp, BNX2_DMA_CONFIG, val);
4796
4797 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4798 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4799 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4800 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4801 }
4802
4803 if (bp->flags & BNX2_FLAG_PCIX) {
4804 u16 val16;
4805
4806 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4807 &val16);
4808 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4809 val16 & ~PCI_X_CMD_ERO);
4810 }
4811
4812 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4813 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4814 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4815 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4816
4817 /* Initialize context mapping and zero out the quick contexts. The
4818 * context block must have already been enabled. */
4819 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4820 rc = bnx2_init_5709_context(bp);
4821 if (rc)
4822 return rc;
4823 } else
4824 bnx2_init_context(bp);
4825
4826 if ((rc = bnx2_init_cpus(bp)) != 0)
4827 return rc;
4828
4829 bnx2_init_nvram(bp);
4830
4831 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4832
4833 val = REG_RD(bp, BNX2_MQ_CONFIG);
4834 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4835 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4836 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4837 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4838 if (CHIP_REV(bp) == CHIP_REV_Ax)
4839 val |= BNX2_MQ_CONFIG_HALT_DIS;
4840 }
4841
4842 REG_WR(bp, BNX2_MQ_CONFIG, val);
4843
4844 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4845 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4846 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4847
4848 val = (BCM_PAGE_BITS - 8) << 24;
4849 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4850
4851 /* Configure page size. */
4852 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4853 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4854 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4855 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4856
4857 val = bp->mac_addr[0] +
4858 (bp->mac_addr[1] << 8) +
4859 (bp->mac_addr[2] << 16) +
4860 bp->mac_addr[3] +
4861 (bp->mac_addr[4] << 8) +
4862 (bp->mac_addr[5] << 16);
4863 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4864
4865 /* Program the MTU. Also include 4 bytes for CRC32. */
4866 mtu = bp->dev->mtu;
4867 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4868 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4869 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4870 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4871
4872 if (mtu < 1500)
4873 mtu = 1500;
4874
4875 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4876 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4877 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4878
4879 memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4880 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4881 bp->bnx2_napi[i].last_status_idx = 0;
4882
4883 bp->idle_chk_status_idx = 0xffff;
4884
4885 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4886
4887 /* Set up how to generate a link change interrupt. */
4888 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4889
4890 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4891 (u64) bp->status_blk_mapping & 0xffffffff);
4892 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4893
4894 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4895 (u64) bp->stats_blk_mapping & 0xffffffff);
4896 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4897 (u64) bp->stats_blk_mapping >> 32);
4898
4899 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4900 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4901
4902 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4903 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4904
4905 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4906 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4907
4908 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4909
4910 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4911
4912 REG_WR(bp, BNX2_HC_COM_TICKS,
4913 (bp->com_ticks_int << 16) | bp->com_ticks);
4914
4915 REG_WR(bp, BNX2_HC_CMD_TICKS,
4916 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4917
4918 if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4919 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4920 else
4921 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4922 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4923
4924 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4925 val = BNX2_HC_CONFIG_COLLECT_STATS;
4926 else {
4927 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4928 BNX2_HC_CONFIG_COLLECT_STATS;
4929 }
4930
4931 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4932 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4933 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4934
4935 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4936 }
4937
4938 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4939 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4940
4941 REG_WR(bp, BNX2_HC_CONFIG, val);
4942
4943 for (i = 1; i < bp->irq_nvecs; i++) {
4944 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4945 BNX2_HC_SB_CONFIG_1;
4946
4947 REG_WR(bp, base,
4948 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4949 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4950 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4951
4952 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4953 (bp->tx_quick_cons_trip_int << 16) |
4954 bp->tx_quick_cons_trip);
4955
4956 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4957 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4958
4959 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4960 (bp->rx_quick_cons_trip_int << 16) |
4961 bp->rx_quick_cons_trip);
4962
4963 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4964 (bp->rx_ticks_int << 16) | bp->rx_ticks);
4965 }
4966
4967 /* Clear internal stats counters. */
4968 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4969
4970 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4971
4972 /* Initialize the receive filter. */
4973 bnx2_set_rx_mode(bp->dev);
4974
4975 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4976 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4977 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4978 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4979 }
4980 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4981 1, 0);
4982
4983 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4984 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4985
4986 udelay(20);
4987
4988 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4989
4990 return rc;
4991 }
4992
4993 static void
4994 bnx2_clear_ring_states(struct bnx2 *bp)
4995 {
4996 struct bnx2_napi *bnapi;
4997 struct bnx2_tx_ring_info *txr;
4998 struct bnx2_rx_ring_info *rxr;
4999 int i;
5000
5001 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5002 bnapi = &bp->bnx2_napi[i];
5003 txr = &bnapi->tx_ring;
5004 rxr = &bnapi->rx_ring;
5005
5006 txr->tx_cons = 0;
5007 txr->hw_tx_cons = 0;
5008 rxr->rx_prod_bseq = 0;
5009 rxr->rx_prod = 0;
5010 rxr->rx_cons = 0;
5011 rxr->rx_pg_prod = 0;
5012 rxr->rx_pg_cons = 0;
5013 }
5014 }
5015
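/* Program the TX ring context for the given CID.  The 5709 uses the
 * XI register layout; older chips use the original offsets.
 */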
5016 static void
5017 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5018 {
5019 u32 val, offset0, offset1, offset2, offset3;
5020 u32 cid_addr = GET_CID_ADDR(cid);
5021
5022 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5023 offset0 = BNX2_L2CTX_TYPE_XI;
5024 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5025 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5026 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5027 } else {
5028 offset0 = BNX2_L2CTX_TYPE;
5029 offset1 = BNX2_L2CTX_CMD_TYPE;
5030 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5031 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5032 }
5033 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5034 bnx2_ctx_wr(bp, cid_addr, offset0, val);
5035
5036 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5037 bnx2_ctx_wr(bp, cid_addr, offset1, val);
5038
5039 val = (u64) txr->tx_desc_mapping >> 32;
5040 bnx2_ctx_wr(bp, cid_addr, offset2, val);
5041
5042 val = (u64) txr->tx_desc_mapping & 0xffffffff;
5043 bnx2_ctx_wr(bp, cid_addr, offset3, val);
5044 }
5045
5046 static void
5047 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5048 {
5049 struct tx_bd *txbd;
5050 u32 cid = TX_CID;
5051 struct bnx2_napi *bnapi;
5052 struct bnx2_tx_ring_info *txr;
5053
5054 bnapi = &bp->bnx2_napi[ring_num];
5055 txr = &bnapi->tx_ring;
5056
5057 if (ring_num == 0)
5058 cid = TX_CID;
5059 else
5060 cid = TX_TSS_CID + ring_num - 1;
5061
5062 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5063
5064 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5065
5066 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5067 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5068
5069 txr->tx_prod = 0;
5070 txr->tx_prod_bseq = 0;
5071
5072 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5073 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5074
5075 bnx2_init_tx_context(bp, cid, txr);
5076 }
5077
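/* Initialize one or more pages of RX buffer descriptors and chain them
 * into a ring: the last BD of each page points at the next page, and
 * the final page points back to the first.
 */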
5078 static void
5079 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5080 int num_rings)
5081 {
5082 int i;
5083 struct rx_bd *rxbd;
5084
5085 for (i = 0; i < num_rings; i++) {
5086 int j;
5087
5088 rxbd = &rx_ring[i][0];
5089 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5090 rxbd->rx_bd_len = buf_size;
5091 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5092 }
5093 if (i == (num_rings - 1))
5094 j = 0;
5095 else
5096 j = i + 1;
5097 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5098 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5099 }
5100 }
5101
5102 static void
5103 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5104 {
5105 int i;
5106 u16 prod, ring_prod;
5107 u32 cid, rx_cid_addr, val;
5108 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5109 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5110
5111 if (ring_num == 0)
5112 cid = RX_CID;
5113 else
5114 cid = RX_RSS_CID + ring_num - 1;
5115
5116 rx_cid_addr = GET_CID_ADDR(cid);
5117
5118 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5119 bp->rx_buf_use_size, bp->rx_max_ring);
5120
5121 bnx2_init_rx_context(bp, cid);
5122
5123 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5124 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
5125 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5126 }
5127
5128 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5129 if (bp->rx_pg_ring_size) {
5130 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5131 rxr->rx_pg_desc_mapping,
5132 PAGE_SIZE, bp->rx_max_pg_ring);
5133 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5134 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5135 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5136 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5137
5138 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5139 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5140
5141 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5142 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5143
5144 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5145 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5146 }
5147
5148 val = (u64) rxr->rx_desc_mapping[0] >> 32;
5149 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5150
5151 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5152 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5153
5154 ring_prod = prod = rxr->rx_pg_prod;
5155 for (i = 0; i < bp->rx_pg_ring_size; i++) {
5156 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
5157 netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5158 ring_num, i, bp->rx_pg_ring_size);
5159 break;
5160 }
5161 prod = NEXT_RX_BD(prod);
5162 ring_prod = RX_PG_RING_IDX(prod);
5163 }
5164 rxr->rx_pg_prod = prod;
5165
5166 ring_prod = prod = rxr->rx_prod;
5167 for (i = 0; i < bp->rx_ring_size; i++) {
5168 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
5169 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5170 ring_num, i, bp->rx_ring_size);
5171 break;
5172 }
5173 prod = NEXT_RX_BD(prod);
5174 ring_prod = RX_RING_IDX(prod);
5175 }
5176 rxr->rx_prod = prod;
5177
5178 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5179 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5180 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5181
5182 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5183 REG_WR16(bp, rxr->rx_bidx_addr, prod);
5184
5185 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5186 }
5187
5188 static void
5189 bnx2_init_all_rings(struct bnx2 *bp)
5190 {
5191 int i;
5192 u32 val;
5193
5194 bnx2_clear_ring_states(bp);
5195
5196 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5197 for (i = 0; i < bp->num_tx_rings; i++)
5198 bnx2_init_tx_ring(bp, i);
5199
5200 if (bp->num_tx_rings > 1)
5201 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5202 (TX_TSS_CID << 7));
5203
5204 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5205 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5206
5207 for (i = 0; i < bp->num_rx_rings; i++)
5208 bnx2_init_rx_ring(bp, i);
5209
5210 if (bp->num_rx_rings > 1) {
5211 u32 tbl_32;
5212 u8 *tbl = (u8 *) &tbl_32;
5213
5214 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
5215 BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
5216
5217 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5218 tbl[i % 4] = i % (bp->num_rx_rings - 1);
5219 if ((i % 4) == 3)
5220 bnx2_reg_wr_ind(bp,
5221 BNX2_RXP_SCRATCH_RSS_TBL + i,
5222 cpu_to_be32(tbl_32));
5223 }
5224
5225 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5226 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5227
5228 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5229
5230 }
5231 }
5232
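/* Return the number of BD pages needed for ring_size entries, rounded
 * up to a power of 2 and capped at max_size.
 */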
5233 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5234 {
5235 u32 max, num_rings = 1;
5236
5237 while (ring_size > MAX_RX_DESC_CNT) {
5238 ring_size -= MAX_RX_DESC_CNT;
5239 num_rings++;
5240 }
5241 	/* round num_rings up to the next power of 2, capped at max_size */
5242 max = max_size;
5243 while ((max & num_rings) == 0)
5244 max >>= 1;
5245
5246 if (num_rings != max)
5247 max <<= 1;
5248
5249 return max;
5250 }
5251
5252 static void
5253 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5254 {
5255 u32 rx_size, rx_space, jumbo_size;
5256
5257 /* 8 for CRC and VLAN */
5258 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5259
5260 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5261 sizeof(struct skb_shared_info);
5262
5263 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5264 bp->rx_pg_ring_size = 0;
5265 bp->rx_max_pg_ring = 0;
5266 bp->rx_max_pg_ring_idx = 0;
5267 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5268 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5269
5270 jumbo_size = size * pages;
5271 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5272 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5273
5274 bp->rx_pg_ring_size = jumbo_size;
5275 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5276 MAX_RX_PG_RINGS);
5277 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5278 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5279 bp->rx_copy_thresh = 0;
5280 }
5281
5282 bp->rx_buf_use_size = rx_size;
5283 /* hw alignment */
5284 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5285 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5286 bp->rx_ring_size = size;
5287 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5288 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5289 }
5290
5291 static void
5292 bnx2_free_tx_skbs(struct bnx2 *bp)
5293 {
5294 int i;
5295
5296 for (i = 0; i < bp->num_tx_rings; i++) {
5297 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5298 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5299 int j;
5300
5301 if (txr->tx_buf_ring == NULL)
5302 continue;
5303
5304 for (j = 0; j < TX_DESC_CNT; ) {
5305 struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5306 struct sk_buff *skb = tx_buf->skb;
5307 int k, last;
5308
5309 if (skb == NULL) {
5310 j++;
5311 continue;
5312 }
5313
5314 pci_unmap_single(bp->pdev,
5315 pci_unmap_addr(tx_buf, mapping),
5316 skb_headlen(skb),
5317 PCI_DMA_TODEVICE);
5318
5319 tx_buf->skb = NULL;
5320
5321 last = tx_buf->nr_frags;
5322 j++;
5323 for (k = 0; k < last; k++, j++) {
5324 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5325 pci_unmap_page(bp->pdev,
5326 pci_unmap_addr(tx_buf, mapping),
5327 skb_shinfo(skb)->frags[k].size,
5328 PCI_DMA_TODEVICE);
5329 }
5330 dev_kfree_skb(skb);
5331 }
5332 }
5333 }
5334
5335 static void
5336 bnx2_free_rx_skbs(struct bnx2 *bp)
5337 {
5338 int i;
5339
5340 for (i = 0; i < bp->num_rx_rings; i++) {
5341 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5342 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5343 int j;
5344
5345 if (rxr->rx_buf_ring == NULL)
5346 return;
5347
5348 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5349 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5350 struct sk_buff *skb = rx_buf->skb;
5351
5352 if (skb == NULL)
5353 continue;
5354
5355 pci_unmap_single(bp->pdev,
5356 pci_unmap_addr(rx_buf, mapping),
5357 bp->rx_buf_use_size,
5358 PCI_DMA_FROMDEVICE);
5359
5360 rx_buf->skb = NULL;
5361
5362 dev_kfree_skb(skb);
5363 }
5364 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5365 bnx2_free_rx_page(bp, rxr, j);
5366 }
5367 }
5368
5369 static void
5370 bnx2_free_skbs(struct bnx2 *bp)
5371 {
5372 bnx2_free_tx_skbs(bp);
5373 bnx2_free_rx_skbs(bp);
5374 }
5375
5376 static int
5377 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5378 {
5379 int rc;
5380
5381 rc = bnx2_reset_chip(bp, reset_code);
5382 bnx2_free_skbs(bp);
5383 if (rc)
5384 return rc;
5385
5386 if ((rc = bnx2_init_chip(bp)) != 0)
5387 return rc;
5388
5389 bnx2_init_all_rings(bp);
5390 return 0;
5391 }
5392
5393 static int
5394 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5395 {
5396 int rc;
5397
5398 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5399 return rc;
5400
5401 spin_lock_bh(&bp->phy_lock);
5402 bnx2_init_phy(bp, reset_phy);
5403 bnx2_set_link(bp);
5404 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5405 bnx2_remote_phy_event(bp);
5406 spin_unlock_bh(&bp->phy_lock);
5407 return 0;
5408 }
5409
5410 static int
5411 bnx2_shutdown_chip(struct bnx2 *bp)
5412 {
5413 u32 reset_code;
5414
5415 if (bp->flags & BNX2_FLAG_NO_WOL)
5416 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5417 else if (bp->wol)
5418 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5419 else
5420 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5421
5422 return bnx2_reset_chip(bp, reset_code);
5423 }
5424
5425 static int
5426 bnx2_test_registers(struct bnx2 *bp)
5427 {
5428 int ret;
5429 int i, is_5709;
5430 static const struct {
5431 u16 offset;
5432 u16 flags;
5433 #define BNX2_FL_NOT_5709 1
5434 u32 rw_mask;
5435 u32 ro_mask;
5436 } reg_tbl[] = {
5437 { 0x006c, 0, 0x00000000, 0x0000003f },
5438 { 0x0090, 0, 0xffffffff, 0x00000000 },
5439 { 0x0094, 0, 0x00000000, 0x00000000 },
5440
5441 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5442 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5443 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5444 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5445 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5446 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5447 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5448 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5449 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5450
5451 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5452 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5453 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5454 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5455 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5456 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5457
5458 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5459 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5460 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5461
5462 { 0x1000, 0, 0x00000000, 0x00000001 },
5463 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5464
5465 { 0x1408, 0, 0x01c00800, 0x00000000 },
5466 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5467 { 0x14a8, 0, 0x00000000, 0x000001ff },
5468 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5469 { 0x14b0, 0, 0x00000002, 0x00000001 },
5470 { 0x14b8, 0, 0x00000000, 0x00000000 },
5471 { 0x14c0, 0, 0x00000000, 0x00000009 },
5472 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5473 { 0x14cc, 0, 0x00000000, 0x00000001 },
5474 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5475
5476 { 0x1800, 0, 0x00000000, 0x00000001 },
5477 { 0x1804, 0, 0x00000000, 0x00000003 },
5478
5479 { 0x2800, 0, 0x00000000, 0x00000001 },
5480 { 0x2804, 0, 0x00000000, 0x00003f01 },
5481 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5482 { 0x2810, 0, 0xffff0000, 0x00000000 },
5483 { 0x2814, 0, 0xffff0000, 0x00000000 },
5484 { 0x2818, 0, 0xffff0000, 0x00000000 },
5485 { 0x281c, 0, 0xffff0000, 0x00000000 },
5486 { 0x2834, 0, 0xffffffff, 0x00000000 },
5487 { 0x2840, 0, 0x00000000, 0xffffffff },
5488 { 0x2844, 0, 0x00000000, 0xffffffff },
5489 { 0x2848, 0, 0xffffffff, 0x00000000 },
5490 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5491
5492 { 0x2c00, 0, 0x00000000, 0x00000011 },
5493 { 0x2c04, 0, 0x00000000, 0x00030007 },
5494
5495 { 0x3c00, 0, 0x00000000, 0x00000001 },
5496 { 0x3c04, 0, 0x00000000, 0x00070000 },
5497 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5498 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5499 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5500 { 0x3c14, 0, 0x00000000, 0xffffffff },
5501 { 0x3c18, 0, 0x00000000, 0xffffffff },
5502 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5503 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5504
5505 { 0x5004, 0, 0x00000000, 0x0000007f },
5506 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5507
5508 { 0x5c00, 0, 0x00000000, 0x00000001 },
5509 { 0x5c04, 0, 0x00000000, 0x0003000f },
5510 { 0x5c08, 0, 0x00000003, 0x00000000 },
5511 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5512 { 0x5c10, 0, 0x00000000, 0xffffffff },
5513 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5514 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5515 { 0x5c88, 0, 0x00000000, 0x00077373 },
5516 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5517
5518 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5519 { 0x680c, 0, 0xffffffff, 0x00000000 },
5520 { 0x6810, 0, 0xffffffff, 0x00000000 },
5521 { 0x6814, 0, 0xffffffff, 0x00000000 },
5522 { 0x6818, 0, 0xffffffff, 0x00000000 },
5523 { 0x681c, 0, 0xffffffff, 0x00000000 },
5524 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5525 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5526 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5527 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5528 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5529 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5530 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5531 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5532 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5533 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5534 { 0x684c, 0, 0xffffffff, 0x00000000 },
5535 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5536 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5537 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5538 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5539 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5540 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5541
5542 { 0xffff, 0, 0x00000000, 0x00000000 },
5543 };
5544
5545 ret = 0;
5546 is_5709 = 0;
5547 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5548 is_5709 = 1;
5549
5550 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5551 u32 offset, rw_mask, ro_mask, save_val, val;
5552 u16 flags = reg_tbl[i].flags;
5553
5554 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5555 continue;
5556
5557 offset = (u32) reg_tbl[i].offset;
5558 rw_mask = reg_tbl[i].rw_mask;
5559 ro_mask = reg_tbl[i].ro_mask;
5560
5561 save_val = readl(bp->regview + offset);
5562
5563 writel(0, bp->regview + offset);
5564
5565 val = readl(bp->regview + offset);
5566 if ((val & rw_mask) != 0) {
5567 goto reg_test_err;
5568 }
5569
5570 if ((val & ro_mask) != (save_val & ro_mask)) {
5571 goto reg_test_err;
5572 }
5573
5574 writel(0xffffffff, bp->regview + offset);
5575
5576 val = readl(bp->regview + offset);
5577 if ((val & rw_mask) != rw_mask) {
5578 goto reg_test_err;
5579 }
5580
5581 if ((val & ro_mask) != (save_val & ro_mask)) {
5582 goto reg_test_err;
5583 }
5584
5585 writel(save_val, bp->regview + offset);
5586 continue;
5587
5588 reg_test_err:
5589 writel(save_val, bp->regview + offset);
5590 ret = -ENODEV;
5591 break;
5592 }
5593 return ret;
5594 }
5595
5596 static int
5597 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5598 {
5599 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5600 		0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
5601 int i;
5602
5603 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5604 u32 offset;
5605
5606 for (offset = 0; offset < size; offset += 4) {
5607
5608 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5609
5610 if (bnx2_reg_rd_ind(bp, start + offset) !=
5611 test_pattern[i]) {
5612 return -ENODEV;
5613 }
5614 }
5615 }
5616 return 0;
5617 }
5618
5619 static int
5620 bnx2_test_memory(struct bnx2 *bp)
5621 {
5622 int ret = 0;
5623 int i;
5624 static struct mem_entry {
5625 u32 offset;
5626 u32 len;
5627 } mem_tbl_5706[] = {
5628 { 0x60000, 0x4000 },
5629 { 0xa0000, 0x3000 },
5630 { 0xe0000, 0x4000 },
5631 { 0x120000, 0x4000 },
5632 { 0x1a0000, 0x4000 },
5633 { 0x160000, 0x4000 },
5634 { 0xffffffff, 0 },
5635 },
5636 mem_tbl_5709[] = {
5637 { 0x60000, 0x4000 },
5638 { 0xa0000, 0x3000 },
5639 { 0xe0000, 0x4000 },
5640 { 0x120000, 0x4000 },
5641 { 0x1a0000, 0x4000 },
5642 { 0xffffffff, 0 },
5643 };
5644 struct mem_entry *mem_tbl;
5645
5646 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5647 mem_tbl = mem_tbl_5709;
5648 else
5649 mem_tbl = mem_tbl_5706;
5650
5651 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5652 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5653 mem_tbl[i].len)) != 0) {
5654 return ret;
5655 }
5656 }
5657
5658 return ret;
5659 }
5660
5661 #define BNX2_MAC_LOOPBACK 0
5662 #define BNX2_PHY_LOOPBACK 1
5663
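/* Send a single test packet through the selected (MAC or PHY) loopback
 * path and verify that it comes back intact on the RX ring.
 */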
5664 static int
5665 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5666 {
5667 unsigned int pkt_size, num_pkts, i;
5668 struct sk_buff *skb, *rx_skb;
5669 unsigned char *packet;
5670 u16 rx_start_idx, rx_idx;
5671 dma_addr_t map;
5672 struct tx_bd *txbd;
5673 struct sw_bd *rx_buf;
5674 struct l2_fhdr *rx_hdr;
5675 int ret = -ENODEV;
5676 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5677 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5678 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5679
5680 tx_napi = bnapi;
5681
5682 txr = &tx_napi->tx_ring;
5683 rxr = &bnapi->rx_ring;
5684 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5685 bp->loopback = MAC_LOOPBACK;
5686 bnx2_set_mac_loopback(bp);
5687 }
5688 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5689 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5690 return 0;
5691
5692 bp->loopback = PHY_LOOPBACK;
5693 bnx2_set_phy_loopback(bp);
5694 }
5695 else
5696 return -EINVAL;
5697
5698 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5699 skb = netdev_alloc_skb(bp->dev, pkt_size);
5700 if (!skb)
5701 return -ENOMEM;
5702 packet = skb_put(skb, pkt_size);
5703 memcpy(packet, bp->dev->dev_addr, 6);
5704 memset(packet + 6, 0x0, 8);
5705 for (i = 14; i < pkt_size; i++)
5706 packet[i] = (unsigned char) (i & 0xff);
5707
5708 map = pci_map_single(bp->pdev, skb->data, pkt_size,
5709 PCI_DMA_TODEVICE);
5710 if (pci_dma_mapping_error(bp->pdev, map)) {
5711 dev_kfree_skb(skb);
5712 return -EIO;
5713 }
5714
5715 REG_WR(bp, BNX2_HC_COMMAND,
5716 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5717
5718 REG_RD(bp, BNX2_HC_COMMAND);
5719
5720 udelay(5);
5721 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5722
5723 num_pkts = 0;
5724
5725 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5726
5727 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5728 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5729 txbd->tx_bd_mss_nbytes = pkt_size;
5730 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5731
5732 num_pkts++;
5733 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5734 txr->tx_prod_bseq += pkt_size;
5735
5736 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5737 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5738
5739 udelay(100);
5740
5741 REG_WR(bp, BNX2_HC_COMMAND,
5742 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5743
5744 REG_RD(bp, BNX2_HC_COMMAND);
5745
5746 udelay(5);
5747
5748 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5749 dev_kfree_skb(skb);
5750
5751 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5752 goto loopback_test_done;
5753
5754 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5755 if (rx_idx != rx_start_idx + num_pkts) {
5756 goto loopback_test_done;
5757 }
5758
5759 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5760 rx_skb = rx_buf->skb;
5761
5762 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5763 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5764
5765 pci_dma_sync_single_for_cpu(bp->pdev,
5766 pci_unmap_addr(rx_buf, mapping),
5767 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5768
5769 if (rx_hdr->l2_fhdr_status &
5770 (L2_FHDR_ERRORS_BAD_CRC |
5771 L2_FHDR_ERRORS_PHY_DECODE |
5772 L2_FHDR_ERRORS_ALIGNMENT |
5773 L2_FHDR_ERRORS_TOO_SHORT |
5774 L2_FHDR_ERRORS_GIANT_FRAME)) {
5775
5776 goto loopback_test_done;
5777 }
5778
5779 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5780 goto loopback_test_done;
5781 }
5782
5783 for (i = 14; i < pkt_size; i++) {
5784 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5785 goto loopback_test_done;
5786 }
5787 }
5788
5789 ret = 0;
5790
5791 loopback_test_done:
5792 bp->loopback = 0;
5793 return ret;
5794 }
5795
5796 #define BNX2_MAC_LOOPBACK_FAILED 1
5797 #define BNX2_PHY_LOOPBACK_FAILED 2
5798 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5799 BNX2_PHY_LOOPBACK_FAILED)
5800
5801 static int
5802 bnx2_test_loopback(struct bnx2 *bp)
5803 {
5804 int rc = 0;
5805
5806 if (!netif_running(bp->dev))
5807 return BNX2_LOOPBACK_FAILED;
5808
5809 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5810 spin_lock_bh(&bp->phy_lock);
5811 bnx2_init_phy(bp, 1);
5812 spin_unlock_bh(&bp->phy_lock);
5813 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5814 rc |= BNX2_MAC_LOOPBACK_FAILED;
5815 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5816 rc |= BNX2_PHY_LOOPBACK_FAILED;
5817 return rc;
5818 }
5819
5820 #define NVRAM_SIZE 0x200
5821 #define CRC32_RESIDUAL 0xdebb20e3
5822
5823 static int
5824 bnx2_test_nvram(struct bnx2 *bp)
5825 {
5826 __be32 buf[NVRAM_SIZE / 4];
5827 u8 *data = (u8 *) buf;
5828 int rc = 0;
5829 u32 magic, csum;
5830
5831 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5832 goto test_nvram_done;
5833
5834 magic = be32_to_cpu(buf[0]);
5835 if (magic != 0x669955aa) {
5836 rc = -ENODEV;
5837 goto test_nvram_done;
5838 }
5839
5840 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5841 goto test_nvram_done;
5842
5843 csum = ether_crc_le(0x100, data);
5844 if (csum != CRC32_RESIDUAL) {
5845 rc = -ENODEV;
5846 goto test_nvram_done;
5847 }
5848
5849 csum = ether_crc_le(0x100, data + 0x100);
5850 if (csum != CRC32_RESIDUAL) {
5851 rc = -ENODEV;
5852 }
5853
5854 test_nvram_done:
5855 return rc;
5856 }
5857
5858 static int
5859 bnx2_test_link(struct bnx2 *bp)
5860 {
5861 u32 bmsr;
5862
5863 if (!netif_running(bp->dev))
5864 return -ENODEV;
5865
5866 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5867 if (bp->link_up)
5868 return 0;
5869 return -ENODEV;
5870 }
5871 spin_lock_bh(&bp->phy_lock);
5872 bnx2_enable_bmsr1(bp);
5873 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5874 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5875 bnx2_disable_bmsr1(bp);
5876 spin_unlock_bh(&bp->phy_lock);
5877
5878 if (bmsr & BMSR_LSTATUS) {
5879 return 0;
5880 }
5881 return -ENODEV;
5882 }
5883
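/* Verify that interrupts are being delivered: force a coalescing event
 * and wait up to ~100ms for the status block index to advance.
 */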
5884 static int
5885 bnx2_test_intr(struct bnx2 *bp)
5886 {
5887 int i;
5888 u16 status_idx;
5889
5890 if (!netif_running(bp->dev))
5891 return -ENODEV;
5892
5893 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5894
5895 /* This register is not touched during run-time. */
5896 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5897 REG_RD(bp, BNX2_HC_COMMAND);
5898
5899 for (i = 0; i < 10; i++) {
5900 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5901 status_idx) {
5902
5903 break;
5904 }
5905
5906 msleep_interruptible(10);
5907 }
5908 if (i < 10)
5909 return 0;
5910
5911 return -ENODEV;
5912 }
5913
5914 /* Determine link state for parallel detection. */
5915 static int
5916 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5917 {
5918 u32 mode_ctl, an_dbg, exp;
5919
5920 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5921 return 0;
5922
5923 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5924 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5925
5926 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5927 return 0;
5928
5929 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5930 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5931 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5932
5933 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5934 return 0;
5935
5936 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5937 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5938 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5939
5940 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5941 return 0;
5942
5943 return 1;
5944 }
5945
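/* Periodic 5706 SerDes link check.  If autoneg makes no progress but
 * parallel detection sees a link partner, force 1Gb full duplex;
 * re-enable autoneg once the partner is seen to autonegotiate.
 */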
5946 static void
5947 bnx2_5706_serdes_timer(struct bnx2 *bp)
5948 {
5949 int check_link = 1;
5950
5951 spin_lock(&bp->phy_lock);
5952 if (bp->serdes_an_pending) {
5953 bp->serdes_an_pending--;
5954 check_link = 0;
5955 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5956 u32 bmcr;
5957
5958 bp->current_interval = BNX2_TIMER_INTERVAL;
5959
5960 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5961
5962 if (bmcr & BMCR_ANENABLE) {
5963 if (bnx2_5706_serdes_has_link(bp)) {
5964 bmcr &= ~BMCR_ANENABLE;
5965 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5966 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5967 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5968 }
5969 }
5970 }
5971 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5972 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5973 u32 phy2;
5974
5975 bnx2_write_phy(bp, 0x17, 0x0f01);
5976 bnx2_read_phy(bp, 0x15, &phy2);
5977 if (phy2 & 0x20) {
5978 u32 bmcr;
5979
5980 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5981 bmcr |= BMCR_ANENABLE;
5982 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5983
5984 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5985 }
5986 } else
5987 bp->current_interval = BNX2_TIMER_INTERVAL;
5988
5989 if (check_link) {
5990 u32 val;
5991
5992 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5993 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5994 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5995
5996 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5997 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5998 bnx2_5706s_force_link_dn(bp, 1);
5999 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6000 } else
6001 bnx2_set_link(bp);
6002 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6003 bnx2_set_link(bp);
6004 }
6005 spin_unlock(&bp->phy_lock);
6006 }
6007
6008 static void
6009 bnx2_5708_serdes_timer(struct bnx2 *bp)
6010 {
6011 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6012 return;
6013
6014 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6015 bp->serdes_an_pending = 0;
6016 return;
6017 }
6018
6019 spin_lock(&bp->phy_lock);
6020 if (bp->serdes_an_pending)
6021 bp->serdes_an_pending--;
6022 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6023 u32 bmcr;
6024
6025 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6026 if (bmcr & BMCR_ANENABLE) {
6027 bnx2_enable_forced_2g5(bp);
6028 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6029 } else {
6030 bnx2_disable_forced_2g5(bp);
6031 bp->serdes_an_pending = 2;
6032 bp->current_interval = BNX2_TIMER_INTERVAL;
6033 }
6034
6035 } else
6036 bp->current_interval = BNX2_TIMER_INTERVAL;
6037
6038 spin_unlock(&bp->phy_lock);
6039 }
6040
6041 static void
6042 bnx2_timer(unsigned long data)
6043 {
6044 struct bnx2 *bp = (struct bnx2 *) data;
6045
6046 if (!netif_running(bp->dev))
6047 return;
6048
6049 if (atomic_read(&bp->intr_sem) != 0)
6050 goto bnx2_restart_timer;
6051
6052 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6053 BNX2_FLAG_USING_MSI)
6054 bnx2_chk_missed_msi(bp);
6055
6056 bnx2_send_heart_beat(bp);
6057
6058 bp->stats_blk->stat_FwRxDrop =
6059 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6060
6061 	/* Work around occasionally corrupted statistics counters. */
6062 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6063 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6064 BNX2_HC_COMMAND_STATS_NOW);
6065
6066 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6067 if (CHIP_NUM(bp) == CHIP_NUM_5706)
6068 bnx2_5706_serdes_timer(bp);
6069 else
6070 bnx2_5708_serdes_timer(bp);
6071 }
6072
6073 bnx2_restart_timer:
6074 mod_timer(&bp->timer, jiffies + bp->current_interval);
6075 }
6076
6077 static int
6078 bnx2_request_irq(struct bnx2 *bp)
6079 {
6080 unsigned long flags;
6081 struct bnx2_irq *irq;
6082 int rc = 0, i;
6083
6084 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6085 flags = 0;
6086 else
6087 flags = IRQF_SHARED;
6088
6089 for (i = 0; i < bp->irq_nvecs; i++) {
6090 irq = &bp->irq_tbl[i];
6091 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6092 &bp->bnx2_napi[i]);
6093 if (rc)
6094 break;
6095 irq->requested = 1;
6096 }
6097 return rc;
6098 }
6099
6100 static void
6101 bnx2_free_irq(struct bnx2 *bp)
6102 {
6103 struct bnx2_irq *irq;
6104 int i;
6105
6106 for (i = 0; i < bp->irq_nvecs; i++) {
6107 irq = &bp->irq_tbl[i];
6108 if (irq->requested)
6109 free_irq(irq->vector, &bp->bnx2_napi[i]);
6110 irq->requested = 0;
6111 }
6112 if (bp->flags & BNX2_FLAG_USING_MSI)
6113 pci_disable_msi(bp->pdev);
6114 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6115 pci_disable_msix(bp->pdev);
6116
6117 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6118 }
6119
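/* Program the MSI-X table location and try to enable MSI-X.  On success
 * the per-vector IRQ table is filled in with one-shot MSI handlers.
 */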
6120 static void
6121 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6122 {
6123 int i, rc;
6124 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6125 struct net_device *dev = bp->dev;
6126 const int len = sizeof(bp->irq_tbl[0].name);
6127
6128 bnx2_setup_msix_tbl(bp);
6129 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6130 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6131 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6132
6133 	/* Need to flush the previous three writes to ensure MSI-X
6134 	 * is set up properly */
6135 REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6136
6137 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6138 msix_ent[i].entry = i;
6139 msix_ent[i].vector = 0;
6140 }
6141
6142 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
6143 if (rc != 0)
6144 return;
6145
6146 bp->irq_nvecs = msix_vecs;
6147 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6148 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6149 bp->irq_tbl[i].vector = msix_ent[i].vector;
6150 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6151 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6152 }
6153 }
6154
6155 static void
6156 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6157 {
6158 int cpus = num_online_cpus();
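/* One vector per online CPU plus one, capped at RX_MAX_RINGS. */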
6159 int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
6160
6161 bp->irq_tbl[0].handler = bnx2_interrupt;
6162 strcpy(bp->irq_tbl[0].name, bp->dev->name);
6163 bp->irq_nvecs = 1;
6164 bp->irq_tbl[0].vector = bp->pdev->irq;
6165
6166 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
6167 bnx2_enable_msix(bp, msix_vecs);
6168
6169 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6170 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6171 if (pci_enable_msi(bp->pdev) == 0) {
6172 bp->flags |= BNX2_FLAG_USING_MSI;
6173 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6174 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6175 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6176 } else
6177 bp->irq_tbl[0].handler = bnx2_msi;
6178
6179 bp->irq_tbl[0].vector = bp->pdev->irq;
6180 }
6181 }
6182
6183 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6184 bp->dev->real_num_tx_queues = bp->num_tx_rings;
6185
6186 bp->num_rx_rings = bp->irq_nvecs;
6187 }
6188
6189 /* Called with rtnl_lock */
6190 static int
6191 bnx2_open(struct net_device *dev)
6192 {
6193 struct bnx2 *bp = netdev_priv(dev);
6194 int rc;
6195
6196 netif_carrier_off(dev);
6197
6198 bnx2_set_power_state(bp, PCI_D0);
6199 bnx2_disable_int(bp);
6200
6201 bnx2_setup_int_mode(bp, disable_msi);
6202 bnx2_init_napi(bp);
6203 bnx2_napi_enable(bp);
6204 rc = bnx2_alloc_mem(bp);
6205 if (rc)
6206 goto open_err;
6207
6208 rc = bnx2_request_irq(bp);
6209 if (rc)
6210 goto open_err;
6211
6212 rc = bnx2_init_nic(bp, 1);
6213 if (rc)
6214 goto open_err;
6215
6216 mod_timer(&bp->timer, jiffies + bp->current_interval);
6217
6218 atomic_set(&bp->intr_sem, 0);
6219
6220 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6221
6222 bnx2_enable_int(bp);
6223
6224 if (bp->flags & BNX2_FLAG_USING_MSI) {
6225 		/* Test MSI to make sure it is working.
6226 		 * If the MSI test fails, fall back to INTx mode.
6227 		 */
6228 if (bnx2_test_intr(bp) != 0) {
6229 netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6230
6231 bnx2_disable_int(bp);
6232 bnx2_free_irq(bp);
6233
6234 bnx2_setup_int_mode(bp, 1);
6235
6236 rc = bnx2_init_nic(bp, 0);
6237
6238 if (!rc)
6239 rc = bnx2_request_irq(bp);
6240
6241 if (rc) {
6242 del_timer_sync(&bp->timer);
6243 goto open_err;
6244 }
6245 bnx2_enable_int(bp);
6246 }
6247 }
6248 if (bp->flags & BNX2_FLAG_USING_MSI)
6249 netdev_info(dev, "using MSI\n");
6250 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6251 netdev_info(dev, "using MSIX\n");
6252
6253 netif_tx_start_all_queues(dev);
6254
6255 return 0;
6256
6257 open_err:
6258 bnx2_napi_disable(bp);
6259 bnx2_free_skbs(bp);
6260 bnx2_free_irq(bp);
6261 bnx2_free_mem(bp);
6262 return rc;
6263 }
6264
6265 static void
6266 bnx2_reset_task(struct work_struct *work)
6267 {
6268 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6269
6270 rtnl_lock();
6271 if (!netif_running(bp->dev)) {
6272 rtnl_unlock();
6273 return;
6274 }
6275
6276 bnx2_netif_stop(bp);
6277
6278 bnx2_init_nic(bp, 1);
6279
6280 atomic_set(&bp->intr_sem, 1);
6281 bnx2_netif_start(bp);
6282 rtnl_unlock();
6283 }
6284
6285 static void
6286 bnx2_dump_state(struct bnx2 *bp)
6287 {
6288 struct net_device *dev = bp->dev;
6289
6290 netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
6291 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] RPM_MGMT_PKT_CTRL[%08x]\n",
6292 REG_RD(bp, BNX2_EMAC_TX_STATUS),
6293 REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6294 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
6295 bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
6296 bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
6297 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6298 REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6299 if (bp->flags & BNX2_FLAG_USING_MSIX)
6300 netdev_err(dev, "DEBUG: PBA[%08x]\n",
6301 REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6302 }
6303
6304 static void
6305 bnx2_tx_timeout(struct net_device *dev)
6306 {
6307 struct bnx2 *bp = netdev_priv(dev);
6308
6309 bnx2_dump_state(bp);
6310
6311 	/* This allows the netif to be shut down gracefully before resetting. */
6312 schedule_work(&bp->reset_task);
6313 }
6314
6315 #ifdef BCM_VLAN
6316 /* Called with rtnl_lock */
6317 static void
6318 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6319 {
6320 struct bnx2 *bp = netdev_priv(dev);
6321
6322 if (netif_running(dev))
6323 bnx2_netif_stop(bp);
6324
6325 bp->vlgrp = vlgrp;
6326
6327 if (!netif_running(dev))
6328 return;
6329
6330 bnx2_set_rx_mode(dev);
6331 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6332 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
6333
6334 bnx2_netif_start(bp);
6335 }
6336 #endif
6337
6338 /* Called with netif_tx_lock.
6339 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6340 * netif_wake_queue().
6341 */
6342 static netdev_tx_t
6343 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6344 {
6345 struct bnx2 *bp = netdev_priv(dev);
6346 dma_addr_t mapping;
6347 struct tx_bd *txbd;
6348 struct sw_tx_bd *tx_buf;
6349 u32 len, vlan_tag_flags, last_frag, mss;
6350 u16 prod, ring_prod;
6351 int i;
6352 struct bnx2_napi *bnapi;
6353 struct bnx2_tx_ring_info *txr;
6354 struct netdev_queue *txq;
6355
6356 	/* Determine which tx ring this packet will be placed on */
6357 i = skb_get_queue_mapping(skb);
6358 bnapi = &bp->bnx2_napi[i];
6359 txr = &bnapi->tx_ring;
6360 txq = netdev_get_tx_queue(dev, i);
6361
6362 if (unlikely(bnx2_tx_avail(bp, txr) <
6363 (skb_shinfo(skb)->nr_frags + 1))) {
6364 netif_tx_stop_queue(txq);
6365 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6366
6367 return NETDEV_TX_BUSY;
6368 }
6369 len = skb_headlen(skb);
6370 prod = txr->tx_prod;
6371 ring_prod = TX_RING_IDX(prod);
6372
6373 vlan_tag_flags = 0;
6374 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6375 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6376 }
6377
6378 #ifdef BCM_VLAN
6379 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
6380 vlan_tag_flags |=
6381 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6382 }
6383 #endif
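/* TSO: record the MSS in the BD and encode the header-option lengths.
 * For IPv6 the TCP header offset is packed into the TCP6_OFF* flag
 * fields; for IPv4 only the combined IP/TCP option length is needed.
 */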
6384 if ((mss = skb_shinfo(skb)->gso_size)) {
6385 u32 tcp_opt_len;
6386 struct iphdr *iph;
6387
6388 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6389
6390 tcp_opt_len = tcp_optlen(skb);
6391
6392 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6393 u32 tcp_off = skb_transport_offset(skb) -
6394 sizeof(struct ipv6hdr) - ETH_HLEN;
6395
6396 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6397 TX_BD_FLAGS_SW_FLAGS;
6398 if (likely(tcp_off == 0))
6399 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6400 else {
6401 tcp_off >>= 3;
6402 vlan_tag_flags |= ((tcp_off & 0x3) <<
6403 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6404 ((tcp_off & 0x10) <<
6405 TX_BD_FLAGS_TCP6_OFF4_SHL);
6406 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6407 }
6408 } else {
6409 iph = ip_hdr(skb);
6410 if (tcp_opt_len || (iph->ihl > 5)) {
6411 vlan_tag_flags |= ((iph->ihl - 5) +
6412 (tcp_opt_len >> 2)) << 8;
6413 }
6414 }
6415 } else
6416 mss = 0;
6417
6418 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6419 if (pci_dma_mapping_error(bp->pdev, mapping)) {
6420 dev_kfree_skb(skb);
6421 return NETDEV_TX_OK;
6422 }
6423
6424 tx_buf = &txr->tx_buf_ring[ring_prod];
6425 tx_buf->skb = skb;
6426 pci_unmap_addr_set(tx_buf, mapping, mapping);
6427
6428 txbd = &txr->tx_desc_ring[ring_prod];
6429
6430 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6431 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6432 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6433 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6434
6435 last_frag = skb_shinfo(skb)->nr_frags;
6436 tx_buf->nr_frags = last_frag;
6437 tx_buf->is_gso = skb_is_gso(skb);
6438
6439 for (i = 0; i < last_frag; i++) {
6440 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6441
6442 prod = NEXT_TX_BD(prod);
6443 ring_prod = TX_RING_IDX(prod);
6444 txbd = &txr->tx_desc_ring[ring_prod];
6445
6446 len = frag->size;
6447 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
6448 len, PCI_DMA_TODEVICE);
6449 if (pci_dma_mapping_error(bp->pdev, mapping))
6450 goto dma_error;
6451 pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6452 mapping);
6453
6454 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6455 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6456 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6457 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6459 }
6460 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6461
6462 prod = NEXT_TX_BD(prod);
6463 txr->tx_prod_bseq += skb->len;
6464
6465 REG_WR16(bp, txr->tx_bidx_addr, prod);
6466 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6467
6468 mmiowb();
6469
6470 txr->tx_prod = prod;
6471
6472 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6473 netif_tx_stop_queue(txq);
6474 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6475 netif_tx_wake_queue(txq);
6476 }
6477
6478 return NETDEV_TX_OK;
6479 dma_error:
6480 	/* save the index of the frag that failed to map */
6481 last_frag = i;
6482
6483 /* start back at beginning and unmap skb */
6484 prod = txr->tx_prod;
6485 ring_prod = TX_RING_IDX(prod);
6486 tx_buf = &txr->tx_buf_ring[ring_prod];
6487 tx_buf->skb = NULL;
6488 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
6489 skb_headlen(skb), PCI_DMA_TODEVICE);
6490
6491 /* unmap remaining mapped pages */
6492 for (i = 0; i < last_frag; i++) {
6493 prod = NEXT_TX_BD(prod);
6494 ring_prod = TX_RING_IDX(prod);
6495 tx_buf = &txr->tx_buf_ring[ring_prod];
6496 pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
6497 skb_shinfo(skb)->frags[i].size,
6498 PCI_DMA_TODEVICE);
6499 }
6500
6501 dev_kfree_skb(skb);
6502 return NETDEV_TX_OK;
6503 }
6504
6505 /* Called with rtnl_lock */
6506 static int
6507 bnx2_close(struct net_device *dev)
6508 {
6509 struct bnx2 *bp = netdev_priv(dev);
6510
6511 cancel_work_sync(&bp->reset_task);
6512
6513 bnx2_disable_int_sync(bp);
6514 bnx2_napi_disable(bp);
6515 del_timer_sync(&bp->timer);
6516 bnx2_shutdown_chip(bp);
6517 bnx2_free_irq(bp);
6518 bnx2_free_skbs(bp);
6519 bnx2_free_mem(bp);
6520 bp->link_up = 0;
6521 netif_carrier_off(bp->dev);
6522 bnx2_set_power_state(bp, PCI_D3hot);
6523 return 0;
6524 }
6525
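/* Fold the live hardware statistics block into temp_stats_blk before
 * a chip reset erases it.  The leading counters are 64-bit hi/lo word
 * pairs and need an explicit carry; the rest are plain 32-bit adds.
 */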
6526 static void
6527 bnx2_save_stats(struct bnx2 *bp)
6528 {
6529 u32 *hw_stats = (u32 *) bp->stats_blk;
6530 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6531 int i;
6532
6533 	/* The first 10 counters are 64-bit counters, stored as hi/lo 32-bit word pairs. */
6534 for (i = 0; i < 20; i += 2) {
6535 u32 hi;
6536 u64 lo;
6537
6538 hi = temp_stats[i] + hw_stats[i];
6539 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6540 if (lo > 0xffffffff)
6541 hi++;
6542 temp_stats[i] = hi;
6543 temp_stats[i + 1] = lo & 0xffffffff;
6544 }
6545
6546 for ( ; i < sizeof(struct statistics_block) / 4; i++)
6547 temp_stats[i] += hw_stats[i];
6548 }
6549
6550 #define GET_64BIT_NET_STATS64(ctr) \
6551 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
6552 (unsigned long) (ctr##_lo)
6553
6554 #define GET_64BIT_NET_STATS32(ctr) \
6555 (ctr##_lo)
6556
6557 #if (BITS_PER_LONG == 64)
6558 #define GET_64BIT_NET_STATS(ctr) \
6559 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6560 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6561 #else
6562 #define GET_64BIT_NET_STATS(ctr) \
6563 GET_64BIT_NET_STATS32(bp->stats_blk->ctr) + \
6564 GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
6565 #endif
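/* On 32-bit hosts the net_device_stats fields are only 32 bits wide,
 * so just the low word of each 64-bit hardware counter is reported. */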
6566
6567 #define GET_32BIT_NET_STATS(ctr) \
6568 (unsigned long) (bp->stats_blk->ctr + \
6569 bp->temp_stats_blk->ctr)
6570
6571 static struct net_device_stats *
6572 bnx2_get_stats(struct net_device *dev)
6573 {
6574 struct bnx2 *bp = netdev_priv(dev);
6575 struct net_device_stats *net_stats = &dev->stats;
6576
6577 if (bp->stats_blk == NULL) {
6578 return net_stats;
6579 }
6580 net_stats->rx_packets =
6581 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6582 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6583 GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6584
6585 net_stats->tx_packets =
6586 GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6587 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6588 GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6589
6590 net_stats->rx_bytes =
6591 GET_64BIT_NET_STATS(stat_IfHCInOctets);
6592
6593 net_stats->tx_bytes =
6594 GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6595
6596 net_stats->multicast =
6597 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);
6598
6599 net_stats->collisions =
6600 GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6601
6602 net_stats->rx_length_errors =
6603 GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6604 GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6605
6606 net_stats->rx_over_errors =
6607 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6608 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6609
6610 net_stats->rx_frame_errors =
6611 GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6612
6613 net_stats->rx_crc_errors =
6614 GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6615
6616 net_stats->rx_errors = net_stats->rx_length_errors +
6617 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6618 net_stats->rx_crc_errors;
6619
6620 net_stats->tx_aborted_errors =
6621 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6622 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6623
6624 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6625 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6626 net_stats->tx_carrier_errors = 0;
6627 else {
6628 net_stats->tx_carrier_errors =
6629 GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6630 }
6631
6632 net_stats->tx_errors =
6633 GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6634 net_stats->tx_aborted_errors +
6635 net_stats->tx_carrier_errors;
6636
6637 net_stats->rx_missed_errors =
6638 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6639 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6640 GET_32BIT_NET_STATS(stat_FwRxDrop);
6641
6642 return net_stats;
6643 }
6644
6645 /* All ethtool functions called with rtnl_lock */
6646
6647 static int
6648 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6649 {
6650 struct bnx2 *bp = netdev_priv(dev);
6651 int support_serdes = 0, support_copper = 0;
6652
6653 cmd->supported = SUPPORTED_Autoneg;
6654 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6655 support_serdes = 1;
6656 support_copper = 1;
6657 } else if (bp->phy_port == PORT_FIBRE)
6658 support_serdes = 1;
6659 else
6660 support_copper = 1;
6661
6662 if (support_serdes) {
6663 cmd->supported |= SUPPORTED_1000baseT_Full |
6664 SUPPORTED_FIBRE;
6665 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6666 cmd->supported |= SUPPORTED_2500baseX_Full;
6667
6668 }
6669 if (support_copper) {
6670 cmd->supported |= SUPPORTED_10baseT_Half |
6671 SUPPORTED_10baseT_Full |
6672 SUPPORTED_100baseT_Half |
6673 SUPPORTED_100baseT_Full |
6674 SUPPORTED_1000baseT_Full |
6675 SUPPORTED_TP;
6676
6677 }
6678
6679 spin_lock_bh(&bp->phy_lock);
6680 cmd->port = bp->phy_port;
6681 cmd->advertising = bp->advertising;
6682
6683 if (bp->autoneg & AUTONEG_SPEED) {
6684 cmd->autoneg = AUTONEG_ENABLE;
6685 }
6686 else {
6687 cmd->autoneg = AUTONEG_DISABLE;
6688 }
6689
6690 if (netif_carrier_ok(dev)) {
6691 cmd->speed = bp->line_speed;
6692 cmd->duplex = bp->duplex;
6693 }
6694 else {
6695 cmd->speed = -1;
6696 cmd->duplex = -1;
6697 }
6698 spin_unlock_bh(&bp->phy_lock);
6699
6700 cmd->transceiver = XCVR_INTERNAL;
6701 cmd->phy_address = bp->phy_addr;
6702
6703 return 0;
6704 }
6705
6706 static int
6707 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6708 {
6709 struct bnx2 *bp = netdev_priv(dev);
6710 u8 autoneg = bp->autoneg;
6711 u8 req_duplex = bp->req_duplex;
6712 u16 req_line_speed = bp->req_line_speed;
6713 u32 advertising = bp->advertising;
6714 int err = -EINVAL;
6715
6716 spin_lock_bh(&bp->phy_lock);
6717
6718 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6719 goto err_out_unlock;
6720
6721 if (cmd->port != bp->phy_port &&
6722 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6723 goto err_out_unlock;
6724
6725 /* If device is down, we can store the settings only if the user
6726 * is setting the currently active port.
6727 */
6728 if (!netif_running(dev) && cmd->port != bp->phy_port)
6729 goto err_out_unlock;
6730
6731 if (cmd->autoneg == AUTONEG_ENABLE) {
6732 autoneg |= AUTONEG_SPEED;
6733
6734 advertising = cmd->advertising;
6735 if (cmd->port == PORT_TP) {
6736 advertising &= ETHTOOL_ALL_COPPER_SPEED;
6737 if (!advertising)
6738 advertising = ETHTOOL_ALL_COPPER_SPEED;
6739 } else {
6740 advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6741 if (!advertising)
6742 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6743 }
6744 advertising |= ADVERTISED_Autoneg;
6745 }
6746 else {
6747 if (cmd->port == PORT_FIBRE) {
6748 if ((cmd->speed != SPEED_1000 &&
6749 cmd->speed != SPEED_2500) ||
6750 (cmd->duplex != DUPLEX_FULL))
6751 goto err_out_unlock;
6752
6753 if (cmd->speed == SPEED_2500 &&
6754 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6755 goto err_out_unlock;
6756 }
6757 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6758 goto err_out_unlock;
6759
6760 autoneg &= ~AUTONEG_SPEED;
6761 req_line_speed = cmd->speed;
6762 req_duplex = cmd->duplex;
6763 advertising = 0;
6764 }
6765
6766 bp->autoneg = autoneg;
6767 bp->advertising = advertising;
6768 bp->req_line_speed = req_line_speed;
6769 bp->req_duplex = req_duplex;
6770
6771 err = 0;
6772 /* If device is down, the new settings will be picked up when it is
6773 * brought up.
6774 */
6775 if (netif_running(dev))
6776 err = bnx2_setup_phy(bp, cmd->port);
6777
6778 err_out_unlock:
6779 spin_unlock_bh(&bp->phy_lock);
6780
6781 return err;
6782 }
6783
6784 static void
6785 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6786 {
6787 struct bnx2 *bp = netdev_priv(dev);
6788
6789 strcpy(info->driver, DRV_MODULE_NAME);
6790 strcpy(info->version, DRV_MODULE_VERSION);
6791 strcpy(info->bus_info, pci_name(bp->pdev));
6792 strcpy(info->fw_version, bp->fw_version);
6793 }
6794
6795 #define BNX2_REGDUMP_LEN (32 * 1024)
6796
6797 static int
6798 bnx2_get_regs_len(struct net_device *dev)
6799 {
6800 return BNX2_REGDUMP_LEN;
6801 }
6802
6803 static void
6804 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6805 {
6806 u32 *p = _p, i, offset;
6807 u8 *orig_p = _p;
6808 struct bnx2 *bp = netdev_priv(dev);
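/* Pairs of [start, end) register offsets to dump; the gaps between
 * ranges are left zero-filled in the output buffer. */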
6809 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6810 0x0800, 0x0880, 0x0c00, 0x0c10,
6811 0x0c30, 0x0d08, 0x1000, 0x101c,
6812 0x1040, 0x1048, 0x1080, 0x10a4,
6813 0x1400, 0x1490, 0x1498, 0x14f0,
6814 0x1500, 0x155c, 0x1580, 0x15dc,
6815 0x1600, 0x1658, 0x1680, 0x16d8,
6816 0x1800, 0x1820, 0x1840, 0x1854,
6817 0x1880, 0x1894, 0x1900, 0x1984,
6818 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6819 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6820 0x2000, 0x2030, 0x23c0, 0x2400,
6821 0x2800, 0x2820, 0x2830, 0x2850,
6822 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6823 0x3c00, 0x3c94, 0x4000, 0x4010,
6824 0x4080, 0x4090, 0x43c0, 0x4458,
6825 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6826 0x4fc0, 0x5010, 0x53c0, 0x5444,
6827 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6828 0x5fc0, 0x6000, 0x6400, 0x6428,
6829 0x6800, 0x6848, 0x684c, 0x6860,
6830 0x6888, 0x6910, 0x8000 };
6831
6832 regs->version = 0;
6833
6834 memset(p, 0, BNX2_REGDUMP_LEN);
6835
6836 if (!netif_running(bp->dev))
6837 return;
6838
6839 i = 0;
6840 offset = reg_boundaries[0];
6841 p += offset;
6842 while (offset < BNX2_REGDUMP_LEN) {
6843 *p++ = REG_RD(bp, offset);
6844 offset += 4;
6845 if (offset == reg_boundaries[i + 1]) {
6846 offset = reg_boundaries[i + 2];
6847 p = (u32 *) (orig_p + offset);
6848 i += 2;
6849 }
6850 }
6851 }
6852
6853 static void
6854 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6855 {
6856 struct bnx2 *bp = netdev_priv(dev);
6857
6858 if (bp->flags & BNX2_FLAG_NO_WOL) {
6859 wol->supported = 0;
6860 wol->wolopts = 0;
6861 }
6862 else {
6863 wol->supported = WAKE_MAGIC;
6864 if (bp->wol)
6865 wol->wolopts = WAKE_MAGIC;
6866 else
6867 wol->wolopts = 0;
6868 }
6869 memset(&wol->sopass, 0, sizeof(wol->sopass));
6870 }
6871
6872 static int
6873 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6874 {
6875 struct bnx2 *bp = netdev_priv(dev);
6876
6877 if (wol->wolopts & ~WAKE_MAGIC)
6878 return -EINVAL;
6879
6880 if (wol->wolopts & WAKE_MAGIC) {
6881 if (bp->flags & BNX2_FLAG_NO_WOL)
6882 return -EINVAL;
6883
6884 bp->wol = 1;
6885 }
6886 else {
6887 bp->wol = 0;
6888 }
6889 return 0;
6890 }
6891
6892 static int
6893 bnx2_nway_reset(struct net_device *dev)
6894 {
6895 struct bnx2 *bp = netdev_priv(dev);
6896 u32 bmcr;
6897
6898 if (!netif_running(dev))
6899 return -EAGAIN;
6900
6901 if (!(bp->autoneg & AUTONEG_SPEED)) {
6902 return -EINVAL;
6903 }
6904
6905 spin_lock_bh(&bp->phy_lock);
6906
6907 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6908 int rc;
6909
6910 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6911 spin_unlock_bh(&bp->phy_lock);
6912 return rc;
6913 }
6914
6915 	/* Force a link-down event that is visible to the other side */
6916 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6917 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6918 spin_unlock_bh(&bp->phy_lock);
6919
6920 msleep(20);
6921
6922 spin_lock_bh(&bp->phy_lock);
6923
6924 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
6925 bp->serdes_an_pending = 1;
6926 mod_timer(&bp->timer, jiffies + bp->current_interval);
6927 }
6928
6929 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6930 bmcr &= ~BMCR_LOOPBACK;
6931 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6932
6933 spin_unlock_bh(&bp->phy_lock);
6934
6935 return 0;
6936 }
6937
6938 static u32
6939 bnx2_get_link(struct net_device *dev)
6940 {
6941 struct bnx2 *bp = netdev_priv(dev);
6942
6943 return bp->link_up;
6944 }
6945
6946 static int
6947 bnx2_get_eeprom_len(struct net_device *dev)
6948 {
6949 struct bnx2 *bp = netdev_priv(dev);
6950
6951 if (bp->flash_info == NULL)
6952 return 0;
6953
6954 return (int) bp->flash_size;
6955 }
6956
6957 static int
6958 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6959 u8 *eebuf)
6960 {
6961 struct bnx2 *bp = netdev_priv(dev);
6962 int rc;
6963
6964 if (!netif_running(dev))
6965 return -EAGAIN;
6966
6967 /* parameters already validated in ethtool_get_eeprom */
6968
6969 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6970
6971 return rc;
6972 }
6973
6974 static int
6975 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6976 u8 *eebuf)
6977 {
6978 struct bnx2 *bp = netdev_priv(dev);
6979 int rc;
6980
6981 if (!netif_running(dev))
6982 return -EAGAIN;
6983
6984 /* parameters already validated in ethtool_set_eeprom */
6985
6986 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6987
6988 return rc;
6989 }
6990
6991 static int
6992 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6993 {
6994 struct bnx2 *bp = netdev_priv(dev);
6995
6996 memset(coal, 0, sizeof(struct ethtool_coalesce));
6997
6998 coal->rx_coalesce_usecs = bp->rx_ticks;
6999 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7000 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7001 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7002
7003 coal->tx_coalesce_usecs = bp->tx_ticks;
7004 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7005 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7006 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7007
7008 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7009
7010 return 0;
7011 }
7012
7013 static int
7014 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7015 {
7016 struct bnx2 *bp = netdev_priv(dev);
7017
7018 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7019 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7020
7021 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7022 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7023
7024 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7025 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7026
7027 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7028 if (bp->rx_quick_cons_trip_int > 0xff)
7029 bp->rx_quick_cons_trip_int = 0xff;
7030
7031 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7032 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7033
7034 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7035 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7036
7037 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7038 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7039
7040 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7041 	if (bp->tx_quick_cons_trip_int > 0xff)
7042 		bp->tx_quick_cons_trip_int = 0xff;
7043
7044 bp->stats_ticks = coal->stats_block_coalesce_usecs;
7045 if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7046 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7047 bp->stats_ticks = USEC_PER_SEC;
7048 }
7049 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7050 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7051 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7052
7053 if (netif_running(bp->dev)) {
7054 bnx2_netif_stop(bp);
7055 bnx2_init_nic(bp, 0);
7056 bnx2_netif_start(bp);
7057 }
7058
7059 return 0;
7060 }
7061
7062 static void
7063 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7064 {
7065 struct bnx2 *bp = netdev_priv(dev);
7066
7067 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7068 ering->rx_mini_max_pending = 0;
7069 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7070
7071 ering->rx_pending = bp->rx_ring_size;
7072 ering->rx_mini_pending = 0;
7073 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7074
7075 ering->tx_max_pending = MAX_TX_DESC_CNT;
7076 ering->tx_pending = bp->tx_ring_size;
7077 }
7078
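/* Resizing the rings requires a full reinit: save the statistics the
 * chip reset would erase, free and reallocate the ring memory, then
 * restart the NIC (and tell cnic about the relocated status block).
 */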
7079 static int
7080 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7081 {
7082 if (netif_running(bp->dev)) {
7083 /* Reset will erase chipset stats; save them */
7084 bnx2_save_stats(bp);
7085
7086 bnx2_netif_stop(bp);
7087 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7088 bnx2_free_skbs(bp);
7089 bnx2_free_mem(bp);
7090 }
7091
7092 bnx2_set_rx_ring_size(bp, rx);
7093 bp->tx_ring_size = tx;
7094
7095 if (netif_running(bp->dev)) {
7096 int rc;
7097
7098 rc = bnx2_alloc_mem(bp);
7099 if (!rc)
7100 rc = bnx2_init_nic(bp, 0);
7101
7102 if (rc) {
7103 bnx2_napi_enable(bp);
7104 dev_close(bp->dev);
7105 return rc;
7106 }
7107 #ifdef BCM_CNIC
7108 mutex_lock(&bp->cnic_lock);
7109 /* Let cnic know about the new status block. */
7110 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7111 bnx2_setup_cnic_irq_info(bp);
7112 mutex_unlock(&bp->cnic_lock);
7113 #endif
7114 bnx2_netif_start(bp);
7115 }
7116 return 0;
7117 }
7118
7119 static int
7120 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7121 {
7122 struct bnx2 *bp = netdev_priv(dev);
7123 int rc;
7124
7125 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7126 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7127 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7128
7129 return -EINVAL;
7130 }
7131 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7132 return rc;
7133 }
7134
7135 static void
7136 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7137 {
7138 struct bnx2 *bp = netdev_priv(dev);
7139
7140 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7141 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7142 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7143 }
7144
7145 static int
7146 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7147 {
7148 struct bnx2 *bp = netdev_priv(dev);
7149
7150 bp->req_flow_ctrl = 0;
7151 if (epause->rx_pause)
7152 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7153 if (epause->tx_pause)
7154 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7155
7156 if (epause->autoneg) {
7157 bp->autoneg |= AUTONEG_FLOW_CTRL;
7158 }
7159 else {
7160 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7161 }
7162
7163 if (netif_running(dev)) {
7164 spin_lock_bh(&bp->phy_lock);
7165 bnx2_setup_phy(bp, bp->phy_port);
7166 spin_unlock_bh(&bp->phy_lock);
7167 }
7168
7169 return 0;
7170 }
7171
7172 static u32
7173 bnx2_get_rx_csum(struct net_device *dev)
7174 {
7175 struct bnx2 *bp = netdev_priv(dev);
7176
7177 return bp->rx_csum;
7178 }
7179
7180 static int
7181 bnx2_set_rx_csum(struct net_device *dev, u32 data)
7182 {
7183 struct bnx2 *bp = netdev_priv(dev);
7184
7185 bp->rx_csum = data;
7186 return 0;
7187 }
7188
7189 static int
7190 bnx2_set_tso(struct net_device *dev, u32 data)
7191 {
7192 struct bnx2 *bp = netdev_priv(dev);
7193
7194 if (data) {
7195 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7196 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7197 dev->features |= NETIF_F_TSO6;
7198 } else
7199 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7200 NETIF_F_TSO_ECN);
7201 return 0;
7202 }
7203
7204 static struct {
7205 char string[ETH_GSTRING_LEN];
7206 } bnx2_stats_str_arr[] = {
7207 { "rx_bytes" },
7208 { "rx_error_bytes" },
7209 { "tx_bytes" },
7210 { "tx_error_bytes" },
7211 { "rx_ucast_packets" },
7212 { "rx_mcast_packets" },
7213 { "rx_bcast_packets" },
7214 { "tx_ucast_packets" },
7215 { "tx_mcast_packets" },
7216 { "tx_bcast_packets" },
7217 { "tx_mac_errors" },
7218 { "tx_carrier_errors" },
7219 { "rx_crc_errors" },
7220 { "rx_align_errors" },
7221 { "tx_single_collisions" },
7222 { "tx_multi_collisions" },
7223 { "tx_deferred" },
7224 { "tx_excess_collisions" },
7225 { "tx_late_collisions" },
7226 { "tx_total_collisions" },
7227 { "rx_fragments" },
7228 { "rx_jabbers" },
7229 { "rx_undersize_packets" },
7230 { "rx_oversize_packets" },
7231 { "rx_64_byte_packets" },
7232 { "rx_65_to_127_byte_packets" },
7233 { "rx_128_to_255_byte_packets" },
7234 { "rx_256_to_511_byte_packets" },
7235 { "rx_512_to_1023_byte_packets" },
7236 { "rx_1024_to_1522_byte_packets" },
7237 { "rx_1523_to_9022_byte_packets" },
7238 { "tx_64_byte_packets" },
7239 { "tx_65_to_127_byte_packets" },
7240 { "tx_128_to_255_byte_packets" },
7241 { "tx_256_to_511_byte_packets" },
7242 { "tx_512_to_1023_byte_packets" },
7243 { "tx_1024_to_1522_byte_packets" },
7244 { "tx_1523_to_9022_byte_packets" },
7245 { "rx_xon_frames" },
7246 { "rx_xoff_frames" },
7247 { "tx_xon_frames" },
7248 { "tx_xoff_frames" },
7249 { "rx_mac_ctrl_frames" },
7250 { "rx_filtered_packets" },
7251 { "rx_ftq_discards" },
7252 { "rx_discards" },
7253 { "rx_fw_discards" },
7254 };
7255
7256 #define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
7257 sizeof(bnx2_stats_str_arr[0]))
7258
7259 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7260
7261 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7262 STATS_OFFSET32(stat_IfHCInOctets_hi),
7263 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7264 STATS_OFFSET32(stat_IfHCOutOctets_hi),
7265 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7266 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7267 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7268 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7269 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7270 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7271 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7272 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7273 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7274 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7275 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7276 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7277 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7278 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7279 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7280 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7281 STATS_OFFSET32(stat_EtherStatsCollisions),
7282 STATS_OFFSET32(stat_EtherStatsFragments),
7283 STATS_OFFSET32(stat_EtherStatsJabbers),
7284 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7285 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7286 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7287 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7288 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7289 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7290 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7291 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7292 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7293 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7294 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7295 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7296 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7297 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7298 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7299 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7300 STATS_OFFSET32(stat_XonPauseFramesReceived),
7301 STATS_OFFSET32(stat_XoffPauseFramesReceived),
7302 STATS_OFFSET32(stat_OutXonSent),
7303 STATS_OFFSET32(stat_OutXoffSent),
7304 STATS_OFFSET32(stat_MacControlFramesReceived),
7305 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7306 STATS_OFFSET32(stat_IfInFTQDiscards),
7307 STATS_OFFSET32(stat_IfInMBUFDiscards),
7308 STATS_OFFSET32(stat_FwRxDrop),
7309 };
7310
7311 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7312 * skipped because of errata.
7313 */
7314 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7315 8,0,8,8,8,8,8,8,8,8,
7316 4,0,4,4,4,4,4,4,4,4,
7317 4,4,4,4,4,4,4,4,4,4,
7318 4,4,4,4,4,4,4,4,4,4,
7319 4,4,4,4,4,4,4,
7320 };
7321
7322 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7323 8,0,8,8,8,8,8,8,8,8,
7324 4,4,4,4,4,4,4,4,4,4,
7325 4,4,4,4,4,4,4,4,4,4,
7326 4,4,4,4,4,4,4,4,4,4,
7327 4,4,4,4,4,4,4,
7328 };
7329
7330 #define BNX2_NUM_TESTS 6
7331
7332 static struct {
7333 char string[ETH_GSTRING_LEN];
7334 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7335 { "register_test (offline)" },
7336 { "memory_test (offline)" },
7337 { "loopback_test (offline)" },
7338 { "nvram_test (online)" },
7339 { "interrupt_test (online)" },
7340 { "link_test (online)" },
7341 };
7342
7343 static int
7344 bnx2_get_sset_count(struct net_device *dev, int sset)
7345 {
7346 switch (sset) {
7347 case ETH_SS_TEST:
7348 return BNX2_NUM_TESTS;
7349 case ETH_SS_STATS:
7350 return BNX2_NUM_STATS;
7351 default:
7352 return -EOPNOTSUPP;
7353 }
7354 }
7355
7356 static void
7357 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7358 {
7359 struct bnx2 *bp = netdev_priv(dev);
7360
7361 bnx2_set_power_state(bp, PCI_D0);
7362
7363 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7364 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7365 int i;
7366
7367 bnx2_netif_stop(bp);
7368 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7369 bnx2_free_skbs(bp);
7370
7371 if (bnx2_test_registers(bp) != 0) {
7372 buf[0] = 1;
7373 etest->flags |= ETH_TEST_FL_FAILED;
7374 }
7375 if (bnx2_test_memory(bp) != 0) {
7376 buf[1] = 1;
7377 etest->flags |= ETH_TEST_FL_FAILED;
7378 }
7379 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7380 etest->flags |= ETH_TEST_FL_FAILED;
7381
7382 if (!netif_running(bp->dev))
7383 bnx2_shutdown_chip(bp);
7384 else {
7385 bnx2_init_nic(bp, 1);
7386 bnx2_netif_start(bp);
7387 }
7388
7389 /* wait for link up */
7390 for (i = 0; i < 7; i++) {
7391 if (bp->link_up)
7392 break;
7393 msleep_interruptible(1000);
7394 }
7395 }
7396
7397 if (bnx2_test_nvram(bp) != 0) {
7398 buf[3] = 1;
7399 etest->flags |= ETH_TEST_FL_FAILED;
7400 }
7401 if (bnx2_test_intr(bp) != 0) {
7402 buf[4] = 1;
7403 etest->flags |= ETH_TEST_FL_FAILED;
7404 }
7405
7406 if (bnx2_test_link(bp) != 0) {
7407 buf[5] = 1;
7408 etest->flags |= ETH_TEST_FL_FAILED;
7409
7410 }
7411 if (!netif_running(bp->dev))
7412 bnx2_set_power_state(bp, PCI_D3hot);
7413 }
7414
7415 static void
7416 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7417 {
7418 switch (stringset) {
7419 case ETH_SS_STATS:
7420 memcpy(buf, bnx2_stats_str_arr,
7421 sizeof(bnx2_stats_str_arr));
7422 break;
7423 case ETH_SS_TEST:
7424 memcpy(buf, bnx2_tests_str_arr,
7425 sizeof(bnx2_tests_str_arr));
7426 break;
7427 }
7428 }
7429
7430 static void
7431 bnx2_get_ethtool_stats(struct net_device *dev,
7432 struct ethtool_stats *stats, u64 *buf)
7433 {
7434 struct bnx2 *bp = netdev_priv(dev);
7435 int i;
7436 u32 *hw_stats = (u32 *) bp->stats_blk;
7437 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7438 u8 *stats_len_arr = NULL;
7439
7440 if (hw_stats == NULL) {
7441 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7442 return;
7443 }
7444
7445 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7446 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7447 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7448 (CHIP_ID(bp) == CHIP_ID_5708_A0))
7449 stats_len_arr = bnx2_5706_stats_len_arr;
7450 else
7451 stats_len_arr = bnx2_5708_stats_len_arr;
7452
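/* Report each counter as the hardware value plus the temp_stats copy
 * saved across resets; 8-byte counters combine hi/lo words from both
 * blocks. */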
7453 for (i = 0; i < BNX2_NUM_STATS; i++) {
7454 unsigned long offset;
7455
7456 if (stats_len_arr[i] == 0) {
7457 /* skip this counter */
7458 buf[i] = 0;
7459 continue;
7460 }
7461
7462 offset = bnx2_stats_offset_arr[i];
7463 if (stats_len_arr[i] == 4) {
7464 /* 4-byte counter */
7465 buf[i] = (u64) *(hw_stats + offset) +
7466 *(temp_stats + offset);
7467 continue;
7468 }
7469 /* 8-byte counter */
7470 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7471 *(hw_stats + offset + 1) +
7472 (((u64) *(temp_stats + offset)) << 32) +
7473 *(temp_stats + offset + 1);
7474 }
7475 }
7476
7477 static int
7478 bnx2_phys_id(struct net_device *dev, u32 data)
7479 {
7480 struct bnx2 *bp = netdev_priv(dev);
7481 int i;
7482 u32 save;
7483
7484 bnx2_set_power_state(bp, PCI_D0);
7485
7486 if (data == 0)
7487 data = 2;
7488
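/* Blink the port LED under MAC override control for roughly `data`
 * seconds (one on/off cycle per second), then restore the saved
 * LED mode. */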
7489 save = REG_RD(bp, BNX2_MISC_CFG);
7490 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7491
7492 for (i = 0; i < (data * 2); i++) {
7493 if ((i % 2) == 0) {
7494 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7495 }
7496 else {
7497 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7498 BNX2_EMAC_LED_1000MB_OVERRIDE |
7499 BNX2_EMAC_LED_100MB_OVERRIDE |
7500 BNX2_EMAC_LED_10MB_OVERRIDE |
7501 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7502 BNX2_EMAC_LED_TRAFFIC);
7503 }
7504 msleep_interruptible(500);
7505 if (signal_pending(current))
7506 break;
7507 }
7508 REG_WR(bp, BNX2_EMAC_LED, 0);
7509 REG_WR(bp, BNX2_MISC_CFG, save);
7510
7511 if (!netif_running(dev))
7512 bnx2_set_power_state(bp, PCI_D3hot);
7513
7514 return 0;
7515 }
7516
7517 static int
7518 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7519 {
7520 struct bnx2 *bp = netdev_priv(dev);
7521
7522 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7523 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7524 else
7525 return (ethtool_op_set_tx_csum(dev, data));
7526 }
7527
7528 static const struct ethtool_ops bnx2_ethtool_ops = {
7529 .get_settings = bnx2_get_settings,
7530 .set_settings = bnx2_set_settings,
7531 .get_drvinfo = bnx2_get_drvinfo,
7532 .get_regs_len = bnx2_get_regs_len,
7533 .get_regs = bnx2_get_regs,
7534 .get_wol = bnx2_get_wol,
7535 .set_wol = bnx2_set_wol,
7536 .nway_reset = bnx2_nway_reset,
7537 .get_link = bnx2_get_link,
7538 .get_eeprom_len = bnx2_get_eeprom_len,
7539 .get_eeprom = bnx2_get_eeprom,
7540 .set_eeprom = bnx2_set_eeprom,
7541 .get_coalesce = bnx2_get_coalesce,
7542 .set_coalesce = bnx2_set_coalesce,
7543 .get_ringparam = bnx2_get_ringparam,
7544 .set_ringparam = bnx2_set_ringparam,
7545 .get_pauseparam = bnx2_get_pauseparam,
7546 .set_pauseparam = bnx2_set_pauseparam,
7547 .get_rx_csum = bnx2_get_rx_csum,
7548 .set_rx_csum = bnx2_set_rx_csum,
7549 .set_tx_csum = bnx2_set_tx_csum,
7550 .set_sg = ethtool_op_set_sg,
7551 .set_tso = bnx2_set_tso,
7552 .self_test = bnx2_self_test,
7553 .get_strings = bnx2_get_strings,
7554 .phys_id = bnx2_phys_id,
7555 .get_ethtool_stats = bnx2_get_ethtool_stats,
7556 .get_sset_count = bnx2_get_sset_count,
7557 };
7558
7559 /* Called with rtnl_lock */
7560 static int
7561 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7562 {
7563 struct mii_ioctl_data *data = if_mii(ifr);
7564 struct bnx2 *bp = netdev_priv(dev);
7565 int err;
7566
7567 switch(cmd) {
7568 case SIOCGMIIPHY:
7569 data->phy_id = bp->phy_addr;
7570
7571 /* fallthru */
7572 case SIOCGMIIREG: {
7573 u32 mii_regval;
7574
7575 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7576 return -EOPNOTSUPP;
7577
7578 if (!netif_running(dev))
7579 return -EAGAIN;
7580
7581 spin_lock_bh(&bp->phy_lock);
7582 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7583 spin_unlock_bh(&bp->phy_lock);
7584
7585 data->val_out = mii_regval;
7586
7587 return err;
7588 }
7589
7590 case SIOCSMIIREG:
7591 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7592 return -EOPNOTSUPP;
7593
7594 if (!netif_running(dev))
7595 return -EAGAIN;
7596
7597 spin_lock_bh(&bp->phy_lock);
7598 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7599 spin_unlock_bh(&bp->phy_lock);
7600
7601 return err;
7602
7603 default:
7604 /* do nothing */
7605 break;
7606 }
7607 return -EOPNOTSUPP;
7608 }
7609
7610 /* Called with rtnl_lock */
7611 static int
7612 bnx2_change_mac_addr(struct net_device *dev, void *p)
7613 {
7614 struct sockaddr *addr = p;
7615 struct bnx2 *bp = netdev_priv(dev);
7616
7617 if (!is_valid_ether_addr(addr->sa_data))
7618 return -EINVAL;
7619
7620 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7621 if (netif_running(dev))
7622 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7623
7624 return 0;
7625 }
7626
7627 /* Called with rtnl_lock */
7628 static int
7629 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7630 {
7631 struct bnx2 *bp = netdev_priv(dev);
7632
7633 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7634 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7635 return -EINVAL;
7636
7637 dev->mtu = new_mtu;
7638 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7639 }
7640
7641 #ifdef CONFIG_NET_POLL_CONTROLLER
7642 static void
7643 poll_bnx2(struct net_device *dev)
7644 {
7645 struct bnx2 *bp = netdev_priv(dev);
7646 int i;
7647
7648 for (i = 0; i < bp->irq_nvecs; i++) {
7649 struct bnx2_irq *irq = &bp->irq_tbl[i];
7650
7651 disable_irq(irq->vector);
7652 irq->handler(irq->vector, &bp->bnx2_napi[i]);
7653 enable_irq(irq->vector);
7654 }
7655 }
7656 #endif
7657
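/* The 5709 is a dual-media part: infer copper vs. SerDes from the
 * bond id if it is conclusive, otherwise from the strap pins, whose
 * decoding differs between PCI function 0 and the other functions.
 */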
7658 static void __devinit
7659 bnx2_get_5709_media(struct bnx2 *bp)
7660 {
7661 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7662 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7663 u32 strap;
7664
7665 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7666 return;
7667 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7668 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7669 return;
7670 }
7671
7672 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7673 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7674 else
7675 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7676
7677 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7678 switch (strap) {
7679 case 0x4:
7680 case 0x5:
7681 case 0x6:
7682 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7683 return;
7684 }
7685 } else {
7686 switch (strap) {
7687 case 0x1:
7688 case 0x2:
7689 case 0x4:
7690 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7691 return;
7692 }
7693 }
7694 }
7695
7696 static void __devinit
7697 bnx2_get_pci_speed(struct bnx2 *bp)
7698 {
7699 u32 reg;
7700
7701 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7702 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7703 u32 clkreg;
7704
7705 bp->flags |= BNX2_FLAG_PCIX;
7706
7707 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7708
7709 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7710 switch (clkreg) {
7711 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7712 bp->bus_speed_mhz = 133;
7713 break;
7714
7715 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7716 bp->bus_speed_mhz = 100;
7717 break;
7718
7719 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7720 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7721 bp->bus_speed_mhz = 66;
7722 break;
7723
7724 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7725 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7726 bp->bus_speed_mhz = 50;
7727 break;
7728
7729 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7730 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7731 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7732 bp->bus_speed_mhz = 33;
7733 break;
7734 }
7735 }
7736 else {
7737 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7738 bp->bus_speed_mhz = 66;
7739 else
7740 bp->bus_speed_mhz = 33;
7741 }
7742
7743 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7744 bp->flags |= BNX2_FLAG_PCI_32BIT;
7745
7746 }
7747
7748 static void __devinit
7749 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7750 {
7751 int rc, i, j;
7752 u8 *data;
7753 unsigned int block_end, rosize, len;
7754
7755 #define BNX2_VPD_NVRAM_OFFSET 0x300
7756 #define BNX2_VPD_LEN 128
7757 #define BNX2_MAX_VER_SLEN 30
7758
7759 data = kmalloc(256, GFP_KERNEL);
7760 if (!data)
7761 return;
7762
7763 rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
7764 BNX2_VPD_LEN);
7765 if (rc)
7766 goto vpd_done;
7767
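/* The NVRAM image is byte-swapped in 32-bit words; undo the swap
 * while copying into the first BNX2_VPD_LEN bytes of the buffer. */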
7768 for (i = 0; i < BNX2_VPD_LEN; i += 4) {
7769 data[i] = data[i + BNX2_VPD_LEN + 3];
7770 data[i + 1] = data[i + BNX2_VPD_LEN + 2];
7771 data[i + 2] = data[i + BNX2_VPD_LEN + 1];
7772 data[i + 3] = data[i + BNX2_VPD_LEN];
7773 }
7774
7775 i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
7776 if (i < 0)
7777 goto vpd_done;
7778
7779 rosize = pci_vpd_lrdt_size(&data[i]);
7780 i += PCI_VPD_LRDT_TAG_SIZE;
7781 block_end = i + rosize;
7782
7783 if (block_end > BNX2_VPD_LEN)
7784 goto vpd_done;
7785
7786 j = pci_vpd_find_info_keyword(data, i, rosize,
7787 PCI_VPD_RO_KEYWORD_MFR_ID);
7788 if (j < 0)
7789 goto vpd_done;
7790
7791 len = pci_vpd_info_field_size(&data[j]);
7792
7793 j += PCI_VPD_INFO_FLD_HDR_SIZE;
7794 if (j + len > block_end || len != 4 ||
7795 memcmp(&data[j], "1028", 4))
7796 goto vpd_done;
7797
7798 j = pci_vpd_find_info_keyword(data, i, rosize,
7799 PCI_VPD_RO_KEYWORD_VENDOR0);
7800 if (j < 0)
7801 goto vpd_done;
7802
7803 len = pci_vpd_info_field_size(&data[j]);
7804
7805 j += PCI_VPD_INFO_FLD_HDR_SIZE;
7806 if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
7807 goto vpd_done;
7808
7809 memcpy(bp->fw_version, &data[j], len);
7810 bp->fw_version[len] = ' ';
7811
7812 vpd_done:
7813 kfree(data);
7814 }
7815
7816 static int __devinit
7817 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7818 {
7819 struct bnx2 *bp;
7820 unsigned long mem_len;
7821 int rc, i, j;
7822 u32 reg;
7823 u64 dma_mask, persist_dma_mask;
7824
7825 SET_NETDEV_DEV(dev, &pdev->dev);
7826 bp = netdev_priv(dev);
7827
7828 bp->flags = 0;
7829 bp->phy_flags = 0;
7830
7831 bp->temp_stats_blk =
7832 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7833
7834 if (bp->temp_stats_blk == NULL) {
7835 rc = -ENOMEM;
7836 goto err_out;
7837 }
7838
7839 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7840 rc = pci_enable_device(pdev);
7841 if (rc) {
7842 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7843 goto err_out;
7844 }
7845
7846 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7847 dev_err(&pdev->dev,
7848 "Cannot find PCI device base address, aborting\n");
7849 rc = -ENODEV;
7850 goto err_out_disable;
7851 }
7852
7853 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7854 if (rc) {
7855 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7856 goto err_out_disable;
7857 }
7858
7859 pci_set_master(pdev);
7860 pci_save_state(pdev);
7861
7862 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7863 if (bp->pm_cap == 0) {
7864 dev_err(&pdev->dev,
7865 "Cannot find power management capability, aborting\n");
7866 rc = -EIO;
7867 goto err_out_release;
7868 }
7869
7870 bp->dev = dev;
7871 bp->pdev = pdev;
7872
7873 spin_lock_init(&bp->phy_lock);
7874 spin_lock_init(&bp->indirect_lock);
7875 #ifdef BCM_CNIC
7876 mutex_init(&bp->cnic_lock);
7877 #endif
7878 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7879
7880 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7881 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7882 dev->mem_end = dev->mem_start + mem_len;
7883 dev->irq = pdev->irq;
7884
7885 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7886
7887 if (!bp->regview) {
7888 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
7889 rc = -ENOMEM;
7890 goto err_out_release;
7891 }
7892
7893 	/* Configure byte swap and enable write to the reg_window registers.
7894 	 * Rely on the CPU to do target byte swapping on big-endian systems;
7895 	 * the chip's target access swapping will not swap all accesses.
7896 	 */
7897 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7898 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7899 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7900
7901 bnx2_set_power_state(bp, PCI_D0);
7902
7903 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7904
7905 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7906 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7907 dev_err(&pdev->dev,
7908 "Cannot find PCIE capability, aborting\n");
7909 rc = -EIO;
7910 goto err_out_unmap;
7911 }
7912 bp->flags |= BNX2_FLAG_PCIE;
7913 if (CHIP_REV(bp) == CHIP_REV_Ax)
7914 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7915 } else {
7916 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7917 if (bp->pcix_cap == 0) {
7918 dev_err(&pdev->dev,
7919 "Cannot find PCIX capability, aborting\n");
7920 rc = -EIO;
7921 goto err_out_unmap;
7922 }
7923 bp->flags |= BNX2_FLAG_BROKEN_STATS;
7924 }
7925
7926 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7927 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7928 bp->flags |= BNX2_FLAG_MSIX_CAP;
7929 }
7930
7931 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7932 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7933 bp->flags |= BNX2_FLAG_MSI_CAP;
7934 }
7935
7936 /* 5708 cannot support DMA addresses > 40-bit. */
7937 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7938 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7939 else
7940 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7941
7942 /* Configure DMA attributes. */
7943 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7944 dev->features |= NETIF_F_HIGHDMA;
7945 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7946 if (rc) {
7947 dev_err(&pdev->dev,
7948 "pci_set_consistent_dma_mask failed, aborting\n");
7949 goto err_out_unmap;
7950 }
7951 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7952 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
7953 goto err_out_unmap;
7954 }
7955
7956 if (!(bp->flags & BNX2_FLAG_PCIE))
7957 bnx2_get_pci_speed(bp);
7958
7959 /* 5706A0 may falsely detect SERR and PERR. */
7960 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7961 reg = REG_RD(bp, PCI_COMMAND);
7962 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7963 REG_WR(bp, PCI_COMMAND, reg);
7964 }
7965 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7966 !(bp->flags & BNX2_FLAG_PCIX)) {
7967
7968 dev_err(&pdev->dev,
7969 "5706 A1 can only be used in a PCIX bus, aborting\n");
7970 goto err_out_unmap;
7971 }
7972
7973 bnx2_init_nvram(bp);
7974
7975 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7976
7977 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7978 BNX2_SHM_HDR_SIGNATURE_SIG) {
7979 u32 off = PCI_FUNC(pdev->devfn) << 2;
7980
7981 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7982 } else
7983 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7984
7985 /* Get the permanent MAC address. First we need to make sure the
7986 * firmware is actually running.
7987 */
7988 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7989
7990 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7991 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7992 dev_err(&pdev->dev, "Firmware not running, aborting\n");
7993 rc = -ENODEV;
7994 goto err_out_unmap;
7995 }
7996
7997 bnx2_read_vpd_fw_ver(bp);
7998
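/* Append the bootcode version as "bc x.y.z": each byte of
 * BNX2_DEV_INFO_BC_REV is rendered in decimal with leading zeros
 * suppressed.
 */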
7999 j = strlen(bp->fw_version);
8000 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8001 for (i = 0; i < 3 && j < 24; i++) {
8002 u8 num, k, skip0;
8003
8004 if (i == 0) {
8005 bp->fw_version[j++] = 'b';
8006 bp->fw_version[j++] = 'c';
8007 bp->fw_version[j++] = ' ';
8008 }
8009 num = (u8) (reg >> (24 - (i * 8)));
8010 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8011 if (num >= k || !skip0 || k == 1) {
8012 bp->fw_version[j++] = (num / k) + '0';
8013 skip0 = 0;
8014 }
8015 }
8016 if (i != 2)
8017 bp->fw_version[j++] = '.';
8018 }
8019 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8020 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8021 bp->wol = 1;
8022
8023 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8024 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8025
8026 for (i = 0; i < 30; i++) {
8027 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8028 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8029 break;
8030 msleep(10);
8031 }
8032 }
8033 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8034 reg &= BNX2_CONDITION_MFW_RUN_MASK;
8035 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8036 reg != BNX2_CONDITION_MFW_RUN_NONE) {
8037 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8038
8039 if (j < 32)
8040 bp->fw_version[j++] = ' ';
8041 for (i = 0; i < 3 && j < 28; i++) {
8042 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8043 reg = swab32(reg);
8044 memcpy(&bp->fw_version[j], &reg, 4);
8045 j += 4;
8046 }
8047 }
8048
8049 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8050 bp->mac_addr[0] = (u8) (reg >> 8);
8051 bp->mac_addr[1] = (u8) reg;
8052
8053 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8054 bp->mac_addr[2] = (u8) (reg >> 24);
8055 bp->mac_addr[3] = (u8) (reg >> 16);
8056 bp->mac_addr[4] = (u8) (reg >> 8);
8057 bp->mac_addr[5] = (u8) reg;
8058
8059 bp->tx_ring_size = MAX_TX_DESC_CNT;
8060 bnx2_set_rx_ring_size(bp, 255);
8061
8062 bp->rx_csum = 1;
8063
8064 bp->tx_quick_cons_trip_int = 2;
8065 bp->tx_quick_cons_trip = 20;
8066 bp->tx_ticks_int = 18;
8067 bp->tx_ticks = 80;
8068
8069 bp->rx_quick_cons_trip_int = 2;
8070 bp->rx_quick_cons_trip = 12;
8071 bp->rx_ticks_int = 18;
8072 bp->rx_ticks = 18;
8073
8074 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8075
8076 bp->current_interval = BNX2_TIMER_INTERVAL;
8077
8078 bp->phy_addr = 1;
8079
8080 /* Disable WOL support if we are running on a SERDES chip. */
8081 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8082 bnx2_get_5709_media(bp);
8083 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8084 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8085
8086 bp->phy_port = PORT_TP;
8087 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8088 bp->phy_port = PORT_FIBRE;
8089 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8090 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8091 bp->flags |= BNX2_FLAG_NO_WOL;
8092 bp->wol = 0;
8093 }
8094 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8095 /* Don't do parallel detect on this board because of
8096 * some board problems. The link will not go down
8097 * if we do parallel detect.
8098 */
8099 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8100 pdev->subsystem_device == 0x310c)
8101 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8102 } else {
8103 bp->phy_addr = 2;
8104 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8105 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8106 }
8107 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8108 CHIP_NUM(bp) == CHIP_NUM_5708)
8109 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8110 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8111 (CHIP_REV(bp) == CHIP_REV_Ax ||
8112 CHIP_REV(bp) == CHIP_REV_Bx))
8113 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8114
8115 bnx2_init_fw_cap(bp);
8116
8117 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8118 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8119 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8120 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8121 bp->flags |= BNX2_FLAG_NO_WOL;
8122 bp->wol = 0;
8123 }
8124
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be a 32-bit write.  The 5706 does 64-bit
	 * MSI writes with byte enables disabled on the unused 32-bit
	 * word.  This is legal but causes problems on the AMD 8132,
	 * which will eventually stop responding.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling
	 * it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static char * __devinit
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & BNX2_FLAG_PCIE) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & BNX2_FLAG_PCIX)
			s += sprintf(s, "-X");
		if (bp->flags & BNX2_FLAG_PCI_32BIT)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}

static void __devinit
bnx2_init_napi(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		int (*poll)(struct napi_struct *, int);

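		/* Vector 0's NAPI handler also services link and other
		 * status-block events; the remaining MSI-X vectors only
		 * poll their own rings.
		 */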
		if (i == 0)
			poll = bnx2_poll;
		else
			poll = bnx2_poll_msix;

		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
		bnapi->bp = bp;
	}
}

static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats		= bnx2_get_stats,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};

static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
{
#ifdef BCM_VLAN
	dev->vlan_features |= flags;
#endif
}

static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];

	if (version_printed++ == 0)
		pr_info("%s", version);

	/* dev is zeroed by alloc_etherdev_mq() */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	rc = bnx2_request_firmware(bp);
	if (rc)
		goto error;

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
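	/* IPv6 checksum offload and (below) TSO6 are only available on
	 * the 5709-class chips.
	 */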
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_IPV6_CSUM;
		vlan_features_add(dev, NETIF_F_IPV6_CSUM);
	}
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_TSO6;
		vlan_features_add(dev, NETIF_F_TSO6);
	}
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str),
		    dev->base_addr,
		    bp->pdev->irq, dev->dev_addr);

	return 0;

error:
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return rc;
}

static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	kfree(bp->temp_stats_blk);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}

static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}

/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
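	/* Restoring config space consumes the previously saved state;
	 * save it again so a later suspend or another recovery pass
	 * still has a valid copy.
	 */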
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		bnx2_init_nic(bp, 1);
	}

	rtnl_unlock();
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp);

	netif_device_attach(dev);
	rtnl_unlock();
}

static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};

static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};

static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);