/* bnx2.c: QLogic NX2 network driver.
 *
 * Copyright (c) 2004-2014 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan (mchan@broadcom.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/aer.h>

#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define DRV_MODULE_VERSION	"2.2.5"
#define DRV_MODULE_RELDATE	"December 20, 2013"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] =
	"QLogic NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("QLogic NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, S_IRUGO);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

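/*
 * Usage note (editorial, not part of the original source): disable_msi is
 * set at module load time, e.g.
 *
 *	modprobe bnx2 disable_msi=1
 *
 * and S_IRUGO exposes the current value read-only under
 * /sys/module/bnx2/parameters/disable_msi.
 */
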
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};

static const struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};

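/*
 * Editorial note: the ID table above uses the plain "const struct
 * pci_device_id" form that replaced the DEFINE_PCI_DEVICE_TABLE() macro,
 * which expanded to roughly the same declaration:
 *
 *	#define DEFINE_PCI_DEVICE_TABLE(_table) \
 *		const struct pci_device_id _table[]
 *
 * Each entry matches on (vendor, device, subvendor, subdevice, class,
 * class_mask); the trailing field is driver_data and carries the board_t
 * index used to pick the adapter name out of board_info[] at probe time.
 */
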
0ced9d01 149static const struct flash_spec flash_table[] =
b6016b76 150{
e30372c9
MC
151#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
152#define NONBUFFERED_FLAGS (BNX2_NV_WREN)
b6016b76 153 /* Slow EEPROM */
37137709 154 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 155 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
b6016b76
MC
156 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
157 "EEPROM - slow"},
37137709
MC
158 /* Expansion entry 0001 */
159 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 160 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
161 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
162 "Entry 0001"},
b6016b76
MC
163 /* Saifun SA25F010 (non-buffered flash) */
164 /* strap, cfg1, & write1 need updates */
37137709 165 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 166 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
167 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
168 "Non-buffered flash (128kB)"},
169 /* Saifun SA25F020 (non-buffered flash) */
170 /* strap, cfg1, & write1 need updates */
37137709 171 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 172 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
173 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
174 "Non-buffered flash (256kB)"},
37137709
MC
175 /* Expansion entry 0100 */
176 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 177 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
178 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
179 "Entry 0100"},
180 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
6aa20a22 181 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 182 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
185 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
186 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 187 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
190 /* Saifun SA25F005 (non-buffered flash) */
191 /* strap, cfg1, & write1 need updates */
192 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 193 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
194 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
195 "Non-buffered flash (64kB)"},
196 /* Fast EEPROM */
197 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 198 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
37137709
MC
199 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
200 "EEPROM - fast"},
201 /* Expansion entry 1001 */
202 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205 "Entry 1001"},
206 /* Expansion entry 1010 */
207 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 208 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
209 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1010"},
211 /* ATMEL AT45DB011B (buffered flash) */
212 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
215 "Buffered flash (128kB)"},
216 /* Expansion entry 1100 */
217 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 218 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
219 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
220 "Entry 1100"},
221 /* Expansion entry 1101 */
222 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 223 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
224 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
225 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
227 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 228 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
229 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
230 "Entry 1110 (Atmel)"},
231 /* ATMEL AT45DB021B (buffered flash) */
232 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 233 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
234 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
235 "Buffered flash (256kB)"},
b6016b76
MC
236};
237
0ced9d01 238static const struct flash_spec flash_5709 = {
e30372c9
MC
239 .flags = BNX2_NV_BUFFERED,
240 .page_bits = BCM5709_FLASH_PAGE_BITS,
241 .page_size = BCM5709_FLASH_PAGE_SIZE,
242 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
243 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
244 .name = "5709 Buffered flash (256kB)",
245};
246
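/*
 * Editorial sketch (assumption, not code from this file): during NVRAM
 * init the driver reads the flash strapping from BNX2_NVM_CFG1 and scans
 * flash_table[] for the entry whose strapping bits match, roughly:
 *
 *	u32 val = BNX2_RD(bp, BNX2_NVM_CFG1);
 *	int j;
 *
 *	for (j = 0; j < ARRAY_SIZE(flash_table); j++) {
 *		if ((val & FLASH_STRAP_MASK) ==
 *		    (flash_table[j].strapping & FLASH_STRAP_MASK)) {
 *			bp->flash_info = &flash_table[j];
 *			break;
 *		}
 *	}
 *
 * The 5709 skips the table and uses the fixed flash_5709 entry above.
 */
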
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);

static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	/* Tell compiler to fetch tx_prod and tx_cons from memory. */
	barrier();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == BNX2_TX_DESC_CNT)
			diff = BNX2_MAX_TX_DESC_CNT;
	}
	return bp->tx_ring_size - diff;
}

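/*
 * Worked example (editorial): tx_prod and tx_cons are free-running 16-bit
 * indices.  With tx_prod = 5 and tx_cons = 0xfffe (the producer has
 * wrapped), diff = 5 - 0xfffe = 0xffff0007 as a u32; that is >=
 * BNX2_TX_DESC_CNT, so it is masked down to 7 and bnx2_tx_avail() returns
 * bp->tx_ring_size - 7.  The final BNX2_TX_DESC_CNT -> BNX2_MAX_TX_DESC_CNT
 * adjustment covers the skipped index mentioned in the comment above.
 */
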
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}

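/*
 * Editorial note: registers and shared firmware memory that are not in the
 * directly mapped window are reached through a two-step register window:
 * the target offset is written to BNX2_PCICFG_REG_WINDOW_ADDRESS and the
 * data is then moved through BNX2_PCICFG_REG_WINDOW, with indirect_lock
 * serializing the pair.  A typical caller (illustrative only):
 *
 *	u32 fw_mb = bnx2_shmem_rd(bp, BNX2_FW_MB);
 */
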
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		int i;

		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

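/*
 * Editorial note: context memory writes differ by chip.  On the 5709 the
 * value goes through BNX2_CTX_CTX_DATA/BNX2_CTX_CTX_CTRL and the driver
 * polls up to 5 times for the WRITE_REQ bit to clear; older chips take the
 * address/data pair directly.  A hypothetical caller might look like:
 *
 *	bnx2_ctx_wr(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE,
 *		    BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2);
 */
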
328#ifdef BCM_CNIC
329static int
330bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
331{
332 struct bnx2 *bp = netdev_priv(dev);
333 struct drv_ctl_io *io = &info->data.io;
334
335 switch (info->cmd) {
336 case DRV_CTL_IO_WR_CMD:
337 bnx2_reg_wr_ind(bp, io->offset, io->data);
338 break;
339 case DRV_CTL_IO_RD_CMD:
340 io->data = bnx2_reg_rd_ind(bp, io->offset);
341 break;
342 case DRV_CTL_CTX_WR_CMD:
343 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
344 break;
345 default:
346 return -EINVAL;
347 }
348 return 0;
349}
350
351static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
352{
353 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
354 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
355 int sb_id;
356
357 if (bp->flags & BNX2_FLAG_USING_MSIX) {
358 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
359 bnapi->cnic_present = 0;
360 sb_id = bp->irq_nvecs;
361 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
362 } else {
363 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
364 bnapi->cnic_tag = bnapi->last_status_idx;
365 bnapi->cnic_present = 1;
366 sb_id = 0;
367 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
368 }
369
370 cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
371 cp->irq_arr[0].status_blk = (void *)
372 ((unsigned long) bnapi->status_blk.msi +
373 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
374 cp->irq_arr[0].status_blk_num = sb_id;
375 cp->num_irq = 1;
376}
377
378static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
379 void *data)
380{
381 struct bnx2 *bp = netdev_priv(dev);
382 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
383
384 if (ops == NULL)
385 return -EINVAL;
386
387 if (cp->drv_state & CNIC_DRV_STATE_REGD)
388 return -EBUSY;
389
41c2178a
MC
390 if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
391 return -ENODEV;
392
4edd473f
MC
393 bp->cnic_data = data;
394 rcu_assign_pointer(bp->cnic_ops, ops);
395
396 cp->num_irq = 0;
397 cp->drv_state = CNIC_DRV_STATE_REGD;
398
399 bnx2_setup_cnic_irq_info(bp);
400
401 return 0;
402}
403
404static int bnx2_unregister_cnic(struct net_device *dev)
405{
406 struct bnx2 *bp = netdev_priv(dev);
407 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
408 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
409
c5a88950 410 mutex_lock(&bp->cnic_lock);
4edd473f
MC
411 cp->drv_state = 0;
412 bnapi->cnic_present = 0;
2cfa5a04 413 RCU_INIT_POINTER(bp->cnic_ops, NULL);
c5a88950 414 mutex_unlock(&bp->cnic_lock);
4edd473f
MC
415 synchronize_rcu();
416 return 0;
417}
418
61c2fc4b 419static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
4edd473f
MC
420{
421 struct bnx2 *bp = netdev_priv(dev);
422 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
423
7625eb2f
MC
424 if (!cp->max_iscsi_conn)
425 return NULL;
426
4edd473f
MC
427 cp->drv_owner = THIS_MODULE;
428 cp->chip_id = bp->chip_id;
429 cp->pdev = bp->pdev;
430 cp->io_base = bp->regview;
431 cp->drv_ctl = bnx2_drv_ctl;
432 cp->drv_register_cnic = bnx2_register_cnic;
433 cp->drv_unregister_cnic = bnx2_unregister_cnic;
434
435 return cp;
436}
4edd473f
MC
437
438static void
439bnx2_cnic_stop(struct bnx2 *bp)
440{
441 struct cnic_ops *c_ops;
442 struct cnic_ctl_info info;
443
c5a88950 444 mutex_lock(&bp->cnic_lock);
13707f9e
ED
445 c_ops = rcu_dereference_protected(bp->cnic_ops,
446 lockdep_is_held(&bp->cnic_lock));
4edd473f
MC
447 if (c_ops) {
448 info.cmd = CNIC_CTL_STOP_CMD;
449 c_ops->cnic_ctl(bp->cnic_data, &info);
450 }
c5a88950 451 mutex_unlock(&bp->cnic_lock);
4edd473f
MC
452}
453
454static void
455bnx2_cnic_start(struct bnx2 *bp)
456{
457 struct cnic_ops *c_ops;
458 struct cnic_ctl_info info;
459
c5a88950 460 mutex_lock(&bp->cnic_lock);
13707f9e
ED
461 c_ops = rcu_dereference_protected(bp->cnic_ops,
462 lockdep_is_held(&bp->cnic_lock));
4edd473f
MC
463 if (c_ops) {
464 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
465 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
466
467 bnapi->cnic_tag = bnapi->last_status_idx;
468 }
469 info.cmd = CNIC_CTL_START_CMD;
470 c_ops->cnic_ctl(bp->cnic_data, &info);
471 }
c5a88950 472 mutex_unlock(&bp->cnic_lock);
4edd473f
MC
473}
474
475#else
476
477static void
478bnx2_cnic_stop(struct bnx2 *bp)
479{
480}
481
482static void
483bnx2_cnic_start(struct bnx2 *bp)
484{
485}
486
487#endif
488
b6016b76
MC
489static int
490bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
491{
492 u32 val1;
493 int i, ret;
494
583c28e5 495 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
e503e066 496 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
b6016b76
MC
497 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
498
e503e066
MC
499 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
500 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
b6016b76
MC
501
502 udelay(40);
503 }
504
505 val1 = (bp->phy_addr << 21) | (reg << 16) |
506 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
507 BNX2_EMAC_MDIO_COMM_START_BUSY;
e503e066 508 BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
b6016b76
MC
509
510 for (i = 0; i < 50; i++) {
511 udelay(10);
512
e503e066 513 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
b6016b76
MC
514 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
515 udelay(5);
516
e503e066 517 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
b6016b76
MC
518 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
519
520 break;
521 }
522 }
523
524 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
525 *val = 0x0;
526 ret = -EBUSY;
527 }
528 else {
529 *val = val1;
530 ret = 0;
531 }
532
583c28e5 533 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
e503e066 534 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
b6016b76
MC
535 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
536
e503e066
MC
537 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
538 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
b6016b76
MC
539
540 udelay(40);
541 }
542
543 return ret;
544}
545
546static int
547bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
548{
549 u32 val1;
550 int i, ret;
551
583c28e5 552 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
e503e066 553 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
b6016b76
MC
554 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
555
e503e066
MC
556 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
557 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
b6016b76
MC
558
559 udelay(40);
560 }
561
562 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
563 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
564 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
e503e066 565 BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
6aa20a22 566
b6016b76
MC
567 for (i = 0; i < 50; i++) {
568 udelay(10);
569
e503e066 570 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
b6016b76
MC
571 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
572 udelay(5);
573 break;
574 }
575 }
576
577 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
578 ret = -EBUSY;
579 else
580 ret = 0;
581
583c28e5 582 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
e503e066 583 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
b6016b76
MC
584 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
585
e503e066
MC
586 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
587 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
b6016b76
MC
588
589 udelay(40);
590 }
591
592 return ret;
593}
594
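/*
 * Editorial note: bnx2_read_phy()/bnx2_write_phy() drive the EMAC MDIO
 * interface by writing a command word to BNX2_EMAC_MDIO_COMM and polling
 * (up to 50 x 10us) for START_BUSY to clear; hardware auto-polling is
 * suspended around the access when the PHY is in auto-poll mode.  A
 * typical caller, with bp->phy_lock held (illustrative only):
 *
 *	u32 bmcr;
 *
 *	if (!bnx2_read_phy(bp, bp->mii_bmcr, &bmcr))
 *		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART);
 */
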
595static void
596bnx2_disable_int(struct bnx2 *bp)
597{
b4b36042
MC
598 int i;
599 struct bnx2_napi *bnapi;
600
601 for (i = 0; i < bp->irq_nvecs; i++) {
602 bnapi = &bp->bnx2_napi[i];
e503e066 603 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
b4b36042
MC
604 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
605 }
e503e066 606 BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
b6016b76
MC
607}
608
609static void
610bnx2_enable_int(struct bnx2 *bp)
611{
b4b36042
MC
612 int i;
613 struct bnx2_napi *bnapi;
35efa7c1 614
b4b36042
MC
615 for (i = 0; i < bp->irq_nvecs; i++) {
616 bnapi = &bp->bnx2_napi[i];
1269a8a6 617
e503e066
MC
618 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
619 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
620 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
621 bnapi->last_status_idx);
b6016b76 622
e503e066
MC
623 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
624 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
625 bnapi->last_status_idx);
b4b36042 626 }
e503e066 627 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
628}
629
630static void
631bnx2_disable_int_sync(struct bnx2 *bp)
632{
b4b36042
MC
633 int i;
634
b6016b76 635 atomic_inc(&bp->intr_sem);
3767546c
MC
636 if (!netif_running(bp->dev))
637 return;
638
b6016b76 639 bnx2_disable_int(bp);
b4b36042
MC
640 for (i = 0; i < bp->irq_nvecs; i++)
641 synchronize_irq(bp->irq_tbl[i].vector);
b6016b76
MC
642}
643
35efa7c1
MC
644static void
645bnx2_napi_disable(struct bnx2 *bp)
646{
b4b36042
MC
647 int i;
648
649 for (i = 0; i < bp->irq_nvecs; i++)
650 napi_disable(&bp->bnx2_napi[i].napi);
35efa7c1
MC
651}
652
653static void
654bnx2_napi_enable(struct bnx2 *bp)
655{
b4b36042
MC
656 int i;
657
658 for (i = 0; i < bp->irq_nvecs; i++)
659 napi_enable(&bp->bnx2_napi[i].napi);
35efa7c1
MC
660}
661
b6016b76 662static void
212f9934 663bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
b6016b76 664{
212f9934
MC
665 if (stop_cnic)
666 bnx2_cnic_stop(bp);
b6016b76 667 if (netif_running(bp->dev)) {
35efa7c1 668 bnx2_napi_disable(bp);
b6016b76 669 netif_tx_disable(bp->dev);
b6016b76 670 }
b7466560 671 bnx2_disable_int_sync(bp);
a0ba6760 672 netif_carrier_off(bp->dev); /* prevent tx timeout */
b6016b76
MC
673}
674
675static void
212f9934 676bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
b6016b76
MC
677{
678 if (atomic_dec_and_test(&bp->intr_sem)) {
679 if (netif_running(bp->dev)) {
706bf240 680 netif_tx_wake_all_queues(bp->dev);
a0ba6760
MC
681 spin_lock_bh(&bp->phy_lock);
682 if (bp->link_up)
683 netif_carrier_on(bp->dev);
684 spin_unlock_bh(&bp->phy_lock);
35efa7c1 685 bnx2_napi_enable(bp);
b6016b76 686 bnx2_enable_int(bp);
212f9934
MC
687 if (start_cnic)
688 bnx2_cnic_start(bp);
b6016b76
MC
689 }
690 }
691}
692
35e9010b
MC
693static void
694bnx2_free_tx_mem(struct bnx2 *bp)
695{
696 int i;
697
698 for (i = 0; i < bp->num_tx_rings; i++) {
699 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
700 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
701
702 if (txr->tx_desc_ring) {
36227e88
SG
703 dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
704 txr->tx_desc_ring,
705 txr->tx_desc_mapping);
35e9010b
MC
706 txr->tx_desc_ring = NULL;
707 }
708 kfree(txr->tx_buf_ring);
709 txr->tx_buf_ring = NULL;
710 }
711}
712
bb4f98ab
MC
713static void
714bnx2_free_rx_mem(struct bnx2 *bp)
715{
716 int i;
717
718 for (i = 0; i < bp->num_rx_rings; i++) {
719 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
720 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
721 int j;
722
723 for (j = 0; j < bp->rx_max_ring; j++) {
724 if (rxr->rx_desc_ring[j])
36227e88
SG
725 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
726 rxr->rx_desc_ring[j],
727 rxr->rx_desc_mapping[j]);
bb4f98ab
MC
728 rxr->rx_desc_ring[j] = NULL;
729 }
25b0b999 730 vfree(rxr->rx_buf_ring);
bb4f98ab
MC
731 rxr->rx_buf_ring = NULL;
732
733 for (j = 0; j < bp->rx_max_pg_ring; j++) {
734 if (rxr->rx_pg_desc_ring[j])
36227e88
SG
735 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
736 rxr->rx_pg_desc_ring[j],
737 rxr->rx_pg_desc_mapping[j]);
3298a738 738 rxr->rx_pg_desc_ring[j] = NULL;
bb4f98ab 739 }
25b0b999 740 vfree(rxr->rx_pg_ring);
bb4f98ab
MC
741 rxr->rx_pg_ring = NULL;
742 }
743}
744
35e9010b
MC
745static int
746bnx2_alloc_tx_mem(struct bnx2 *bp)
747{
748 int i;
749
750 for (i = 0; i < bp->num_tx_rings; i++) {
751 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
752 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
753
754 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
755 if (txr->tx_buf_ring == NULL)
756 return -ENOMEM;
757
758 txr->tx_desc_ring =
36227e88
SG
759 dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
760 &txr->tx_desc_mapping, GFP_KERNEL);
35e9010b
MC
761 if (txr->tx_desc_ring == NULL)
762 return -ENOMEM;
763 }
764 return 0;
765}
766
bb4f98ab
MC
767static int
768bnx2_alloc_rx_mem(struct bnx2 *bp)
769{
770 int i;
771
772 for (i = 0; i < bp->num_rx_rings; i++) {
773 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
774 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
775 int j;
776
777 rxr->rx_buf_ring =
89bf67f1 778 vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
bb4f98ab
MC
779 if (rxr->rx_buf_ring == NULL)
780 return -ENOMEM;
781
bb4f98ab
MC
782 for (j = 0; j < bp->rx_max_ring; j++) {
783 rxr->rx_desc_ring[j] =
36227e88
SG
784 dma_alloc_coherent(&bp->pdev->dev,
785 RXBD_RING_SIZE,
786 &rxr->rx_desc_mapping[j],
787 GFP_KERNEL);
bb4f98ab
MC
788 if (rxr->rx_desc_ring[j] == NULL)
789 return -ENOMEM;
790
791 }
792
793 if (bp->rx_pg_ring_size) {
89bf67f1 794 rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
bb4f98ab
MC
795 bp->rx_max_pg_ring);
796 if (rxr->rx_pg_ring == NULL)
797 return -ENOMEM;
798
bb4f98ab
MC
799 }
800
801 for (j = 0; j < bp->rx_max_pg_ring; j++) {
802 rxr->rx_pg_desc_ring[j] =
36227e88
SG
803 dma_alloc_coherent(&bp->pdev->dev,
804 RXBD_RING_SIZE,
805 &rxr->rx_pg_desc_mapping[j],
806 GFP_KERNEL);
bb4f98ab
MC
807 if (rxr->rx_pg_desc_ring[j] == NULL)
808 return -ENOMEM;
809
810 }
811 }
812 return 0;
813}
814
b6016b76
MC
815static void
816bnx2_free_mem(struct bnx2 *bp)
817{
13daffa2 818 int i;
43e80b89 819 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
13daffa2 820
35e9010b 821 bnx2_free_tx_mem(bp);
bb4f98ab 822 bnx2_free_rx_mem(bp);
35e9010b 823
59b47d8a
MC
824 for (i = 0; i < bp->ctx_pages; i++) {
825 if (bp->ctx_blk[i]) {
2bc4078e 826 dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
36227e88
SG
827 bp->ctx_blk[i],
828 bp->ctx_blk_mapping[i]);
59b47d8a
MC
829 bp->ctx_blk[i] = NULL;
830 }
831 }
43e80b89 832 if (bnapi->status_blk.msi) {
36227e88
SG
833 dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
834 bnapi->status_blk.msi,
835 bp->status_blk_mapping);
43e80b89 836 bnapi->status_blk.msi = NULL;
0f31f994 837 bp->stats_blk = NULL;
b6016b76 838 }
b6016b76
MC
839}
840
841static int
842bnx2_alloc_mem(struct bnx2 *bp)
843{
35e9010b 844 int i, status_blk_size, err;
43e80b89
MC
845 struct bnx2_napi *bnapi;
846 void *status_blk;
b6016b76 847
0f31f994
MC
848 /* Combine status and statistics blocks into one allocation. */
849 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
f86e82fb 850 if (bp->flags & BNX2_FLAG_MSIX_CAP)
b4b36042
MC
851 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
852 BNX2_SBLK_MSIX_ALIGN_SIZE);
0f31f994
MC
853 bp->status_stats_size = status_blk_size +
854 sizeof(struct statistics_block);
855
ede23fa8
JP
856 status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
857 &bp->status_blk_mapping, GFP_KERNEL);
43e80b89 858 if (status_blk == NULL)
b6016b76
MC
859 goto alloc_mem_err;
860
43e80b89
MC
861 bnapi = &bp->bnx2_napi[0];
862 bnapi->status_blk.msi = status_blk;
863 bnapi->hw_tx_cons_ptr =
864 &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
865 bnapi->hw_rx_cons_ptr =
866 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
f86e82fb 867 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
379b39a2 868 for (i = 1; i < bp->irq_nvecs; i++) {
43e80b89
MC
869 struct status_block_msix *sblk;
870
871 bnapi = &bp->bnx2_napi[i];
b4b36042 872
64699336 873 sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
43e80b89
MC
874 bnapi->status_blk.msix = sblk;
875 bnapi->hw_tx_cons_ptr =
876 &sblk->status_tx_quick_consumer_index;
877 bnapi->hw_rx_cons_ptr =
878 &sblk->status_rx_quick_consumer_index;
b4b36042
MC
879 bnapi->int_num = i << 24;
880 }
881 }
35efa7c1 882
43e80b89 883 bp->stats_blk = status_blk + status_blk_size;
b6016b76 884
0f31f994 885 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
b6016b76 886
4ce45e02 887 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2bc4078e 888 bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
59b47d8a
MC
889 if (bp->ctx_pages == 0)
890 bp->ctx_pages = 1;
891 for (i = 0; i < bp->ctx_pages; i++) {
36227e88 892 bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
2bc4078e 893 BNX2_PAGE_SIZE,
36227e88
SG
894 &bp->ctx_blk_mapping[i],
895 GFP_KERNEL);
59b47d8a
MC
896 if (bp->ctx_blk[i] == NULL)
897 goto alloc_mem_err;
898 }
899 }
35e9010b 900
bb4f98ab
MC
901 err = bnx2_alloc_rx_mem(bp);
902 if (err)
903 goto alloc_mem_err;
904
35e9010b
MC
905 err = bnx2_alloc_tx_mem(bp);
906 if (err)
907 goto alloc_mem_err;
908
b6016b76
MC
909 return 0;
910
911alloc_mem_err:
912 bnx2_free_mem(bp);
913 return -ENOMEM;
914}
915
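/*
 * Layout note (editorial): the status block(s) and the statistics block
 * share a single DMA-coherent allocation made in bnx2_alloc_mem():
 *
 *	status_blk + i * BNX2_SBLK_MSIX_ALIGN_SIZE -> status block, vector i
 *	status_blk + status_blk_size               -> struct statistics_block
 *
 * so freeing bnapi->status_blk.msi in bnx2_free_mem() releases both.
 */
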
916static void
917bnx2_report_fw_link(struct bnx2 *bp)
918{
919 u32 fw_link_status = 0;
920
583c28e5 921 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
0d8a6571
MC
922 return;
923
e3648b3d
MC
924 if (bp->link_up) {
925 u32 bmsr;
926
927 switch (bp->line_speed) {
928 case SPEED_10:
929 if (bp->duplex == DUPLEX_HALF)
930 fw_link_status = BNX2_LINK_STATUS_10HALF;
931 else
932 fw_link_status = BNX2_LINK_STATUS_10FULL;
933 break;
934 case SPEED_100:
935 if (bp->duplex == DUPLEX_HALF)
936 fw_link_status = BNX2_LINK_STATUS_100HALF;
937 else
938 fw_link_status = BNX2_LINK_STATUS_100FULL;
939 break;
940 case SPEED_1000:
941 if (bp->duplex == DUPLEX_HALF)
942 fw_link_status = BNX2_LINK_STATUS_1000HALF;
943 else
944 fw_link_status = BNX2_LINK_STATUS_1000FULL;
945 break;
946 case SPEED_2500:
947 if (bp->duplex == DUPLEX_HALF)
948 fw_link_status = BNX2_LINK_STATUS_2500HALF;
949 else
950 fw_link_status = BNX2_LINK_STATUS_2500FULL;
951 break;
952 }
953
954 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
955
956 if (bp->autoneg) {
957 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
958
ca58c3af
MC
959 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
960 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
e3648b3d
MC
961
962 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
583c28e5 963 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
e3648b3d
MC
964 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
965 else
966 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
967 }
968 }
969 else
970 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
971
2726d6e1 972 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
e3648b3d
MC
973}
974
9b1084b8
MC
975static char *
976bnx2_xceiver_str(struct bnx2 *bp)
977{
807540ba 978 return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
583c28e5 979 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
807540ba 980 "Copper");
9b1084b8
MC
981}
982
b6016b76
MC
983static void
984bnx2_report_link(struct bnx2 *bp)
985{
986 if (bp->link_up) {
987 netif_carrier_on(bp->dev);
3a9c6a49
JP
988 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
989 bnx2_xceiver_str(bp),
990 bp->line_speed,
991 bp->duplex == DUPLEX_FULL ? "full" : "half");
b6016b76
MC
992
993 if (bp->flow_ctrl) {
994 if (bp->flow_ctrl & FLOW_CTRL_RX) {
3a9c6a49 995 pr_cont(", receive ");
b6016b76 996 if (bp->flow_ctrl & FLOW_CTRL_TX)
3a9c6a49 997 pr_cont("& transmit ");
b6016b76
MC
998 }
999 else {
3a9c6a49 1000 pr_cont(", transmit ");
b6016b76 1001 }
3a9c6a49 1002 pr_cont("flow control ON");
b6016b76 1003 }
3a9c6a49
JP
1004 pr_cont("\n");
1005 } else {
b6016b76 1006 netif_carrier_off(bp->dev);
3a9c6a49
JP
1007 netdev_err(bp->dev, "NIC %s Link is Down\n",
1008 bnx2_xceiver_str(bp));
b6016b76 1009 }
e3648b3d
MC
1010
1011 bnx2_report_fw_link(bp);
b6016b76
MC
1012}
1013
1014static void
1015bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1016{
1017 u32 local_adv, remote_adv;
1018
1019 bp->flow_ctrl = 0;
6aa20a22 1020 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
b6016b76
MC
1021 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1022
1023 if (bp->duplex == DUPLEX_FULL) {
1024 bp->flow_ctrl = bp->req_flow_ctrl;
1025 }
1026 return;
1027 }
1028
1029 if (bp->duplex != DUPLEX_FULL) {
1030 return;
1031 }
1032
583c28e5 1033 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4ce45e02 1034 (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
5b0c76ad
MC
1035 u32 val;
1036
1037 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1038 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1039 bp->flow_ctrl |= FLOW_CTRL_TX;
1040 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1041 bp->flow_ctrl |= FLOW_CTRL_RX;
1042 return;
1043 }
1044
ca58c3af
MC
1045 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1046 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76 1047
583c28e5 1048 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1049 u32 new_local_adv = 0;
1050 u32 new_remote_adv = 0;
1051
1052 if (local_adv & ADVERTISE_1000XPAUSE)
1053 new_local_adv |= ADVERTISE_PAUSE_CAP;
1054 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1055 new_local_adv |= ADVERTISE_PAUSE_ASYM;
1056 if (remote_adv & ADVERTISE_1000XPAUSE)
1057 new_remote_adv |= ADVERTISE_PAUSE_CAP;
1058 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1059 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1060
1061 local_adv = new_local_adv;
1062 remote_adv = new_remote_adv;
1063 }
1064
1065 /* See Table 28B-3 of 802.3ab-1999 spec. */
1066 if (local_adv & ADVERTISE_PAUSE_CAP) {
1067 if(local_adv & ADVERTISE_PAUSE_ASYM) {
1068 if (remote_adv & ADVERTISE_PAUSE_CAP) {
1069 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1070 }
1071 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1072 bp->flow_ctrl = FLOW_CTRL_RX;
1073 }
1074 }
1075 else {
1076 if (remote_adv & ADVERTISE_PAUSE_CAP) {
1077 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1078 }
1079 }
1080 }
1081 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1082 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1083 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
1084
1085 bp->flow_ctrl = FLOW_CTRL_TX;
1086 }
1087 }
1088}
1089
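/*
 * Editorial summary of the resolution above (IEEE 802.3 Table 28B-3),
 * with Cap = ADVERTISE_PAUSE_CAP and Asym = ADVERTISE_PAUSE_ASYM:
 *
 *	local Cap,        remote Cap        -> FLOW_CTRL_TX | FLOW_CTRL_RX
 *	local Cap + Asym, remote Asym only  -> FLOW_CTRL_RX
 *	local Asym only,  remote Cap + Asym -> FLOW_CTRL_TX
 *	anything else                       -> no pause frames
 */
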
1090static int
1091bnx2_5709s_linkup(struct bnx2 *bp)
1092{
1093 u32 val, speed;
1094
1095 bp->link_up = 1;
1096
1097 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1098 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1099 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1100
1101 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1102 bp->line_speed = bp->req_line_speed;
1103 bp->duplex = bp->req_duplex;
1104 return 0;
1105 }
1106 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1107 switch (speed) {
1108 case MII_BNX2_GP_TOP_AN_SPEED_10:
1109 bp->line_speed = SPEED_10;
1110 break;
1111 case MII_BNX2_GP_TOP_AN_SPEED_100:
1112 bp->line_speed = SPEED_100;
1113 break;
1114 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1115 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1116 bp->line_speed = SPEED_1000;
1117 break;
1118 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1119 bp->line_speed = SPEED_2500;
1120 break;
1121 }
1122 if (val & MII_BNX2_GP_TOP_AN_FD)
1123 bp->duplex = DUPLEX_FULL;
1124 else
1125 bp->duplex = DUPLEX_HALF;
1126 return 0;
1127}
1128
b6016b76 1129static int
5b0c76ad
MC
1130bnx2_5708s_linkup(struct bnx2 *bp)
1131{
1132 u32 val;
1133
1134 bp->link_up = 1;
1135 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1136 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1137 case BCM5708S_1000X_STAT1_SPEED_10:
1138 bp->line_speed = SPEED_10;
1139 break;
1140 case BCM5708S_1000X_STAT1_SPEED_100:
1141 bp->line_speed = SPEED_100;
1142 break;
1143 case BCM5708S_1000X_STAT1_SPEED_1G:
1144 bp->line_speed = SPEED_1000;
1145 break;
1146 case BCM5708S_1000X_STAT1_SPEED_2G5:
1147 bp->line_speed = SPEED_2500;
1148 break;
1149 }
1150 if (val & BCM5708S_1000X_STAT1_FD)
1151 bp->duplex = DUPLEX_FULL;
1152 else
1153 bp->duplex = DUPLEX_HALF;
1154
1155 return 0;
1156}
1157
1158static int
1159bnx2_5706s_linkup(struct bnx2 *bp)
b6016b76
MC
1160{
1161 u32 bmcr, local_adv, remote_adv, common;
1162
1163 bp->link_up = 1;
1164 bp->line_speed = SPEED_1000;
1165
ca58c3af 1166 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
1167 if (bmcr & BMCR_FULLDPLX) {
1168 bp->duplex = DUPLEX_FULL;
1169 }
1170 else {
1171 bp->duplex = DUPLEX_HALF;
1172 }
1173
1174 if (!(bmcr & BMCR_ANENABLE)) {
1175 return 0;
1176 }
1177
ca58c3af
MC
1178 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1179 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76
MC
1180
1181 common = local_adv & remote_adv;
1182 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1183
1184 if (common & ADVERTISE_1000XFULL) {
1185 bp->duplex = DUPLEX_FULL;
1186 }
1187 else {
1188 bp->duplex = DUPLEX_HALF;
1189 }
1190 }
1191
1192 return 0;
1193}
1194
1195static int
1196bnx2_copper_linkup(struct bnx2 *bp)
1197{
1198 u32 bmcr;
1199
4016badd
MC
1200 bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;
1201
ca58c3af 1202 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
1203 if (bmcr & BMCR_ANENABLE) {
1204 u32 local_adv, remote_adv, common;
1205
1206 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1207 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1208
1209 common = local_adv & (remote_adv >> 2);
1210 if (common & ADVERTISE_1000FULL) {
1211 bp->line_speed = SPEED_1000;
1212 bp->duplex = DUPLEX_FULL;
1213 }
1214 else if (common & ADVERTISE_1000HALF) {
1215 bp->line_speed = SPEED_1000;
1216 bp->duplex = DUPLEX_HALF;
1217 }
1218 else {
ca58c3af
MC
1219 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1220 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76
MC
1221
1222 common = local_adv & remote_adv;
1223 if (common & ADVERTISE_100FULL) {
1224 bp->line_speed = SPEED_100;
1225 bp->duplex = DUPLEX_FULL;
1226 }
1227 else if (common & ADVERTISE_100HALF) {
1228 bp->line_speed = SPEED_100;
1229 bp->duplex = DUPLEX_HALF;
1230 }
1231 else if (common & ADVERTISE_10FULL) {
1232 bp->line_speed = SPEED_10;
1233 bp->duplex = DUPLEX_FULL;
1234 }
1235 else if (common & ADVERTISE_10HALF) {
1236 bp->line_speed = SPEED_10;
1237 bp->duplex = DUPLEX_HALF;
1238 }
1239 else {
1240 bp->line_speed = 0;
1241 bp->link_up = 0;
1242 }
1243 }
1244 }
1245 else {
1246 if (bmcr & BMCR_SPEED100) {
1247 bp->line_speed = SPEED_100;
1248 }
1249 else {
1250 bp->line_speed = SPEED_10;
1251 }
1252 if (bmcr & BMCR_FULLDPLX) {
1253 bp->duplex = DUPLEX_FULL;
1254 }
1255 else {
1256 bp->duplex = DUPLEX_HALF;
1257 }
1258 }
1259
4016badd
MC
1260 if (bp->link_up) {
1261 u32 ext_status;
1262
1263 bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
1264 if (ext_status & EXT_STATUS_MDIX)
1265 bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
1266 }
1267
b6016b76
MC
1268 return 0;
1269}
1270
83e3fc89 1271static void
bb4f98ab 1272bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
83e3fc89 1273{
bb4f98ab 1274 u32 val, rx_cid_addr = GET_CID_ADDR(cid);
83e3fc89
MC
1275
1276 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1277 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1278 val |= 0x02 << 8;
1279
22fa159d
MC
1280 if (bp->flow_ctrl & FLOW_CTRL_TX)
1281 val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
83e3fc89 1282
83e3fc89
MC
1283 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1284}
1285
bb4f98ab
MC
1286static void
1287bnx2_init_all_rx_contexts(struct bnx2 *bp)
1288{
1289 int i;
1290 u32 cid;
1291
1292 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1293 if (i == 1)
1294 cid = RX_RSS_CID;
1295 bnx2_init_rx_context(bp, cid);
1296 }
1297}
1298
344478db 1299static void
b6016b76
MC
1300bnx2_set_mac_link(struct bnx2 *bp)
1301{
1302 u32 val;
1303
e503e066 1304 BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
b6016b76
MC
1305 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1306 (bp->duplex == DUPLEX_HALF)) {
e503e066 1307 BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
b6016b76
MC
1308 }
1309
1310 /* Configure the EMAC mode register. */
e503e066 1311 val = BNX2_RD(bp, BNX2_EMAC_MODE);
b6016b76
MC
1312
1313 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
5b0c76ad 1314 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
59b47d8a 1315 BNX2_EMAC_MODE_25G_MODE);
b6016b76
MC
1316
1317 if (bp->link_up) {
5b0c76ad
MC
1318 switch (bp->line_speed) {
1319 case SPEED_10:
4ce45e02 1320 if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
59b47d8a 1321 val |= BNX2_EMAC_MODE_PORT_MII_10M;
5b0c76ad
MC
1322 break;
1323 }
1324 /* fall through */
1325 case SPEED_100:
1326 val |= BNX2_EMAC_MODE_PORT_MII;
1327 break;
1328 case SPEED_2500:
59b47d8a 1329 val |= BNX2_EMAC_MODE_25G_MODE;
5b0c76ad
MC
1330 /* fall through */
1331 case SPEED_1000:
1332 val |= BNX2_EMAC_MODE_PORT_GMII;
1333 break;
1334 }
b6016b76
MC
1335 }
1336 else {
1337 val |= BNX2_EMAC_MODE_PORT_GMII;
1338 }
1339
1340 /* Set the MAC to operate in the appropriate duplex mode. */
1341 if (bp->duplex == DUPLEX_HALF)
1342 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
e503e066 1343 BNX2_WR(bp, BNX2_EMAC_MODE, val);
b6016b76
MC
1344
1345 /* Enable/disable rx PAUSE. */
1346 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1347
1348 if (bp->flow_ctrl & FLOW_CTRL_RX)
1349 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
e503e066 1350 BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
b6016b76
MC
1351
1352 /* Enable/disable tx PAUSE. */
e503e066 1353 val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
b6016b76
MC
1354 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1355
1356 if (bp->flow_ctrl & FLOW_CTRL_TX)
1357 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
e503e066 1358 BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);
b6016b76
MC
1359
1360 /* Acknowledge the interrupt. */
e503e066 1361 BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
b6016b76 1362
22fa159d 1363 bnx2_init_all_rx_contexts(bp);
b6016b76
MC
1364}
1365
27a005b8
MC
1366static void
1367bnx2_enable_bmsr1(struct bnx2 *bp)
1368{
583c28e5 1369 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4ce45e02 1370 (BNX2_CHIP(bp) == BNX2_CHIP_5709))
27a005b8
MC
1371 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1372 MII_BNX2_BLK_ADDR_GP_STATUS);
1373}
1374
1375static void
1376bnx2_disable_bmsr1(struct bnx2 *bp)
1377{
583c28e5 1378 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4ce45e02 1379 (BNX2_CHIP(bp) == BNX2_CHIP_5709))
27a005b8
MC
1380 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1381 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1382}
1383
605a9e20
MC
1384static int
1385bnx2_test_and_enable_2g5(struct bnx2 *bp)
1386{
1387 u32 up1;
1388 int ret = 1;
1389
583c28e5 1390 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1391 return 0;
1392
1393 if (bp->autoneg & AUTONEG_SPEED)
1394 bp->advertising |= ADVERTISED_2500baseX_Full;
1395
4ce45e02 1396 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
27a005b8
MC
1397 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1398
605a9e20
MC
1399 bnx2_read_phy(bp, bp->mii_up1, &up1);
1400 if (!(up1 & BCM5708S_UP1_2G5)) {
1401 up1 |= BCM5708S_UP1_2G5;
1402 bnx2_write_phy(bp, bp->mii_up1, up1);
1403 ret = 0;
1404 }
1405
4ce45e02 1406 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
27a005b8
MC
1407 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1408 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1409
605a9e20
MC
1410 return ret;
1411}
1412
1413static int
1414bnx2_test_and_disable_2g5(struct bnx2 *bp)
1415{
1416 u32 up1;
1417 int ret = 0;
1418
583c28e5 1419 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1420 return 0;
1421
4ce45e02 1422 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
27a005b8
MC
1423 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1424
605a9e20
MC
1425 bnx2_read_phy(bp, bp->mii_up1, &up1);
1426 if (up1 & BCM5708S_UP1_2G5) {
1427 up1 &= ~BCM5708S_UP1_2G5;
1428 bnx2_write_phy(bp, bp->mii_up1, up1);
1429 ret = 1;
1430 }
1431
4ce45e02 1432 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
27a005b8
MC
1433 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1434 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1435
605a9e20
MC
1436 return ret;
1437}
1438
1439static void
1440bnx2_enable_forced_2g5(struct bnx2 *bp)
1441{
cbd6890c
MC
1442 u32 uninitialized_var(bmcr);
1443 int err;
605a9e20 1444
583c28e5 1445 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1446 return;
1447
4ce45e02 1448 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
27a005b8
MC
1449 u32 val;
1450
1451 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1452 MII_BNX2_BLK_ADDR_SERDES_DIG);
cbd6890c
MC
1453 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1454 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1455 val |= MII_BNX2_SD_MISC1_FORCE |
1456 MII_BNX2_SD_MISC1_FORCE_2_5G;
1457 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1458 }
27a005b8
MC
1459
1460 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1461 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
cbd6890c 1462 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
27a005b8 1463
4ce45e02 1464 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
cbd6890c
MC
1465 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1466 if (!err)
1467 bmcr |= BCM5708S_BMCR_FORCE_2500;
c7079857
ED
1468 } else {
1469 return;
605a9e20
MC
1470 }
1471
cbd6890c
MC
1472 if (err)
1473 return;
1474
605a9e20
MC
1475 if (bp->autoneg & AUTONEG_SPEED) {
1476 bmcr &= ~BMCR_ANENABLE;
1477 if (bp->req_duplex == DUPLEX_FULL)
1478 bmcr |= BMCR_FULLDPLX;
1479 }
1480 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1481}
1482
1483static void
1484bnx2_disable_forced_2g5(struct bnx2 *bp)
1485{
cbd6890c
MC
1486 u32 uninitialized_var(bmcr);
1487 int err;
605a9e20 1488
583c28e5 1489 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1490 return;
1491
4ce45e02 1492 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
27a005b8
MC
1493 u32 val;
1494
1495 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1496 MII_BNX2_BLK_ADDR_SERDES_DIG);
cbd6890c
MC
1497 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1498 val &= ~MII_BNX2_SD_MISC1_FORCE;
1499 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1500 }
27a005b8
MC
1501
1502 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1503 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
cbd6890c 1504 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
27a005b8 1505
4ce45e02 1506 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
cbd6890c
MC
1507 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1508 if (!err)
1509 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
c7079857
ED
1510 } else {
1511 return;
605a9e20
MC
1512 }
1513
cbd6890c
MC
1514 if (err)
1515 return;
1516
605a9e20
MC
1517 if (bp->autoneg & AUTONEG_SPEED)
1518 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1519 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1520}
1521
b2fadeae
MC
1522static void
1523bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1524{
1525 u32 val;
1526
1527 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1528 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1529 if (start)
1530 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1531 else
1532 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1533}
1534
b6016b76
MC
1535static int
1536bnx2_set_link(struct bnx2 *bp)
1537{
1538 u32 bmsr;
1539 u8 link_up;
1540
80be4434 1541 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
b6016b76
MC
1542 bp->link_up = 1;
1543 return 0;
1544 }
1545
583c28e5 1546 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
0d8a6571
MC
1547 return 0;
1548
b6016b76
MC
1549 link_up = bp->link_up;
1550
27a005b8
MC
1551 bnx2_enable_bmsr1(bp);
1552 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1553 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1554 bnx2_disable_bmsr1(bp);
b6016b76 1555
583c28e5 1556 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4ce45e02 1557 (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
a2724e25 1558 u32 val, an_dbg;
b6016b76 1559
583c28e5 1560 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
b2fadeae 1561 bnx2_5706s_force_link_dn(bp, 0);
583c28e5 1562 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
b2fadeae 1563 }
e503e066 1564 val = BNX2_RD(bp, BNX2_EMAC_STATUS);
a2724e25
MC
1565
1566 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1567 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1568 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1569
1570 if ((val & BNX2_EMAC_STATUS_LINK) &&
1571 !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
b6016b76
MC
1572 bmsr |= BMSR_LSTATUS;
1573 else
1574 bmsr &= ~BMSR_LSTATUS;
1575 }
1576
1577 if (bmsr & BMSR_LSTATUS) {
1578 bp->link_up = 1;
1579
583c28e5 1580 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
4ce45e02 1581 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
5b0c76ad 1582 bnx2_5706s_linkup(bp);
4ce45e02 1583 else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
5b0c76ad 1584 bnx2_5708s_linkup(bp);
4ce45e02 1585 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
27a005b8 1586 bnx2_5709s_linkup(bp);
b6016b76
MC
1587 }
1588 else {
1589 bnx2_copper_linkup(bp);
1590 }
1591 bnx2_resolve_flow_ctrl(bp);
1592 }
1593 else {
583c28e5 1594 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
605a9e20
MC
1595 (bp->autoneg & AUTONEG_SPEED))
1596 bnx2_disable_forced_2g5(bp);
b6016b76 1597
583c28e5 1598 if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
b2fadeae
MC
1599 u32 bmcr;
1600
1601 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1602 bmcr |= BMCR_ANENABLE;
1603 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1604
583c28e5 1605 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
b2fadeae 1606 }
b6016b76
MC
1607 bp->link_up = 0;
1608 }
1609
1610 if (bp->link_up != link_up) {
1611 bnx2_report_link(bp);
1612 }
1613
1614 bnx2_set_mac_link(bp);
1615
1616 return 0;
1617}
1618
1619static int
1620bnx2_reset_phy(struct bnx2 *bp)
1621{
1622 int i;
1623 u32 reg;
1624
ca58c3af 1625 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
b6016b76
MC
1626
1627#define PHY_RESET_MAX_WAIT 100
1628 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1629 udelay(10);
1630
ca58c3af 1631 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
b6016b76
MC
1632 if (!(reg & BMCR_RESET)) {
1633 udelay(20);
1634 break;
1635 }
1636 }
1637 if (i == PHY_RESET_MAX_WAIT) {
1638 return -EBUSY;
1639 }
1640 return 0;
1641}
1642
1643static u32
1644bnx2_phy_get_pause_adv(struct bnx2 *bp)
1645{
1646 u32 adv = 0;
1647
1648 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1649 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1650
583c28e5 1651 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1652 adv = ADVERTISE_1000XPAUSE;
1653 }
1654 else {
1655 adv = ADVERTISE_PAUSE_CAP;
1656 }
1657 }
1658 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
583c28e5 1659 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1660 adv = ADVERTISE_1000XPSE_ASYM;
1661 }
1662 else {
1663 adv = ADVERTISE_PAUSE_ASYM;
1664 }
1665 }
1666 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
583c28e5 1667 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1668 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1669 }
1670 else {
1671 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1672 }
1673 }
1674 return adv;
1675}
1676
a2f13890 1677static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
0d8a6571 1678
b6016b76 1679static int
0d8a6571 1680bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
52d07b1f
HH
1681__releases(&bp->phy_lock)
1682__acquires(&bp->phy_lock)
0d8a6571
MC
1683{
1684 u32 speed_arg = 0, pause_adv;
1685
1686 pause_adv = bnx2_phy_get_pause_adv(bp);
1687
1688 if (bp->autoneg & AUTONEG_SPEED) {
1689 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1690 if (bp->advertising & ADVERTISED_10baseT_Half)
1691 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1692 if (bp->advertising & ADVERTISED_10baseT_Full)
1693 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1694 if (bp->advertising & ADVERTISED_100baseT_Half)
1695 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1696 if (bp->advertising & ADVERTISED_100baseT_Full)
1697 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1698 if (bp->advertising & ADVERTISED_1000baseT_Full)
1699 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1700 if (bp->advertising & ADVERTISED_2500baseX_Full)
1701 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1702 } else {
1703 if (bp->req_line_speed == SPEED_2500)
1704 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1705 else if (bp->req_line_speed == SPEED_1000)
1706 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1707 else if (bp->req_line_speed == SPEED_100) {
1708 if (bp->req_duplex == DUPLEX_FULL)
1709 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1710 else
1711 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1712 } else if (bp->req_line_speed == SPEED_10) {
1713 if (bp->req_duplex == DUPLEX_FULL)
1714 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1715 else
1716 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1717 }
1718 }
1719
1720 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1721 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
c26736ec 1722 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
0d8a6571
MC
1723 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1724
1725 if (port == PORT_TP)
1726 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1727 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1728
2726d6e1 1729 bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
0d8a6571
MC
1730
1731 spin_unlock_bh(&bp->phy_lock);
a2f13890 1732 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
0d8a6571
MC
1733 spin_lock_bh(&bp->phy_lock);
1734
1735 return 0;
1736}
1737
1738static int
1739bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
52d07b1f
HH
1740__releases(&bp->phy_lock)
1741__acquires(&bp->phy_lock)
b6016b76 1742{
605a9e20 1743 u32 adv, bmcr;
b6016b76
MC
1744 u32 new_adv = 0;
1745
583c28e5 1746 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
807540ba 1747 return bnx2_setup_remote_phy(bp, port);
0d8a6571 1748
b6016b76
MC
1749 if (!(bp->autoneg & AUTONEG_SPEED)) {
1750 u32 new_bmcr;
5b0c76ad
MC
1751 int force_link_down = 0;
1752
605a9e20
MC
1753 if (bp->req_line_speed == SPEED_2500) {
1754 if (!bnx2_test_and_enable_2g5(bp))
1755 force_link_down = 1;
1756 } else if (bp->req_line_speed == SPEED_1000) {
1757 if (bnx2_test_and_disable_2g5(bp))
1758 force_link_down = 1;
1759 }
ca58c3af 1760 bnx2_read_phy(bp, bp->mii_adv, &adv);
80be4434
MC
1761 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1762
ca58c3af 1763 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
605a9e20 1764 new_bmcr = bmcr & ~BMCR_ANENABLE;
80be4434 1765 new_bmcr |= BMCR_SPEED1000;
605a9e20 1766
4ce45e02 1767 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
27a005b8
MC
1768 if (bp->req_line_speed == SPEED_2500)
1769 bnx2_enable_forced_2g5(bp);
1770 else if (bp->req_line_speed == SPEED_1000) {
1771 bnx2_disable_forced_2g5(bp);
1772 new_bmcr &= ~0x2000;
1773 }
1774
4ce45e02 1775 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
605a9e20
MC
1776 if (bp->req_line_speed == SPEED_2500)
1777 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1778 else
1779 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
5b0c76ad
MC
1780 }
1781
b6016b76 1782 if (bp->req_duplex == DUPLEX_FULL) {
5b0c76ad 1783 adv |= ADVERTISE_1000XFULL;
b6016b76
MC
1784 new_bmcr |= BMCR_FULLDPLX;
1785 }
1786 else {
5b0c76ad 1787 adv |= ADVERTISE_1000XHALF;
b6016b76
MC
1788 new_bmcr &= ~BMCR_FULLDPLX;
1789 }
5b0c76ad 1790 if ((new_bmcr != bmcr) || (force_link_down)) {
b6016b76
MC
1791 /* Force a link down visible on the other side */
1792 if (bp->link_up) {
ca58c3af 1793 bnx2_write_phy(bp, bp->mii_adv, adv &
5b0c76ad
MC
1794 ~(ADVERTISE_1000XFULL |
1795 ADVERTISE_1000XHALF));
ca58c3af 1796 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
b6016b76
MC
1797 BMCR_ANRESTART | BMCR_ANENABLE);
1798
1799 bp->link_up = 0;
1800 netif_carrier_off(bp->dev);
ca58c3af 1801 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
80be4434 1802 bnx2_report_link(bp);
b6016b76 1803 }
ca58c3af
MC
1804 bnx2_write_phy(bp, bp->mii_adv, adv);
1805 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
605a9e20
MC
1806 } else {
1807 bnx2_resolve_flow_ctrl(bp);
1808 bnx2_set_mac_link(bp);
b6016b76
MC
1809 }
1810 return 0;
1811 }
1812
605a9e20 1813 bnx2_test_and_enable_2g5(bp);
5b0c76ad 1814
b6016b76
MC
1815 if (bp->advertising & ADVERTISED_1000baseT_Full)
1816 new_adv |= ADVERTISE_1000XFULL;
1817
1818 new_adv |= bnx2_phy_get_pause_adv(bp);
1819
ca58c3af
MC
1820 bnx2_read_phy(bp, bp->mii_adv, &adv);
1821 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
1822
1823 bp->serdes_an_pending = 0;
1824 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1825 /* Force a link down visible on the other side */
1826 if (bp->link_up) {
ca58c3af 1827 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
80be4434
MC
1828 spin_unlock_bh(&bp->phy_lock);
1829 msleep(20);
1830 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
1831 }
1832
ca58c3af
MC
1833 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1834 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
b6016b76 1835 BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate, which is very common
		 * in blade servers.  Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions.  Autonegotiation
		 * involves exchanging base pages plus 3 next pages
		 * and normally completes in about 120 msec.
		 */
40105c0b 1844 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
f8dd064e
MC
1845 bp->serdes_an_pending = 1;
1846 mod_timer(&bp->timer, jiffies + bp->current_interval);
605a9e20
MC
1847 } else {
1848 bnx2_resolve_flow_ctrl(bp);
1849 bnx2_set_mac_link(bp);
b6016b76
MC
1850 }
1851
1852 return 0;
1853}
1854
1855#define ETHTOOL_ALL_FIBRE_SPEED \
583c28e5 1856 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
deaf391b
MC
1857 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1858 (ADVERTISED_1000baseT_Full)
b6016b76
MC
1859
1860#define ETHTOOL_ALL_COPPER_SPEED \
1861 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1862 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1863 ADVERTISED_1000baseT_Full)
1864
1865#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1866 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
6aa20a22 1867
b6016b76
MC
1868#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1869
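/* Derive the default link settings from the remote-PHY link word in
 * shared memory: either enable autoneg with the advertised speeds or
 * force the single speed/duplex requested by the management firmware.
 */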
0d8a6571
MC
1870static void
1871bnx2_set_default_remote_link(struct bnx2 *bp)
1872{
1873 u32 link;
1874
1875 if (bp->phy_port == PORT_TP)
2726d6e1 1876 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
0d8a6571 1877 else
2726d6e1 1878 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
0d8a6571
MC
1879
1880 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1881 bp->req_line_speed = 0;
1882 bp->autoneg |= AUTONEG_SPEED;
1883 bp->advertising = ADVERTISED_Autoneg;
1884 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1885 bp->advertising |= ADVERTISED_10baseT_Half;
1886 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1887 bp->advertising |= ADVERTISED_10baseT_Full;
1888 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1889 bp->advertising |= ADVERTISED_100baseT_Half;
1890 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1891 bp->advertising |= ADVERTISED_100baseT_Full;
1892 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1893 bp->advertising |= ADVERTISED_1000baseT_Full;
1894 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1895 bp->advertising |= ADVERTISED_2500baseX_Full;
1896 } else {
1897 bp->autoneg = 0;
1898 bp->advertising = 0;
1899 bp->req_duplex = DUPLEX_FULL;
1900 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1901 bp->req_line_speed = SPEED_10;
1902 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1903 bp->req_duplex = DUPLEX_HALF;
1904 }
1905 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1906 bp->req_line_speed = SPEED_100;
1907 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1908 bp->req_duplex = DUPLEX_HALF;
1909 }
1910 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1911 bp->req_line_speed = SPEED_1000;
1912 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1913 bp->req_line_speed = SPEED_2500;
1914 }
1915}
1916
deaf391b
MC
1917static void
1918bnx2_set_default_link(struct bnx2 *bp)
1919{
ab59859d
HH
1920 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1921 bnx2_set_default_remote_link(bp);
1922 return;
1923 }
0d8a6571 1924
deaf391b
MC
1925 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1926 bp->req_line_speed = 0;
583c28e5 1927 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
deaf391b
MC
1928 u32 reg;
1929
1930 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1931
2726d6e1 1932 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
deaf391b
MC
1933 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1934 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1935 bp->autoneg = 0;
1936 bp->req_line_speed = bp->line_speed = SPEED_1000;
1937 bp->req_duplex = DUPLEX_FULL;
1938 }
1939 } else
1940 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1941}
1942
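/* Advance the driver pulse sequence number and write it to the shared
 * memory pulse mailbox so the bootcode knows the driver is still alive.
 */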
df149d70
MC
1943static void
1944bnx2_send_heart_beat(struct bnx2 *bp)
1945{
1946 u32 msg;
1947 u32 addr;
1948
1949 spin_lock(&bp->indirect_lock);
1950 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1951 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
e503e066
MC
1952 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1953 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
df149d70
MC
1954 spin_unlock(&bp->indirect_lock);
1955}
1956
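/* Process a link event from the remote PHY: decode speed, duplex and
 * flow control from the shared memory link status word, answer any
 * heartbeat request, and reprogram the MAC if the link state changed.
 */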
0d8a6571
MC
1957static void
1958bnx2_remote_phy_event(struct bnx2 *bp)
1959{
1960 u32 msg;
1961 u8 link_up = bp->link_up;
1962 u8 old_port;
1963
2726d6e1 1964 msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
0d8a6571 1965
df149d70
MC
1966 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1967 bnx2_send_heart_beat(bp);
1968
1969 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1970
0d8a6571
MC
1971 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1972 bp->link_up = 0;
1973 else {
1974 u32 speed;
1975
1976 bp->link_up = 1;
1977 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1978 bp->duplex = DUPLEX_FULL;
1979 switch (speed) {
1980 case BNX2_LINK_STATUS_10HALF:
1981 bp->duplex = DUPLEX_HALF;
7947c9ce 1982 /* fall through */
0d8a6571
MC
1983 case BNX2_LINK_STATUS_10FULL:
1984 bp->line_speed = SPEED_10;
1985 break;
1986 case BNX2_LINK_STATUS_100HALF:
1987 bp->duplex = DUPLEX_HALF;
7947c9ce 1988 /* fall through */
0d8a6571
MC
1989 case BNX2_LINK_STATUS_100BASE_T4:
1990 case BNX2_LINK_STATUS_100FULL:
1991 bp->line_speed = SPEED_100;
1992 break;
1993 case BNX2_LINK_STATUS_1000HALF:
1994 bp->duplex = DUPLEX_HALF;
7947c9ce 1995 /* fall through */
0d8a6571
MC
1996 case BNX2_LINK_STATUS_1000FULL:
1997 bp->line_speed = SPEED_1000;
1998 break;
1999 case BNX2_LINK_STATUS_2500HALF:
2000 bp->duplex = DUPLEX_HALF;
7947c9ce 2001 /* fall through */
0d8a6571
MC
2002 case BNX2_LINK_STATUS_2500FULL:
2003 bp->line_speed = SPEED_2500;
2004 break;
2005 default:
2006 bp->line_speed = 0;
2007 break;
2008 }
2009
0d8a6571
MC
2010 bp->flow_ctrl = 0;
2011 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2012 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2013 if (bp->duplex == DUPLEX_FULL)
2014 bp->flow_ctrl = bp->req_flow_ctrl;
2015 } else {
2016 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2017 bp->flow_ctrl |= FLOW_CTRL_TX;
2018 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2019 bp->flow_ctrl |= FLOW_CTRL_RX;
2020 }
2021
2022 old_port = bp->phy_port;
2023 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2024 bp->phy_port = PORT_FIBRE;
2025 else
2026 bp->phy_port = PORT_TP;
2027
2028 if (old_port != bp->phy_port)
2029 bnx2_set_default_link(bp);
2030
0d8a6571
MC
2031 }
2032 if (bp->link_up != link_up)
2033 bnx2_report_link(bp);
2034
2035 bnx2_set_mac_link(bp);
2036}
2037
2038static int
2039bnx2_set_remote_link(struct bnx2 *bp)
2040{
2041 u32 evt_code;
2042
2726d6e1 2043 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
0d8a6571
MC
2044 switch (evt_code) {
2045 case BNX2_FW_EVT_CODE_LINK_EVENT:
2046 bnx2_remote_phy_event(bp);
2047 break;
2048 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2049 default:
df149d70 2050 bnx2_send_heart_beat(bp);
0d8a6571
MC
2051 break;
2052 }
2053 return 0;
2054}
2055
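/* Configure the copper PHY: restart autoneg with the requested
 * advertisement when AUTONEG_SPEED is set, otherwise force the
 * requested speed/duplex through BMCR and resolve flow control.
 */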
b6016b76
MC
2056static int
2057bnx2_setup_copper_phy(struct bnx2 *bp)
52d07b1f
HH
2058__releases(&bp->phy_lock)
2059__acquires(&bp->phy_lock)
b6016b76 2060{
d17e53bd 2061 u32 bmcr, adv_reg, new_adv = 0;
b6016b76
MC
2062 u32 new_bmcr;
2063
ca58c3af 2064 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76 2065
d17e53bd
MC
2066 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2067 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2068 ADVERTISE_PAUSE_ASYM);
2069
2070 new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
2071
b6016b76 2072 if (bp->autoneg & AUTONEG_SPEED) {
d17e53bd 2073 u32 adv1000_reg;
37f07023 2074 u32 new_adv1000 = 0;
b6016b76 2075
d17e53bd 2076 new_adv |= bnx2_phy_get_pause_adv(bp);
b6016b76
MC
2077
2078 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2079 adv1000_reg &= PHY_ALL_1000_SPEED;
2080
37f07023 2081 new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
37f07023
MC
2082 if ((adv1000_reg != new_adv1000) ||
2083 (adv_reg != new_adv) ||
b6016b76
MC
2084 ((bmcr & BMCR_ANENABLE) == 0)) {
2085
37f07023
MC
2086 bnx2_write_phy(bp, bp->mii_adv, new_adv);
2087 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
ca58c3af 2088 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
b6016b76
MC
2089 BMCR_ANENABLE);
2090 }
2091 else if (bp->link_up) {
2092 /* Flow ctrl may have changed from auto to forced */
2093 /* or vice-versa. */
2094
2095 bnx2_resolve_flow_ctrl(bp);
2096 bnx2_set_mac_link(bp);
2097 }
2098 return 0;
2099 }
2100
d17e53bd
MC
2101 /* advertise nothing when forcing speed */
2102 if (adv_reg != new_adv)
2103 bnx2_write_phy(bp, bp->mii_adv, new_adv);
2104
b6016b76
MC
2105 new_bmcr = 0;
2106 if (bp->req_line_speed == SPEED_100) {
2107 new_bmcr |= BMCR_SPEED100;
2108 }
2109 if (bp->req_duplex == DUPLEX_FULL) {
2110 new_bmcr |= BMCR_FULLDPLX;
2111 }
2112 if (new_bmcr != bmcr) {
2113 u32 bmsr;
b6016b76 2114
ca58c3af
MC
2115 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2116 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
6aa20a22 2117
b6016b76
MC
2118 if (bmsr & BMSR_LSTATUS) {
2119 /* Force link down */
ca58c3af 2120 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
a16dda0e
MC
2121 spin_unlock_bh(&bp->phy_lock);
2122 msleep(50);
2123 spin_lock_bh(&bp->phy_lock);
2124
ca58c3af
MC
2125 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2126 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
b6016b76
MC
2127 }
2128
ca58c3af 2129 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
b6016b76
MC
2130
 2131 /* Normally, the new speed is set up after the link has
2132 * gone down and up again. In some cases, link will not go
2133 * down so we need to set up the new speed here.
2134 */
2135 if (bmsr & BMSR_LSTATUS) {
2136 bp->line_speed = bp->req_line_speed;
2137 bp->duplex = bp->req_duplex;
2138 bnx2_resolve_flow_ctrl(bp);
2139 bnx2_set_mac_link(bp);
2140 }
27a005b8
MC
2141 } else {
2142 bnx2_resolve_flow_ctrl(bp);
2143 bnx2_set_mac_link(bp);
b6016b76
MC
2144 }
2145 return 0;
2146}
2147
2148static int
0d8a6571 2149bnx2_setup_phy(struct bnx2 *bp, u8 port)
52d07b1f
HH
2150__releases(&bp->phy_lock)
2151__acquires(&bp->phy_lock)
b6016b76
MC
2152{
2153 if (bp->loopback == MAC_LOOPBACK)
2154 return 0;
2155
583c28e5 2156 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
807540ba 2157 return bnx2_setup_serdes_phy(bp, port);
b6016b76
MC
2158 }
2159 else {
807540ba 2160 return bnx2_setup_copper_phy(bp);
b6016b76
MC
2161 }
2162}
2163
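/* 5709 SerDes init: the IEEE MII registers are accessed at an offset
 * of 0x10 on this PHY, so redirect the mii_* register shadows before
 * programming the SerDes digital and next-page blocks.
 */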
27a005b8 2164static int
9a120bc5 2165bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
27a005b8
MC
2166{
2167 u32 val;
2168
2169 bp->mii_bmcr = MII_BMCR + 0x10;
2170 bp->mii_bmsr = MII_BMSR + 0x10;
2171 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2172 bp->mii_adv = MII_ADVERTISE + 0x10;
2173 bp->mii_lpa = MII_LPA + 0x10;
2174 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2175
2176 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2177 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2178
2179 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
9a120bc5
MC
2180 if (reset_phy)
2181 bnx2_reset_phy(bp);
27a005b8
MC
2182
2183 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2184
2185 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2186 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2187 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2188 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2189
2190 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2191 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
583c28e5 2192 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
27a005b8
MC
2193 val |= BCM5708S_UP1_2G5;
2194 else
2195 val &= ~BCM5708S_UP1_2G5;
2196 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2197
2198 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2199 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2200 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2201 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2202
2203 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2204
2205 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2206 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2207 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2208
2209 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2210
2211 return 0;
2212}
2213
b6016b76 2214static int
9a120bc5 2215bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
5b0c76ad
MC
2216{
2217 u32 val;
2218
9a120bc5
MC
2219 if (reset_phy)
2220 bnx2_reset_phy(bp);
27a005b8
MC
2221
2222 bp->mii_up1 = BCM5708S_UP1;
2223
5b0c76ad
MC
2224 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2225 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2226 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2227
2228 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2229 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2230 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2231
2232 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2233 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2234 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2235
583c28e5 2236 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
5b0c76ad
MC
2237 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2238 val |= BCM5708S_UP1_2G5;
2239 bnx2_write_phy(bp, BCM5708S_UP1, val);
2240 }
2241
4ce45e02
MC
2242 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
2243 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
2244 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
5b0c76ad
MC
2245 /* increase tx signal amplitude */
2246 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2247 BCM5708S_BLK_ADDR_TX_MISC);
2248 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2249 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2250 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2251 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2252 }
2253
2726d6e1 2254 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
5b0c76ad
MC
2255 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2256
2257 if (val) {
2258 u32 is_backplane;
2259
2726d6e1 2260 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
5b0c76ad
MC
2261 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2262 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2263 BCM5708S_BLK_ADDR_TX_MISC);
2264 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2265 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2266 BCM5708S_BLK_ADDR_DIG);
2267 }
2268 }
2269 return 0;
2270}
2271
2272static int
9a120bc5 2273bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
b6016b76 2274{
9a120bc5
MC
2275 if (reset_phy)
2276 bnx2_reset_phy(bp);
27a005b8 2277
583c28e5 2278 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
b6016b76 2279
4ce45e02 2280 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
e503e066 2281 BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
b6016b76
MC
2282
2283 if (bp->dev->mtu > 1500) {
2284 u32 val;
2285
2286 /* Set extended packet length bit */
2287 bnx2_write_phy(bp, 0x18, 0x7);
2288 bnx2_read_phy(bp, 0x18, &val);
2289 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2290
2291 bnx2_write_phy(bp, 0x1c, 0x6c00);
2292 bnx2_read_phy(bp, 0x1c, &val);
2293 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2294 }
2295 else {
2296 u32 val;
2297
2298 bnx2_write_phy(bp, 0x18, 0x7);
2299 bnx2_read_phy(bp, 0x18, &val);
2300 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2301
2302 bnx2_write_phy(bp, 0x1c, 0x6c00);
2303 bnx2_read_phy(bp, 0x1c, &val);
2304 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2305 }
2306
2307 return 0;
2308}
2309
2310static int
9a120bc5 2311bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
b6016b76 2312{
5b0c76ad
MC
2313 u32 val;
2314
9a120bc5
MC
2315 if (reset_phy)
2316 bnx2_reset_phy(bp);
27a005b8 2317
583c28e5 2318 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
b6016b76
MC
2319 bnx2_write_phy(bp, 0x18, 0x0c00);
2320 bnx2_write_phy(bp, 0x17, 0x000a);
2321 bnx2_write_phy(bp, 0x15, 0x310b);
2322 bnx2_write_phy(bp, 0x17, 0x201f);
2323 bnx2_write_phy(bp, 0x15, 0x9506);
2324 bnx2_write_phy(bp, 0x17, 0x401f);
2325 bnx2_write_phy(bp, 0x15, 0x14e2);
2326 bnx2_write_phy(bp, 0x18, 0x0400);
2327 }
2328
583c28e5 2329 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
b659f44e
MC
2330 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2331 MII_BNX2_DSP_EXPAND_REG | 0x8);
2332 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2333 val &= ~(1 << 8);
2334 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2335 }
2336
b6016b76 2337 if (bp->dev->mtu > 1500) {
b6016b76
MC
2338 /* Set extended packet length bit */
2339 bnx2_write_phy(bp, 0x18, 0x7);
2340 bnx2_read_phy(bp, 0x18, &val);
2341 bnx2_write_phy(bp, 0x18, val | 0x4000);
2342
2343 bnx2_read_phy(bp, 0x10, &val);
2344 bnx2_write_phy(bp, 0x10, val | 0x1);
2345 }
2346 else {
b6016b76
MC
2347 bnx2_write_phy(bp, 0x18, 0x7);
2348 bnx2_read_phy(bp, 0x18, &val);
2349 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2350
2351 bnx2_read_phy(bp, 0x10, &val);
2352 bnx2_write_phy(bp, 0x10, val & ~0x1);
2353 }
2354
5b0c76ad 2355 /* ethernet@wirespeed */
41033b65
MC
2356 bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
2357 bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
2358 val |= AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
2359
2360 /* auto-mdix */
2361 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2362 val |= AUX_CTL_MISC_CTL_AUTOMDIX;
2363
2364 bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
b6016b76
MC
2365 return 0;
2366}
2367
2368
2369static int
9a120bc5 2370bnx2_init_phy(struct bnx2 *bp, int reset_phy)
52d07b1f
HH
2371__releases(&bp->phy_lock)
2372__acquires(&bp->phy_lock)
b6016b76
MC
2373{
2374 u32 val;
2375 int rc = 0;
2376
583c28e5
MC
2377 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2378 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
b6016b76 2379
ca58c3af
MC
2380 bp->mii_bmcr = MII_BMCR;
2381 bp->mii_bmsr = MII_BMSR;
27a005b8 2382 bp->mii_bmsr1 = MII_BMSR;
ca58c3af
MC
2383 bp->mii_adv = MII_ADVERTISE;
2384 bp->mii_lpa = MII_LPA;
2385
e503e066 2386 BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
b6016b76 2387
583c28e5 2388 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
0d8a6571
MC
2389 goto setup_phy;
2390
b6016b76
MC
2391 bnx2_read_phy(bp, MII_PHYSID1, &val);
2392 bp->phy_id = val << 16;
2393 bnx2_read_phy(bp, MII_PHYSID2, &val);
2394 bp->phy_id |= val & 0xffff;
2395
583c28e5 2396 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
4ce45e02 2397 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
9a120bc5 2398 rc = bnx2_init_5706s_phy(bp, reset_phy);
4ce45e02 2399 else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
9a120bc5 2400 rc = bnx2_init_5708s_phy(bp, reset_phy);
4ce45e02 2401 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
9a120bc5 2402 rc = bnx2_init_5709s_phy(bp, reset_phy);
b6016b76
MC
2403 }
2404 else {
9a120bc5 2405 rc = bnx2_init_copper_phy(bp, reset_phy);
b6016b76
MC
2406 }
2407
0d8a6571
MC
2408setup_phy:
2409 if (!rc)
2410 rc = bnx2_setup_phy(bp, bp->phy_port);
b6016b76
MC
2411
2412 return rc;
2413}
2414
2415static int
2416bnx2_set_mac_loopback(struct bnx2 *bp)
2417{
2418 u32 mac_mode;
2419
e503e066 2420 mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
b6016b76
MC
2421 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2422 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
e503e066 2423 BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
b6016b76
MC
2424 bp->link_up = 1;
2425 return 0;
2426}
2427
bc5a0690
MC
2428static int bnx2_test_link(struct bnx2 *);
2429
2430static int
2431bnx2_set_phy_loopback(struct bnx2 *bp)
2432{
2433 u32 mac_mode;
2434 int rc, i;
2435
2436 spin_lock_bh(&bp->phy_lock);
ca58c3af 2437 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
bc5a0690
MC
2438 BMCR_SPEED1000);
2439 spin_unlock_bh(&bp->phy_lock);
2440 if (rc)
2441 return rc;
2442
2443 for (i = 0; i < 10; i++) {
2444 if (bnx2_test_link(bp) == 0)
2445 break;
80be4434 2446 msleep(100);
bc5a0690
MC
2447 }
2448
e503e066 2449 mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
bc5a0690
MC
2450 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2451 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
59b47d8a 2452 BNX2_EMAC_MODE_25G_MODE);
bc5a0690
MC
2453
2454 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
e503e066 2455 BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
bc5a0690
MC
2456 bp->link_up = 1;
2457 return 0;
2458}
2459
ecdbf6e0
JH
2460static void
2461bnx2_dump_mcp_state(struct bnx2 *bp)
2462{
2463 struct net_device *dev = bp->dev;
2464 u32 mcp_p0, mcp_p1;
2465
2466 netdev_err(dev, "<--- start MCP states dump --->\n");
4ce45e02 2467 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
ecdbf6e0
JH
2468 mcp_p0 = BNX2_MCP_STATE_P0;
2469 mcp_p1 = BNX2_MCP_STATE_P1;
2470 } else {
2471 mcp_p0 = BNX2_MCP_STATE_P0_5708;
2472 mcp_p1 = BNX2_MCP_STATE_P1_5708;
2473 }
2474 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2475 bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2476 netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2477 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2478 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2479 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
2480 netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2481 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2482 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2483 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2484 netdev_err(dev, "DEBUG: shmem states:\n");
2485 netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2486 bnx2_shmem_rd(bp, BNX2_DRV_MB),
2487 bnx2_shmem_rd(bp, BNX2_FW_MB),
2488 bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2489 pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2490 netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2491 bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2492 bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2493 pr_cont(" condition[%08x]\n",
2494 bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
13e63517 2495 DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
ecdbf6e0
JH
2496 DP_SHMEM_LINE(bp, 0x3cc);
2497 DP_SHMEM_LINE(bp, 0x3dc);
2498 DP_SHMEM_LINE(bp, 0x3ec);
2499 netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2500 netdev_err(dev, "<--- end MCP states dump --->\n");
2501}
2502
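/* Post a command to the bootcode mailbox. When 'ack' is requested,
 * poll the firmware mailbox for a matching sequence number and, on
 * timeout, report the failure back to the firmware (and optionally
 * dump the MCP state).
 */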
b6016b76 2503static int
a2f13890 2504bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
b6016b76
MC
2505{
2506 int i;
2507 u32 val;
2508
b6016b76
MC
2509 bp->fw_wr_seq++;
2510 msg_data |= bp->fw_wr_seq;
a8d9bc2e 2511 bp->fw_last_msg = msg_data;
b6016b76 2512
2726d6e1 2513 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
b6016b76 2514
a2f13890
MC
2515 if (!ack)
2516 return 0;
2517
b6016b76 2518 /* wait for an acknowledgement. */
40105c0b 2519 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
b090ae2b 2520 msleep(10);
b6016b76 2521
2726d6e1 2522 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
b6016b76
MC
2523
2524 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2525 break;
2526 }
b090ae2b
MC
2527 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2528 return 0;
b6016b76
MC
2529
2530 /* If we timed out, inform the firmware that this is the case. */
b090ae2b 2531 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
b6016b76
MC
2532 msg_data &= ~BNX2_DRV_MSG_CODE;
2533 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2534
2726d6e1 2535 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
ecdbf6e0
JH
2536 if (!silent) {
2537 pr_err("fw sync timeout, reset code = %x\n", msg_data);
2538 bnx2_dump_mcp_state(bp);
2539 }
b6016b76 2540
b6016b76
MC
2541 return -EBUSY;
2542 }
2543
b090ae2b
MC
2544 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2545 return -EIO;
2546
b6016b76
MC
2547 return 0;
2548}
2549
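/* The 5709 keeps its connection context in host memory: start the
 * context memory initialization, then load each context block's bus
 * address into the chip's host page table and wait for each write to
 * be accepted.
 */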
59b47d8a
MC
2550static int
2551bnx2_init_5709_context(struct bnx2 *bp)
2552{
2553 int i, ret = 0;
2554 u32 val;
2555
2556 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2bc4078e 2557 val |= (BNX2_PAGE_BITS - 8) << 16;
e503e066 2558 BNX2_WR(bp, BNX2_CTX_COMMAND, val);
641bdcd5 2559 for (i = 0; i < 10; i++) {
e503e066 2560 val = BNX2_RD(bp, BNX2_CTX_COMMAND);
641bdcd5
MC
2561 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2562 break;
2563 udelay(2);
2564 }
2565 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2566 return -EBUSY;
2567
59b47d8a
MC
2568 for (i = 0; i < bp->ctx_pages; i++) {
2569 int j;
2570
352f7687 2571 if (bp->ctx_blk[i])
2bc4078e 2572 memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
352f7687
MC
2573 else
2574 return -ENOMEM;
2575
e503e066
MC
2576 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2577 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2578 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2579 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2580 (u64) bp->ctx_blk_mapping[i] >> 32);
2581 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2582 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
59b47d8a
MC
2583 for (j = 0; j < 10; j++) {
2584
e503e066 2585 val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
59b47d8a
MC
2586 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2587 break;
2588 udelay(5);
2589 }
2590 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2591 ret = -EBUSY;
2592 break;
2593 }
2594 }
2595 return ret;
2596}
2597
b6016b76
MC
2598static void
2599bnx2_init_context(struct bnx2 *bp)
2600{
2601 u32 vcid;
2602
2603 vcid = 96;
2604 while (vcid) {
2605 u32 vcid_addr, pcid_addr, offset;
7947b20e 2606 int i;
b6016b76
MC
2607
2608 vcid--;
2609
4ce45e02 2610 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
b6016b76
MC
2611 u32 new_vcid;
2612
2613 vcid_addr = GET_PCID_ADDR(vcid);
2614 if (vcid & 0x8) {
2615 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2616 }
2617 else {
2618 new_vcid = vcid;
2619 }
2620 pcid_addr = GET_PCID_ADDR(new_vcid);
2621 }
2622 else {
2623 vcid_addr = GET_CID_ADDR(vcid);
2624 pcid_addr = vcid_addr;
2625 }
2626
7947b20e
MC
2627 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2628 vcid_addr += (i << PHY_CTX_SHIFT);
2629 pcid_addr += (i << PHY_CTX_SHIFT);
b6016b76 2630
e503e066
MC
2631 BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2632 BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
b6016b76 2633
7947b20e
MC
2634 /* Zero out the context. */
2635 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
62a8313c 2636 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
7947b20e 2637 }
b6016b76
MC
2638 }
2639}
2640
2641static int
2642bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2643{
2644 u16 *good_mbuf;
2645 u32 good_mbuf_cnt;
2646 u32 val;
2647
2648 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
e404decb 2649 if (good_mbuf == NULL)
b6016b76 2650 return -ENOMEM;
b6016b76 2651
e503e066 2652 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
b6016b76
MC
2653 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2654
2655 good_mbuf_cnt = 0;
2656
2657 /* Allocate a bunch of mbufs and save the good ones in an array. */
2726d6e1 2658 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
b6016b76 2659 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2726d6e1
MC
2660 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2661 BNX2_RBUF_COMMAND_ALLOC_REQ);
b6016b76 2662
2726d6e1 2663 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
b6016b76
MC
2664
2665 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2666
2667 /* The addresses with Bit 9 set are bad memory blocks. */
2668 if (!(val & (1 << 9))) {
2669 good_mbuf[good_mbuf_cnt] = (u16) val;
2670 good_mbuf_cnt++;
2671 }
2672
2726d6e1 2673 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
b6016b76
MC
2674 }
2675
 2676 /* Free the good ones back to the mbuf pool, thus discarding
 2677 * all the bad ones. */
2678 while (good_mbuf_cnt) {
2679 good_mbuf_cnt--;
2680
2681 val = good_mbuf[good_mbuf_cnt];
2682 val = (val << 9) | val | 1;
2683
2726d6e1 2684 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
b6016b76
MC
2685 }
2686 kfree(good_mbuf);
2687 return 0;
2688}
2689
2690static void
5fcaed01 2691bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
b6016b76
MC
2692{
2693 u32 val;
b6016b76
MC
2694
2695 val = (mac_addr[0] << 8) | mac_addr[1];
2696
e503e066 2697 BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
b6016b76 2698
6aa20a22 2699 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
b6016b76
MC
2700 (mac_addr[4] << 8) | mac_addr[5];
2701
e503e066 2702 BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
b6016b76
MC
2703}
2704
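/* Allocate and DMA-map one page for RX page ring slot 'index' and
 * publish its bus address in the matching rx_bd.
 */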
47bf4246 2705static inline int
a2df00aa 2706bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
47bf4246
MC
2707{
2708 dma_addr_t mapping;
2bc4078e
MC
2709 struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2710 struct bnx2_rx_bd *rxbd =
2711 &rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
a2df00aa 2712 struct page *page = alloc_page(gfp);
47bf4246
MC
2713
2714 if (!page)
2715 return -ENOMEM;
36227e88 2716 mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
47bf4246 2717 PCI_DMA_FROMDEVICE);
36227e88 2718 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
3d16af86
BL
2719 __free_page(page);
2720 return -EIO;
2721 }
2722
47bf4246 2723 rx_pg->page = page;
1a4ccc2d 2724 dma_unmap_addr_set(rx_pg, mapping, mapping);
47bf4246
MC
2725 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2726 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2727 return 0;
2728}
2729
2730static void
bb4f98ab 2731bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
47bf4246 2732{
2bc4078e 2733 struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
47bf4246
MC
2734 struct page *page = rx_pg->page;
2735
2736 if (!page)
2737 return;
2738
36227e88
SG
2739 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2740 PAGE_SIZE, PCI_DMA_FROMDEVICE);
47bf4246
MC
2741
2742 __free_page(page);
2743 rx_pg->page = NULL;
2744}
2745
b6016b76 2746static inline int
dd2bc8e9 2747bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
b6016b76 2748{
dd2bc8e9 2749 u8 *data;
2bc4078e 2750 struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
b6016b76 2751 dma_addr_t mapping;
2bc4078e
MC
2752 struct bnx2_rx_bd *rxbd =
2753 &rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
b6016b76 2754
dd2bc8e9
ED
2755 data = kmalloc(bp->rx_buf_size, gfp);
2756 if (!data)
b6016b76 2757 return -ENOMEM;
b6016b76 2758
dd2bc8e9
ED
2759 mapping = dma_map_single(&bp->pdev->dev,
2760 get_l2_fhdr(data),
2761 bp->rx_buf_use_size,
36227e88
SG
2762 PCI_DMA_FROMDEVICE);
2763 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
dd2bc8e9 2764 kfree(data);
3d16af86
BL
2765 return -EIO;
2766 }
b6016b76 2767
dd2bc8e9 2768 rx_buf->data = data;
1a4ccc2d 2769 dma_unmap_addr_set(rx_buf, mapping, mapping);
b6016b76
MC
2770
2771 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2772 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2773
bb4f98ab 2774 rxr->rx_prod_bseq += bp->rx_buf_use_size;
b6016b76
MC
2775
2776 return 0;
2777}
2778
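/* Return 1 if the given attention event bit differs from its ack copy
 * in the status block, acknowledging the change through the status bit
 * set/clear command registers.
 */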
da3e4fbe 2779static int
35efa7c1 2780bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
b6016b76 2781{
43e80b89 2782 struct status_block *sblk = bnapi->status_blk.msi;
b6016b76 2783 u32 new_link_state, old_link_state;
da3e4fbe 2784 int is_set = 1;
b6016b76 2785
da3e4fbe
MC
2786 new_link_state = sblk->status_attn_bits & event;
2787 old_link_state = sblk->status_attn_bits_ack & event;
b6016b76 2788 if (new_link_state != old_link_state) {
da3e4fbe 2789 if (new_link_state)
e503e066 2790 BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
da3e4fbe 2791 else
e503e066 2792 BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
da3e4fbe
MC
2793 } else
2794 is_set = 0;
2795
2796 return is_set;
2797}
2798
2799static void
35efa7c1 2800bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
da3e4fbe 2801{
74ecc62d
MC
2802 spin_lock(&bp->phy_lock);
2803
2804 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
b6016b76 2805 bnx2_set_link(bp);
35efa7c1 2806 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
0d8a6571
MC
2807 bnx2_set_remote_link(bp);
2808
74ecc62d
MC
2809 spin_unlock(&bp->phy_lock);
2810
b6016b76
MC
2811}
2812
ead7270b 2813static inline u16
35efa7c1 2814bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
ead7270b
MC
2815{
2816 u16 cons;
2817
43e80b89
MC
2818 /* Tell compiler that status block fields can change. */
2819 barrier();
2820 cons = *bnapi->hw_tx_cons_ptr;
581daf7e 2821 barrier();
2bc4078e 2822 if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
ead7270b
MC
2823 cons++;
2824 return cons;
2825}
2826
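/* Reclaim completed TX descriptors up to the hardware consumer index:
 * unmap the DMA buffers, free the skbs, and wake the TX queue once
 * enough descriptors are available again.
 */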
57851d84
MC
2827static int
2828bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
b6016b76 2829{
35e9010b 2830 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
b6016b76 2831 u16 hw_cons, sw_cons, sw_ring_cons;
706bf240 2832 int tx_pkt = 0, index;
e9831909 2833 unsigned int tx_bytes = 0;
706bf240
BL
2834 struct netdev_queue *txq;
2835
2836 index = (bnapi - bp->bnx2_napi);
2837 txq = netdev_get_tx_queue(bp->dev, index);
b6016b76 2838
35efa7c1 2839 hw_cons = bnx2_get_hw_tx_cons(bnapi);
35e9010b 2840 sw_cons = txr->tx_cons;
b6016b76
MC
2841
2842 while (sw_cons != hw_cons) {
2bc4078e 2843 struct bnx2_sw_tx_bd *tx_buf;
b6016b76
MC
2844 struct sk_buff *skb;
2845 int i, last;
2846
2bc4078e 2847 sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
b6016b76 2848
35e9010b 2849 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
b6016b76 2850 skb = tx_buf->skb;
1d39ed56 2851
d62fda08
ED
2852 /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
2853 prefetch(&skb->end);
2854
b6016b76 2855 /* partial BD completions possible with TSO packets */
d62fda08 2856 if (tx_buf->is_gso) {
b6016b76
MC
2857 u16 last_idx, last_ring_idx;
2858
d62fda08
ED
2859 last_idx = sw_cons + tx_buf->nr_frags + 1;
2860 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2bc4078e 2861 if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
b6016b76
MC
2862 last_idx++;
2863 }
2864 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2865 break;
2866 }
2867 }
1d39ed56 2868
36227e88 2869 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
e95524a7 2870 skb_headlen(skb), PCI_DMA_TODEVICE);
b6016b76
MC
2871
2872 tx_buf->skb = NULL;
d62fda08 2873 last = tx_buf->nr_frags;
b6016b76
MC
2874
2875 for (i = 0; i < last; i++) {
2bc4078e 2876 struct bnx2_sw_tx_bd *tx_buf;
e95524a7 2877
2bc4078e
MC
2878 sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2879
2880 tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
36227e88 2881 dma_unmap_page(&bp->pdev->dev,
2bc4078e 2882 dma_unmap_addr(tx_buf, mapping),
9e903e08 2883 skb_frag_size(&skb_shinfo(skb)->frags[i]),
e95524a7 2884 PCI_DMA_TODEVICE);
b6016b76
MC
2885 }
2886
2bc4078e 2887 sw_cons = BNX2_NEXT_TX_BD(sw_cons);
b6016b76 2888
e9831909 2889 tx_bytes += skb->len;
f458b2ee 2890 dev_kfree_skb_any(skb);
57851d84
MC
2891 tx_pkt++;
2892 if (tx_pkt == budget)
2893 break;
b6016b76 2894
d62fda08
ED
2895 if (hw_cons == sw_cons)
2896 hw_cons = bnx2_get_hw_tx_cons(bnapi);
b6016b76
MC
2897 }
2898
e9831909 2899 netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
35e9010b
MC
2900 txr->hw_tx_cons = hw_cons;
2901 txr->tx_cons = sw_cons;
706bf240 2902
2f8af120 2903 /* Need to make the tx_cons update visible to bnx2_start_xmit()
706bf240 2904 * before checking for netif_tx_queue_stopped(). Without the
2f8af120
MC
2905 * memory barrier, there is a small possibility that bnx2_start_xmit()
2906 * will miss it and cause the queue to be stopped forever.
2907 */
2908 smp_mb();
b6016b76 2909
706bf240 2910 if (unlikely(netif_tx_queue_stopped(txq)) &&
35e9010b 2911 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
706bf240
BL
2912 __netif_tx_lock(txq, smp_processor_id());
2913 if ((netif_tx_queue_stopped(txq)) &&
35e9010b 2914 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
706bf240
BL
2915 netif_tx_wake_queue(txq);
2916 __netif_tx_unlock(txq);
b6016b76 2917 }
706bf240 2918
57851d84 2919 return tx_pkt;
b6016b76
MC
2920}
2921
1db82f2a 2922static void
bb4f98ab 2923bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
a1f60190 2924 struct sk_buff *skb, int count)
1db82f2a 2925{
2bc4078e
MC
2926 struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2927 struct bnx2_rx_bd *cons_bd, *prod_bd;
1db82f2a 2928 int i;
3d16af86 2929 u16 hw_prod, prod;
bb4f98ab 2930 u16 cons = rxr->rx_pg_cons;
1db82f2a 2931
3d16af86
BL
2932 cons_rx_pg = &rxr->rx_pg_ring[cons];
2933
2934 /* The caller was unable to allocate a new page to replace the
2935 * last one in the frags array, so we need to recycle that page
2936 * and then free the skb.
2937 */
2938 if (skb) {
2939 struct page *page;
2940 struct skb_shared_info *shinfo;
2941
2942 shinfo = skb_shinfo(skb);
2943 shinfo->nr_frags--;
b7b6a688
IC
2944 page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2945 __skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
3d16af86
BL
2946
2947 cons_rx_pg->page = page;
2948 dev_kfree_skb(skb);
2949 }
2950
2951 hw_prod = rxr->rx_pg_prod;
2952
1db82f2a 2953 for (i = 0; i < count; i++) {
2bc4078e 2954 prod = BNX2_RX_PG_RING_IDX(hw_prod);
1db82f2a 2955
bb4f98ab
MC
2956 prod_rx_pg = &rxr->rx_pg_ring[prod];
2957 cons_rx_pg = &rxr->rx_pg_ring[cons];
2bc4078e
MC
2958 cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2959 [BNX2_RX_IDX(cons)];
2960 prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2961 [BNX2_RX_IDX(prod)];
1db82f2a 2962
1db82f2a
MC
2963 if (prod != cons) {
2964 prod_rx_pg->page = cons_rx_pg->page;
2965 cons_rx_pg->page = NULL;
1a4ccc2d
FT
2966 dma_unmap_addr_set(prod_rx_pg, mapping,
2967 dma_unmap_addr(cons_rx_pg, mapping));
1db82f2a
MC
2968
2969 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2970 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2971
2972 }
2bc4078e
MC
2973 cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2974 hw_prod = BNX2_NEXT_RX_BD(hw_prod);
1db82f2a 2975 }
bb4f98ab
MC
2976 rxr->rx_pg_prod = hw_prod;
2977 rxr->rx_pg_cons = cons;
1db82f2a
MC
2978}
2979
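/* Recycle an unconsumed RX buffer: move the data pointer (and, when
 * cons != prod, its DMA mapping and descriptor address) from the
 * consumer slot back to the producer slot.
 */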
b6016b76 2980static inline void
dd2bc8e9
ED
2981bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2982 u8 *data, u16 cons, u16 prod)
b6016b76 2983{
2bc4078e
MC
2984 struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
2985 struct bnx2_rx_bd *cons_bd, *prod_bd;
236b6394 2986
bb4f98ab
MC
2987 cons_rx_buf = &rxr->rx_buf_ring[cons];
2988 prod_rx_buf = &rxr->rx_buf_ring[prod];
b6016b76 2989
36227e88 2990 dma_sync_single_for_device(&bp->pdev->dev,
1a4ccc2d 2991 dma_unmap_addr(cons_rx_buf, mapping),
601d3d18 2992 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
b6016b76 2993
bb4f98ab 2994 rxr->rx_prod_bseq += bp->rx_buf_use_size;
b6016b76 2995
dd2bc8e9 2996 prod_rx_buf->data = data;
b6016b76 2997
236b6394
MC
2998 if (cons == prod)
2999 return;
b6016b76 3000
1a4ccc2d
FT
3001 dma_unmap_addr_set(prod_rx_buf, mapping,
3002 dma_unmap_addr(cons_rx_buf, mapping));
236b6394 3003
2bc4078e
MC
3004 cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
3005 prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
236b6394
MC
3006 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
3007 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
b6016b76
MC
3008}
3009
dd2bc8e9
ED
3010static struct sk_buff *
3011bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
a1f60190
MC
3012 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
3013 u32 ring_idx)
85833c62
MC
3014{
3015 int err;
3016 u16 prod = ring_idx & 0xffff;
dd2bc8e9 3017 struct sk_buff *skb;
85833c62 3018
dd2bc8e9 3019 err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
85833c62 3020 if (unlikely(err)) {
dd2bc8e9
ED
3021 bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3022error:
1db82f2a
MC
3023 if (hdr_len) {
3024 unsigned int raw_len = len + 4;
3025 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3026
bb4f98ab 3027 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
1db82f2a 3028 }
dd2bc8e9 3029 return NULL;
85833c62
MC
3030 }
3031
36227e88 3032 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
85833c62 3033 PCI_DMA_FROMDEVICE);
d3836f21 3034 skb = build_skb(data, 0);
dd2bc8e9
ED
3035 if (!skb) {
3036 kfree(data);
3037 goto error;
3038 }
3039 skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
1db82f2a
MC
3040 if (hdr_len == 0) {
3041 skb_put(skb, len);
dd2bc8e9 3042 return skb;
1db82f2a
MC
3043 } else {
3044 unsigned int i, frag_len, frag_size, pages;
2bc4078e 3045 struct bnx2_sw_pg *rx_pg;
bb4f98ab
MC
3046 u16 pg_cons = rxr->rx_pg_cons;
3047 u16 pg_prod = rxr->rx_pg_prod;
1db82f2a
MC
3048
3049 frag_size = len + 4 - hdr_len;
3050 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3051 skb_put(skb, hdr_len);
3052
3053 for (i = 0; i < pages; i++) {
3d16af86
BL
3054 dma_addr_t mapping_old;
3055
1db82f2a
MC
3056 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3057 if (unlikely(frag_len <= 4)) {
3058 unsigned int tail = 4 - frag_len;
3059
bb4f98ab
MC
3060 rxr->rx_pg_cons = pg_cons;
3061 rxr->rx_pg_prod = pg_prod;
3062 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
a1f60190 3063 pages - i);
1db82f2a
MC
3064 skb->len -= tail;
3065 if (i == 0) {
3066 skb->tail -= tail;
3067 } else {
3068 skb_frag_t *frag =
3069 &skb_shinfo(skb)->frags[i - 1];
9e903e08 3070 skb_frag_size_sub(frag, tail);
1db82f2a 3071 skb->data_len -= tail;
1db82f2a 3072 }
dd2bc8e9 3073 return skb;
1db82f2a 3074 }
bb4f98ab 3075 rx_pg = &rxr->rx_pg_ring[pg_cons];
1db82f2a 3076
3d16af86
BL
3077 /* Don't unmap yet. If we're unable to allocate a new
3078 * page, we need to recycle the page and the DMA addr.
3079 */
1a4ccc2d 3080 mapping_old = dma_unmap_addr(rx_pg, mapping);
1db82f2a
MC
3081 if (i == pages - 1)
3082 frag_len -= 4;
3083
3084 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3085 rx_pg->page = NULL;
3086
bb4f98ab 3087 err = bnx2_alloc_rx_page(bp, rxr,
2bc4078e 3088 BNX2_RX_PG_RING_IDX(pg_prod),
a2df00aa 3089 GFP_ATOMIC);
1db82f2a 3090 if (unlikely(err)) {
bb4f98ab
MC
3091 rxr->rx_pg_cons = pg_cons;
3092 rxr->rx_pg_prod = pg_prod;
3093 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
a1f60190 3094 pages - i);
dd2bc8e9 3095 return NULL;
1db82f2a
MC
3096 }
3097
36227e88 3098 dma_unmap_page(&bp->pdev->dev, mapping_old,
3d16af86
BL
3099 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3100
1db82f2a
MC
3101 frag_size -= frag_len;
3102 skb->data_len += frag_len;
a1f4e8bc 3103 skb->truesize += PAGE_SIZE;
1db82f2a
MC
3104 skb->len += frag_len;
3105
2bc4078e
MC
3106 pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3107 pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
1db82f2a 3108 }
bb4f98ab
MC
3109 rxr->rx_pg_prod = pg_prod;
3110 rxr->rx_pg_cons = pg_cons;
1db82f2a 3111 }
dd2bc8e9 3112 return skb;
85833c62
MC
3113}
3114
c09c2627 3115static inline u16
35efa7c1 3116bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
c09c2627 3117{
bb4f98ab
MC
3118 u16 cons;
3119
43e80b89
MC
3120 /* Tell compiler that status block fields can change. */
3121 barrier();
3122 cons = *bnapi->hw_rx_cons_ptr;
581daf7e 3123 barrier();
2bc4078e 3124 if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
c09c2627
MC
3125 cons++;
3126 return cons;
3127}
3128
b6016b76 3129static int
35efa7c1 3130bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
b6016b76 3131{
bb4f98ab 3132 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
b6016b76
MC
3133 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3134 struct l2_fhdr *rx_hdr;
1db82f2a 3135 int rx_pkt = 0, pg_ring_used = 0;
b6016b76 3136
310c4d4e
EB
3137 if (budget <= 0)
3138 return rx_pkt;
3139
35efa7c1 3140 hw_cons = bnx2_get_hw_rx_cons(bnapi);
bb4f98ab
MC
3141 sw_cons = rxr->rx_cons;
3142 sw_prod = rxr->rx_prod;
b6016b76
MC
3143
3144 /* Memory barrier necessary as speculative reads of the rx
3145 * buffer can be ahead of the index in the status block
3146 */
3147 rmb();
3148 while (sw_cons != hw_cons) {
1db82f2a 3149 unsigned int len, hdr_len;
ade2bfe7 3150 u32 status;
2bc4078e 3151 struct bnx2_sw_bd *rx_buf, *next_rx_buf;
b6016b76 3152 struct sk_buff *skb;
236b6394 3153 dma_addr_t dma_addr;
dd2bc8e9 3154 u8 *data;
2bc4078e 3155 u16 next_ring_idx;
b6016b76 3156
2bc4078e
MC
3157 sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3158 sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
b6016b76 3159
bb4f98ab 3160 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
dd2bc8e9
ED
3161 data = rx_buf->data;
3162 rx_buf->data = NULL;
aabef8b2 3163
dd2bc8e9
ED
3164 rx_hdr = get_l2_fhdr(data);
3165 prefetch(rx_hdr);
236b6394 3166
1a4ccc2d 3167 dma_addr = dma_unmap_addr(rx_buf, mapping);
236b6394 3168
36227e88 3169 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
601d3d18
BL
3170 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3171 PCI_DMA_FROMDEVICE);
b6016b76 3172
2bc4078e
MC
3173 next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3174 next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
dd2bc8e9
ED
3175 prefetch(get_l2_fhdr(next_rx_buf->data));
3176
1db82f2a 3177 len = rx_hdr->l2_fhdr_pkt_len;
990ec380 3178 status = rx_hdr->l2_fhdr_status;
b6016b76 3179
1db82f2a
MC
3180 hdr_len = 0;
3181 if (status & L2_FHDR_STATUS_SPLIT) {
3182 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3183 pg_ring_used = 1;
3184 } else if (len > bp->rx_jumbo_thresh) {
3185 hdr_len = bp->rx_jumbo_thresh;
3186 pg_ring_used = 1;
3187 }
3188
990ec380
MC
3189 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3190 L2_FHDR_ERRORS_PHY_DECODE |
3191 L2_FHDR_ERRORS_ALIGNMENT |
3192 L2_FHDR_ERRORS_TOO_SHORT |
3193 L2_FHDR_ERRORS_GIANT_FRAME))) {
3194
dd2bc8e9 3195 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
990ec380
MC
3196 sw_ring_prod);
3197 if (pg_ring_used) {
3198 int pages;
3199
3200 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3201
3202 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3203 }
3204 goto next_rx;
3205 }
3206
1db82f2a 3207 len -= 4;
b6016b76 3208
5d5d0015 3209 if (len <= bp->rx_copy_thresh) {
dd2bc8e9
ED
3210 skb = netdev_alloc_skb(bp->dev, len + 6);
3211 if (skb == NULL) {
3212 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
85833c62
MC
3213 sw_ring_prod);
3214 goto next_rx;
3215 }
b6016b76
MC
3216
3217 /* aligned copy */
dd2bc8e9
ED
3218 memcpy(skb->data,
3219 (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3220 len + 6);
3221 skb_reserve(skb, 6);
3222 skb_put(skb, len);
b6016b76 3223
dd2bc8e9 3224 bnx2_reuse_rx_data(bp, rxr, data,
b6016b76
MC
3225 sw_ring_cons, sw_ring_prod);
3226
dd2bc8e9
ED
3227 } else {
3228 skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3229 (sw_ring_cons << 16) | sw_ring_prod);
3230 if (!skb)
3231 goto next_rx;
3232 }
f22828e8 3233 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
7d0fd211 3234 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
86a9bad3 3235 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
f22828e8 3236
b6016b76
MC
3237 skb->protocol = eth_type_trans(skb, bp->dev);
3238
3239 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
d1e100ba 3240 (ntohs(skb->protocol) != 0x8100)) {
b6016b76 3241
745720e5 3242 dev_kfree_skb(skb);
b6016b76
MC
3243 goto next_rx;
3244
3245 }
3246
bc8acf2c 3247 skb_checksum_none_assert(skb);
8d7dfc2b 3248 if ((bp->dev->features & NETIF_F_RXCSUM) &&
b6016b76
MC
3249 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3250 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3251
ade2bfe7
MC
3252 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3253 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
b6016b76
MC
3254 skb->ip_summed = CHECKSUM_UNNECESSARY;
3255 }
fdc8541d
MC
3256 if ((bp->dev->features & NETIF_F_RXHASH) &&
3257 ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3258 L2_FHDR_STATUS_USE_RXHASH))
cf1bfd6a
TH
3259 skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
3260 PKT_HASH_TYPE_L3);
b6016b76 3261
0c8dfc83 3262 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
7d0fd211 3263 napi_gro_receive(&bnapi->napi, skb);
b6016b76
MC
3264 rx_pkt++;
3265
3266next_rx:
2bc4078e
MC
3267 sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3268 sw_prod = BNX2_NEXT_RX_BD(sw_prod);
b6016b76
MC
3269
3270 if ((rx_pkt == budget))
3271 break;
f4e418f7
MC
3272
3273 /* Refresh hw_cons to see if there is new work */
3274 if (sw_cons == hw_cons) {
35efa7c1 3275 hw_cons = bnx2_get_hw_rx_cons(bnapi);
f4e418f7
MC
3276 rmb();
3277 }
b6016b76 3278 }
bb4f98ab
MC
3279 rxr->rx_cons = sw_cons;
3280 rxr->rx_prod = sw_prod;
b6016b76 3281
1db82f2a 3282 if (pg_ring_used)
e503e066 3283 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
1db82f2a 3284
e503e066 3285 BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
b6016b76 3286
e503e066 3287 BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
b6016b76
MC
3288
3289 mmiowb();
3290
3291 return rx_pkt;
3292
3293}
3294
3295/* MSI ISR - The only difference between this and the INTx ISR
3296 * is that the MSI interrupt is always serviced.
3297 */
3298static irqreturn_t
7d12e780 3299bnx2_msi(int irq, void *dev_instance)
b6016b76 3300{
f0ea2e63
MC
3301 struct bnx2_napi *bnapi = dev_instance;
3302 struct bnx2 *bp = bnapi->bp;
b6016b76 3303
43e80b89 3304 prefetch(bnapi->status_blk.msi);
e503e066 3305 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
b6016b76
MC
3306 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3307 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3308
3309 /* Return here if interrupt is disabled. */
73eef4cd
MC
3310 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3311 return IRQ_HANDLED;
b6016b76 3312
288379f0 3313 napi_schedule(&bnapi->napi);
b6016b76 3314
73eef4cd 3315 return IRQ_HANDLED;
b6016b76
MC
3316}
3317
8e6a72c4
MC
3318static irqreturn_t
3319bnx2_msi_1shot(int irq, void *dev_instance)
3320{
f0ea2e63
MC
3321 struct bnx2_napi *bnapi = dev_instance;
3322 struct bnx2 *bp = bnapi->bp;
8e6a72c4 3323
43e80b89 3324 prefetch(bnapi->status_blk.msi);
8e6a72c4
MC
3325
3326 /* Return here if interrupt is disabled. */
3327 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3328 return IRQ_HANDLED;
3329
288379f0 3330 napi_schedule(&bnapi->napi);
8e6a72c4
MC
3331
3332 return IRQ_HANDLED;
3333}
3334
b6016b76 3335static irqreturn_t
7d12e780 3336bnx2_interrupt(int irq, void *dev_instance)
b6016b76 3337{
f0ea2e63
MC
3338 struct bnx2_napi *bnapi = dev_instance;
3339 struct bnx2 *bp = bnapi->bp;
43e80b89 3340 struct status_block *sblk = bnapi->status_blk.msi;
b6016b76
MC
3341
3342 /* When using INTx, it is possible for the interrupt to arrive
3343 * at the CPU before the status block posted prior to the
3344 * interrupt. Reading a register will flush the status block.
3345 * When using MSI, the MSI message will always complete after
3346 * the status block write.
3347 */
35efa7c1 3348 if ((sblk->status_idx == bnapi->last_status_idx) &&
e503e066 3349 (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
b6016b76 3350 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
73eef4cd 3351 return IRQ_NONE;
b6016b76 3352
e503e066 3353 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
b6016b76
MC
3354 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3355 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3356
b8a7ce7b
MC
3357 /* Read back to deassert IRQ immediately to avoid too many
3358 * spurious interrupts.
3359 */
e503e066 3360 BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
b8a7ce7b 3361
b6016b76 3362 /* Return here if interrupt is shared and is disabled. */
73eef4cd
MC
3363 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3364 return IRQ_HANDLED;
b6016b76 3365
288379f0 3366 if (napi_schedule_prep(&bnapi->napi)) {
35efa7c1 3367 bnapi->last_status_idx = sblk->status_idx;
288379f0 3368 __napi_schedule(&bnapi->napi);
b8a7ce7b 3369 }
b6016b76 3370
73eef4cd 3371 return IRQ_HANDLED;
b6016b76
MC
3372}
3373
f4e418f7 3374static inline int
43e80b89 3375bnx2_has_fast_work(struct bnx2_napi *bnapi)
f4e418f7 3376{
35e9010b 3377 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
bb4f98ab 3378 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
f4e418f7 3379
bb4f98ab 3380 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
35e9010b 3381 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
f4e418f7 3382 return 1;
43e80b89
MC
3383 return 0;
3384}
3385
3386#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3387 STATUS_ATTN_BITS_TIMER_ABORT)
3388
3389static inline int
3390bnx2_has_work(struct bnx2_napi *bnapi)
3391{
3392 struct status_block *sblk = bnapi->status_blk.msi;
3393
3394 if (bnx2_has_fast_work(bnapi))
3395 return 1;
f4e418f7 3396
4edd473f
MC
3397#ifdef BCM_CNIC
3398 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3399 return 1;
3400#endif
3401
da3e4fbe
MC
3402 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3403 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
f4e418f7
MC
3404 return 1;
3405
3406 return 0;
3407}
3408
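/* Work around missed MSIs: if work is pending but the status index has
 * not moved since the last idle check, toggle MSI off and on and
 * service the interrupt by hand.
 */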
efba0180
MC
3409static void
3410bnx2_chk_missed_msi(struct bnx2 *bp)
3411{
3412 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3413 u32 msi_ctrl;
3414
3415 if (bnx2_has_work(bnapi)) {
e503e066 3416 msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
efba0180
MC
3417 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3418 return;
3419
3420 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
e503e066
MC
3421 BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3422 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3423 BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
efba0180
MC
3424 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3425 }
3426 }
3427
3428 bp->idle_chk_status_idx = bnapi->last_status_idx;
3429}
3430
4edd473f
MC
3431#ifdef BCM_CNIC
3432static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3433{
3434 struct cnic_ops *c_ops;
3435
3436 if (!bnapi->cnic_present)
3437 return;
3438
3439 rcu_read_lock();
3440 c_ops = rcu_dereference(bp->cnic_ops);
3441 if (c_ops)
3442 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3443 bnapi->status_blk.msi);
3444 rcu_read_unlock();
3445}
3446#endif
3447
43e80b89 3448static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
b6016b76 3449{
43e80b89 3450 struct status_block *sblk = bnapi->status_blk.msi;
da3e4fbe
MC
3451 u32 status_attn_bits = sblk->status_attn_bits;
3452 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
b6016b76 3453
da3e4fbe
MC
3454 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3455 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
b6016b76 3456
35efa7c1 3457 bnx2_phy_int(bp, bnapi);
bf5295bb
MC
3458
3459 /* This is needed to take care of transient status
3460 * during link changes.
3461 */
e503e066
MC
3462 BNX2_WR(bp, BNX2_HC_COMMAND,
3463 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3464 BNX2_RD(bp, BNX2_HC_COMMAND);
b6016b76 3465 }
43e80b89
MC
3466}
3467
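/* One NAPI pass for this vector: service TX completions first, then RX
 * up to the remaining budget, and return the updated work_done count.
 */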
3468static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3469 int work_done, int budget)
3470{
3471 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3472 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
b6016b76 3473
35e9010b 3474 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
57851d84 3475 bnx2_tx_int(bp, bnapi, 0);
b6016b76 3476
bb4f98ab 3477 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
35efa7c1 3478 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
6aa20a22 3479
6f535763
DM
3480 return work_done;
3481}
3482
f0ea2e63
MC
3483static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3484{
3485 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3486 struct bnx2 *bp = bnapi->bp;
3487 int work_done = 0;
3488 struct status_block_msix *sblk = bnapi->status_blk.msix;
3489
3490 while (1) {
3491 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3492 if (unlikely(work_done >= budget))
3493 break;
3494
3495 bnapi->last_status_idx = sblk->status_idx;
3496 /* status idx must be read before checking for more work. */
3497 rmb();
3498 if (likely(!bnx2_has_fast_work(bnapi))) {
3499
288379f0 3500 napi_complete(napi);
e503e066
MC
3501 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3502 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3503 bnapi->last_status_idx);
f0ea2e63
MC
3504 break;
3505 }
3506 }
3507 return work_done;
3508}
3509
6f535763
DM
3510static int bnx2_poll(struct napi_struct *napi, int budget)
3511{
35efa7c1
MC
3512 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3513 struct bnx2 *bp = bnapi->bp;
6f535763 3514 int work_done = 0;
43e80b89 3515 struct status_block *sblk = bnapi->status_blk.msi;
6f535763
DM
3516
3517 while (1) {
43e80b89
MC
3518 bnx2_poll_link(bp, bnapi);
3519
35efa7c1 3520 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
f4e418f7 3521
4edd473f
MC
3522#ifdef BCM_CNIC
3523 bnx2_poll_cnic(bp, bnapi);
3524#endif
3525
35efa7c1 3526 /* bnapi->last_status_idx is used below to tell the hw how
6dee6421
MC
3527 * much work has been processed, so we must read it before
3528 * checking for more work.
3529 */
35efa7c1 3530 bnapi->last_status_idx = sblk->status_idx;
efba0180
MC
3531
3532 if (unlikely(work_done >= budget))
3533 break;
3534
6dee6421 3535 rmb();
35efa7c1 3536 if (likely(!bnx2_has_work(bnapi))) {
288379f0 3537 napi_complete(napi);
f86e82fb 3538 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
e503e066
MC
3539 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3540 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3541 bnapi->last_status_idx);
6dee6421 3542 break;
6f535763 3543 }
e503e066
MC
3544 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3545 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3546 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3547 bnapi->last_status_idx);
3548
3549 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3550 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3551 bnapi->last_status_idx);
6f535763
DM
3552 break;
3553 }
b6016b76
MC
3554 }
3555
bea3348e 3556 return work_done;
b6016b76
MC
3557}
3558
932ff279 3559/* Called with rtnl_lock from vlan functions and also netif_tx_lock
b6016b76
MC
3560 * from set_multicast.
3561 */
3562static void
3563bnx2_set_rx_mode(struct net_device *dev)
3564{
972ec0d4 3565 struct bnx2 *bp = netdev_priv(dev);
b6016b76 3566 u32 rx_mode, sort_mode;
ccffad25 3567 struct netdev_hw_addr *ha;
b6016b76 3568 int i;
b6016b76 3569
9f52b564
MC
3570 if (!netif_running(dev))
3571 return;
3572
c770a65c 3573 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
3574
3575 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3576 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3577 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
f646968f 3578 if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
7d0fd211 3579 (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
b6016b76 3580 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
b6016b76
MC
3581 if (dev->flags & IFF_PROMISC) {
3582 /* Promiscuous mode. */
3583 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
7510873d
MC
3584 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3585 BNX2_RPM_SORT_USER0_PROM_VLAN;
b6016b76
MC
3586 }
3587 else if (dev->flags & IFF_ALLMULTI) {
3588 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
e503e066
MC
3589 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3590 0xffffffff);
b6016b76
MC
3591 }
3592 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3593 }
3594 else {
3595 		/* Accept one or more multicast addresses. */
b6016b76
MC
3596 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3597 u32 regidx;
3598 u32 bit;
3599 u32 crc;
3600
3601 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3602
22bedad3
JP
3603 netdev_for_each_mc_addr(ha, dev) {
3604 crc = ether_crc_le(ETH_ALEN, ha->addr);
b6016b76
MC
3605 bit = crc & 0xff;
3606 regidx = (bit & 0xe0) >> 5;
3607 bit &= 0x1f;
3608 mc_filter[regidx] |= (1 << bit);
3609 }
3610
3611 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
e503e066
MC
3612 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3613 mc_filter[i]);
b6016b76
MC
3614 }
3615
3616 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3617 }
3618
32e7bfc4 3619 if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
5fcaed01
BL
3620 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3621 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3622 BNX2_RPM_SORT_USER0_PROM_VLAN;
3623 } else if (!(dev->flags & IFF_PROMISC)) {
5fcaed01 3624 		/* Add all entries into the match filter list */
ccffad25 3625 i = 0;
32e7bfc4 3626 netdev_for_each_uc_addr(ha, dev) {
ccffad25 3627 bnx2_set_mac_addr(bp, ha->addr,
5fcaed01
BL
3628 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3629 sort_mode |= (1 <<
3630 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
ccffad25 3631 i++;
5fcaed01
BL
3632 }
3633
3634 }
3635
b6016b76
MC
3636 if (rx_mode != bp->rx_mode) {
3637 bp->rx_mode = rx_mode;
e503e066 3638 BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
b6016b76
MC
3639 }
3640
e503e066
MC
3641 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3642 BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3643 BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
b6016b76 3644
c770a65c 3645 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
3646}
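
The multicast filter above maps each address to a single bit spread across the eight 32-bit hash registers: the low byte of the little-endian CRC picks the register (its top three bits) and the bit position within it (its low five bits). A standalone userspace sketch of that mapping, using a reflected CRC-32 routine assumed equivalent to the kernel's ether_crc_le(), not driver code:

	#include <stdint.h>
	#include <stdio.h>

	/* Reflected CRC-32 (poly 0xedb88320), no final inversion -- believed to
	 * match ether_crc_le(); treat the equivalence as an assumption. */
	static uint32_t crc32_le(const uint8_t *data, int len)
	{
		uint32_t crc = ~0u;
		int i, bit;

		for (i = 0; i < len; i++) {
			crc ^= data[i];
			for (bit = 0; bit < 8; bit++)
				crc = (crc >> 1) ^ (crc & 1 ? 0xedb88320 : 0);
		}
		return crc;
	}

	int main(void)
	{
		uint8_t mc_addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
		uint32_t crc = crc32_le(mc_addr, 6);
		uint32_t bit = crc & 0xff;           /* low byte of the CRC */
		uint32_t regidx = (bit & 0xe0) >> 5; /* which of the 8 hash registers */

		bit &= 0x1f;                         /* which bit inside that register */
		printf("hash reg %u, bit %u\n", regidx, bit);
		return 0;
	}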
3647
7880b72e 3648static int
57579f76
MC
3649check_fw_section(const struct firmware *fw,
3650 const struct bnx2_fw_file_section *section,
3651 u32 alignment, bool non_empty)
3652{
3653 u32 offset = be32_to_cpu(section->offset);
3654 u32 len = be32_to_cpu(section->len);
3655
3656 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3657 return -EINVAL;
3658 if ((non_empty && len == 0) || len > fw->size - offset ||
3659 len & (alignment - 1))
3660 return -EINVAL;
3661 return 0;
3662}
3663
7880b72e 3664static int
57579f76
MC
3665check_mips_fw_entry(const struct firmware *fw,
3666 const struct bnx2_mips_fw_file_entry *entry)
3667{
3668 if (check_fw_section(fw, &entry->text, 4, true) ||
3669 check_fw_section(fw, &entry->data, 4, false) ||
3670 check_fw_section(fw, &entry->rodata, 4, false))
3671 return -EINVAL;
3672 return 0;
3673}
3674
7880b72e 3675static void bnx2_release_firmware(struct bnx2 *bp)
3676{
3677 if (bp->rv2p_firmware) {
3678 release_firmware(bp->mips_firmware);
3679 release_firmware(bp->rv2p_firmware);
3680 bp->rv2p_firmware = NULL;
3681 }
3682}
3683
3684static int bnx2_request_uncached_firmware(struct bnx2 *bp)
b6016b76 3685{
57579f76 3686 const char *mips_fw_file, *rv2p_fw_file;
5ee1c326
BB
3687 const struct bnx2_mips_fw_file *mips_fw;
3688 const struct bnx2_rv2p_fw_file *rv2p_fw;
57579f76
MC
3689 int rc;
3690
4ce45e02 3691 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
57579f76 3692 mips_fw_file = FW_MIPS_FILE_09;
4ce45e02
MC
3693 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3694 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
078b0735
MC
3695 rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3696 else
3697 rv2p_fw_file = FW_RV2P_FILE_09;
57579f76
MC
3698 } else {
3699 mips_fw_file = FW_MIPS_FILE_06;
3700 rv2p_fw_file = FW_RV2P_FILE_06;
3701 }
3702
3703 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3704 if (rc) {
3a9c6a49 3705 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
7880b72e 3706 goto out;
57579f76
MC
3707 }
3708
3709 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3710 if (rc) {
3a9c6a49 3711 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
7880b72e 3712 goto err_release_mips_firmware;
57579f76 3713 }
5ee1c326
BB
3714 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3715 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3716 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3717 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3718 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3719 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3720 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3721 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3a9c6a49 3722 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
7880b72e 3723 rc = -EINVAL;
3724 goto err_release_firmware;
57579f76 3725 }
5ee1c326
BB
3726 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3727 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3728 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3a9c6a49 3729 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
7880b72e 3730 rc = -EINVAL;
3731 goto err_release_firmware;
57579f76 3732 }
7880b72e 3733out:
3734 return rc;
57579f76 3735
7880b72e 3736err_release_firmware:
3737 release_firmware(bp->rv2p_firmware);
3738 bp->rv2p_firmware = NULL;
3739err_release_mips_firmware:
3740 release_firmware(bp->mips_firmware);
3741 goto out;
3742}
3743
3744static int bnx2_request_firmware(struct bnx2 *bp)
3745{
3746 return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
57579f76
MC
3747}
3748
3749static u32
3750rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3751{
3752 switch (idx) {
3753 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3754 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3755 rv2p_code |= RV2P_BD_PAGE_SIZE;
3756 break;
3757 }
3758 return rv2p_code;
3759}
3760
3761static int
3762load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3763 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3764{
3765 u32 rv2p_code_len, file_offset;
3766 __be32 *rv2p_code;
b6016b76 3767 int i;
57579f76
MC
3768 u32 val, cmd, addr;
3769
3770 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3771 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3772
3773 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
b6016b76 3774
57579f76
MC
3775 if (rv2p_proc == RV2P_PROC1) {
3776 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3777 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3778 } else {
3779 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3780 addr = BNX2_RV2P_PROC2_ADDR_CMD;
d25be1d3 3781 }
b6016b76
MC
3782
3783 for (i = 0; i < rv2p_code_len; i += 8) {
e503e066 3784 BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
b6016b76 3785 rv2p_code++;
e503e066 3786 BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
b6016b76
MC
3787 rv2p_code++;
3788
57579f76 3789 val = (i / 8) | cmd;
e503e066 3790 BNX2_WR(bp, addr, val);
57579f76
MC
3791 }
3792
3793 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3794 for (i = 0; i < 8; i++) {
3795 u32 loc, code;
3796
3797 loc = be32_to_cpu(fw_entry->fixup[i]);
3798 if (loc && ((loc * 4) < rv2p_code_len)) {
3799 code = be32_to_cpu(*(rv2p_code + loc - 1));
e503e066 3800 BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
57579f76
MC
3801 code = be32_to_cpu(*(rv2p_code + loc));
3802 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
e503e066 3803 BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
57579f76
MC
3804
3805 val = (loc / 2) | cmd;
e503e066 3806 BNX2_WR(bp, addr, val);
b6016b76
MC
3807 }
3808 }
3809
3810 /* Reset the processor, un-stall is done later. */
3811 if (rv2p_proc == RV2P_PROC1) {
e503e066 3812 BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
b6016b76
MC
3813 }
3814 else {
e503e066 3815 BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
b6016b76 3816 }
57579f76
MC
3817
3818 return 0;
b6016b76
MC
3819}
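
In load_rv2p_fw() above, instructions are loaded as 64-bit pairs (INSTR_HIGH/INSTR_LOW) at command address i / 8, while each fixup record gives a location in 32-bit words; the patched pair is therefore re-read from words loc - 1 and loc of the image and rewritten at command address loc / 2. A small sketch of just that index arithmetic, with illustrative values only:

	#include <stdint.h>
	#include <stdio.h>

	/* For a fixup location in 32-bit words, report which byte offsets in the
	 * firmware image hold the instruction pair and which 64-bit slot is
	 * rewritten.  Not driver code; values are hypothetical. */
	static void rv2p_fixup_addresses(uint32_t loc, uint32_t code_len_bytes)
	{
		if (!loc || loc * 4 >= code_len_bytes) {
			printf("fixup %u: out of range, skipped\n", loc);
			return;
		}
		printf("fixup %u: high word at byte %u, low word at byte %u, "
		       "rewritten at instruction slot %u\n",
		       loc, (loc - 1) * 4, loc * 4, loc / 2);
	}

	int main(void)
	{
		rv2p_fixup_addresses(7, 1024);	/* patches 64-bit slot 3 */
		rv2p_fixup_addresses(0, 1024);	/* loc 0 means "no fixup" */
		return 0;
	}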
3820
af3ee519 3821static int
57579f76
MC
3822load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3823 const struct bnx2_mips_fw_file_entry *fw_entry)
b6016b76 3824{
57579f76
MC
3825 u32 addr, len, file_offset;
3826 __be32 *data;
b6016b76
MC
3827 u32 offset;
3828 u32 val;
3829
3830 /* Halt the CPU. */
2726d6e1 3831 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
b6016b76 3832 val |= cpu_reg->mode_value_halt;
2726d6e1
MC
3833 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3834 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
b6016b76
MC
3835
3836 /* Load the Text area. */
57579f76
MC
3837 addr = be32_to_cpu(fw_entry->text.addr);
3838 len = be32_to_cpu(fw_entry->text.len);
3839 file_offset = be32_to_cpu(fw_entry->text.offset);
3840 data = (__be32 *)(bp->mips_firmware->data + file_offset);
ea1f8d5c 3841
57579f76
MC
3842 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3843 if (len) {
b6016b76
MC
3844 int j;
3845
57579f76
MC
3846 for (j = 0; j < (len / 4); j++, offset += 4)
3847 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
b6016b76
MC
3848 }
3849
57579f76
MC
3850 /* Load the Data area. */
3851 addr = be32_to_cpu(fw_entry->data.addr);
3852 len = be32_to_cpu(fw_entry->data.len);
3853 file_offset = be32_to_cpu(fw_entry->data.offset);
3854 data = (__be32 *)(bp->mips_firmware->data + file_offset);
b6016b76 3855
57579f76
MC
3856 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3857 if (len) {
b6016b76
MC
3858 int j;
3859
57579f76
MC
3860 for (j = 0; j < (len / 4); j++, offset += 4)
3861 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
b6016b76
MC
3862 }
3863
3864 /* Load the Read-Only area. */
57579f76
MC
3865 addr = be32_to_cpu(fw_entry->rodata.addr);
3866 len = be32_to_cpu(fw_entry->rodata.len);
3867 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3868 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3869
3870 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3871 if (len) {
b6016b76
MC
3872 int j;
3873
57579f76
MC
3874 for (j = 0; j < (len / 4); j++, offset += 4)
3875 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
b6016b76
MC
3876 }
3877
3878 /* Clear the pre-fetch instruction. */
2726d6e1 3879 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
57579f76
MC
3880
3881 val = be32_to_cpu(fw_entry->start_addr);
3882 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
b6016b76
MC
3883
3884 /* Start the CPU. */
2726d6e1 3885 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
b6016b76 3886 val &= ~cpu_reg->mode_value_halt;
2726d6e1
MC
3887 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3888 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
af3ee519
MC
3889
3890 return 0;
b6016b76
MC
3891}
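
Each firmware section address handled above is a MIPS-view address; load_cpu_fw() converts it to an indirect-register offset with spad_base + (addr - mips_view_base) before writing the section word by word. A one-off sketch of that translation with made-up register values (the real bases live in the cpu_reg tables elsewhere in this file):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical values, for illustration only. */
		uint32_t spad_base = 0x00160000;	/* processor scratchpad window */
		uint32_t mips_view_base = 0x08000000;	/* where the CPU sees that RAM */
		uint32_t text_addr = 0x08000078;	/* section load address from the fw file */

		printf("write text at indirect offset 0x%08x\n",
		       spad_base + (text_addr - mips_view_base));
		return 0;
	}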
3892
fba9fe91 3893static int
b6016b76
MC
3894bnx2_init_cpus(struct bnx2 *bp)
3895{
57579f76
MC
3896 const struct bnx2_mips_fw_file *mips_fw =
3897 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3898 const struct bnx2_rv2p_fw_file *rv2p_fw =
3899 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3900 int rc;
b6016b76
MC
3901
3902 /* Initialize the RV2P processor. */
57579f76
MC
3903 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3904 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
b6016b76
MC
3905
3906 /* Initialize the RX Processor. */
57579f76 3907 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
fba9fe91
MC
3908 if (rc)
3909 goto init_cpu_err;
3910
b6016b76 3911 /* Initialize the TX Processor. */
57579f76 3912 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
fba9fe91
MC
3913 if (rc)
3914 goto init_cpu_err;
3915
b6016b76 3916 /* Initialize the TX Patch-up Processor. */
57579f76 3917 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
fba9fe91
MC
3918 if (rc)
3919 goto init_cpu_err;
3920
b6016b76 3921 /* Initialize the Completion Processor. */
57579f76 3922 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
fba9fe91
MC
3923 if (rc)
3924 goto init_cpu_err;
3925
d43584c8 3926 /* Initialize the Command Processor. */
57579f76 3927 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
b6016b76 3928
fba9fe91 3929init_cpu_err:
fba9fe91 3930 return rc;
b6016b76
MC
3931}
3932
b6a23e91
MC
3933static void
3934bnx2_setup_wol(struct bnx2 *bp)
3935{
3936 int i;
3937 u32 val, wol_msg;
3938
3939 if (bp->wol) {
3940 u32 advertising;
3941 u8 autoneg;
3942
3943 autoneg = bp->autoneg;
3944 advertising = bp->advertising;
3945
3946 if (bp->phy_port == PORT_TP) {
3947 bp->autoneg = AUTONEG_SPEED;
3948 bp->advertising = ADVERTISED_10baseT_Half |
3949 ADVERTISED_10baseT_Full |
3950 ADVERTISED_100baseT_Half |
3951 ADVERTISED_100baseT_Full |
3952 ADVERTISED_Autoneg;
3953 }
3954
3955 spin_lock_bh(&bp->phy_lock);
3956 bnx2_setup_phy(bp, bp->phy_port);
3957 spin_unlock_bh(&bp->phy_lock);
3958
3959 bp->autoneg = autoneg;
3960 bp->advertising = advertising;
3961
3962 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3963
3964 val = BNX2_RD(bp, BNX2_EMAC_MODE);
3965
3966 /* Enable port mode. */
3967 val &= ~BNX2_EMAC_MODE_PORT;
3968 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3969 BNX2_EMAC_MODE_ACPI_RCVD |
3970 BNX2_EMAC_MODE_MPKT;
3971 if (bp->phy_port == PORT_TP) {
3972 val |= BNX2_EMAC_MODE_PORT_MII;
3973 } else {
3974 val |= BNX2_EMAC_MODE_PORT_GMII;
3975 if (bp->line_speed == SPEED_2500)
3976 val |= BNX2_EMAC_MODE_25G_MODE;
3977 }
3978
3979 BNX2_WR(bp, BNX2_EMAC_MODE, val);
3980
3981 /* receive all multicast */
3982 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3983 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3984 0xffffffff);
3985 }
3986 BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
3987
3988 val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
3989 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3990 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
3991 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
3992
3993 /* Need to enable EMAC and RPM for WOL. */
3994 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3995 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3996 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3997 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3998
3999 val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4000 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4001 BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4002
4003 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4004 } else {
4005 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4006 }
4007
a8d9bc2e
MC
4008 if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
4009 u32 val;
4010
4011 wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
4012 if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
4013 bnx2_fw_sync(bp, wol_msg, 1, 0);
4014 return;
4015 }
4016 /* Tell firmware not to power down the PHY yet, otherwise
4017 * the chip will take a long time to respond to MMIO reads.
4018 */
4019 val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
4020 bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
4021 val | BNX2_PORT_FEATURE_ASF_ENABLED);
4022 bnx2_fw_sync(bp, wol_msg, 1, 0);
4023 bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
4024 }
b6a23e91
MC
4025
4026}
4027
b6016b76 4028static int
829ca9a3 4029bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
b6016b76 4030{
b6016b76 4031 switch (state) {
829ca9a3 4032 case PCI_D0: {
b6016b76
MC
4033 u32 val;
4034
6d5e85c7
MC
4035 pci_enable_wake(bp->pdev, PCI_D0, false);
4036 pci_set_power_state(bp->pdev, PCI_D0);
b6016b76 4037
e503e066 4038 val = BNX2_RD(bp, BNX2_EMAC_MODE);
b6016b76
MC
4039 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
4040 val &= ~BNX2_EMAC_MODE_MPKT;
e503e066 4041 BNX2_WR(bp, BNX2_EMAC_MODE, val);
b6016b76 4042
e503e066 4043 val = BNX2_RD(bp, BNX2_RPM_CONFIG);
b6016b76 4044 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
e503e066 4045 BNX2_WR(bp, BNX2_RPM_CONFIG, val);
b6016b76
MC
4046 break;
4047 }
829ca9a3 4048 case PCI_D3hot: {
b6a23e91 4049 bnx2_setup_wol(bp);
6d5e85c7 4050 pci_wake_from_d3(bp->pdev, bp->wol);
4ce45e02
MC
4051 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4052 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
b6016b76
MC
4053
4054 if (bp->wol)
6d5e85c7 4055 pci_set_power_state(bp->pdev, PCI_D3hot);
a8d9bc2e
MC
4056 break;
4057
4058 }
4059 if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4060 u32 val;
4061
4062 /* Tell firmware not to power down the PHY yet,
4063 * otherwise the other port may not respond to
4064 * MMIO reads.
4065 */
4066 val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
4067 val &= ~BNX2_CONDITION_PM_STATE_MASK;
4068 val |= BNX2_CONDITION_PM_STATE_UNPREP;
4069 bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
b6016b76 4070 }
a8d9bc2e 4071 pci_set_power_state(bp->pdev, PCI_D3hot);
b6016b76
MC
4072
4073 /* No more memory access after this point until
4074 * device is brought back to D0.
4075 */
b6016b76
MC
4076 break;
4077 }
4078 default:
4079 return -EINVAL;
4080 }
4081 return 0;
4082}
4083
4084static int
4085bnx2_acquire_nvram_lock(struct bnx2 *bp)
4086{
4087 u32 val;
4088 int j;
4089
4090 /* Request access to the flash interface. */
e503e066 4091 BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
b6016b76 4092 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
e503e066 4093 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
b6016b76
MC
4094 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4095 break;
4096
4097 udelay(5);
4098 }
4099
4100 if (j >= NVRAM_TIMEOUT_COUNT)
4101 return -EBUSY;
4102
4103 return 0;
4104}
4105
4106static int
4107bnx2_release_nvram_lock(struct bnx2 *bp)
4108{
4109 int j;
4110 u32 val;
4111
4112 /* Relinquish nvram interface. */
e503e066 4113 BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
b6016b76
MC
4114
4115 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
e503e066 4116 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
b6016b76
MC
4117 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4118 break;
4119
4120 udelay(5);
4121 }
4122
4123 if (j >= NVRAM_TIMEOUT_COUNT)
4124 return -EBUSY;
4125
4126 return 0;
4127}
4128
4129
4130static int
4131bnx2_enable_nvram_write(struct bnx2 *bp)
4132{
4133 u32 val;
4134
e503e066
MC
4135 val = BNX2_RD(bp, BNX2_MISC_CFG);
4136 BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
b6016b76 4137
e30372c9 4138 if (bp->flash_info->flags & BNX2_NV_WREN) {
b6016b76
MC
4139 int j;
4140
e503e066
MC
4141 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4142 BNX2_WR(bp, BNX2_NVM_COMMAND,
4143 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
b6016b76
MC
4144
4145 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4146 udelay(5);
4147
e503e066 4148 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
b6016b76
MC
4149 if (val & BNX2_NVM_COMMAND_DONE)
4150 break;
4151 }
4152
4153 if (j >= NVRAM_TIMEOUT_COUNT)
4154 return -EBUSY;
4155 }
4156 return 0;
4157}
4158
4159static void
4160bnx2_disable_nvram_write(struct bnx2 *bp)
4161{
4162 u32 val;
4163
e503e066
MC
4164 val = BNX2_RD(bp, BNX2_MISC_CFG);
4165 BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
b6016b76
MC
4166}
4167
4168
4169static void
4170bnx2_enable_nvram_access(struct bnx2 *bp)
4171{
4172 u32 val;
4173
e503e066 4174 val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
b6016b76 4175 /* Enable both bits, even on read. */
e503e066
MC
4176 BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4177 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
b6016b76
MC
4178}
4179
4180static void
4181bnx2_disable_nvram_access(struct bnx2 *bp)
4182{
4183 u32 val;
4184
e503e066 4185 val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
b6016b76 4186 /* Disable both bits, even after read. */
e503e066 4187 BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
4188 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4189 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4190}
4191
4192static int
4193bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4194{
4195 u32 cmd;
4196 int j;
4197
e30372c9 4198 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
b6016b76
MC
4199 /* Buffered flash, no erase needed */
4200 return 0;
4201
4202 /* Build an erase command */
4203 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4204 BNX2_NVM_COMMAND_DOIT;
4205
4206 /* Need to clear DONE bit separately. */
e503e066 4207 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
b6016b76
MC
4208
4209 	/* Address of the NVRAM to erase. */
e503e066 4210 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
b6016b76
MC
4211
4212 /* Issue an erase command. */
e503e066 4213 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
b6016b76
MC
4214
4215 /* Wait for completion. */
4216 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4217 u32 val;
4218
4219 udelay(5);
4220
e503e066 4221 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
b6016b76
MC
4222 if (val & BNX2_NVM_COMMAND_DONE)
4223 break;
4224 }
4225
4226 if (j >= NVRAM_TIMEOUT_COUNT)
4227 return -EBUSY;
4228
4229 return 0;
4230}
4231
4232static int
4233bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4234{
4235 u32 cmd;
4236 int j;
4237
4238 /* Build the command word. */
4239 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4240
e30372c9
MC
4241 	/* Calculate the page-based offset for a buffered flash; not needed for 5709. */
4242 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
b6016b76
MC
4243 offset = ((offset / bp->flash_info->page_size) <<
4244 bp->flash_info->page_bits) +
4245 (offset % bp->flash_info->page_size);
4246 }
4247
4248 /* Need to clear DONE bit separately. */
e503e066 4249 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
b6016b76
MC
4250
4251 /* Address of the NVRAM to read from. */
e503e066 4252 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
b6016b76
MC
4253
4254 /* Issue a read command. */
e503e066 4255 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
b6016b76
MC
4256
4257 /* Wait for completion. */
4258 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4259 u32 val;
4260
4261 udelay(5);
4262
e503e066 4263 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
b6016b76 4264 if (val & BNX2_NVM_COMMAND_DONE) {
e503e066 4265 __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
b491edd5 4266 memcpy(ret_val, &v, 4);
b6016b76
MC
4267 break;
4268 }
4269 }
4270 if (j >= NVRAM_TIMEOUT_COUNT)
4271 return -EBUSY;
4272
4273 return 0;
4274}
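
For buffered (page-addressed) parts flagged BNX2_NV_TRANSLATE, the linear offset is rebuilt as (page number << page_bits) + offset-within-page before it reaches the NVM block. A quick sketch, assuming a hypothetical part with 264-byte pages exposed on 512-byte (page_bits = 9) boundaries:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t nvram_translate(uint32_t offset, uint32_t page_size,
					uint32_t page_bits)
	{
		return ((offset / page_size) << page_bits) + (offset % page_size);
	}

	int main(void)
	{
		/* 264-byte pages on 512-byte boundaries -- assumed values. */
		printf("0x%x\n", nvram_translate(300, 264, 9));	/* page 1, byte 36 -> 0x224 */
		return 0;
	}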
4275
4276
4277static int
4278bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4279{
b491edd5
AV
4280 u32 cmd;
4281 __be32 val32;
b6016b76
MC
4282 int j;
4283
4284 /* Build the command word. */
4285 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4286
e30372c9
MC
4287 	/* Calculate the page-based offset for a buffered flash; not needed for 5709. */
4288 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
b6016b76
MC
4289 offset = ((offset / bp->flash_info->page_size) <<
4290 bp->flash_info->page_bits) +
4291 (offset % bp->flash_info->page_size);
4292 }
4293
4294 /* Need to clear DONE bit separately. */
e503e066 4295 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
b6016b76
MC
4296
4297 memcpy(&val32, val, 4);
b6016b76
MC
4298
4299 /* Write the data. */
e503e066 4300 BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
b6016b76
MC
4301
4302 /* Address of the NVRAM to write to. */
e503e066 4303 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
b6016b76
MC
4304
4305 /* Issue the write command. */
e503e066 4306 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
b6016b76
MC
4307
4308 /* Wait for completion. */
4309 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4310 udelay(5);
4311
e503e066 4312 if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
b6016b76
MC
4313 break;
4314 }
4315 if (j >= NVRAM_TIMEOUT_COUNT)
4316 return -EBUSY;
4317
4318 return 0;
4319}
4320
4321static int
4322bnx2_init_nvram(struct bnx2 *bp)
4323{
4324 u32 val;
e30372c9 4325 int j, entry_count, rc = 0;
0ced9d01 4326 const struct flash_spec *flash;
b6016b76 4327
4ce45e02 4328 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
e30372c9
MC
4329 bp->flash_info = &flash_5709;
4330 goto get_flash_size;
4331 }
4332
b6016b76 4333 /* Determine the selected interface. */
e503e066 4334 val = BNX2_RD(bp, BNX2_NVM_CFG1);
b6016b76 4335
ff8ac609 4336 entry_count = ARRAY_SIZE(flash_table);
b6016b76 4337
b6016b76
MC
4338 if (val & 0x40000000) {
4339
4340 /* Flash interface has been reconfigured */
4341 for (j = 0, flash = &flash_table[0]; j < entry_count;
37137709
MC
4342 j++, flash++) {
4343 if ((val & FLASH_BACKUP_STRAP_MASK) ==
4344 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
b6016b76
MC
4345 bp->flash_info = flash;
4346 break;
4347 }
4348 }
4349 }
4350 else {
37137709 4351 u32 mask;
b6016b76
MC
4352 		/* Not yet reconfigured */
4353
37137709
MC
4354 if (val & (1 << 23))
4355 mask = FLASH_BACKUP_STRAP_MASK;
4356 else
4357 mask = FLASH_STRAP_MASK;
4358
b6016b76
MC
4359 for (j = 0, flash = &flash_table[0]; j < entry_count;
4360 j++, flash++) {
4361
37137709 4362 if ((val & mask) == (flash->strapping & mask)) {
b6016b76
MC
4363 bp->flash_info = flash;
4364
4365 /* Request access to the flash interface. */
4366 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4367 return rc;
4368
4369 /* Enable access to flash interface */
4370 bnx2_enable_nvram_access(bp);
4371
4372 /* Reconfigure the flash interface */
e503e066
MC
4373 BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4374 BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4375 BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4376 BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
b6016b76
MC
4377
4378 /* Disable access to flash interface */
4379 bnx2_disable_nvram_access(bp);
4380 bnx2_release_nvram_lock(bp);
4381
4382 break;
4383 }
4384 }
4385 } /* if (val & 0x40000000) */
4386
4387 if (j == entry_count) {
4388 bp->flash_info = NULL;
3a9c6a49 4389 pr_alert("Unknown flash/EEPROM type\n");
1122db71 4390 return -ENODEV;
b6016b76
MC
4391 }
4392
e30372c9 4393get_flash_size:
2726d6e1 4394 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
1122db71
MC
4395 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4396 if (val)
4397 bp->flash_size = val;
4398 else
4399 bp->flash_size = bp->flash_info->total_size;
4400
b6016b76
MC
4401 return rc;
4402}
4403
4404static int
4405bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4406 int buf_size)
4407{
4408 int rc = 0;
4409 u32 cmd_flags, offset32, len32, extra;
4410
4411 if (buf_size == 0)
4412 return 0;
4413
4414 /* Request access to the flash interface. */
4415 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4416 return rc;
4417
4418 /* Enable access to flash interface */
4419 bnx2_enable_nvram_access(bp);
4420
4421 len32 = buf_size;
4422 offset32 = offset;
4423 extra = 0;
4424
4425 cmd_flags = 0;
4426
4427 if (offset32 & 3) {
4428 u8 buf[4];
4429 u32 pre_len;
4430
4431 offset32 &= ~3;
4432 pre_len = 4 - (offset & 3);
4433
4434 if (pre_len >= len32) {
4435 pre_len = len32;
4436 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4437 BNX2_NVM_COMMAND_LAST;
4438 }
4439 else {
4440 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4441 }
4442
4443 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4444
4445 if (rc)
4446 return rc;
4447
4448 memcpy(ret_buf, buf + (offset & 3), pre_len);
4449
4450 offset32 += 4;
4451 ret_buf += pre_len;
4452 len32 -= pre_len;
4453 }
4454 if (len32 & 3) {
4455 extra = 4 - (len32 & 3);
4456 len32 = (len32 + 4) & ~3;
4457 }
4458
4459 if (len32 == 4) {
4460 u8 buf[4];
4461
4462 if (cmd_flags)
4463 cmd_flags = BNX2_NVM_COMMAND_LAST;
4464 else
4465 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4466 BNX2_NVM_COMMAND_LAST;
4467
4468 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4469
4470 memcpy(ret_buf, buf, 4 - extra);
4471 }
4472 else if (len32 > 0) {
4473 u8 buf[4];
4474
4475 /* Read the first word. */
4476 if (cmd_flags)
4477 cmd_flags = 0;
4478 else
4479 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4480
4481 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4482
4483 /* Advance to the next dword. */
4484 offset32 += 4;
4485 ret_buf += 4;
4486 len32 -= 4;
4487
4488 while (len32 > 4 && rc == 0) {
4489 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4490
4491 /* Advance to the next dword. */
4492 offset32 += 4;
4493 ret_buf += 4;
4494 len32 -= 4;
4495 }
4496
4497 if (rc)
4498 return rc;
4499
4500 cmd_flags = BNX2_NVM_COMMAND_LAST;
4501 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4502
4503 memcpy(ret_buf, buf, 4 - extra);
4504 }
4505
4506 /* Disable access to flash interface */
4507 bnx2_disable_nvram_access(bp);
4508
4509 bnx2_release_nvram_lock(bp);
4510
4511 return rc;
4512}
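
bnx2_nvram_read() only ever issues 32-bit, dword-aligned NVM reads, so an unaligned request is split into a partial leading dword, whole dwords, and a partial trailing dword. A standalone sketch of just that splitting arithmetic (no hardware access), for an assumed request of 7 bytes at offset 6:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t offset = 6, len = 7;		/* assumed request */
		uint32_t offset32 = offset & ~3u;	/* first aligned dword */
		uint32_t pre_len = 4 - (offset & 3);	/* bytes taken from it */
		uint32_t remain = len - (pre_len < len ? pre_len : len);
		uint32_t extra = remain & 3 ? 4 - (remain & 3) : 0;

		printf("head dword @%u, copy %u byte(s)\n", offset32, pre_len);
		printf("then %u aligned byte(s) plus a tail dword discarding %u byte(s)\n",
		       remain & ~3u, extra);
		return 0;
	}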
4513
4514static int
4515bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4516 int buf_size)
4517{
4518 u32 written, offset32, len32;
e6be763f 4519 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
b6016b76
MC
4520 int rc = 0;
4521 int align_start, align_end;
4522
4523 buf = data_buf;
4524 offset32 = offset;
4525 len32 = buf_size;
4526 align_start = align_end = 0;
4527
4528 if ((align_start = (offset32 & 3))) {
4529 offset32 &= ~3;
c873879c
MC
4530 len32 += align_start;
4531 if (len32 < 4)
4532 len32 = 4;
b6016b76
MC
4533 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4534 return rc;
4535 }
4536
4537 if (len32 & 3) {
c873879c
MC
4538 align_end = 4 - (len32 & 3);
4539 len32 += align_end;
4540 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4541 return rc;
b6016b76
MC
4542 }
4543
4544 if (align_start || align_end) {
e6be763f
MC
4545 align_buf = kmalloc(len32, GFP_KERNEL);
4546 if (align_buf == NULL)
b6016b76
MC
4547 return -ENOMEM;
4548 if (align_start) {
e6be763f 4549 memcpy(align_buf, start, 4);
b6016b76
MC
4550 }
4551 if (align_end) {
e6be763f 4552 memcpy(align_buf + len32 - 4, end, 4);
b6016b76 4553 }
e6be763f
MC
4554 memcpy(align_buf + align_start, data_buf, buf_size);
4555 buf = align_buf;
b6016b76
MC
4556 }
4557
e30372c9 4558 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
ae181bc4
MC
4559 flash_buffer = kmalloc(264, GFP_KERNEL);
4560 if (flash_buffer == NULL) {
4561 rc = -ENOMEM;
4562 goto nvram_write_end;
4563 }
4564 }
4565
b6016b76
MC
4566 written = 0;
4567 while ((written < len32) && (rc == 0)) {
4568 u32 page_start, page_end, data_start, data_end;
4569 u32 addr, cmd_flags;
4570 int i;
b6016b76
MC
4571
4572 /* Find the page_start addr */
4573 page_start = offset32 + written;
4574 page_start -= (page_start % bp->flash_info->page_size);
4575 /* Find the page_end addr */
4576 page_end = page_start + bp->flash_info->page_size;
4577 /* Find the data_start addr */
4578 data_start = (written == 0) ? offset32 : page_start;
4579 /* Find the data_end addr */
6aa20a22 4580 data_end = (page_end > offset32 + len32) ?
b6016b76
MC
4581 (offset32 + len32) : page_end;
4582
4583 /* Request access to the flash interface. */
4584 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4585 goto nvram_write_end;
4586
4587 /* Enable access to flash interface */
4588 bnx2_enable_nvram_access(bp);
4589
4590 cmd_flags = BNX2_NVM_COMMAND_FIRST;
e30372c9 4591 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
b6016b76
MC
4592 int j;
4593
4594 /* Read the whole page into the buffer
4595 			 * (non-buffered flash only) */
4596 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4597 if (j == (bp->flash_info->page_size - 4)) {
4598 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4599 }
4600 rc = bnx2_nvram_read_dword(bp,
6aa20a22
JG
4601 page_start + j,
4602 &flash_buffer[j],
b6016b76
MC
4603 cmd_flags);
4604
4605 if (rc)
4606 goto nvram_write_end;
4607
4608 cmd_flags = 0;
4609 }
4610 }
4611
4612 /* Enable writes to flash interface (unlock write-protect) */
4613 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4614 goto nvram_write_end;
4615
b6016b76
MC
4616 /* Loop to write back the buffer data from page_start to
4617 * data_start */
4618 i = 0;
e30372c9 4619 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
c873879c
MC
4620 /* Erase the page */
4621 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4622 goto nvram_write_end;
4623
4624 			/* Re-enable writes for the actual write */
4625 bnx2_enable_nvram_write(bp);
4626
b6016b76
MC
4627 for (addr = page_start; addr < data_start;
4628 addr += 4, i += 4) {
6aa20a22 4629
b6016b76
MC
4630 rc = bnx2_nvram_write_dword(bp, addr,
4631 &flash_buffer[i], cmd_flags);
4632
4633 if (rc != 0)
4634 goto nvram_write_end;
4635
4636 cmd_flags = 0;
4637 }
4638 }
4639
4640 /* Loop to write the new data from data_start to data_end */
bae25761 4641 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
b6016b76 4642 if ((addr == page_end - 4) ||
e30372c9 4643 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
b6016b76
MC
4644 (addr == data_end - 4))) {
4645
4646 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4647 }
4648 rc = bnx2_nvram_write_dword(bp, addr, buf,
4649 cmd_flags);
4650
4651 if (rc != 0)
4652 goto nvram_write_end;
4653
4654 cmd_flags = 0;
4655 buf += 4;
4656 }
4657
4658 /* Loop to write back the buffer data from data_end
4659 * to page_end */
e30372c9 4660 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
b6016b76
MC
4661 for (addr = data_end; addr < page_end;
4662 addr += 4, i += 4) {
6aa20a22 4663
b6016b76
MC
4664 if (addr == page_end-4) {
4665 cmd_flags = BNX2_NVM_COMMAND_LAST;
4666 }
4667 rc = bnx2_nvram_write_dword(bp, addr,
4668 &flash_buffer[i], cmd_flags);
4669
4670 if (rc != 0)
4671 goto nvram_write_end;
4672
4673 cmd_flags = 0;
4674 }
4675 }
4676
4677 /* Disable writes to flash interface (lock write-protect) */
4678 bnx2_disable_nvram_write(bp);
4679
4680 /* Disable access to flash interface */
4681 bnx2_disable_nvram_access(bp);
4682 bnx2_release_nvram_lock(bp);
4683
4684 /* Increment written */
4685 written += data_end - data_start;
4686 }
4687
4688nvram_write_end:
e6be763f
MC
4689 kfree(flash_buffer);
4690 kfree(align_buf);
b6016b76
MC
4691 return rc;
4692}
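
The write path mirrors the read path: the request is widened to dword alignment, and the partial leading and trailing dwords are pre-read so the merged buffer can be written back whole (non-buffered parts additionally read, erase, and rewrite the full page). A sketch of the alignment widening alone, for an assumed write of 6 bytes at offset 5:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t offset = 5, buf_size = 6;		/* assumed request */
		uint32_t align_start = offset & 3;		/* 1: need byte 4 from flash */
		uint32_t offset32 = offset & ~3u;		/* widened start: 4 */
		uint32_t len32 = buf_size + align_start;	/* 7 */
		uint32_t align_end = len32 & 3 ? 4 - (len32 & 3) : 0;	/* 1 */

		len32 += align_end;				/* 8: two whole dwords */
		printf("write %u byte(s) at 0x%x (pre-read %u head and %u tail byte(s))\n",
		       len32, offset32, align_start, align_end);
		return 0;
	}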
4693
0d8a6571 4694static void
7c62e83b 4695bnx2_init_fw_cap(struct bnx2 *bp)
0d8a6571 4696{
7c62e83b 4697 u32 val, sig = 0;
0d8a6571 4698
583c28e5 4699 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
7c62e83b
MC
4700 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4701
4702 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4703 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
0d8a6571 4704
2726d6e1 4705 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
0d8a6571
MC
4706 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4707 return;
4708
7c62e83b
MC
4709 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4710 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4711 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4712 }
4713
4714 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4715 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4716 u32 link;
4717
583c28e5 4718 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
0d8a6571 4719
7c62e83b
MC
4720 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4721 if (link & BNX2_LINK_STATUS_SERDES_LINK)
0d8a6571
MC
4722 bp->phy_port = PORT_FIBRE;
4723 else
4724 bp->phy_port = PORT_TP;
489310a4 4725
7c62e83b
MC
4726 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4727 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
0d8a6571 4728 }
7c62e83b
MC
4729
4730 if (netif_running(bp->dev) && sig)
4731 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
0d8a6571
MC
4732}
4733
b4b36042
MC
4734static void
4735bnx2_setup_msix_tbl(struct bnx2 *bp)
4736{
e503e066 4737 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
b4b36042 4738
e503e066
MC
4739 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4740 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
b4b36042
MC
4741}
4742
b6016b76
MC
4743static int
4744bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4745{
4746 u32 val;
4747 int i, rc = 0;
489310a4 4748 u8 old_port;
b6016b76
MC
4749
4750 /* Wait for the current PCI transaction to complete before
4751 * issuing a reset. */
4ce45e02
MC
4752 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4753 (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
e503e066
MC
4754 BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4755 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4756 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4757 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4758 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4759 val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
a5dac108
EW
4760 udelay(5);
4761 } else { /* 5709 */
e503e066 4762 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
a5dac108 4763 val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
e503e066
MC
4764 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4765 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
a5dac108
EW
4766
4767 for (i = 0; i < 100; i++) {
4768 msleep(1);
e503e066 4769 val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
a5dac108
EW
4770 if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4771 break;
4772 }
4773 }
b6016b76 4774
b090ae2b 4775 /* Wait for the firmware to tell us it is ok to issue a reset. */
a2f13890 4776 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
b090ae2b 4777
b6016b76
MC
4778 /* Deposit a driver reset signature so the firmware knows that
4779 * this is a soft reset. */
2726d6e1
MC
4780 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4781 BNX2_DRV_RESET_SIGNATURE_MAGIC);
b6016b76 4782
b6016b76
MC
4783 	/* Do a dummy read to force the chip to complete all current transactions
4784 * before we issue a reset. */
e503e066 4785 val = BNX2_RD(bp, BNX2_MISC_ID);
b6016b76 4786
4ce45e02 4787 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
e503e066
MC
4788 BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4789 BNX2_RD(bp, BNX2_MISC_COMMAND);
234754d5 4790 udelay(5);
b6016b76 4791
234754d5
MC
4792 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4793 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
b6016b76 4794
e503e066 4795 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
b6016b76 4796
234754d5
MC
4797 } else {
4798 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4799 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4800 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4801
4802 /* Chip reset. */
e503e066 4803 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
234754d5 4804
594a9dfa
MC
4805 /* Reading back any register after chip reset will hang the
4806 * bus on 5706 A0 and A1. The msleep below provides plenty
4807 * of margin for write posting.
4808 */
4ce45e02
MC
4809 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4810 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
8e545881 4811 msleep(20);
b6016b76 4812
234754d5
MC
4813 		/* Reset takes approximately 30 usec */
4814 for (i = 0; i < 10; i++) {
e503e066 4815 val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
234754d5
MC
4816 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4817 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4818 break;
4819 udelay(10);
4820 }
4821
4822 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4823 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3a9c6a49 4824 pr_err("Chip reset did not complete\n");
234754d5
MC
4825 return -EBUSY;
4826 }
b6016b76
MC
4827 }
4828
4829 /* Make sure byte swapping is properly configured. */
e503e066 4830 val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
b6016b76 4831 if (val != 0x01020304) {
3a9c6a49 4832 pr_err("Chip not in correct endian mode\n");
b6016b76
MC
4833 return -ENODEV;
4834 }
4835
b6016b76 4836 /* Wait for the firmware to finish its initialization. */
a2f13890 4837 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
b090ae2b
MC
4838 if (rc)
4839 return rc;
b6016b76 4840
0d8a6571 4841 spin_lock_bh(&bp->phy_lock);
489310a4 4842 old_port = bp->phy_port;
7c62e83b 4843 bnx2_init_fw_cap(bp);
583c28e5
MC
4844 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4845 old_port != bp->phy_port)
0d8a6571
MC
4846 bnx2_set_default_remote_link(bp);
4847 spin_unlock_bh(&bp->phy_lock);
4848
4ce45e02 4849 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
b6016b76
MC
4850 		/* Adjust the voltage regulator to two steps lower. The default
4851 * of this register is 0x0000000e. */
e503e066 4852 BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
b6016b76
MC
4853
4854 /* Remove bad rbuf memory from the free pool. */
4855 rc = bnx2_alloc_bad_rbuf(bp);
4856 }
4857
c441b8d2 4858 if (bp->flags & BNX2_FLAG_USING_MSIX) {
b4b36042 4859 bnx2_setup_msix_tbl(bp);
c441b8d2 4860 		/* Prevent MSIX table reads and writes from timing out */
e503e066 4861 BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
c441b8d2
MC
4862 BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4863 }
b4b36042 4864
b6016b76
MC
4865 return rc;
4866}
4867
4868static int
4869bnx2_init_chip(struct bnx2 *bp)
4870{
d8026d93 4871 u32 val, mtu;
b4b36042 4872 int rc, i;
b6016b76
MC
4873
4874 /* Make sure the interrupt is not active. */
e503e066 4875 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
b6016b76
MC
4876
4877 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4878 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4879#ifdef __BIG_ENDIAN
6aa20a22 4880 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
b6016b76 4881#endif
6aa20a22 4882 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
b6016b76
MC
4883 DMA_READ_CHANS << 12 |
4884 DMA_WRITE_CHANS << 16;
4885
4886 val |= (0x2 << 20) | (1 << 11);
4887
f86e82fb 4888 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
b6016b76
MC
4889 val |= (1 << 23);
4890
4ce45e02
MC
4891 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4892 (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4893 !(bp->flags & BNX2_FLAG_PCIX))
b6016b76
MC
4894 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4895
e503e066 4896 BNX2_WR(bp, BNX2_DMA_CONFIG, val);
b6016b76 4897
4ce45e02 4898 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
e503e066 4899 val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
b6016b76 4900 val |= BNX2_TDMA_CONFIG_ONE_DMA;
e503e066 4901 BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
b6016b76
MC
4902 }
4903
f86e82fb 4904 if (bp->flags & BNX2_FLAG_PCIX) {
b6016b76
MC
4905 u16 val16;
4906
4907 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4908 &val16);
4909 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4910 val16 & ~PCI_X_CMD_ERO);
4911 }
4912
e503e066
MC
4913 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4914 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4915 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4916 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
b6016b76
MC
4917
4918 /* Initialize context mapping and zero out the quick contexts. The
4919 * context block must have already been enabled. */
4ce45e02 4920 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
641bdcd5
MC
4921 rc = bnx2_init_5709_context(bp);
4922 if (rc)
4923 return rc;
4924 } else
59b47d8a 4925 bnx2_init_context(bp);
b6016b76 4926
fba9fe91
MC
4927 if ((rc = bnx2_init_cpus(bp)) != 0)
4928 return rc;
4929
b6016b76
MC
4930 bnx2_init_nvram(bp);
4931
5fcaed01 4932 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
b6016b76 4933
e503e066 4934 val = BNX2_RD(bp, BNX2_MQ_CONFIG);
b6016b76
MC
4935 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4936 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4ce45e02 4937 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4edd473f 4938 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4ce45e02 4939 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4edd473f
MC
4940 val |= BNX2_MQ_CONFIG_HALT_DIS;
4941 }
68c9f75a 4942
e503e066 4943 BNX2_WR(bp, BNX2_MQ_CONFIG, val);
b6016b76
MC
4944
4945 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
e503e066
MC
4946 BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4947 BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
b6016b76 4948
2bc4078e 4949 val = (BNX2_PAGE_BITS - 8) << 24;
e503e066 4950 BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
b6016b76
MC
4951
4952 /* Configure page size. */
e503e066 4953 val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
b6016b76 4954 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
2bc4078e 4955 val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
e503e066 4956 BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
b6016b76
MC
4957
4958 val = bp->mac_addr[0] +
4959 (bp->mac_addr[1] << 8) +
4960 (bp->mac_addr[2] << 16) +
4961 bp->mac_addr[3] +
4962 (bp->mac_addr[4] << 8) +
4963 (bp->mac_addr[5] << 16);
e503e066 4964 BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
b6016b76
MC
4965
4966 /* Program the MTU. Also include 4 bytes for CRC32. */
d8026d93
MC
4967 mtu = bp->dev->mtu;
4968 val = mtu + ETH_HLEN + ETH_FCS_LEN;
b6016b76
MC
4969 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4970 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
e503e066 4971 BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
b6016b76 4972
d8026d93
MC
4973 if (mtu < 1500)
4974 mtu = 1500;
4975
4976 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4977 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4978 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4979
155d5561 4980 memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
b4b36042
MC
4981 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4982 bp->bnx2_napi[i].last_status_idx = 0;
4983
efba0180
MC
4984 bp->idle_chk_status_idx = 0xffff;
4985
b6016b76
MC
4986 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4987
4988 /* Set up how to generate a link change interrupt. */
e503e066 4989 BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
b6016b76 4990
e503e066
MC
4991 BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
4992 (u64) bp->status_blk_mapping & 0xffffffff);
4993 BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
b6016b76 4994
e503e066
MC
4995 BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4996 (u64) bp->stats_blk_mapping & 0xffffffff);
4997 BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4998 (u64) bp->stats_blk_mapping >> 32);
b6016b76 4999
e503e066
MC
5000 BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
5001 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
b6016b76 5002
e503e066
MC
5003 BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
5004 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
b6016b76 5005
e503e066
MC
5006 BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
5007 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
b6016b76 5008
e503e066 5009 BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
b6016b76 5010
e503e066 5011 BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
b6016b76 5012
e503e066
MC
5013 BNX2_WR(bp, BNX2_HC_COM_TICKS,
5014 (bp->com_ticks_int << 16) | bp->com_ticks);
b6016b76 5015
e503e066
MC
5016 BNX2_WR(bp, BNX2_HC_CMD_TICKS,
5017 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
b6016b76 5018
61d9e3fa 5019 if (bp->flags & BNX2_FLAG_BROKEN_STATS)
e503e066 5020 BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
02537b06 5021 else
e503e066
MC
5022 BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
5023 BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
b6016b76 5024
4ce45e02 5025 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
8e6a72c4 5026 val = BNX2_HC_CONFIG_COLLECT_STATS;
b6016b76 5027 else {
8e6a72c4
MC
5028 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
5029 BNX2_HC_CONFIG_COLLECT_STATS;
b6016b76
MC
5030 }
5031
efde73a3 5032 if (bp->flags & BNX2_FLAG_USING_MSIX) {
e503e066
MC
5033 BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5034 BNX2_HC_MSIX_BIT_VECTOR_VAL);
c76c0475 5035
5e9ad9e1
MC
5036 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5037 }
5038
5039 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
cf7474a6 5040 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5e9ad9e1 5041
e503e066 5042 BNX2_WR(bp, BNX2_HC_CONFIG, val);
5e9ad9e1 5043
22fa159d
MC
5044 if (bp->rx_ticks < 25)
5045 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5046 else
5047 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5048
5e9ad9e1
MC
5049 for (i = 1; i < bp->irq_nvecs; i++) {
5050 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5051 BNX2_HC_SB_CONFIG_1;
5052
e503e066 5053 BNX2_WR(bp, base,
c76c0475 5054 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5e9ad9e1 5055 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
c76c0475
MC
5056 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5057
e503e066 5058 BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
c76c0475
MC
5059 (bp->tx_quick_cons_trip_int << 16) |
5060 bp->tx_quick_cons_trip);
5061
e503e066 5062 BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
c76c0475
MC
5063 (bp->tx_ticks_int << 16) | bp->tx_ticks);
5064
e503e066
MC
5065 BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5066 (bp->rx_quick_cons_trip_int << 16) |
5e9ad9e1 5067 bp->rx_quick_cons_trip);
8e6a72c4 5068
e503e066 5069 BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5e9ad9e1
MC
5070 (bp->rx_ticks_int << 16) | bp->rx_ticks);
5071 }
8e6a72c4 5072
b6016b76 5073 /* Clear internal stats counters. */
e503e066 5074 BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
b6016b76 5075
e503e066 5076 BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
b6016b76
MC
5077
5078 /* Initialize the receive filter. */
5079 bnx2_set_rx_mode(bp->dev);
5080
4ce45e02 5081 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
e503e066 5082 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
0aa38df7 5083 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
e503e066 5084 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
0aa38df7 5085 }
b090ae2b 5086 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
a2f13890 5087 1, 0);
b6016b76 5088
e503e066
MC
5089 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5090 BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
b6016b76
MC
5091
5092 udelay(20);
5093
e503e066 5094 bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
bf5295bb 5095
b090ae2b 5096 return rc;
b6016b76
MC
5097}
5098
c76c0475
MC
5099static void
5100bnx2_clear_ring_states(struct bnx2 *bp)
5101{
5102 struct bnx2_napi *bnapi;
35e9010b 5103 struct bnx2_tx_ring_info *txr;
bb4f98ab 5104 struct bnx2_rx_ring_info *rxr;
c76c0475
MC
5105 int i;
5106
5107 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5108 bnapi = &bp->bnx2_napi[i];
35e9010b 5109 txr = &bnapi->tx_ring;
bb4f98ab 5110 rxr = &bnapi->rx_ring;
c76c0475 5111
35e9010b
MC
5112 txr->tx_cons = 0;
5113 txr->hw_tx_cons = 0;
bb4f98ab
MC
5114 rxr->rx_prod_bseq = 0;
5115 rxr->rx_prod = 0;
5116 rxr->rx_cons = 0;
5117 rxr->rx_pg_prod = 0;
5118 rxr->rx_pg_cons = 0;
c76c0475
MC
5119 }
5120}
5121
59b47d8a 5122static void
35e9010b 5123bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
59b47d8a
MC
5124{
5125 u32 val, offset0, offset1, offset2, offset3;
62a8313c 5126 u32 cid_addr = GET_CID_ADDR(cid);
59b47d8a 5127
4ce45e02 5128 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
59b47d8a
MC
5129 offset0 = BNX2_L2CTX_TYPE_XI;
5130 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5131 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5132 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5133 } else {
5134 offset0 = BNX2_L2CTX_TYPE;
5135 offset1 = BNX2_L2CTX_CMD_TYPE;
5136 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5137 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5138 }
5139 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
62a8313c 5140 bnx2_ctx_wr(bp, cid_addr, offset0, val);
59b47d8a
MC
5141
5142 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
62a8313c 5143 bnx2_ctx_wr(bp, cid_addr, offset1, val);
59b47d8a 5144
35e9010b 5145 val = (u64) txr->tx_desc_mapping >> 32;
62a8313c 5146 bnx2_ctx_wr(bp, cid_addr, offset2, val);
59b47d8a 5147
35e9010b 5148 val = (u64) txr->tx_desc_mapping & 0xffffffff;
62a8313c 5149 bnx2_ctx_wr(bp, cid_addr, offset3, val);
59b47d8a 5150}
b6016b76
MC
5151
5152static void
35e9010b 5153bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
b6016b76 5154{
2bc4078e 5155 struct bnx2_tx_bd *txbd;
c76c0475
MC
5156 u32 cid = TX_CID;
5157 struct bnx2_napi *bnapi;
35e9010b 5158 struct bnx2_tx_ring_info *txr;
c76c0475 5159
35e9010b
MC
5160 bnapi = &bp->bnx2_napi[ring_num];
5161 txr = &bnapi->tx_ring;
5162
5163 if (ring_num == 0)
5164 cid = TX_CID;
5165 else
5166 cid = TX_TSS_CID + ring_num - 1;
b6016b76 5167
2f8af120
MC
5168 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5169
2bc4078e 5170 txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
6aa20a22 5171
35e9010b
MC
5172 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5173 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
b6016b76 5174
35e9010b
MC
5175 txr->tx_prod = 0;
5176 txr->tx_prod_bseq = 0;
6aa20a22 5177
35e9010b
MC
5178 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5179 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
b6016b76 5180
35e9010b 5181 bnx2_init_tx_context(bp, cid, txr);
b6016b76
MC
5182}
5183
5184static void
2bc4078e
MC
5185bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5186 u32 buf_size, int num_rings)
b6016b76 5187{
b6016b76 5188 int i;
2bc4078e 5189 struct bnx2_rx_bd *rxbd;
6aa20a22 5190
5d5d0015 5191 for (i = 0; i < num_rings; i++) {
13daffa2 5192 int j;
b6016b76 5193
5d5d0015 5194 rxbd = &rx_ring[i][0];
2bc4078e 5195 for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5d5d0015 5196 rxbd->rx_bd_len = buf_size;
13daffa2
MC
5197 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5198 }
5d5d0015 5199 if (i == (num_rings - 1))
13daffa2
MC
5200 j = 0;
5201 else
5202 j = i + 1;
5d5d0015
MC
5203 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5204 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
13daffa2 5205 }
5d5d0015
MC
5206}
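
bnx2_init_rxbd_rings() above leaves the last descriptor of every page pointing at the next page, and the last page back at the first, so the hardware walks the pages as one circular ring; the next-page DMA address is split into hi/lo halves exactly as in the code. A tiny sketch of that wiring, using assumed DMA addresses for a 4-page ring:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Assumed DMA addresses of 4 descriptor pages. */
		uint64_t dma[4] = { 0x12340000, 0x12341000, 0x12342000, 0x12343000 };
		int num_rings = 4, i;

		for (i = 0; i < num_rings; i++) {
			int next = (i == num_rings - 1) ? 0 : i + 1;

			printf("page %d: last BD -> haddr_hi 0x%08x, haddr_lo 0x%08x (page %d)\n",
			       i, (uint32_t)(dma[next] >> 32),
			       (uint32_t)(dma[next] & 0xffffffff), next);
		}
		return 0;
	}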
5207
5208static void
bb4f98ab 5209bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5d5d0015
MC
5210{
5211 int i;
5212 u16 prod, ring_prod;
bb4f98ab
MC
5213 u32 cid, rx_cid_addr, val;
5214 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5215 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5216
5217 if (ring_num == 0)
5218 cid = RX_CID;
5219 else
5220 cid = RX_RSS_CID + ring_num - 1;
5221
5222 rx_cid_addr = GET_CID_ADDR(cid);
5d5d0015 5223
bb4f98ab 5224 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5d5d0015
MC
5225 bp->rx_buf_use_size, bp->rx_max_ring);
5226
bb4f98ab 5227 bnx2_init_rx_context(bp, cid);
83e3fc89 5228
4ce45e02 5229 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
e503e066
MC
5230 val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5231 BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
83e3fc89
MC
5232 }
5233
62a8313c 5234 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
47bf4246 5235 if (bp->rx_pg_ring_size) {
bb4f98ab
MC
5236 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5237 rxr->rx_pg_desc_mapping,
47bf4246
MC
5238 PAGE_SIZE, bp->rx_max_pg_ring);
5239 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
62a8313c
MC
5240 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5241 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5e9ad9e1 5242 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
47bf4246 5243
bb4f98ab 5244 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
62a8313c 5245 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
47bf4246 5246
bb4f98ab 5247 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
62a8313c 5248 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
47bf4246 5249
4ce45e02 5250 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
e503e066 5251 BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
47bf4246 5252 }
b6016b76 5253
bb4f98ab 5254 val = (u64) rxr->rx_desc_mapping[0] >> 32;
62a8313c 5255 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
b6016b76 5256
bb4f98ab 5257 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
62a8313c 5258 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
b6016b76 5259
bb4f98ab 5260 ring_prod = prod = rxr->rx_pg_prod;
47bf4246 5261 for (i = 0; i < bp->rx_pg_ring_size; i++) {
a2df00aa 5262 if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
3a9c6a49
JP
5263 netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5264 ring_num, i, bp->rx_pg_ring_size);
47bf4246 5265 break;
b929e53c 5266 }
2bc4078e
MC
5267 prod = BNX2_NEXT_RX_BD(prod);
5268 ring_prod = BNX2_RX_PG_RING_IDX(prod);
47bf4246 5269 }
bb4f98ab 5270 rxr->rx_pg_prod = prod;
47bf4246 5271
bb4f98ab 5272 ring_prod = prod = rxr->rx_prod;
236b6394 5273 for (i = 0; i < bp->rx_ring_size; i++) {
dd2bc8e9 5274 if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
3a9c6a49
JP
5275 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5276 ring_num, i, bp->rx_ring_size);
b6016b76 5277 break;
b929e53c 5278 }
2bc4078e
MC
5279 prod = BNX2_NEXT_RX_BD(prod);
5280 ring_prod = BNX2_RX_RING_IDX(prod);
b6016b76 5281 }
bb4f98ab 5282 rxr->rx_prod = prod;
b6016b76 5283
bb4f98ab
MC
5284 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5285 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5286 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
b6016b76 5287
e503e066
MC
5288 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5289 BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
bb4f98ab 5290
e503e066 5291 BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
b6016b76
MC
5292}
5293
35e9010b
MC
5294static void
5295bnx2_init_all_rings(struct bnx2 *bp)
5296{
5297 int i;
5e9ad9e1 5298 u32 val;
35e9010b
MC
5299
5300 bnx2_clear_ring_states(bp);
5301
e503e066 5302 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
35e9010b
MC
5303 for (i = 0; i < bp->num_tx_rings; i++)
5304 bnx2_init_tx_ring(bp, i);
5305
5306 if (bp->num_tx_rings > 1)
e503e066
MC
5307 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5308 (TX_TSS_CID << 7));
35e9010b 5309
e503e066 5310 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5e9ad9e1
MC
5311 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5312
bb4f98ab
MC
5313 for (i = 0; i < bp->num_rx_rings; i++)
5314 bnx2_init_rx_ring(bp, i);
5e9ad9e1
MC
5315
5316 if (bp->num_rx_rings > 1) {
22fa159d 5317 u32 tbl_32 = 0;
5e9ad9e1
MC
5318
5319 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
22fa159d
MC
5320 int shift = (i % 8) << 2;
5321
5322 tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5323 if ((i % 8) == 7) {
e503e066
MC
5324 BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5325 BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
22fa159d
MC
5326 BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5327 BNX2_RLUP_RSS_COMMAND_WRITE |
5328 BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5329 tbl_32 = 0;
5330 }
5e9ad9e1
MC
5331 }
5332
5333 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5334 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5335
e503e066 5336 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5e9ad9e1
MC
5337
5338 }
35e9010b
MC
5339}
5340
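/* Work out how many BNX2_MAX_RX_DESC_CNT-entry descriptor pages are needed
 * to hold ring_size entries, rounded up to the next power of two.  The
 * rounding logic relies on max_size (e.g. BNX2_MAX_RX_RINGS) being a power
 * of two.
 */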
5d5d0015 5341static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
13daffa2 5342{
5d5d0015 5343 u32 max, num_rings = 1;
13daffa2 5344
2bc4078e
MC
5345 while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5346 ring_size -= BNX2_MAX_RX_DESC_CNT;
13daffa2
MC
5347 num_rings++;
5348 }
5349 /* round to next power of 2 */
5d5d0015 5350 max = max_size;
13daffa2
MC
5351 while ((max & num_rings) == 0)
5352 max >>= 1;
5353
5354 if (num_rings != max)
5355 max <<= 1;
5356
5d5d0015
MC
5357 return max;
5358}
5359
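/* Size the rx rings for the current MTU.  When a full frame plus alignment
 * and shared-info overhead will not fit in a single page (and the chip does
 * not have BNX2_FLAG_JUMBO_BROKEN set), only the first rx_jumbo_thresh bytes
 * of each frame are placed in the normal rx buffer and the remainder is
 * spread over page buffers on the separate rx page ring.
 */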
5360static void
5361bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5362{
84eaa187 5363 u32 rx_size, rx_space, jumbo_size;
5d5d0015
MC
5364
5365 /* 8 for CRC and VLAN */
d89cb6af 5366 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5d5d0015 5367
84eaa187 5368 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
dd2bc8e9 5369 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
84eaa187 5370
601d3d18 5371 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
47bf4246
MC
5372 bp->rx_pg_ring_size = 0;
5373 bp->rx_max_pg_ring = 0;
5374 bp->rx_max_pg_ring_idx = 0;
f86e82fb 5375 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
84eaa187
MC
5376 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5377
5378 jumbo_size = size * pages;
2bc4078e
MC
5379 if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5380 jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
84eaa187
MC
5381
5382 bp->rx_pg_ring_size = jumbo_size;
5383 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
2bc4078e
MC
5384 BNX2_MAX_RX_PG_RINGS);
5385 bp->rx_max_pg_ring_idx =
5386 (bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
601d3d18 5387 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
84eaa187
MC
5388 bp->rx_copy_thresh = 0;
5389 }
5d5d0015
MC
5390
5391 bp->rx_buf_use_size = rx_size;
dd2bc8e9
ED
 5392 /* hw alignment + build_skb() overhead */
5393 bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5394 NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
d89cb6af 5395 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5d5d0015 5396 bp->rx_ring_size = size;
2bc4078e
MC
5397 bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5398 bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
13daffa2
MC
5399}
5400
b6016b76
MC
5401static void
5402bnx2_free_tx_skbs(struct bnx2 *bp)
5403{
5404 int i;
5405
35e9010b
MC
5406 for (i = 0; i < bp->num_tx_rings; i++) {
5407 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5408 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5409 int j;
b6016b76 5410
35e9010b 5411 if (txr->tx_buf_ring == NULL)
b6016b76 5412 continue;
b6016b76 5413
2bc4078e
MC
5414 for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5415 struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
35e9010b 5416 struct sk_buff *skb = tx_buf->skb;
e95524a7 5417 int k, last;
35e9010b
MC
5418
5419 if (skb == NULL) {
2bc4078e 5420 j = BNX2_NEXT_TX_BD(j);
35e9010b
MC
5421 continue;
5422 }
5423
36227e88 5424 dma_unmap_single(&bp->pdev->dev,
1a4ccc2d 5425 dma_unmap_addr(tx_buf, mapping),
e95524a7
AD
5426 skb_headlen(skb),
5427 PCI_DMA_TODEVICE);
b6016b76 5428
35e9010b 5429 tx_buf->skb = NULL;
b6016b76 5430
e95524a7 5431 last = tx_buf->nr_frags;
2bc4078e
MC
5432 j = BNX2_NEXT_TX_BD(j);
5433 for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5434 tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
36227e88 5435 dma_unmap_page(&bp->pdev->dev,
1a4ccc2d 5436 dma_unmap_addr(tx_buf, mapping),
9e903e08 5437 skb_frag_size(&skb_shinfo(skb)->frags[k]),
e95524a7
AD
5438 PCI_DMA_TODEVICE);
5439 }
35e9010b 5440 dev_kfree_skb(skb);
b6016b76 5441 }
e9831909 5442 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
b6016b76 5443 }
b6016b76
MC
5444}
5445
5446static void
5447bnx2_free_rx_skbs(struct bnx2 *bp)
5448{
5449 int i;
5450
bb4f98ab
MC
5451 for (i = 0; i < bp->num_rx_rings; i++) {
5452 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5453 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5454 int j;
b6016b76 5455
bb4f98ab
MC
5456 if (rxr->rx_buf_ring == NULL)
5457 return;
b6016b76 5458
bb4f98ab 5459 for (j = 0; j < bp->rx_max_ring_idx; j++) {
2bc4078e 5460 struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
dd2bc8e9 5461 u8 *data = rx_buf->data;
b6016b76 5462
dd2bc8e9 5463 if (data == NULL)
bb4f98ab 5464 continue;
b6016b76 5465
36227e88 5466 dma_unmap_single(&bp->pdev->dev,
1a4ccc2d 5467 dma_unmap_addr(rx_buf, mapping),
bb4f98ab
MC
5468 bp->rx_buf_use_size,
5469 PCI_DMA_FROMDEVICE);
b6016b76 5470
dd2bc8e9 5471 rx_buf->data = NULL;
bb4f98ab 5472
dd2bc8e9 5473 kfree(data);
bb4f98ab
MC
5474 }
5475 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5476 bnx2_free_rx_page(bp, rxr, j);
b6016b76
MC
5477 }
5478}
5479
5480static void
5481bnx2_free_skbs(struct bnx2 *bp)
5482{
5483 bnx2_free_tx_skbs(bp);
5484 bnx2_free_rx_skbs(bp);
5485}
5486
5487static int
5488bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5489{
5490 int rc;
5491
5492 rc = bnx2_reset_chip(bp, reset_code);
5493 bnx2_free_skbs(bp);
5494 if (rc)
5495 return rc;
5496
fba9fe91
MC
5497 if ((rc = bnx2_init_chip(bp)) != 0)
5498 return rc;
5499
35e9010b 5500 bnx2_init_all_rings(bp);
b6016b76
MC
5501 return 0;
5502}
5503
5504static int
9a120bc5 5505bnx2_init_nic(struct bnx2 *bp, int reset_phy)
b6016b76
MC
5506{
5507 int rc;
5508
5509 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5510 return rc;
5511
80be4434 5512 spin_lock_bh(&bp->phy_lock);
9a120bc5 5513 bnx2_init_phy(bp, reset_phy);
b6016b76 5514 bnx2_set_link(bp);
543a827d
MC
5515 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5516 bnx2_remote_phy_event(bp);
0d8a6571 5517 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
5518 return 0;
5519}
5520
74bf4ba3
MC
5521static int
5522bnx2_shutdown_chip(struct bnx2 *bp)
5523{
5524 u32 reset_code;
5525
5526 if (bp->flags & BNX2_FLAG_NO_WOL)
5527 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5528 else if (bp->wol)
5529 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5530 else
5531 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5532
5533 return bnx2_reset_chip(bp, reset_code);
5534}
5535
b6016b76
MC
5536static int
5537bnx2_test_registers(struct bnx2 *bp)
5538{
5539 int ret;
5bae30c9 5540 int i, is_5709;
f71e1309 5541 static const struct {
b6016b76
MC
5542 u16 offset;
5543 u16 flags;
5bae30c9 5544#define BNX2_FL_NOT_5709 1
b6016b76
MC
5545 u32 rw_mask;
5546 u32 ro_mask;
5547 } reg_tbl[] = {
5548 { 0x006c, 0, 0x00000000, 0x0000003f },
5549 { 0x0090, 0, 0xffffffff, 0x00000000 },
5550 { 0x0094, 0, 0x00000000, 0x00000000 },
5551
5bae30c9
MC
5552 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5553 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5554 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5555 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5556 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5557 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5558 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5559 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5560 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5561
5562 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5563 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5564 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5565 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5566 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5567 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5568
5569 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5570 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5571 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
b6016b76
MC
5572
5573 { 0x1000, 0, 0x00000000, 0x00000001 },
15b169cc 5574 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
b6016b76
MC
5575
5576 { 0x1408, 0, 0x01c00800, 0x00000000 },
5577 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5578 { 0x14a8, 0, 0x00000000, 0x000001ff },
5b0c76ad 5579 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
b6016b76
MC
5580 { 0x14b0, 0, 0x00000002, 0x00000001 },
5581 { 0x14b8, 0, 0x00000000, 0x00000000 },
5582 { 0x14c0, 0, 0x00000000, 0x00000009 },
5583 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5584 { 0x14cc, 0, 0x00000000, 0x00000001 },
5585 { 0x14d0, 0, 0xffffffff, 0x00000000 },
b6016b76
MC
5586
5587 { 0x1800, 0, 0x00000000, 0x00000001 },
5588 { 0x1804, 0, 0x00000000, 0x00000003 },
b6016b76
MC
5589
5590 { 0x2800, 0, 0x00000000, 0x00000001 },
5591 { 0x2804, 0, 0x00000000, 0x00003f01 },
5592 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5593 { 0x2810, 0, 0xffff0000, 0x00000000 },
5594 { 0x2814, 0, 0xffff0000, 0x00000000 },
5595 { 0x2818, 0, 0xffff0000, 0x00000000 },
5596 { 0x281c, 0, 0xffff0000, 0x00000000 },
5597 { 0x2834, 0, 0xffffffff, 0x00000000 },
5598 { 0x2840, 0, 0x00000000, 0xffffffff },
5599 { 0x2844, 0, 0x00000000, 0xffffffff },
5600 { 0x2848, 0, 0xffffffff, 0x00000000 },
5601 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5602
5603 { 0x2c00, 0, 0x00000000, 0x00000011 },
5604 { 0x2c04, 0, 0x00000000, 0x00030007 },
5605
b6016b76
MC
5606 { 0x3c00, 0, 0x00000000, 0x00000001 },
5607 { 0x3c04, 0, 0x00000000, 0x00070000 },
5608 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5609 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5610 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5611 { 0x3c14, 0, 0x00000000, 0xffffffff },
5612 { 0x3c18, 0, 0x00000000, 0xffffffff },
5613 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5614 { 0x3c20, 0, 0xffffff00, 0x00000000 },
b6016b76
MC
5615
5616 { 0x5004, 0, 0x00000000, 0x0000007f },
5617 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
b6016b76 5618
b6016b76
MC
5619 { 0x5c00, 0, 0x00000000, 0x00000001 },
5620 { 0x5c04, 0, 0x00000000, 0x0003000f },
5621 { 0x5c08, 0, 0x00000003, 0x00000000 },
5622 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5623 { 0x5c10, 0, 0x00000000, 0xffffffff },
5624 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5625 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5626 { 0x5c88, 0, 0x00000000, 0x00077373 },
5627 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5628
5629 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5630 { 0x680c, 0, 0xffffffff, 0x00000000 },
5631 { 0x6810, 0, 0xffffffff, 0x00000000 },
5632 { 0x6814, 0, 0xffffffff, 0x00000000 },
5633 { 0x6818, 0, 0xffffffff, 0x00000000 },
5634 { 0x681c, 0, 0xffffffff, 0x00000000 },
5635 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5636 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5637 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5638 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5639 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5640 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5641 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5642 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5643 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5644 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5645 { 0x684c, 0, 0xffffffff, 0x00000000 },
5646 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5647 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5648 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5649 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5650 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5651 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5652
5653 { 0xffff, 0, 0x00000000, 0x00000000 },
5654 };
5655
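	/* For each register in reg_tbl, write 0 and then 0xffffffff and check
	 * that the read/write bits (rw_mask) follow the written value while
	 * the read-only bits (ro_mask) keep their original contents; the
	 * saved value is restored before moving on to the next register.
	 */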
5656 ret = 0;
5bae30c9 5657 is_5709 = 0;
4ce45e02 5658 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5bae30c9
MC
5659 is_5709 = 1;
5660
b6016b76
MC
5661 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5662 u32 offset, rw_mask, ro_mask, save_val, val;
5bae30c9
MC
5663 u16 flags = reg_tbl[i].flags;
5664
5665 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5666 continue;
b6016b76
MC
5667
5668 offset = (u32) reg_tbl[i].offset;
5669 rw_mask = reg_tbl[i].rw_mask;
5670 ro_mask = reg_tbl[i].ro_mask;
5671
14ab9b86 5672 save_val = readl(bp->regview + offset);
b6016b76 5673
14ab9b86 5674 writel(0, bp->regview + offset);
b6016b76 5675
14ab9b86 5676 val = readl(bp->regview + offset);
b6016b76
MC
5677 if ((val & rw_mask) != 0) {
5678 goto reg_test_err;
5679 }
5680
5681 if ((val & ro_mask) != (save_val & ro_mask)) {
5682 goto reg_test_err;
5683 }
5684
14ab9b86 5685 writel(0xffffffff, bp->regview + offset);
b6016b76 5686
14ab9b86 5687 val = readl(bp->regview + offset);
b6016b76
MC
5688 if ((val & rw_mask) != rw_mask) {
5689 goto reg_test_err;
5690 }
5691
5692 if ((val & ro_mask) != (save_val & ro_mask)) {
5693 goto reg_test_err;
5694 }
5695
14ab9b86 5696 writel(save_val, bp->regview + offset);
b6016b76
MC
5697 continue;
5698
5699reg_test_err:
14ab9b86 5700 writel(save_val, bp->regview + offset);
b6016b76
MC
5701 ret = -ENODEV;
5702 break;
5703 }
5704 return ret;
5705}
5706
5707static int
5708bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5709{
f71e1309 5710 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
b6016b76
MC
5711 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5712 int i;
5713
5714 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5715 u32 offset;
5716
5717 for (offset = 0; offset < size; offset += 4) {
5718
2726d6e1 5719 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
b6016b76 5720
2726d6e1 5721 if (bnx2_reg_rd_ind(bp, start + offset) !=
b6016b76
MC
5722 test_pattern[i]) {
5723 return -ENODEV;
5724 }
5725 }
5726 }
5727 return 0;
5728}
5729
5730static int
5731bnx2_test_memory(struct bnx2 *bp)
5732{
5733 int ret = 0;
5734 int i;
5bae30c9 5735 static struct mem_entry {
b6016b76
MC
5736 u32 offset;
5737 u32 len;
5bae30c9 5738 } mem_tbl_5706[] = {
b6016b76 5739 { 0x60000, 0x4000 },
5b0c76ad 5740 { 0xa0000, 0x3000 },
b6016b76
MC
5741 { 0xe0000, 0x4000 },
5742 { 0x120000, 0x4000 },
5743 { 0x1a0000, 0x4000 },
5744 { 0x160000, 0x4000 },
5745 { 0xffffffff, 0 },
5bae30c9
MC
5746 },
5747 mem_tbl_5709[] = {
5748 { 0x60000, 0x4000 },
5749 { 0xa0000, 0x3000 },
5750 { 0xe0000, 0x4000 },
5751 { 0x120000, 0x4000 },
5752 { 0x1a0000, 0x4000 },
5753 { 0xffffffff, 0 },
b6016b76 5754 };
5bae30c9
MC
5755 struct mem_entry *mem_tbl;
5756
4ce45e02 5757 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5bae30c9
MC
5758 mem_tbl = mem_tbl_5709;
5759 else
5760 mem_tbl = mem_tbl_5706;
b6016b76
MC
5761
5762 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5763 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5764 mem_tbl[i].len)) != 0) {
5765 return ret;
5766 }
5767 }
6aa20a22 5768
b6016b76
MC
5769 return ret;
5770}
5771
bc5a0690
MC
5772#define BNX2_MAC_LOOPBACK 0
5773#define BNX2_PHY_LOOPBACK 1
5774
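/* Loopback self-test: with the MAC or PHY looped back, build one frame
 * addressed to the device's own MAC address, transmit it on ring 0, then
 * poll the rx ring and verify that the frame comes back with a clean
 * l2_fhdr status and an unmodified payload.
 */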
b6016b76 5775static int
bc5a0690 5776bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
b6016b76
MC
5777{
5778 unsigned int pkt_size, num_pkts, i;
dd2bc8e9
ED
5779 struct sk_buff *skb;
5780 u8 *data;
b6016b76 5781 unsigned char *packet;
bc5a0690 5782 u16 rx_start_idx, rx_idx;
b6016b76 5783 dma_addr_t map;
2bc4078e
MC
5784 struct bnx2_tx_bd *txbd;
5785 struct bnx2_sw_bd *rx_buf;
b6016b76
MC
5786 struct l2_fhdr *rx_hdr;
5787 int ret = -ENODEV;
c76c0475 5788 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
35e9010b 5789 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
bb4f98ab 5790 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
c76c0475
MC
5791
5792 tx_napi = bnapi;
b6016b76 5793
35e9010b 5794 txr = &tx_napi->tx_ring;
bb4f98ab 5795 rxr = &bnapi->rx_ring;
bc5a0690
MC
5796 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5797 bp->loopback = MAC_LOOPBACK;
5798 bnx2_set_mac_loopback(bp);
5799 }
5800 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
583c28e5 5801 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
489310a4
MC
5802 return 0;
5803
80be4434 5804 bp->loopback = PHY_LOOPBACK;
bc5a0690
MC
5805 bnx2_set_phy_loopback(bp);
5806 }
5807 else
5808 return -EINVAL;
b6016b76 5809
84eaa187 5810 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
932f3772 5811 skb = netdev_alloc_skb(bp->dev, pkt_size);
b6cbc3b6
JL
5812 if (!skb)
5813 return -ENOMEM;
b6016b76 5814 packet = skb_put(skb, pkt_size);
d458cdf7
JP
5815 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
5816 memset(packet + ETH_ALEN, 0x0, 8);
b6016b76
MC
5817 for (i = 14; i < pkt_size; i++)
5818 packet[i] = (unsigned char) (i & 0xff);
5819
36227e88
SG
5820 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5821 PCI_DMA_TODEVICE);
5822 if (dma_mapping_error(&bp->pdev->dev, map)) {
3d16af86
BL
5823 dev_kfree_skb(skb);
5824 return -EIO;
5825 }
b6016b76 5826
e503e066
MC
5827 BNX2_WR(bp, BNX2_HC_COMMAND,
5828 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
bf5295bb 5829
e503e066 5830 BNX2_RD(bp, BNX2_HC_COMMAND);
b6016b76
MC
5831
5832 udelay(5);
35efa7c1 5833 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
b6016b76 5834
b6016b76
MC
5835 num_pkts = 0;
5836
2bc4078e 5837 txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
b6016b76
MC
5838
5839 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5840 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5841 txbd->tx_bd_mss_nbytes = pkt_size;
5842 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5843
5844 num_pkts++;
2bc4078e 5845 txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
35e9010b 5846 txr->tx_prod_bseq += pkt_size;
b6016b76 5847
e503e066
MC
5848 BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5849 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
b6016b76
MC
5850
5851 udelay(100);
5852
e503e066
MC
5853 BNX2_WR(bp, BNX2_HC_COMMAND,
5854 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
bf5295bb 5855
e503e066 5856 BNX2_RD(bp, BNX2_HC_COMMAND);
b6016b76
MC
5857
5858 udelay(5);
5859
36227e88 5860 dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
745720e5 5861 dev_kfree_skb(skb);
b6016b76 5862
35e9010b 5863 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
b6016b76 5864 goto loopback_test_done;
b6016b76 5865
35efa7c1 5866 rx_idx = bnx2_get_hw_rx_cons(bnapi);
b6016b76
MC
5867 if (rx_idx != rx_start_idx + num_pkts) {
5868 goto loopback_test_done;
5869 }
5870
bb4f98ab 5871 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
dd2bc8e9 5872 data = rx_buf->data;
b6016b76 5873
dd2bc8e9
ED
5874 rx_hdr = get_l2_fhdr(data);
5875 data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
b6016b76 5876
36227e88 5877 dma_sync_single_for_cpu(&bp->pdev->dev,
1a4ccc2d 5878 dma_unmap_addr(rx_buf, mapping),
dd2bc8e9 5879 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
b6016b76 5880
ade2bfe7 5881 if (rx_hdr->l2_fhdr_status &
b6016b76
MC
5882 (L2_FHDR_ERRORS_BAD_CRC |
5883 L2_FHDR_ERRORS_PHY_DECODE |
5884 L2_FHDR_ERRORS_ALIGNMENT |
5885 L2_FHDR_ERRORS_TOO_SHORT |
5886 L2_FHDR_ERRORS_GIANT_FRAME)) {
5887
5888 goto loopback_test_done;
5889 }
5890
5891 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5892 goto loopback_test_done;
5893 }
5894
5895 for (i = 14; i < pkt_size; i++) {
dd2bc8e9 5896 if (*(data + i) != (unsigned char) (i & 0xff)) {
b6016b76
MC
5897 goto loopback_test_done;
5898 }
5899 }
5900
5901 ret = 0;
5902
5903loopback_test_done:
5904 bp->loopback = 0;
5905 return ret;
5906}
5907
bc5a0690
MC
5908#define BNX2_MAC_LOOPBACK_FAILED 1
5909#define BNX2_PHY_LOOPBACK_FAILED 2
5910#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5911 BNX2_PHY_LOOPBACK_FAILED)
5912
5913static int
5914bnx2_test_loopback(struct bnx2 *bp)
5915{
5916 int rc = 0;
5917
5918 if (!netif_running(bp->dev))
5919 return BNX2_LOOPBACK_FAILED;
5920
5921 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5922 spin_lock_bh(&bp->phy_lock);
9a120bc5 5923 bnx2_init_phy(bp, 1);
bc5a0690
MC
5924 spin_unlock_bh(&bp->phy_lock);
5925 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5926 rc |= BNX2_MAC_LOOPBACK_FAILED;
5927 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5928 rc |= BNX2_PHY_LOOPBACK_FAILED;
5929 return rc;
5930}
5931
b6016b76
MC
5932#define NVRAM_SIZE 0x200
5933#define CRC32_RESIDUAL 0xdebb20e3
5934
5935static int
5936bnx2_test_nvram(struct bnx2 *bp)
5937{
b491edd5 5938 __be32 buf[NVRAM_SIZE / 4];
b6016b76
MC
5939 u8 *data = (u8 *) buf;
5940 int rc = 0;
5941 u32 magic, csum;
5942
5943 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5944 goto test_nvram_done;
5945
5946 magic = be32_to_cpu(buf[0]);
5947 if (magic != 0x669955aa) {
5948 rc = -ENODEV;
5949 goto test_nvram_done;
5950 }
5951
5952 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5953 goto test_nvram_done;
5954
5955 csum = ether_crc_le(0x100, data);
5956 if (csum != CRC32_RESIDUAL) {
5957 rc = -ENODEV;
5958 goto test_nvram_done;
5959 }
5960
5961 csum = ether_crc_le(0x100, data + 0x100);
5962 if (csum != CRC32_RESIDUAL) {
5963 rc = -ENODEV;
5964 }
5965
5966test_nvram_done:
5967 return rc;
5968}
5969
5970static int
5971bnx2_test_link(struct bnx2 *bp)
5972{
5973 u32 bmsr;
5974
9f52b564
MC
5975 if (!netif_running(bp->dev))
5976 return -ENODEV;
5977
583c28e5 5978 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
489310a4
MC
5979 if (bp->link_up)
5980 return 0;
5981 return -ENODEV;
5982 }
c770a65c 5983 spin_lock_bh(&bp->phy_lock);
27a005b8
MC
5984 bnx2_enable_bmsr1(bp);
5985 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5986 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5987 bnx2_disable_bmsr1(bp);
c770a65c 5988 spin_unlock_bh(&bp->phy_lock);
6aa20a22 5989
b6016b76
MC
5990 if (bmsr & BMSR_LSTATUS) {
5991 return 0;
5992 }
5993 return -ENODEV;
5994}
5995
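/* Interrupt self-test: record the current status block index, ask the host
 * coalescing block to generate an interrupt now, and poll for up to ~100ms
 * for the index to change.  Used by bnx2_open() to verify that MSI delivery
 * actually works before committing to it.
 */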
5996static int
5997bnx2_test_intr(struct bnx2 *bp)
5998{
5999 int i;
b6016b76
MC
6000 u16 status_idx;
6001
6002 if (!netif_running(bp->dev))
6003 return -ENODEV;
6004
e503e066 6005 status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
b6016b76
MC
6006
6007 /* This register is not touched during run-time. */
e503e066
MC
6008 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6009 BNX2_RD(bp, BNX2_HC_COMMAND);
b6016b76
MC
6010
6011 for (i = 0; i < 10; i++) {
e503e066 6012 if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
b6016b76
MC
6013 status_idx) {
6014
6015 break;
6016 }
6017
6018 msleep_interruptible(10);
6019 }
6020 if (i < 10)
6021 return 0;
6022
6023 return -ENODEV;
6024}
6025
38ea3686 6026/* Determining link for parallel detection. */
b2fadeae
MC
6027static int
6028bnx2_5706_serdes_has_link(struct bnx2 *bp)
6029{
6030 u32 mode_ctl, an_dbg, exp;
6031
38ea3686
MC
6032 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
6033 return 0;
6034
b2fadeae
MC
6035 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
6036 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
6037
6038 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
6039 return 0;
6040
6041 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6042 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6043 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6044
f3014c0c 6045 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
b2fadeae
MC
6046 return 0;
6047
6048 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6049 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6050 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6051
6052 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
6053 return 0;
6054
6055 return 1;
6056}
6057
b6016b76 6058static void
48b01e2d 6059bnx2_5706_serdes_timer(struct bnx2 *bp)
b6016b76 6060{
b2fadeae
MC
6061 int check_link = 1;
6062
48b01e2d 6063 spin_lock(&bp->phy_lock);
b2fadeae 6064 if (bp->serdes_an_pending) {
48b01e2d 6065 bp->serdes_an_pending--;
b2fadeae
MC
6066 check_link = 0;
6067 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
48b01e2d 6068 u32 bmcr;
b6016b76 6069
ac392abc 6070 bp->current_interval = BNX2_TIMER_INTERVAL;
cd339a0e 6071
ca58c3af 6072 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76 6073
48b01e2d 6074 if (bmcr & BMCR_ANENABLE) {
b2fadeae 6075 if (bnx2_5706_serdes_has_link(bp)) {
48b01e2d
MC
6076 bmcr &= ~BMCR_ANENABLE;
6077 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
ca58c3af 6078 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
583c28e5 6079 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
48b01e2d 6080 }
b6016b76 6081 }
48b01e2d
MC
6082 }
6083 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
583c28e5 6084 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
48b01e2d 6085 u32 phy2;
b6016b76 6086
48b01e2d
MC
6087 bnx2_write_phy(bp, 0x17, 0x0f01);
6088 bnx2_read_phy(bp, 0x15, &phy2);
6089 if (phy2 & 0x20) {
6090 u32 bmcr;
cd339a0e 6091
ca58c3af 6092 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
48b01e2d 6093 bmcr |= BMCR_ANENABLE;
ca58c3af 6094 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
b6016b76 6095
583c28e5 6096 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
48b01e2d
MC
6097 }
6098 } else
ac392abc 6099 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 6100
a2724e25 6101 if (check_link) {
b2fadeae
MC
6102 u32 val;
6103
6104 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6105 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6106 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6107
a2724e25
MC
6108 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6109 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6110 bnx2_5706s_force_link_dn(bp, 1);
6111 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6112 } else
6113 bnx2_set_link(bp);
6114 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6115 bnx2_set_link(bp);
b2fadeae 6116 }
48b01e2d
MC
6117 spin_unlock(&bp->phy_lock);
6118}
b6016b76 6119
f8dd064e
MC
6120static void
6121bnx2_5708_serdes_timer(struct bnx2 *bp)
6122{
583c28e5 6123 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
0d8a6571
MC
6124 return;
6125
583c28e5 6126 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
f8dd064e
MC
6127 bp->serdes_an_pending = 0;
6128 return;
6129 }
b6016b76 6130
f8dd064e
MC
6131 spin_lock(&bp->phy_lock);
6132 if (bp->serdes_an_pending)
6133 bp->serdes_an_pending--;
6134 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6135 u32 bmcr;
b6016b76 6136
ca58c3af 6137 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
f8dd064e 6138 if (bmcr & BMCR_ANENABLE) {
605a9e20 6139 bnx2_enable_forced_2g5(bp);
40105c0b 6140 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
f8dd064e 6141 } else {
605a9e20 6142 bnx2_disable_forced_2g5(bp);
f8dd064e 6143 bp->serdes_an_pending = 2;
ac392abc 6144 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 6145 }
b6016b76 6146
f8dd064e 6147 } else
ac392abc 6148 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 6149
f8dd064e
MC
6150 spin_unlock(&bp->phy_lock);
6151}
6152
48b01e2d
MC
6153static void
6154bnx2_timer(unsigned long data)
6155{
6156 struct bnx2 *bp = (struct bnx2 *) data;
b6016b76 6157
48b01e2d
MC
6158 if (!netif_running(bp->dev))
6159 return;
b6016b76 6160
48b01e2d
MC
6161 if (atomic_read(&bp->intr_sem) != 0)
6162 goto bnx2_restart_timer;
b6016b76 6163
efba0180
MC
6164 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6165 BNX2_FLAG_USING_MSI)
6166 bnx2_chk_missed_msi(bp);
6167
df149d70 6168 bnx2_send_heart_beat(bp);
b6016b76 6169
2726d6e1
MC
6170 bp->stats_blk->stat_FwRxDrop =
6171 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
b6016b76 6172
02537b06 6173 /* work around occasional corrupted counters */
61d9e3fa 6174 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
e503e066
MC
6175 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6176 BNX2_HC_COMMAND_STATS_NOW);
02537b06 6177
583c28e5 6178 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
4ce45e02 6179 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
f8dd064e 6180 bnx2_5706_serdes_timer(bp);
27a005b8 6181 else
f8dd064e 6182 bnx2_5708_serdes_timer(bp);
b6016b76
MC
6183 }
6184
6185bnx2_restart_timer:
cd339a0e 6186 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
6187}
6188
8e6a72c4
MC
6189static int
6190bnx2_request_irq(struct bnx2 *bp)
6191{
6d866ffc 6192 unsigned long flags;
b4b36042
MC
6193 struct bnx2_irq *irq;
6194 int rc = 0, i;
8e6a72c4 6195
f86e82fb 6196 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6d866ffc
MC
6197 flags = 0;
6198 else
6199 flags = IRQF_SHARED;
b4b36042
MC
6200
6201 for (i = 0; i < bp->irq_nvecs; i++) {
6202 irq = &bp->irq_tbl[i];
c76c0475 6203 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
f0ea2e63 6204 &bp->bnx2_napi[i]);
b4b36042
MC
6205 if (rc)
6206 break;
6207 irq->requested = 1;
6208 }
8e6a72c4
MC
6209 return rc;
6210}
6211
6212static void
a29ba9d2 6213__bnx2_free_irq(struct bnx2 *bp)
8e6a72c4 6214{
b4b36042
MC
6215 struct bnx2_irq *irq;
6216 int i;
8e6a72c4 6217
b4b36042
MC
6218 for (i = 0; i < bp->irq_nvecs; i++) {
6219 irq = &bp->irq_tbl[i];
6220 if (irq->requested)
f0ea2e63 6221 free_irq(irq->vector, &bp->bnx2_napi[i]);
b4b36042 6222 irq->requested = 0;
6d866ffc 6223 }
a29ba9d2
MC
6224}
6225
6226static void
6227bnx2_free_irq(struct bnx2 *bp)
6228{
6229
6230 __bnx2_free_irq(bp);
f86e82fb 6231 if (bp->flags & BNX2_FLAG_USING_MSI)
b4b36042 6232 pci_disable_msi(bp->pdev);
f86e82fb 6233 else if (bp->flags & BNX2_FLAG_USING_MSIX)
b4b36042
MC
6234 pci_disable_msix(bp->pdev);
6235
f86e82fb 6236 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
b4b36042
MC
6237}
6238
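/* Try to switch to MSI-X.  When CNIC support is built in, one extra vector
 * beyond the requested rx/tx vectors is reserved for the cnic driver;
 * pci_enable_msix_range() may grant fewer vectors than requested, and the
 * granted count (minus the cnic vector) becomes bp->irq_nvecs.
 */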
6239static void
5e9ad9e1 6240bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
b4b36042 6241{
f2a2dfeb 6242 int i, total_vecs;
57851d84 6243 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
4e1d0de9
MC
6244 struct net_device *dev = bp->dev;
6245 const int len = sizeof(bp->irq_tbl[0].name);
57851d84 6246
b4b36042 6247 bnx2_setup_msix_tbl(bp);
e503e066
MC
6248 BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6249 BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6250 BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
57851d84 6251
e2eb8e38
BL
6252 /* Need to flush the previous three writes to ensure MSI-X
 6253 * is set up properly */
e503e066 6254 BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
e2eb8e38 6255
57851d84
MC
6256 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6257 msix_ent[i].entry = i;
6258 msix_ent[i].vector = 0;
6259 }
6260
379b39a2
MC
6261 total_vecs = msix_vecs;
6262#ifdef BCM_CNIC
6263 total_vecs++;
6264#endif
f2a2dfeb
AG
6265 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
6266 BNX2_MIN_MSIX_VEC, total_vecs);
6267 if (total_vecs < 0)
57851d84
MC
6268 return;
6269
379b39a2
MC
6270 msix_vecs = total_vecs;
6271#ifdef BCM_CNIC
6272 msix_vecs--;
6273#endif
5e9ad9e1 6274 bp->irq_nvecs = msix_vecs;
f86e82fb 6275 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
379b39a2 6276 for (i = 0; i < total_vecs; i++) {
57851d84 6277 bp->irq_tbl[i].vector = msix_ent[i].vector;
69010313
MC
6278 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6279 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6280 }
6d866ffc
MC
6281}
6282
657d92fe 6283static int
6d866ffc
MC
6284bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6285{
0a742128 6286 int cpus = netif_get_num_default_rss_queues();
b033281f
MC
6287 int msix_vecs;
6288
6289 if (!bp->num_req_rx_rings)
6290 msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6291 else if (!bp->num_req_tx_rings)
6292 msix_vecs = max(cpus, bp->num_req_rx_rings);
6293 else
6294 msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6295
6296 msix_vecs = min(msix_vecs, RX_MAX_RINGS);
5e9ad9e1 6297
6d866ffc
MC
6298 bp->irq_tbl[0].handler = bnx2_interrupt;
6299 strcpy(bp->irq_tbl[0].name, bp->dev->name);
b4b36042
MC
6300 bp->irq_nvecs = 1;
6301 bp->irq_tbl[0].vector = bp->pdev->irq;
6302
3d5f3a7b 6303 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
5e9ad9e1 6304 bnx2_enable_msix(bp, msix_vecs);
6d866ffc 6305
f86e82fb
DM
6306 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6307 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6d866ffc 6308 if (pci_enable_msi(bp->pdev) == 0) {
f86e82fb 6309 bp->flags |= BNX2_FLAG_USING_MSI;
4ce45e02 6310 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
f86e82fb 6311 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6d866ffc
MC
6312 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6313 } else
6314 bp->irq_tbl[0].handler = bnx2_msi;
b4b36042
MC
6315
6316 bp->irq_tbl[0].vector = bp->pdev->irq;
6d866ffc
MC
6317 }
6318 }
706bf240 6319
b033281f
MC
6320 if (!bp->num_req_tx_rings)
6321 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6322 else
6323 bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6324
6325 if (!bp->num_req_rx_rings)
6326 bp->num_rx_rings = bp->irq_nvecs;
6327 else
6328 bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6329
657d92fe 6330 netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
706bf240 6331
657d92fe 6332 return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
8e6a72c4
MC
6333}
6334
b6016b76
MC
6335/* Called with rtnl_lock */
6336static int
6337bnx2_open(struct net_device *dev)
6338{
972ec0d4 6339 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6340 int rc;
6341
7880b72e 6342 rc = bnx2_request_firmware(bp);
6343 if (rc < 0)
6344 goto out;
6345
1b2f922f
MC
6346 netif_carrier_off(dev);
6347
b6016b76
MC
6348 bnx2_disable_int(bp);
6349
657d92fe
BH
6350 rc = bnx2_setup_int_mode(bp, disable_msi);
6351 if (rc)
6352 goto open_err;
4327ba43 6353 bnx2_init_napi(bp);
35e9010b 6354 bnx2_napi_enable(bp);
b6016b76 6355 rc = bnx2_alloc_mem(bp);
2739a8bb
MC
6356 if (rc)
6357 goto open_err;
b6016b76 6358
8e6a72c4 6359 rc = bnx2_request_irq(bp);
2739a8bb
MC
6360 if (rc)
6361 goto open_err;
b6016b76 6362
9a120bc5 6363 rc = bnx2_init_nic(bp, 1);
2739a8bb
MC
6364 if (rc)
6365 goto open_err;
6aa20a22 6366
cd339a0e 6367 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
6368
6369 atomic_set(&bp->intr_sem, 0);
6370
354fcd77
MC
6371 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6372
b6016b76
MC
6373 bnx2_enable_int(bp);
6374
f86e82fb 6375 if (bp->flags & BNX2_FLAG_USING_MSI) {
b6016b76
MC
 6376 /* Test MSI to make sure it is working.
 6377 * If the MSI test fails, go back to INTx mode.
6378 */
6379 if (bnx2_test_intr(bp) != 0) {
3a9c6a49 6380 netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
b6016b76
MC
6381
6382 bnx2_disable_int(bp);
8e6a72c4 6383 bnx2_free_irq(bp);
b6016b76 6384
6d866ffc
MC
6385 bnx2_setup_int_mode(bp, 1);
6386
9a120bc5 6387 rc = bnx2_init_nic(bp, 0);
b6016b76 6388
8e6a72c4
MC
6389 if (!rc)
6390 rc = bnx2_request_irq(bp);
6391
b6016b76 6392 if (rc) {
b6016b76 6393 del_timer_sync(&bp->timer);
2739a8bb 6394 goto open_err;
b6016b76
MC
6395 }
6396 bnx2_enable_int(bp);
6397 }
6398 }
f86e82fb 6399 if (bp->flags & BNX2_FLAG_USING_MSI)
3a9c6a49 6400 netdev_info(dev, "using MSI\n");
f86e82fb 6401 else if (bp->flags & BNX2_FLAG_USING_MSIX)
3a9c6a49 6402 netdev_info(dev, "using MSIX\n");
b6016b76 6403
706bf240 6404 netif_tx_start_all_queues(dev);
7880b72e 6405out:
6406 return rc;
2739a8bb
MC
6407
6408open_err:
6409 bnx2_napi_disable(bp);
6410 bnx2_free_skbs(bp);
6411 bnx2_free_irq(bp);
6412 bnx2_free_mem(bp);
f048fa9c 6413 bnx2_del_napi(bp);
7880b72e 6414 bnx2_release_firmware(bp);
6415 goto out;
b6016b76
MC
6416}
6417
6418static void
c4028958 6419bnx2_reset_task(struct work_struct *work)
b6016b76 6420{
c4028958 6421 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
cd634019 6422 int rc;
efdfad32 6423 u16 pcicmd;
b6016b76 6424
51bf6bb4
MC
6425 rtnl_lock();
6426 if (!netif_running(bp->dev)) {
6427 rtnl_unlock();
afdc08b9 6428 return;
51bf6bb4 6429 }
afdc08b9 6430
212f9934 6431 bnx2_netif_stop(bp, true);
b6016b76 6432
efdfad32
MC
6433 pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6434 if (!(pcicmd & PCI_COMMAND_MEMORY)) {
6435 /* in case PCI block has reset */
6436 pci_restore_state(bp->pdev);
6437 pci_save_state(bp->pdev);
6438 }
cd634019
MC
6439 rc = bnx2_init_nic(bp, 1);
6440 if (rc) {
6441 netdev_err(bp->dev, "failed to reset NIC, closing\n");
6442 bnx2_napi_enable(bp);
6443 dev_close(bp->dev);
6444 rtnl_unlock();
6445 return;
6446 }
b6016b76
MC
6447
6448 atomic_set(&bp->intr_sem, 1);
212f9934 6449 bnx2_netif_start(bp, true);
51bf6bb4 6450 rtnl_unlock();
b6016b76
MC
6451}
6452
555069da
MC
6453#define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6454
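/* On tx timeout, dump the firmware flow-through queue (FTQ) control
 * registers, each on-chip CPU's mode/state/event-mask and program counter
 * (read twice, presumably so a stuck vs. advancing CPU can be told apart),
 * and the TX BD cache (TBDC) CAM contents.
 */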
6455static void
6456bnx2_dump_ftq(struct bnx2 *bp)
6457{
6458 int i;
6459 u32 reg, bdidx, cid, valid;
6460 struct net_device *dev = bp->dev;
6461 static const struct ftq_reg {
6462 char *name;
6463 u32 off;
6464 } ftq_arr[] = {
6465 BNX2_FTQ_ENTRY(RV2P_P),
6466 BNX2_FTQ_ENTRY(RV2P_T),
6467 BNX2_FTQ_ENTRY(RV2P_M),
6468 BNX2_FTQ_ENTRY(TBDR_),
6469 BNX2_FTQ_ENTRY(TDMA_),
6470 BNX2_FTQ_ENTRY(TXP_),
6471 BNX2_FTQ_ENTRY(TXP_),
6472 BNX2_FTQ_ENTRY(TPAT_),
6473 BNX2_FTQ_ENTRY(RXP_C),
6474 BNX2_FTQ_ENTRY(RXP_),
6475 BNX2_FTQ_ENTRY(COM_COMXQ_),
6476 BNX2_FTQ_ENTRY(COM_COMTQ_),
6477 BNX2_FTQ_ENTRY(COM_COMQ_),
6478 BNX2_FTQ_ENTRY(CP_CPQ_),
6479 };
6480
6481 netdev_err(dev, "<--- start FTQ dump --->\n");
6482 for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6483 netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6484 bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6485
6486 netdev_err(dev, "CPU states:\n");
6487 for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6488 netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6489 reg, bnx2_reg_rd_ind(bp, reg),
6490 bnx2_reg_rd_ind(bp, reg + 4),
6491 bnx2_reg_rd_ind(bp, reg + 8),
6492 bnx2_reg_rd_ind(bp, reg + 0x1c),
6493 bnx2_reg_rd_ind(bp, reg + 0x1c),
6494 bnx2_reg_rd_ind(bp, reg + 0x20));
6495
6496 netdev_err(dev, "<--- end FTQ dump --->\n");
6497 netdev_err(dev, "<--- start TBDC dump --->\n");
6498 netdev_err(dev, "TBDC free cnt: %ld\n",
e503e066 6499 BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
555069da
MC
6500 netdev_err(dev, "LINE CID BIDX CMD VALIDS\n");
6501 for (i = 0; i < 0x20; i++) {
6502 int j = 0;
6503
e503e066
MC
6504 BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6505 BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6506 BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6507 BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6508 while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
555069da
MC
6509 BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6510 j++;
6511
e503e066
MC
6512 cid = BNX2_RD(bp, BNX2_TBDC_CID);
6513 bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6514 valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
555069da
MC
6515 netdev_err(dev, "%02x %06x %04lx %02x [%x]\n",
6516 i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6517 bdidx >> 24, (valid >> 8) & 0x0ff);
6518 }
6519 netdev_err(dev, "<--- end TBDC dump --->\n");
6520}
6521
20175c57
MC
6522static void
6523bnx2_dump_state(struct bnx2 *bp)
6524{
6525 struct net_device *dev = bp->dev;
ecdbf6e0 6526 u32 val1, val2;
5804a8fb
MC
6527
6528 pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6529 netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6530 atomic_read(&bp->intr_sem), val1);
6531 pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6532 pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6533 netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
b98eba52 6534 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
e503e066
MC
6535 BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6536 BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
b98eba52 6537 netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
e503e066 6538 BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
3a9c6a49 6539 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
e503e066 6540 BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
20175c57 6541 if (bp->flags & BNX2_FLAG_USING_MSIX)
3a9c6a49 6542 netdev_err(dev, "DEBUG: PBA[%08x]\n",
e503e066 6543 BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
20175c57
MC
6544}
6545
b6016b76
MC
6546static void
6547bnx2_tx_timeout(struct net_device *dev)
6548{
972ec0d4 6549 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6550
555069da 6551 bnx2_dump_ftq(bp);
20175c57 6552 bnx2_dump_state(bp);
ecdbf6e0 6553 bnx2_dump_mcp_state(bp);
20175c57 6554
b6016b76
MC
 6555 /* This allows the netif to be shut down gracefully before resetting */
6556 schedule_work(&bp->reset_task);
6557}
6558
932ff279 6559/* Called with netif_tx_lock.
2f8af120
MC
6560 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6561 * netif_wake_queue().
b6016b76 6562 */
61357325 6563static netdev_tx_t
b6016b76
MC
6564bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6565{
972ec0d4 6566 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6567 dma_addr_t mapping;
2bc4078e
MC
6568 struct bnx2_tx_bd *txbd;
6569 struct bnx2_sw_tx_bd *tx_buf;
b6016b76
MC
6570 u32 len, vlan_tag_flags, last_frag, mss;
6571 u16 prod, ring_prod;
6572 int i;
706bf240
BL
6573 struct bnx2_napi *bnapi;
6574 struct bnx2_tx_ring_info *txr;
6575 struct netdev_queue *txq;
6576
6577 /* Determine which tx ring we will be placed on */
6578 i = skb_get_queue_mapping(skb);
6579 bnapi = &bp->bnx2_napi[i];
6580 txr = &bnapi->tx_ring;
6581 txq = netdev_get_tx_queue(dev, i);
b6016b76 6582
35e9010b 6583 if (unlikely(bnx2_tx_avail(bp, txr) <
a550c99b 6584 (skb_shinfo(skb)->nr_frags + 1))) {
706bf240 6585 netif_tx_stop_queue(txq);
3a9c6a49 6586 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
b6016b76
MC
6587
6588 return NETDEV_TX_BUSY;
6589 }
6590 len = skb_headlen(skb);
35e9010b 6591 prod = txr->tx_prod;
2bc4078e 6592 ring_prod = BNX2_TX_RING_IDX(prod);
b6016b76
MC
6593
6594 vlan_tag_flags = 0;
84fa7933 6595 if (skb->ip_summed == CHECKSUM_PARTIAL) {
b6016b76
MC
6596 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6597 }
6598
eab6d18d 6599 if (vlan_tx_tag_present(skb)) {
b6016b76
MC
6600 vlan_tag_flags |=
6601 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6602 }
7d0fd211 6603
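	/* For TSO, flag the BD as software LSO and encode the IP/TCP option
	 * lengths (and, for IPv6, the TCP header offset) into the BD flags
	 * and mss fields so the chip can adjust the headers of each segment
	 * it generates.
	 */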
fde82055 6604 if ((mss = skb_shinfo(skb)->gso_size)) {
a1efb4b6 6605 u32 tcp_opt_len;
eddc9ec5 6606 struct iphdr *iph;
b6016b76 6607
b6016b76
MC
6608 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6609
4666f87a
MC
6610 tcp_opt_len = tcp_optlen(skb);
6611
6612 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6613 u32 tcp_off = skb_transport_offset(skb) -
6614 sizeof(struct ipv6hdr) - ETH_HLEN;
ab6a5bb6 6615
4666f87a
MC
6616 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6617 TX_BD_FLAGS_SW_FLAGS;
6618 if (likely(tcp_off == 0))
6619 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6620 else {
6621 tcp_off >>= 3;
6622 vlan_tag_flags |= ((tcp_off & 0x3) <<
6623 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6624 ((tcp_off & 0x10) <<
6625 TX_BD_FLAGS_TCP6_OFF4_SHL);
6626 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6627 }
6628 } else {
4666f87a 6629 iph = ip_hdr(skb);
4666f87a
MC
6630 if (tcp_opt_len || (iph->ihl > 5)) {
6631 vlan_tag_flags |= ((iph->ihl - 5) +
6632 (tcp_opt_len >> 2)) << 8;
6633 }
b6016b76 6634 }
4666f87a 6635 } else
b6016b76 6636 mss = 0;
b6016b76 6637
36227e88
SG
6638 mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6639 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
f458b2ee 6640 dev_kfree_skb_any(skb);
3d16af86
BL
6641 return NETDEV_TX_OK;
6642 }
6643
35e9010b 6644 tx_buf = &txr->tx_buf_ring[ring_prod];
b6016b76 6645 tx_buf->skb = skb;
1a4ccc2d 6646 dma_unmap_addr_set(tx_buf, mapping, mapping);
b6016b76 6647
35e9010b 6648 txbd = &txr->tx_desc_ring[ring_prod];
b6016b76
MC
6649
6650 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6651 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6652 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6653 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6654
6655 last_frag = skb_shinfo(skb)->nr_frags;
d62fda08
ED
6656 tx_buf->nr_frags = last_frag;
6657 tx_buf->is_gso = skb_is_gso(skb);
b6016b76
MC
6658
6659 for (i = 0; i < last_frag; i++) {
9e903e08 6660 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
b6016b76 6661
2bc4078e
MC
6662 prod = BNX2_NEXT_TX_BD(prod);
6663 ring_prod = BNX2_TX_RING_IDX(prod);
35e9010b 6664 txbd = &txr->tx_desc_ring[ring_prod];
b6016b76 6665
9e903e08 6666 len = skb_frag_size(frag);
b7b6a688 6667 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
5d6bcdfe 6668 DMA_TO_DEVICE);
36227e88 6669 if (dma_mapping_error(&bp->pdev->dev, mapping))
e95524a7 6670 goto dma_error;
1a4ccc2d 6671 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
e95524a7 6672 mapping);
b6016b76
MC
6673
6674 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6675 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6676 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6677 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6678
6679 }
6680 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6681
94bf91ba
VZ
6682 /* Sync BD data before updating TX mailbox */
6683 wmb();
6684
e9831909
ED
6685 netdev_tx_sent_queue(txq, skb->len);
6686
2bc4078e 6687 prod = BNX2_NEXT_TX_BD(prod);
35e9010b 6688 txr->tx_prod_bseq += skb->len;
b6016b76 6689
e503e066
MC
6690 BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6691 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
b6016b76
MC
6692
6693 mmiowb();
6694
35e9010b 6695 txr->tx_prod = prod;
b6016b76 6696
35e9010b 6697 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
706bf240 6698 netif_tx_stop_queue(txq);
11848b96
MC
6699
6700 /* netif_tx_stop_queue() must be done before checking
6701 * tx index in bnx2_tx_avail() below, because in
6702 * bnx2_tx_int(), we update tx index before checking for
6703 * netif_tx_queue_stopped().
6704 */
6705 smp_mb();
35e9010b 6706 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
706bf240 6707 netif_tx_wake_queue(txq);
b6016b76
MC
6708 }
6709
e95524a7
AD
6710 return NETDEV_TX_OK;
6711dma_error:
6712 /* save value of frag that failed */
6713 last_frag = i;
6714
6715 /* start back at beginning and unmap skb */
6716 prod = txr->tx_prod;
2bc4078e 6717 ring_prod = BNX2_TX_RING_IDX(prod);
e95524a7
AD
6718 tx_buf = &txr->tx_buf_ring[ring_prod];
6719 tx_buf->skb = NULL;
36227e88 6720 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
e95524a7
AD
6721 skb_headlen(skb), PCI_DMA_TODEVICE);
6722
6723 /* unmap remaining mapped pages */
6724 for (i = 0; i < last_frag; i++) {
2bc4078e
MC
6725 prod = BNX2_NEXT_TX_BD(prod);
6726 ring_prod = BNX2_TX_RING_IDX(prod);
e95524a7 6727 tx_buf = &txr->tx_buf_ring[ring_prod];
36227e88 6728 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
9e903e08 6729 skb_frag_size(&skb_shinfo(skb)->frags[i]),
e95524a7
AD
6730 PCI_DMA_TODEVICE);
6731 }
6732
f458b2ee 6733 dev_kfree_skb_any(skb);
b6016b76
MC
6734 return NETDEV_TX_OK;
6735}
6736
6737/* Called with rtnl_lock */
6738static int
6739bnx2_close(struct net_device *dev)
6740{
972ec0d4 6741 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6742
bea3348e 6743 bnx2_disable_int_sync(bp);
35efa7c1 6744 bnx2_napi_disable(bp);
d2e553bc 6745 netif_tx_disable(dev);
b6016b76 6746 del_timer_sync(&bp->timer);
74bf4ba3 6747 bnx2_shutdown_chip(bp);
8e6a72c4 6748 bnx2_free_irq(bp);
b6016b76
MC
6749 bnx2_free_skbs(bp);
6750 bnx2_free_mem(bp);
f048fa9c 6751 bnx2_del_napi(bp);
b6016b76
MC
6752 bp->link_up = 0;
6753 netif_carrier_off(bp->dev);
b6016b76
MC
6754 return 0;
6755}
6756
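/* Fold the current hardware statistics block into temp_stats_blk so the
 * counters survive a chip reset.  The first ten counters are 64-bit,
 * stored as a 32-bit high word followed by a 32-bit low word, so the loop
 * below propagates the carry from the low word into the high word by hand.
 */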
354fcd77
MC
6757static void
6758bnx2_save_stats(struct bnx2 *bp)
6759{
6760 u32 *hw_stats = (u32 *) bp->stats_blk;
6761 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6762 int i;
6763
6764 /* The 1st 10 counters are 64-bit counters */
6765 for (i = 0; i < 20; i += 2) {
6766 u32 hi;
6767 u64 lo;
6768
c9885fe5
PR
6769 hi = temp_stats[i] + hw_stats[i];
6770 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
354fcd77
MC
6771 if (lo > 0xffffffff)
6772 hi++;
c9885fe5
PR
6773 temp_stats[i] = hi;
6774 temp_stats[i + 1] = lo & 0xffffffff;
354fcd77
MC
6775 }
6776
6777 for ( ; i < sizeof(struct statistics_block) / 4; i++)
c9885fe5 6778 temp_stats[i] += hw_stats[i];
354fcd77
MC
6779}
6780
5d07bf26
ED
6781#define GET_64BIT_NET_STATS64(ctr) \
6782 (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
b6016b76 6783
a4743058 6784#define GET_64BIT_NET_STATS(ctr) \
354fcd77
MC
6785 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6786 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
b6016b76 6787
a4743058 6788#define GET_32BIT_NET_STATS(ctr) \
354fcd77
MC
6789 (unsigned long) (bp->stats_blk->ctr + \
6790 bp->temp_stats_blk->ctr)
a4743058 6791
5d07bf26
ED
6792static struct rtnl_link_stats64 *
6793bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
b6016b76 6794{
972ec0d4 6795 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6796
5d07bf26 6797 if (bp->stats_blk == NULL)
b6016b76 6798 return net_stats;
5d07bf26 6799
b6016b76 6800 net_stats->rx_packets =
a4743058
MC
6801 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6802 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6803 GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
b6016b76
MC
6804
6805 net_stats->tx_packets =
a4743058
MC
6806 GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6807 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6808 GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
b6016b76
MC
6809
6810 net_stats->rx_bytes =
a4743058 6811 GET_64BIT_NET_STATS(stat_IfHCInOctets);
b6016b76
MC
6812
6813 net_stats->tx_bytes =
a4743058 6814 GET_64BIT_NET_STATS(stat_IfHCOutOctets);
b6016b76 6815
6aa20a22 6816 net_stats->multicast =
6fdae995 6817 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
b6016b76 6818
6aa20a22 6819 net_stats->collisions =
a4743058 6820 GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
b6016b76 6821
6aa20a22 6822 net_stats->rx_length_errors =
a4743058
MC
6823 GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6824 GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
b6016b76 6825
6aa20a22 6826 net_stats->rx_over_errors =
a4743058
MC
6827 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6828 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
b6016b76 6829
6aa20a22 6830 net_stats->rx_frame_errors =
a4743058 6831 GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
b6016b76 6832
6aa20a22 6833 net_stats->rx_crc_errors =
a4743058 6834 GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
b6016b76
MC
6835
6836 net_stats->rx_errors = net_stats->rx_length_errors +
6837 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6838 net_stats->rx_crc_errors;
6839
6840 net_stats->tx_aborted_errors =
a4743058
MC
6841 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6842 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
b6016b76 6843
4ce45e02
MC
6844 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
6845 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
b6016b76
MC
6846 net_stats->tx_carrier_errors = 0;
6847 else {
6848 net_stats->tx_carrier_errors =
a4743058 6849 GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
b6016b76
MC
6850 }
6851
6852 net_stats->tx_errors =
a4743058 6853 GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
b6016b76
MC
6854 net_stats->tx_aborted_errors +
6855 net_stats->tx_carrier_errors;
6856
cea94db9 6857 net_stats->rx_missed_errors =
a4743058
MC
6858 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6859 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6860 GET_32BIT_NET_STATS(stat_FwRxDrop);
cea94db9 6861
b6016b76
MC
6862 return net_stats;
6863}
6864
6865/* All ethtool functions called with rtnl_lock */
6866
6867static int
6868bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6869{
972ec0d4 6870 struct bnx2 *bp = netdev_priv(dev);
7b6b8347 6871 int support_serdes = 0, support_copper = 0;
b6016b76
MC
6872
6873 cmd->supported = SUPPORTED_Autoneg;
583c28e5 6874 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7b6b8347
MC
6875 support_serdes = 1;
6876 support_copper = 1;
6877 } else if (bp->phy_port == PORT_FIBRE)
6878 support_serdes = 1;
6879 else
6880 support_copper = 1;
6881
6882 if (support_serdes) {
b6016b76
MC
6883 cmd->supported |= SUPPORTED_1000baseT_Full |
6884 SUPPORTED_FIBRE;
583c28e5 6885 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
605a9e20 6886 cmd->supported |= SUPPORTED_2500baseX_Full;
b6016b76 6887
b6016b76 6888 }
7b6b8347 6889 if (support_copper) {
b6016b76
MC
6890 cmd->supported |= SUPPORTED_10baseT_Half |
6891 SUPPORTED_10baseT_Full |
6892 SUPPORTED_100baseT_Half |
6893 SUPPORTED_100baseT_Full |
6894 SUPPORTED_1000baseT_Full |
6895 SUPPORTED_TP;
6896
b6016b76
MC
6897 }
6898
7b6b8347
MC
6899 spin_lock_bh(&bp->phy_lock);
6900 cmd->port = bp->phy_port;
b6016b76
MC
6901 cmd->advertising = bp->advertising;
6902
6903 if (bp->autoneg & AUTONEG_SPEED) {
6904 cmd->autoneg = AUTONEG_ENABLE;
70739497 6905 } else {
b6016b76
MC
6906 cmd->autoneg = AUTONEG_DISABLE;
6907 }
6908
6909 if (netif_carrier_ok(dev)) {
70739497 6910 ethtool_cmd_speed_set(cmd, bp->line_speed);
b6016b76 6911 cmd->duplex = bp->duplex;
4016badd
MC
6912 if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
6913 if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
6914 cmd->eth_tp_mdix = ETH_TP_MDI_X;
6915 else
6916 cmd->eth_tp_mdix = ETH_TP_MDI;
6917 }
b6016b76
MC
6918 }
6919 else {
537fae01
JP
6920 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
6921 cmd->duplex = DUPLEX_UNKNOWN;
b6016b76 6922 }
7b6b8347 6923 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6924
6925 cmd->transceiver = XCVR_INTERNAL;
6926 cmd->phy_address = bp->phy_addr;
6927
6928 return 0;
6929}
6aa20a22 6930
b6016b76
MC
6931static int
6932bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6933{
972ec0d4 6934 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6935 u8 autoneg = bp->autoneg;
6936 u8 req_duplex = bp->req_duplex;
6937 u16 req_line_speed = bp->req_line_speed;
6938 u32 advertising = bp->advertising;
7b6b8347
MC
6939 int err = -EINVAL;
6940
6941 spin_lock_bh(&bp->phy_lock);
6942
6943 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6944 goto err_out_unlock;
6945
583c28e5
MC
6946 if (cmd->port != bp->phy_port &&
6947 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
7b6b8347 6948 goto err_out_unlock;
b6016b76 6949
d6b14486
MC
6950 /* If device is down, we can store the settings only if the user
6951 * is setting the currently active port.
6952 */
6953 if (!netif_running(dev) && cmd->port != bp->phy_port)
6954 goto err_out_unlock;
6955
b6016b76
MC
6956 if (cmd->autoneg == AUTONEG_ENABLE) {
6957 autoneg |= AUTONEG_SPEED;
6958
beb499af
MC
6959 advertising = cmd->advertising;
6960 if (cmd->port == PORT_TP) {
6961 advertising &= ETHTOOL_ALL_COPPER_SPEED;
6962 if (!advertising)
b6016b76 6963 advertising = ETHTOOL_ALL_COPPER_SPEED;
beb499af
MC
6964 } else {
6965 advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6966 if (!advertising)
6967 advertising = ETHTOOL_ALL_FIBRE_SPEED;
b6016b76
MC
6968 }
6969 advertising |= ADVERTISED_Autoneg;
6970 }
6971 else {
25db0338 6972 u32 speed = ethtool_cmd_speed(cmd);
7b6b8347 6973 if (cmd->port == PORT_FIBRE) {
25db0338
DD
6974 if ((speed != SPEED_1000 &&
6975 speed != SPEED_2500) ||
80be4434 6976 (cmd->duplex != DUPLEX_FULL))
7b6b8347 6977 goto err_out_unlock;
80be4434 6978
25db0338 6979 if (speed == SPEED_2500 &&
583c28e5 6980 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
7b6b8347 6981 goto err_out_unlock;
25db0338 6982 } else if (speed == SPEED_1000 || speed == SPEED_2500)
7b6b8347
MC
6983 goto err_out_unlock;
6984
b6016b76 6985 autoneg &= ~AUTONEG_SPEED;
25db0338 6986 req_line_speed = speed;
b6016b76
MC
6987 req_duplex = cmd->duplex;
6988 advertising = 0;
6989 }
6990
6991 bp->autoneg = autoneg;
6992 bp->advertising = advertising;
6993 bp->req_line_speed = req_line_speed;
6994 bp->req_duplex = req_duplex;
6995
d6b14486
MC
6996 err = 0;
6997 /* If device is down, the new settings will be picked up when it is
6998 * brought up.
6999 */
7000 if (netif_running(dev))
7001 err = bnx2_setup_phy(bp, cmd->port);
b6016b76 7002
7b6b8347 7003err_out_unlock:
c770a65c 7004 spin_unlock_bh(&bp->phy_lock);
b6016b76 7005
7b6b8347 7006 return err;
b6016b76
MC
7007}
7008
7009static void
7010bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7011{
972ec0d4 7012 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7013
68aad78c
RJ
7014 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
7015 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
7016 strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
7017 strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
b6016b76
MC
7018}
7019
244ac4f4
MC
7020#define BNX2_REGDUMP_LEN (32 * 1024)
7021
7022static int
7023bnx2_get_regs_len(struct net_device *dev)
7024{
7025 return BNX2_REGDUMP_LEN;
7026}
7027
7028static void
7029bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
7030{
7031 u32 *p = _p, i, offset;
7032 u8 *orig_p = _p;
7033 struct bnx2 *bp = netdev_priv(dev);
b6bc7650
JP
7034 static const u32 reg_boundaries[] = {
7035 0x0000, 0x0098, 0x0400, 0x045c,
7036 0x0800, 0x0880, 0x0c00, 0x0c10,
7037 0x0c30, 0x0d08, 0x1000, 0x101c,
7038 0x1040, 0x1048, 0x1080, 0x10a4,
7039 0x1400, 0x1490, 0x1498, 0x14f0,
7040 0x1500, 0x155c, 0x1580, 0x15dc,
7041 0x1600, 0x1658, 0x1680, 0x16d8,
7042 0x1800, 0x1820, 0x1840, 0x1854,
7043 0x1880, 0x1894, 0x1900, 0x1984,
7044 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
7045 0x1c80, 0x1c94, 0x1d00, 0x1d84,
7046 0x2000, 0x2030, 0x23c0, 0x2400,
7047 0x2800, 0x2820, 0x2830, 0x2850,
7048 0x2b40, 0x2c10, 0x2fc0, 0x3058,
7049 0x3c00, 0x3c94, 0x4000, 0x4010,
7050 0x4080, 0x4090, 0x43c0, 0x4458,
7051 0x4c00, 0x4c18, 0x4c40, 0x4c54,
7052 0x4fc0, 0x5010, 0x53c0, 0x5444,
7053 0x5c00, 0x5c18, 0x5c80, 0x5c90,
7054 0x5fc0, 0x6000, 0x6400, 0x6428,
7055 0x6800, 0x6848, 0x684c, 0x6860,
7056 0x6888, 0x6910, 0x8000
7057 };
244ac4f4
MC
7058
7059 regs->version = 0;
7060
7061 memset(p, 0, BNX2_REGDUMP_LEN);
7062
7063 if (!netif_running(bp->dev))
7064 return;
7065
7066 i = 0;
7067 offset = reg_boundaries[0];
7068 p += offset;
7069 while (offset < BNX2_REGDUMP_LEN) {
e503e066 7070 *p++ = BNX2_RD(bp, offset);
244ac4f4
MC
7071 offset += 4;
7072 if (offset == reg_boundaries[i + 1]) {
7073 offset = reg_boundaries[i + 2];
7074 p = (u32 *) (orig_p + offset);
7075 i += 2;
7076 }
7077 }
7078}
7079
b6016b76
MC
7080static void
7081bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7082{
972ec0d4 7083 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7084
f86e82fb 7085 if (bp->flags & BNX2_FLAG_NO_WOL) {
b6016b76
MC
7086 wol->supported = 0;
7087 wol->wolopts = 0;
7088 }
7089 else {
7090 wol->supported = WAKE_MAGIC;
7091 if (bp->wol)
7092 wol->wolopts = WAKE_MAGIC;
7093 else
7094 wol->wolopts = 0;
7095 }
7096 memset(&wol->sopass, 0, sizeof(wol->sopass));
7097}
7098
7099static int
7100bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7101{
972ec0d4 7102 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7103
7104 if (wol->wolopts & ~WAKE_MAGIC)
7105 return -EINVAL;
7106
7107 if (wol->wolopts & WAKE_MAGIC) {
f86e82fb 7108 if (bp->flags & BNX2_FLAG_NO_WOL)
b6016b76
MC
7109 return -EINVAL;
7110
7111 bp->wol = 1;
7112 }
7113 else {
7114 bp->wol = 0;
7115 }
6d5e85c7
MC
7116
7117 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7118
b6016b76
MC
7119 return 0;
7120}
7121
7122static int
7123bnx2_nway_reset(struct net_device *dev)
7124{
972ec0d4 7125 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7126 u32 bmcr;
7127
9f52b564
MC
7128 if (!netif_running(dev))
7129 return -EAGAIN;
7130
b6016b76
MC
7131 if (!(bp->autoneg & AUTONEG_SPEED)) {
7132 return -EINVAL;
7133 }
7134
c770a65c 7135 spin_lock_bh(&bp->phy_lock);
b6016b76 7136
583c28e5 7137 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7b6b8347
MC
7138 int rc;
7139
7140 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7141 spin_unlock_bh(&bp->phy_lock);
7142 return rc;
7143 }
7144
b6016b76 7145 /* Force a link down visible on the other side */
583c28e5 7146 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
ca58c3af 7147 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
c770a65c 7148 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
7149
7150 msleep(20);
7151
c770a65c 7152 spin_lock_bh(&bp->phy_lock);
f8dd064e 7153
40105c0b 7154 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
f8dd064e
MC
7155 bp->serdes_an_pending = 1;
7156 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
7157 }
7158
ca58c3af 7159 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76 7160 bmcr &= ~BMCR_LOOPBACK;
ca58c3af 7161 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
b6016b76 7162
c770a65c 7163 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
7164
7165 return 0;
7166}
7167
7959ea25
ON
7168static u32
7169bnx2_get_link(struct net_device *dev)
7170{
7171 struct bnx2 *bp = netdev_priv(dev);
7172
7173 return bp->link_up;
7174}
7175
b6016b76
MC
7176static int
7177bnx2_get_eeprom_len(struct net_device *dev)
7178{
972ec0d4 7179 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7180
1122db71 7181 if (bp->flash_info == NULL)
b6016b76
MC
7182 return 0;
7183
1122db71 7184 return (int) bp->flash_size;
b6016b76
MC
7185}
7186
7187static int
7188bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7189 u8 *eebuf)
7190{
972ec0d4 7191 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7192 int rc;
7193
1064e944 7194 /* parameters already validated in ethtool_get_eeprom */
b6016b76
MC
7195
7196 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7197
7198 return rc;
7199}
7200
7201static int
7202bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7203 u8 *eebuf)
7204{
972ec0d4 7205 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7206 int rc;
7207
1064e944 7208 /* parameters already validated in ethtool_set_eeprom */
b6016b76
MC
7209
7210 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7211
7212 return rc;
7213}
7214
7215static int
7216bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7217{
972ec0d4 7218 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7219
7220 memset(coal, 0, sizeof(struct ethtool_coalesce));
7221
7222 coal->rx_coalesce_usecs = bp->rx_ticks;
7223 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7224 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7225 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7226
7227 coal->tx_coalesce_usecs = bp->tx_ticks;
7228 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7229 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7230 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7231
7232 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7233
7234 return 0;
7235}
7236
7237static int
7238bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7239{
972ec0d4 7240 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7241
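	/* Clamp each parameter to what the host coalescing registers can
	 * hold: 0x3ff for tick values, 0xff for frame counts.
	 */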
7242 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7243 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7244
6aa20a22 7245 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
b6016b76
MC
7246 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7247
7248 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7249 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7250
7251 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7252 if (bp->rx_quick_cons_trip_int > 0xff)
7253 bp->rx_quick_cons_trip_int = 0xff;
7254
7255 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7256 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7257
7258 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7259 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7260
7261 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7262 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7263
7264 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7265 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7266 0xff;
7267
7268 bp->stats_ticks = coal->stats_block_coalesce_usecs;
61d9e3fa 7269 if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
02537b06
MC
7270 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7271 bp->stats_ticks = USEC_PER_SEC;
7272 }
7ea6920e
MC
7273 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7274 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7275 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
b6016b76
MC
7276
7277 if (netif_running(bp->dev)) {
212f9934 7278 bnx2_netif_stop(bp, true);
9a120bc5 7279 bnx2_init_nic(bp, 0);
212f9934 7280 bnx2_netif_start(bp, true);
b6016b76
MC
7281 }
7282
7283 return 0;
7284}
7285
7286static void
7287bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7288{
972ec0d4 7289 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7290
2bc4078e
MC
7291 ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7292 ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
b6016b76
MC
7293
7294 ering->rx_pending = bp->rx_ring_size;
47bf4246 7295 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
b6016b76 7296
2bc4078e 7297 ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
b6016b76
MC
7298 ering->tx_pending = bp->tx_ring_size;
7299}
7300
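/* Resize the rings.  When reset_irq is true the IRQ vectors and NAPI
 * contexts are torn down and set up again as well, which is needed when
 * the number of rings is changed via the ethtool channels interface.
 */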
7301static int
b033281f 7302bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
b6016b76 7303{
13daffa2 7304 if (netif_running(bp->dev)) {
354fcd77
MC
7305 /* Reset will erase chipset stats; save them */
7306 bnx2_save_stats(bp);
7307
212f9934 7308 bnx2_netif_stop(bp, true);
13daffa2 7309 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
b033281f
MC
7310 if (reset_irq) {
7311 bnx2_free_irq(bp);
7312 bnx2_del_napi(bp);
7313 } else {
7314 __bnx2_free_irq(bp);
7315 }
13daffa2
MC
7316 bnx2_free_skbs(bp);
7317 bnx2_free_mem(bp);
7318 }
7319
5d5d0015
MC
7320 bnx2_set_rx_ring_size(bp, rx);
7321 bp->tx_ring_size = tx;
b6016b76
MC
7322
7323 if (netif_running(bp->dev)) {
b033281f
MC
7324 int rc = 0;
7325
7326 if (reset_irq) {
7327 rc = bnx2_setup_int_mode(bp, disable_msi);
7328 bnx2_init_napi(bp);
7329 }
7330
7331 if (!rc)
7332 rc = bnx2_alloc_mem(bp);
13daffa2 7333
a29ba9d2
MC
7334 if (!rc)
7335 rc = bnx2_request_irq(bp);
7336
6fefb65e
MC
7337 if (!rc)
7338 rc = bnx2_init_nic(bp, 0);
7339
7340 if (rc) {
7341 bnx2_napi_enable(bp);
7342 dev_close(bp->dev);
13daffa2 7343 return rc;
6fefb65e 7344 }
e9f26c49
MC
7345#ifdef BCM_CNIC
7346 mutex_lock(&bp->cnic_lock);
7347 /* Let cnic know about the new status block. */
7348 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7349 bnx2_setup_cnic_irq_info(bp);
7350 mutex_unlock(&bp->cnic_lock);
7351#endif
212f9934 7352 bnx2_netif_start(bp, true);
b6016b76 7353 }
b6016b76
MC
7354 return 0;
7355}
7356
5d5d0015
MC
7357static int
7358bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7359{
7360 struct bnx2 *bp = netdev_priv(dev);
7361 int rc;
7362
2bc4078e
MC
7363 if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7364 (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
5d5d0015
MC
7365 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7366
7367 return -EINVAL;
7368 }
b033281f
MC
7369 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7370 false);
5d5d0015
MC
7371 return rc;
7372}
7373
b6016b76
MC
7374static void
7375bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7376{
972ec0d4 7377 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7378
7379 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7380 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7381 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7382}
7383
7384static int
7385bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7386{
972ec0d4 7387 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7388
7389 bp->req_flow_ctrl = 0;
7390 if (epause->rx_pause)
7391 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7392 if (epause->tx_pause)
7393 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7394
7395 if (epause->autoneg) {
7396 bp->autoneg |= AUTONEG_FLOW_CTRL;
7397 }
7398 else {
7399 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7400 }
7401
9f52b564
MC
7402 if (netif_running(dev)) {
7403 spin_lock_bh(&bp->phy_lock);
7404 bnx2_setup_phy(bp, bp->phy_port);
7405 spin_unlock_bh(&bp->phy_lock);
7406 }
b6016b76
MC
7407
7408 return 0;
7409}
7410
14ab9b86 7411static struct {
b6016b76 7412 char string[ETH_GSTRING_LEN];
790dab2f 7413} bnx2_stats_str_arr[] = {
b6016b76
MC
7414 { "rx_bytes" },
7415 { "rx_error_bytes" },
7416 { "tx_bytes" },
7417 { "tx_error_bytes" },
7418 { "rx_ucast_packets" },
7419 { "rx_mcast_packets" },
7420 { "rx_bcast_packets" },
7421 { "tx_ucast_packets" },
7422 { "tx_mcast_packets" },
7423 { "tx_bcast_packets" },
7424 { "tx_mac_errors" },
7425 { "tx_carrier_errors" },
7426 { "rx_crc_errors" },
7427 { "rx_align_errors" },
7428 { "tx_single_collisions" },
7429 { "tx_multi_collisions" },
7430 { "tx_deferred" },
7431 { "tx_excess_collisions" },
7432 { "tx_late_collisions" },
7433 { "tx_total_collisions" },
7434 { "rx_fragments" },
7435 { "rx_jabbers" },
7436 { "rx_undersize_packets" },
7437 { "rx_oversize_packets" },
7438 { "rx_64_byte_packets" },
7439 { "rx_65_to_127_byte_packets" },
7440 { "rx_128_to_255_byte_packets" },
7441 { "rx_256_to_511_byte_packets" },
7442 { "rx_512_to_1023_byte_packets" },
7443 { "rx_1024_to_1522_byte_packets" },
7444 { "rx_1523_to_9022_byte_packets" },
7445 { "tx_64_byte_packets" },
7446 { "tx_65_to_127_byte_packets" },
7447 { "tx_128_to_255_byte_packets" },
7448 { "tx_256_to_511_byte_packets" },
7449 { "tx_512_to_1023_byte_packets" },
7450 { "tx_1024_to_1522_byte_packets" },
7451 { "tx_1523_to_9022_byte_packets" },
7452 { "rx_xon_frames" },
7453 { "rx_xoff_frames" },
7454 { "tx_xon_frames" },
7455 { "tx_xoff_frames" },
7456 { "rx_mac_ctrl_frames" },
7457 { "rx_filtered_packets" },
790dab2f 7458 { "rx_ftq_discards" },
b6016b76 7459 { "rx_discards" },
cea94db9 7460 { "rx_fw_discards" },
b6016b76
MC
7461};
7462
0db83cd8 7463#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
790dab2f 7464
b6016b76
MC
7465#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7466
f71e1309 7467static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
b6016b76
MC
7468 STATS_OFFSET32(stat_IfHCInOctets_hi),
7469 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7470 STATS_OFFSET32(stat_IfHCOutOctets_hi),
7471 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7472 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7473 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7474 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7475 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7476 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7477 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7478 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6aa20a22
JG
7479 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7480 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7481 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7482 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7483 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7484 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7485 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7486 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7487 STATS_OFFSET32(stat_EtherStatsCollisions),
7488 STATS_OFFSET32(stat_EtherStatsFragments),
7489 STATS_OFFSET32(stat_EtherStatsJabbers),
7490 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7491 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7492 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7493 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7494 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7495 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7496 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7497 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7498 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7499 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7500 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7501 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7502 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7503 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7504 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7505 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7506 STATS_OFFSET32(stat_XonPauseFramesReceived),
7507 STATS_OFFSET32(stat_XoffPauseFramesReceived),
7508 STATS_OFFSET32(stat_OutXonSent),
7509 STATS_OFFSET32(stat_OutXoffSent),
7510 STATS_OFFSET32(stat_MacControlFramesReceived),
7511 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
790dab2f 7512 STATS_OFFSET32(stat_IfInFTQDiscards),
6aa20a22 7513 STATS_OFFSET32(stat_IfInMBUFDiscards),
cea94db9 7514 STATS_OFFSET32(stat_FwRxDrop),
b6016b76
MC
7515};
7516
7517/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7518 * skipped because of errata.
6aa20a22 7519 */
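/* Entry values give the width of each hardware counter in bytes:
 * 8 = 64-bit counter, 4 = 32-bit counter, 0 = skipped (reported as 0).
 * See bnx2_get_ethtool_stats().
 */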
14ab9b86 7520static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
b6016b76
MC
7521 8,0,8,8,8,8,8,8,8,8,
7522 4,0,4,4,4,4,4,4,4,4,
7523 4,4,4,4,4,4,4,4,4,4,
7524 4,4,4,4,4,4,4,4,4,4,
790dab2f 7525 4,4,4,4,4,4,4,
b6016b76
MC
7526};
7527
5b0c76ad
MC
7528static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7529 8,0,8,8,8,8,8,8,8,8,
7530 4,4,4,4,4,4,4,4,4,4,
7531 4,4,4,4,4,4,4,4,4,4,
7532 4,4,4,4,4,4,4,4,4,4,
790dab2f 7533 4,4,4,4,4,4,4,
5b0c76ad
MC
7534};
7535
b6016b76
MC
7536#define BNX2_NUM_TESTS 6
7537
14ab9b86 7538static struct {
b6016b76
MC
7539 char string[ETH_GSTRING_LEN];
7540} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7541 { "register_test (offline)" },
7542 { "memory_test (offline)" },
7543 { "loopback_test (offline)" },
7544 { "nvram_test (online)" },
7545 { "interrupt_test (online)" },
7546 { "link_test (online)" },
7547};
7548
7549static int
b9f2c044 7550bnx2_get_sset_count(struct net_device *dev, int sset)
b6016b76 7551{
b9f2c044
JG
7552 switch (sset) {
7553 case ETH_SS_TEST:
7554 return BNX2_NUM_TESTS;
7555 case ETH_SS_STATS:
7556 return BNX2_NUM_STATS;
7557 default:
7558 return -EOPNOTSUPP;
7559 }
b6016b76
MC
7560}
7561
7562static void
7563bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7564{
972ec0d4 7565 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7566
7567 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7568 if (etest->flags & ETH_TEST_FL_OFFLINE) {
80be4434
MC
7569 int i;
7570
212f9934 7571 bnx2_netif_stop(bp, true);
b6016b76
MC
7572 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7573 bnx2_free_skbs(bp);
7574
7575 if (bnx2_test_registers(bp) != 0) {
7576 buf[0] = 1;
7577 etest->flags |= ETH_TEST_FL_FAILED;
7578 }
7579 if (bnx2_test_memory(bp) != 0) {
7580 buf[1] = 1;
7581 etest->flags |= ETH_TEST_FL_FAILED;
7582 }
bc5a0690 7583 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
b6016b76 7584 etest->flags |= ETH_TEST_FL_FAILED;
b6016b76 7585
9f52b564
MC
7586 if (!netif_running(bp->dev))
7587 bnx2_shutdown_chip(bp);
b6016b76 7588 else {
9a120bc5 7589 bnx2_init_nic(bp, 1);
212f9934 7590 bnx2_netif_start(bp, true);
b6016b76
MC
7591 }
7592
7593 /* wait for link up */
80be4434
MC
7594 for (i = 0; i < 7; i++) {
7595 if (bp->link_up)
7596 break;
7597 msleep_interruptible(1000);
7598 }
b6016b76
MC
7599 }
7600
7601 if (bnx2_test_nvram(bp) != 0) {
7602 buf[3] = 1;
7603 etest->flags |= ETH_TEST_FL_FAILED;
7604 }
7605 if (bnx2_test_intr(bp) != 0) {
7606 buf[4] = 1;
7607 etest->flags |= ETH_TEST_FL_FAILED;
7608 }
7609
7610 if (bnx2_test_link(bp) != 0) {
7611 buf[5] = 1;
7612 etest->flags |= ETH_TEST_FL_FAILED;
7613
7614 }
7615}
7616
7617static void
7618bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7619{
7620 switch (stringset) {
7621 case ETH_SS_STATS:
7622 memcpy(buf, bnx2_stats_str_arr,
7623 sizeof(bnx2_stats_str_arr));
7624 break;
7625 case ETH_SS_TEST:
7626 memcpy(buf, bnx2_tests_str_arr,
7627 sizeof(bnx2_tests_str_arr));
7628 break;
7629 }
7630}
7631
b6016b76
MC
7632static void
7633bnx2_get_ethtool_stats(struct net_device *dev,
7634 struct ethtool_stats *stats, u64 *buf)
7635{
972ec0d4 7636 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7637 int i;
7638 u32 *hw_stats = (u32 *) bp->stats_blk;
354fcd77 7639 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
14ab9b86 7640 u8 *stats_len_arr = NULL;
b6016b76
MC
7641
7642 if (hw_stats == NULL) {
7643 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7644 return;
7645 }
7646
4ce45e02
MC
7647 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7648 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7649 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7650 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
b6016b76 7651 stats_len_arr = bnx2_5706_stats_len_arr;
5b0c76ad
MC
7652 else
7653 stats_len_arr = bnx2_5708_stats_len_arr;
b6016b76
MC
7654
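	/* Each value reported is the live hardware counter plus the copy
	 * saved in temp_stats_blk before the last chip reset.
	 */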
7655 for (i = 0; i < BNX2_NUM_STATS; i++) {
354fcd77
MC
7656 unsigned long offset;
7657
b6016b76
MC
7658 if (stats_len_arr[i] == 0) {
7659 /* skip this counter */
7660 buf[i] = 0;
7661 continue;
7662 }
354fcd77
MC
7663
7664 offset = bnx2_stats_offset_arr[i];
b6016b76
MC
7665 if (stats_len_arr[i] == 4) {
7666 /* 4-byte counter */
354fcd77
MC
7667 buf[i] = (u64) *(hw_stats + offset) +
7668 *(temp_stats + offset);
b6016b76
MC
7669 continue;
7670 }
7671 /* 8-byte counter */
354fcd77
MC
7672 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7673 *(hw_stats + offset + 1) +
7674 (((u64) *(temp_stats + offset)) << 32) +
7675 *(temp_stats + offset + 1);
b6016b76
MC
7676 }
7677}
7678
7679static int
2e17e1aa 7680bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
b6016b76 7681{
972ec0d4 7682 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7683
2e17e1aa 7684 switch (state) {
7685 case ETHTOOL_ID_ACTIVE:
e503e066
MC
7686 bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7687 BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
fce55922 7688 return 1; /* cycle on/off once per second */
b6016b76 7689
2e17e1aa 7690 case ETHTOOL_ID_ON:
e503e066
MC
7691 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7692 BNX2_EMAC_LED_1000MB_OVERRIDE |
7693 BNX2_EMAC_LED_100MB_OVERRIDE |
7694 BNX2_EMAC_LED_10MB_OVERRIDE |
7695 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7696 BNX2_EMAC_LED_TRAFFIC);
2e17e1aa 7697 break;
b6016b76 7698
2e17e1aa 7699 case ETHTOOL_ID_OFF:
e503e066 7700 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
2e17e1aa 7701 break;
9f52b564 7702
2e17e1aa 7703 case ETHTOOL_ID_INACTIVE:
e503e066
MC
7704 BNX2_WR(bp, BNX2_EMAC_LED, 0);
7705 BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
2e17e1aa 7706 break;
7707 }
9f52b564 7708
b6016b76
MC
7709 return 0;
7710}
7711
c8f44aff
MM
7712static netdev_features_t
7713bnx2_fix_features(struct net_device *dev, netdev_features_t features)
4666f87a
MC
7714{
7715 struct bnx2 *bp = netdev_priv(dev);
7716
8d7dfc2b 7717 if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
f646968f 7718 features |= NETIF_F_HW_VLAN_CTAG_RX;
8d7dfc2b
MM
7719
7720 return features;
4666f87a
MC
7721}
7722
fdc8541d 7723static int
c8f44aff 7724bnx2_set_features(struct net_device *dev, netdev_features_t features)
fdc8541d 7725{
7d0fd211 7726 struct bnx2 *bp = netdev_priv(dev);
7d0fd211 7727
7c810477 7728 /* TSO with VLAN tag won't work with current firmware */
f646968f 7729 if (features & NETIF_F_HW_VLAN_CTAG_TX)
8d7dfc2b
MM
7730 dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7731 else
7732 dev->vlan_features &= ~NETIF_F_ALL_TSO;
7d0fd211 7733
f646968f 7734 if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
7d0fd211
JG
7735 !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7736 netif_running(dev)) {
7737 bnx2_netif_stop(bp, false);
8d7dfc2b 7738 dev->features = features;
7d0fd211
JG
7739 bnx2_set_rx_mode(dev);
7740 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7741 bnx2_netif_start(bp, false);
8d7dfc2b 7742 return 1;
7d0fd211
JG
7743 }
7744
7745 return 0;
fdc8541d
MC
7746}
7747
b033281f
MC
7748static void bnx2_get_channels(struct net_device *dev,
7749 struct ethtool_channels *channels)
7750{
7751 struct bnx2 *bp = netdev_priv(dev);
7752 u32 max_rx_rings = 1;
7753 u32 max_tx_rings = 1;
7754
7755 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7756 max_rx_rings = RX_MAX_RINGS;
7757 max_tx_rings = TX_MAX_RINGS;
7758 }
7759
7760 channels->max_rx = max_rx_rings;
7761 channels->max_tx = max_tx_rings;
7762 channels->max_other = 0;
7763 channels->max_combined = 0;
7764 channels->rx_count = bp->num_rx_rings;
7765 channels->tx_count = bp->num_tx_rings;
7766 channels->other_count = 0;
7767 channels->combined_count = 0;
7768}
7769
7770static int bnx2_set_channels(struct net_device *dev,
7771 struct ethtool_channels *channels)
7772{
7773 struct bnx2 *bp = netdev_priv(dev);
7774 u32 max_rx_rings = 1;
7775 u32 max_tx_rings = 1;
7776 int rc = 0;
7777
7778 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7779 max_rx_rings = RX_MAX_RINGS;
7780 max_tx_rings = TX_MAX_RINGS;
7781 }
7782 if (channels->rx_count > max_rx_rings ||
7783 channels->tx_count > max_tx_rings)
7784 return -EINVAL;
7785
7786 bp->num_req_rx_rings = channels->rx_count;
7787 bp->num_req_tx_rings = channels->tx_count;
7788
7789 if (netif_running(dev))
7790 rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7791 bp->tx_ring_size, true);
7792
7793 return rc;
7794}
7795
7282d491 7796static const struct ethtool_ops bnx2_ethtool_ops = {
b6016b76
MC
7797 .get_settings = bnx2_get_settings,
7798 .set_settings = bnx2_set_settings,
7799 .get_drvinfo = bnx2_get_drvinfo,
244ac4f4
MC
7800 .get_regs_len = bnx2_get_regs_len,
7801 .get_regs = bnx2_get_regs,
b6016b76
MC
7802 .get_wol = bnx2_get_wol,
7803 .set_wol = bnx2_set_wol,
7804 .nway_reset = bnx2_nway_reset,
7959ea25 7805 .get_link = bnx2_get_link,
b6016b76
MC
7806 .get_eeprom_len = bnx2_get_eeprom_len,
7807 .get_eeprom = bnx2_get_eeprom,
7808 .set_eeprom = bnx2_set_eeprom,
7809 .get_coalesce = bnx2_get_coalesce,
7810 .set_coalesce = bnx2_set_coalesce,
7811 .get_ringparam = bnx2_get_ringparam,
7812 .set_ringparam = bnx2_set_ringparam,
7813 .get_pauseparam = bnx2_get_pauseparam,
7814 .set_pauseparam = bnx2_set_pauseparam,
b6016b76
MC
7815 .self_test = bnx2_self_test,
7816 .get_strings = bnx2_get_strings,
2e17e1aa 7817 .set_phys_id = bnx2_set_phys_id,
b6016b76 7818 .get_ethtool_stats = bnx2_get_ethtool_stats,
b9f2c044 7819 .get_sset_count = bnx2_get_sset_count,
b033281f
MC
7820 .get_channels = bnx2_get_channels,
7821 .set_channels = bnx2_set_channels,
b6016b76
MC
7822};
7823
7824/* Called with rtnl_lock */
7825static int
7826bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7827{
14ab9b86 7828 struct mii_ioctl_data *data = if_mii(ifr);
972ec0d4 7829 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7830 int err;
7831
7832 switch(cmd) {
7833 case SIOCGMIIPHY:
7834 data->phy_id = bp->phy_addr;
7835
7836 /* fallthru */
7837 case SIOCGMIIREG: {
7838 u32 mii_regval;
7839
583c28e5 7840 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7b6b8347
MC
7841 return -EOPNOTSUPP;
7842
dad3e452
MC
7843 if (!netif_running(dev))
7844 return -EAGAIN;
7845
c770a65c 7846 spin_lock_bh(&bp->phy_lock);
b6016b76 7847 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
c770a65c 7848 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
7849
7850 data->val_out = mii_regval;
7851
7852 return err;
7853 }
7854
7855 case SIOCSMIIREG:
583c28e5 7856 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7b6b8347
MC
7857 return -EOPNOTSUPP;
7858
dad3e452
MC
7859 if (!netif_running(dev))
7860 return -EAGAIN;
7861
c770a65c 7862 spin_lock_bh(&bp->phy_lock);
b6016b76 7863 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
c770a65c 7864 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
7865
7866 return err;
7867
7868 default:
7869 /* do nothing */
7870 break;
7871 }
7872 return -EOPNOTSUPP;
7873}
7874
7875/* Called with rtnl_lock */
7876static int
7877bnx2_change_mac_addr(struct net_device *dev, void *p)
7878{
7879 struct sockaddr *addr = p;
972ec0d4 7880 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7881
73eef4cd 7882 if (!is_valid_ether_addr(addr->sa_data))
504f9b5a 7883 return -EADDRNOTAVAIL;
73eef4cd 7884
b6016b76
MC
7885 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7886 if (netif_running(dev))
5fcaed01 7887 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
b6016b76
MC
7888
7889 return 0;
7890}
7891
7892/* Called with rtnl_lock */
7893static int
7894bnx2_change_mtu(struct net_device *dev, int new_mtu)
7895{
972ec0d4 7896 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7897
7898 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7899 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7900 return -EINVAL;
7901
7902 dev->mtu = new_mtu;
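	/* Resize (and, if the device is up, re-initialize) the rings so
	 * the new MTU takes effect.
	 */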
b033281f
MC
7903 return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7904 false);
b6016b76
MC
7905}
7906
257ddbda 7907#ifdef CONFIG_NET_POLL_CONTROLLER
b6016b76
MC
7908static void
7909poll_bnx2(struct net_device *dev)
7910{
972ec0d4 7911 struct bnx2 *bp = netdev_priv(dev);
b2af2c1d 7912 int i;
b6016b76 7913
b2af2c1d 7914 for (i = 0; i < bp->irq_nvecs; i++) {
1bf1e347
MC
7915 struct bnx2_irq *irq = &bp->irq_tbl[i];
7916
7917 disable_irq(irq->vector);
7918 irq->handler(irq->vector, &bp->bnx2_napi[i]);
7919 enable_irq(irq->vector);
b2af2c1d 7920 }
b6016b76
MC
7921}
7922#endif
7923
cfd95a63 7924static void
253c8b75
MC
7925bnx2_get_5709_media(struct bnx2 *bp)
7926{
e503e066 7927 u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
253c8b75
MC
7928 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7929 u32 strap;
7930
7931 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7932 return;
7933 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
583c28e5 7934 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
253c8b75
MC
7935 return;
7936 }
7937
7938 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7939 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7940 else
7941 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7942
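	/* The strap values that indicate a SerDes bond differ between the
	 * two PCI functions of the device.
	 */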
aefd90e4 7943 if (bp->func == 0) {
253c8b75
MC
7944 switch (strap) {
7945 case 0x4:
7946 case 0x5:
7947 case 0x6:
583c28e5 7948 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
253c8b75
MC
7949 return;
7950 }
7951 } else {
7952 switch (strap) {
7953 case 0x1:
7954 case 0x2:
7955 case 0x4:
583c28e5 7956 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
253c8b75
MC
7957 return;
7958 }
7959 }
7960}
7961
cfd95a63 7962static void
883e5151
MC
7963bnx2_get_pci_speed(struct bnx2 *bp)
7964{
7965 u32 reg;
7966
e503e066 7967 reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
883e5151
MC
7968 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7969 u32 clkreg;
7970
f86e82fb 7971 bp->flags |= BNX2_FLAG_PCIX;
883e5151 7972
e503e066 7973 clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
883e5151
MC
7974
7975 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7976 switch (clkreg) {
7977 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7978 bp->bus_speed_mhz = 133;
7979 break;
7980
7981 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7982 bp->bus_speed_mhz = 100;
7983 break;
7984
7985 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7986 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7987 bp->bus_speed_mhz = 66;
7988 break;
7989
7990 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7991 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7992 bp->bus_speed_mhz = 50;
7993 break;
7994
7995 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7996 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7997 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7998 bp->bus_speed_mhz = 33;
7999 break;
8000 }
8001 }
8002 else {
8003 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
8004 bp->bus_speed_mhz = 66;
8005 else
8006 bp->bus_speed_mhz = 33;
8007 }
8008
8009 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
f86e82fb 8010 bp->flags |= BNX2_FLAG_PCI_32BIT;
883e5151
MC
8011
8012}
8013
cfd95a63 8014static void
76d99061
MC
8015bnx2_read_vpd_fw_ver(struct bnx2 *bp)
8016{
df25bc38 8017 int rc, i, j;
76d99061 8018 u8 *data;
df25bc38 8019 unsigned int block_end, rosize, len;
76d99061 8020
012093f6
MC
8021#define BNX2_VPD_NVRAM_OFFSET 0x300
8022#define BNX2_VPD_LEN 128
76d99061
MC
8023#define BNX2_MAX_VER_SLEN 30
8024
8025 data = kmalloc(256, GFP_KERNEL);
8026 if (!data)
8027 return;
8028
012093f6
MC
8029 rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
8030 BNX2_VPD_LEN);
76d99061
MC
8031 if (rc)
8032 goto vpd_done;
8033
012093f6
MC
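	/* The VPD image was read into the upper half of the buffer;
	 * byte-swap each 32-bit word into the lower half before parsing.
	 */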
8034 for (i = 0; i < BNX2_VPD_LEN; i += 4) {
8035 data[i] = data[i + BNX2_VPD_LEN + 3];
8036 data[i + 1] = data[i + BNX2_VPD_LEN + 2];
8037 data[i + 2] = data[i + BNX2_VPD_LEN + 1];
8038 data[i + 3] = data[i + BNX2_VPD_LEN];
76d99061
MC
8039 }
8040
df25bc38
MC
8041 i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
8042 if (i < 0)
8043 goto vpd_done;
76d99061 8044
df25bc38
MC
8045 rosize = pci_vpd_lrdt_size(&data[i]);
8046 i += PCI_VPD_LRDT_TAG_SIZE;
8047 block_end = i + rosize;
76d99061 8048
df25bc38
MC
8049 if (block_end > BNX2_VPD_LEN)
8050 goto vpd_done;
76d99061 8051
df25bc38
MC
8052 j = pci_vpd_find_info_keyword(data, i, rosize,
8053 PCI_VPD_RO_KEYWORD_MFR_ID);
8054 if (j < 0)
8055 goto vpd_done;
76d99061 8056
df25bc38 8057 len = pci_vpd_info_field_size(&data[j]);
76d99061 8058
df25bc38
MC
8059 j += PCI_VPD_INFO_FLD_HDR_SIZE;
8060 if (j + len > block_end || len != 4 ||
8061 memcmp(&data[j], "1028", 4))
8062 goto vpd_done;
4067a854 8063
df25bc38
MC
8064 j = pci_vpd_find_info_keyword(data, i, rosize,
8065 PCI_VPD_RO_KEYWORD_VENDOR0);
8066 if (j < 0)
8067 goto vpd_done;
4067a854 8068
df25bc38 8069 len = pci_vpd_info_field_size(&data[j]);
4067a854 8070
df25bc38
MC
8071 j += PCI_VPD_INFO_FLD_HDR_SIZE;
8072 if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
76d99061 8073 goto vpd_done;
df25bc38
MC
8074
8075 memcpy(bp->fw_version, &data[j], len);
8076 bp->fw_version[len] = ' ';
76d99061
MC
8077
8078vpd_done:
8079 kfree(data);
8080}
8081
cfd95a63 8082static int
b6016b76
MC
8083bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8084{
8085 struct bnx2 *bp;
58fc2ea4 8086 int rc, i, j;
b6016b76 8087 u32 reg;
40453c83 8088 u64 dma_mask, persist_dma_mask;
cd709aa9 8089 int err;
b6016b76 8090
b6016b76 8091 SET_NETDEV_DEV(dev, &pdev->dev);
972ec0d4 8092 bp = netdev_priv(dev);
b6016b76
MC
8093
8094 bp->flags = 0;
8095 bp->phy_flags = 0;
8096
354fcd77
MC
8097 bp->temp_stats_blk =
8098 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8099
8100 if (bp->temp_stats_blk == NULL) {
8101 rc = -ENOMEM;
8102 goto err_out;
8103 }
8104
b6016b76
MC
8105 /* enable device (incl. PCI PM wakeup), and bus-mastering */
8106 rc = pci_enable_device(pdev);
8107 if (rc) {
3a9c6a49 8108 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
b6016b76
MC
8109 goto err_out;
8110 }
8111
8112 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9b91cf9d 8113 dev_err(&pdev->dev,
3a9c6a49 8114 "Cannot find PCI device base address, aborting\n");
b6016b76
MC
8115 rc = -ENODEV;
8116 goto err_out_disable;
8117 }
8118
8119 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8120 if (rc) {
3a9c6a49 8121 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
b6016b76
MC
8122 goto err_out_disable;
8123 }
8124
8125 pci_set_master(pdev);
8126
85768271 8127 bp->pm_cap = pdev->pm_cap;
b6016b76 8128 if (bp->pm_cap == 0) {
9b91cf9d 8129 dev_err(&pdev->dev,
3a9c6a49 8130 "Cannot find power management capability, aborting\n");
b6016b76
MC
8131 rc = -EIO;
8132 goto err_out_release;
8133 }
8134
b6016b76
MC
8135 bp->dev = dev;
8136 bp->pdev = pdev;
8137
8138 spin_lock_init(&bp->phy_lock);
1b8227c4 8139 spin_lock_init(&bp->indirect_lock);
c5a88950
MC
8140#ifdef BCM_CNIC
8141 mutex_init(&bp->cnic_lock);
8142#endif
c4028958 8143 INIT_WORK(&bp->reset_task, bnx2_reset_task);
b6016b76 8144
c0357e97
FR
8145 bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8146 TX_MAX_TSS_RINGS + 1));
b6016b76 8147 if (!bp->regview) {
3a9c6a49 8148 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
b6016b76
MC
8149 rc = -ENOMEM;
8150 goto err_out_release;
8151 }
8152
8153 /* Configure byte swap and enable write to the reg_window registers.
 8154 * Rely on the CPU to do target byte swapping on big endian systems;
 8155 * the chip's target access swapping will not swap all accesses.
8156 */
e503e066
MC
8157 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8158 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8159 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
b6016b76 8160
e503e066 8161 bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
b6016b76 8162
4ce45e02 8163 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
e82760e7
JM
8164 if (!pci_is_pcie(pdev)) {
8165 dev_err(&pdev->dev, "Not PCIE, aborting\n");
883e5151
MC
8166 rc = -EIO;
8167 goto err_out_unmap;
8168 }
f86e82fb 8169 bp->flags |= BNX2_FLAG_PCIE;
4ce45e02 8170 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
f86e82fb 8171 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
c239f279
MC
8172
8173 /* AER (Advanced Error Reporting) hooks */
8174 err = pci_enable_pcie_error_reporting(pdev);
4bb9ebc7
MC
8175 if (!err)
8176 bp->flags |= BNX2_FLAG_AER_ENABLED;
c239f279 8177
883e5151 8178 } else {
59b47d8a
MC
8179 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8180 if (bp->pcix_cap == 0) {
8181 dev_err(&pdev->dev,
3a9c6a49 8182 "Cannot find PCIX capability, aborting\n");
59b47d8a
MC
8183 rc = -EIO;
8184 goto err_out_unmap;
8185 }
61d9e3fa 8186 bp->flags |= BNX2_FLAG_BROKEN_STATS;
59b47d8a
MC
8187 }
8188
4ce45e02
MC
8189 if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8190 BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
555a8428 8191 if (pdev->msix_cap)
f86e82fb 8192 bp->flags |= BNX2_FLAG_MSIX_CAP;
b4b36042
MC
8193 }
8194
4ce45e02
MC
8195 if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8196 BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
555a8428 8197 if (pdev->msi_cap)
f86e82fb 8198 bp->flags |= BNX2_FLAG_MSI_CAP;
8e6a72c4
MC
8199 }
8200
40453c83 8201 /* 5708 cannot support DMA addresses > 40-bit. */
4ce45e02 8202 if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
50cf156a 8203 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
40453c83 8204 else
6a35528a 8205 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
40453c83
MC
8206
8207 /* Configure DMA attributes. */
8208 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8209 dev->features |= NETIF_F_HIGHDMA;
8210 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8211 if (rc) {
8212 dev_err(&pdev->dev,
3a9c6a49 8213 "pci_set_consistent_dma_mask failed, aborting\n");
40453c83
MC
8214 goto err_out_unmap;
8215 }
284901a9 8216 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3a9c6a49 8217 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
40453c83
MC
8218 goto err_out_unmap;
8219 }
8220
f86e82fb 8221 if (!(bp->flags & BNX2_FLAG_PCIE))
883e5151 8222 bnx2_get_pci_speed(bp);
b6016b76
MC
8223
8224 /* 5706A0 may falsely detect SERR and PERR. */
4ce45e02 8225 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
e503e066 8226 reg = BNX2_RD(bp, PCI_COMMAND);
b6016b76 8227 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
e503e066 8228 BNX2_WR(bp, PCI_COMMAND, reg);
4ce45e02 8229 } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
f86e82fb 8230 !(bp->flags & BNX2_FLAG_PCIX)) {
b6016b76 8231
9b91cf9d 8232 dev_err(&pdev->dev,
3a9c6a49 8233 "5706 A1 can only be used in a PCIX bus, aborting\n");
b6016b76
MC
8234 goto err_out_unmap;
8235 }
8236
8237 bnx2_init_nvram(bp);
8238
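	/* Locate the firmware shared memory: use the per-function base
	 * published in the header if its signature is valid, otherwise
	 * fall back to the fixed host-view address.
	 */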
2726d6e1 8239 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
e3648b3d 8240
aefd90e4
MC
8241 if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8242 bp->func = 1;
8243
e3648b3d 8244 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
24cb230b 8245 BNX2_SHM_HDR_SIGNATURE_SIG) {
aefd90e4 8246 u32 off = bp->func << 2;
24cb230b 8247
2726d6e1 8248 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
24cb230b 8249 } else
e3648b3d
MC
8250 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8251
b6016b76
MC
8252 /* Get the permanent MAC address. First we need to make sure the
8253 * firmware is actually running.
8254 */
2726d6e1 8255 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
b6016b76
MC
8256
8257 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8258 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
3a9c6a49 8259 dev_err(&pdev->dev, "Firmware not running, aborting\n");
b6016b76
MC
8260 rc = -ENODEV;
8261 goto err_out_unmap;
8262 }
8263
76d99061
MC
8264 bnx2_read_vpd_fw_ver(bp);
8265
8266 j = strlen(bp->fw_version);
2726d6e1 8267 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
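	/* Append the bootcode revision as "bc x.y.z", converting one byte
	 * at a time to decimal and suppressing leading zeros.
	 */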
76d99061 8268 for (i = 0; i < 3 && j < 24; i++) {
58fc2ea4
MC
8269 u8 num, k, skip0;
8270
76d99061
MC
8271 if (i == 0) {
8272 bp->fw_version[j++] = 'b';
8273 bp->fw_version[j++] = 'c';
8274 bp->fw_version[j++] = ' ';
8275 }
58fc2ea4
MC
8276 num = (u8) (reg >> (24 - (i * 8)));
8277 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8278 if (num >= k || !skip0 || k == 1) {
8279 bp->fw_version[j++] = (num / k) + '0';
8280 skip0 = 0;
8281 }
8282 }
8283 if (i != 2)
8284 bp->fw_version[j++] = '.';
8285 }
2726d6e1 8286 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
846f5c62
MC
8287 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8288 bp->wol = 1;
8289
8290 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
f86e82fb 8291 bp->flags |= BNX2_FLAG_ASF_ENABLE;
c2d3db8c
MC
8292
8293 for (i = 0; i < 30; i++) {
2726d6e1 8294 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
c2d3db8c
MC
8295 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8296 break;
8297 msleep(10);
8298 }
8299 }
2726d6e1 8300 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
58fc2ea4
MC
8301 reg &= BNX2_CONDITION_MFW_RUN_MASK;
8302 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8303 reg != BNX2_CONDITION_MFW_RUN_NONE) {
2726d6e1 8304 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
58fc2ea4 8305
76d99061
MC
8306 if (j < 32)
8307 bp->fw_version[j++] = ' ';
8308 for (i = 0; i < 3 && j < 28; i++) {
2726d6e1 8309 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
3aeb7d22 8310 reg = be32_to_cpu(reg);
58fc2ea4
MC
8311 memcpy(&bp->fw_version[j], &reg, 4);
8312 j += 4;
8313 }
8314 }
b6016b76 8315
2726d6e1 8316 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
b6016b76
MC
8317 bp->mac_addr[0] = (u8) (reg >> 8);
8318 bp->mac_addr[1] = (u8) reg;
8319
2726d6e1 8320 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
b6016b76
MC
8321 bp->mac_addr[2] = (u8) (reg >> 24);
8322 bp->mac_addr[3] = (u8) (reg >> 16);
8323 bp->mac_addr[4] = (u8) (reg >> 8);
8324 bp->mac_addr[5] = (u8) reg;
8325
2bc4078e 8326 bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
932f3772 8327 bnx2_set_rx_ring_size(bp, 255);
b6016b76 8328
cf7474a6 8329 bp->tx_quick_cons_trip_int = 2;
b6016b76 8330 bp->tx_quick_cons_trip = 20;
cf7474a6 8331 bp->tx_ticks_int = 18;
b6016b76 8332 bp->tx_ticks = 80;
6aa20a22 8333
cf7474a6
MC
8334 bp->rx_quick_cons_trip_int = 2;
8335 bp->rx_quick_cons_trip = 12;
b6016b76
MC
8336 bp->rx_ticks_int = 18;
8337 bp->rx_ticks = 18;
8338
7ea6920e 8339 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
b6016b76 8340
ac392abc 8341 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 8342
5b0c76ad
MC
8343 bp->phy_addr = 1;
8344
b6016b76 8345 /* Disable WOL support if we are running on a SERDES chip. */
4ce45e02 8346 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
253c8b75 8347 bnx2_get_5709_media(bp);
4ce45e02 8348 else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
583c28e5 8349 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
bac0dff6 8350
0d8a6571 8351 bp->phy_port = PORT_TP;
583c28e5 8352 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
0d8a6571 8353 bp->phy_port = PORT_FIBRE;
2726d6e1 8354 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
846f5c62 8355 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
f86e82fb 8356 bp->flags |= BNX2_FLAG_NO_WOL;
846f5c62
MC
8357 bp->wol = 0;
8358 }
4ce45e02 8359 if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
38ea3686
MC
8360 /* Don't do parallel detect on this board because of
8361 * some board problems. The link will not go down
8362 * if we do parallel detect.
8363 */
8364 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8365 pdev->subsystem_device == 0x310c)
8366 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8367 } else {
5b0c76ad 8368 bp->phy_addr = 2;
5b0c76ad 8369 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
583c28e5 8370 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
5b0c76ad 8371 }
4ce45e02
MC
8372 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8373 BNX2_CHIP(bp) == BNX2_CHIP_5708)
583c28e5 8374 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
4ce45e02
MC
8375 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8376 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8377 BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
583c28e5 8378 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
b6016b76 8379
7c62e83b
MC
8380 bnx2_init_fw_cap(bp);
8381
4ce45e02
MC
8382 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8383 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8384 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
e503e066 8385 !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
f86e82fb 8386 bp->flags |= BNX2_FLAG_NO_WOL;
846f5c62
MC
8387 bp->wol = 0;
8388 }
dda1e390 8389
6d5e85c7
MC
8390 if (bp->flags & BNX2_FLAG_NO_WOL)
8391 device_set_wakeup_capable(&bp->pdev->dev, false);
8392 else
8393 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8394
4ce45e02 8395 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
b6016b76
MC
8396 bp->tx_quick_cons_trip_int =
8397 bp->tx_quick_cons_trip;
8398 bp->tx_ticks_int = bp->tx_ticks;
8399 bp->rx_quick_cons_trip_int =
8400 bp->rx_quick_cons_trip;
8401 bp->rx_ticks_int = bp->rx_ticks;
8402 bp->comp_prod_trip_int = bp->comp_prod_trip;
8403 bp->com_ticks_int = bp->com_ticks;
8404 bp->cmd_ticks_int = bp->cmd_ticks;
8405 }
8406
f9317a40
MC
8407 /* Disable MSI on 5706 if AMD 8132 bridge is found.
8408 *
 8409 * MSI is defined to be a 32-bit write. The 5706 does 64-bit MSI writes
8410 * with byte enables disabled on the unused 32-bit word. This is legal
8411 * but causes problems on the AMD 8132 which will eventually stop
8412 * responding after a while.
8413 *
8414 * AMD believes this incompatibility is unique to the 5706, and
88187dfa 8415 * prefers to locally disable MSI rather than globally disabling it.
f9317a40 8416 */
4ce45e02 8417 if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
f9317a40
MC
8418 struct pci_dev *amd_8132 = NULL;
8419
8420 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8421 PCI_DEVICE_ID_AMD_8132_BRIDGE,
8422 amd_8132))) {
f9317a40 8423
44c10138
AK
8424 if (amd_8132->revision >= 0x10 &&
8425 amd_8132->revision <= 0x13) {
f9317a40
MC
8426 disable_msi = 1;
8427 pci_dev_put(amd_8132);
8428 break;
8429 }
8430 }
8431 }
8432
deaf391b 8433 bnx2_set_default_link(bp);
b6016b76
MC
8434 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8435
cd339a0e 8436 init_timer(&bp->timer);
ac392abc 8437 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
cd339a0e
MC
8438 bp->timer.data = (unsigned long) bp;
8439 bp->timer.function = bnx2_timer;
8440
7625eb2f 8441#ifdef BCM_CNIC
41c2178a
MC
8442 if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8443 bp->cnic_eth_dev.max_iscsi_conn =
8444 (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8445 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
4bd9b0ff 8446 bp->cnic_probe = bnx2_cnic_probe;
7625eb2f 8447#endif
c239f279
MC
8448 pci_save_state(pdev);
8449
b6016b76
MC
8450 return 0;
8451
8452err_out_unmap:
4bb9ebc7 8453 if (bp->flags & BNX2_FLAG_AER_ENABLED) {
c239f279 8454 pci_disable_pcie_error_reporting(pdev);
4bb9ebc7
MC
8455 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8456 }
c239f279 8457
c0357e97
FR
8458 pci_iounmap(pdev, bp->regview);
8459 bp->regview = NULL;
b6016b76
MC
8460
8461err_out_release:
8462 pci_release_regions(pdev);
8463
8464err_out_disable:
8465 pci_disable_device(pdev);
b6016b76
MC
8466
8467err_out:
8468 return rc;
8469}
8470
cfd95a63 8471static char *
883e5151
MC
8472bnx2_bus_string(struct bnx2 *bp, char *str)
8473{
8474 char *s = str;
8475
f86e82fb 8476 if (bp->flags & BNX2_FLAG_PCIE) {
883e5151
MC
8477 s += sprintf(s, "PCI Express");
8478 } else {
8479 s += sprintf(s, "PCI");
f86e82fb 8480 if (bp->flags & BNX2_FLAG_PCIX)
883e5151 8481 s += sprintf(s, "-X");
f86e82fb 8482 if (bp->flags & BNX2_FLAG_PCI_32BIT)
883e5151
MC
8483 s += sprintf(s, " 32-bit");
8484 else
8485 s += sprintf(s, " 64-bit");
8486 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8487 }
8488 return str;
8489}
8490
f048fa9c
MC
8491static void
8492bnx2_del_napi(struct bnx2 *bp)
8493{
8494 int i;
8495
8496 for (i = 0; i < bp->irq_nvecs; i++)
8497 netif_napi_del(&bp->bnx2_napi[i].napi);
8498}
8499
8500static void
35efa7c1
MC
8501bnx2_init_napi(struct bnx2 *bp)
8502{
b4b36042 8503 int i;
35efa7c1 8504
4327ba43 8505 for (i = 0; i < bp->irq_nvecs; i++) {
35e9010b
MC
8506 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8507 int (*poll)(struct napi_struct *, int);
8508
8509 if (i == 0)
8510 poll = bnx2_poll;
8511 else
f0ea2e63 8512 poll = bnx2_poll_msix;
35e9010b
MC
8513
8514 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
b4b36042
MC
8515 bnapi->bp = bp;
8516 }
35efa7c1
MC
8517}
8518
0421eae6
SH
8519static const struct net_device_ops bnx2_netdev_ops = {
8520 .ndo_open = bnx2_open,
8521 .ndo_start_xmit = bnx2_start_xmit,
8522 .ndo_stop = bnx2_close,
5d07bf26 8523 .ndo_get_stats64 = bnx2_get_stats64,
0421eae6
SH
8524 .ndo_set_rx_mode = bnx2_set_rx_mode,
8525 .ndo_do_ioctl = bnx2_ioctl,
8526 .ndo_validate_addr = eth_validate_addr,
8527 .ndo_set_mac_address = bnx2_change_mac_addr,
8528 .ndo_change_mtu = bnx2_change_mtu,
8d7dfc2b
MM
8529 .ndo_fix_features = bnx2_fix_features,
8530 .ndo_set_features = bnx2_set_features,
0421eae6 8531 .ndo_tx_timeout = bnx2_tx_timeout,
257ddbda 8532#ifdef CONFIG_NET_POLL_CONTROLLER
0421eae6
SH
8533 .ndo_poll_controller = poll_bnx2,
8534#endif
8535};
8536
cfd95a63 8537static int
b6016b76
MC
8538bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8539{
8540 static int version_printed = 0;
c0357e97 8541 struct net_device *dev;
b6016b76 8542 struct bnx2 *bp;
0795af57 8543 int rc;
883e5151 8544 char str[40];
b6016b76
MC
8545
8546 if (version_printed++ == 0)
3a9c6a49 8547 pr_info("%s", version);
b6016b76
MC
8548
8549 /* dev zeroed in init_etherdev */
706bf240 8550 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
b6016b76
MC
8551 if (!dev)
8552 return -ENOMEM;
8553
8554 rc = bnx2_init_board(pdev, dev);
c0357e97
FR
8555 if (rc < 0)
8556 goto err_free;
b6016b76 8557
0421eae6 8558 dev->netdev_ops = &bnx2_netdev_ops;
b6016b76 8559 dev->watchdog_timeo = TX_TIMEOUT;
b6016b76 8560 dev->ethtool_ops = &bnx2_ethtool_ops;
b6016b76 8561
972ec0d4 8562 bp = netdev_priv(dev);
b6016b76 8563
1b2f922f
MC
8564 pci_set_drvdata(pdev, dev);
8565
d458cdf7 8566 memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
1b2f922f 8567
8d7dfc2b
MM
8568 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8569 NETIF_F_TSO | NETIF_F_TSO_ECN |
8570 NETIF_F_RXHASH | NETIF_F_RXCSUM;
8571
4ce45e02 8572 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8d7dfc2b
MM
8573 dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8574
8575 dev->vlan_features = dev->hw_features;
f646968f 8576 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
8d7dfc2b 8577 dev->features |= dev->hw_features;
01789349 8578 dev->priv_flags |= IFF_UNICAST_FLT;
8d7dfc2b 8579
b6016b76 8580 if ((rc = register_netdev(dev))) {
9b91cf9d 8581 dev_err(&pdev->dev, "Cannot register net device\n");
57579f76 8582 goto error;
b6016b76
MC
8583 }
8584
c0357e97
FR
8585 netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8586 "node addr %pM\n", board_info[ent->driver_data].name,
4ce45e02
MC
8587 ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8588 ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
c0357e97
FR
8589 bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8590 pdev->irq, dev->dev_addr);
b6016b76 8591
b6016b76 8592 return 0;
57579f76
MC
8593
8594error:
fda4d85d 8595 pci_iounmap(pdev, bp->regview);
57579f76
MC
8596 pci_release_regions(pdev);
8597 pci_disable_device(pdev);
c0357e97 8598err_free:
57579f76
MC
8599 free_netdev(dev);
8600 return rc;
b6016b76
MC
8601}
8602
cfd95a63 8603static void
b6016b76
MC
8604bnx2_remove_one(struct pci_dev *pdev)
8605{
8606 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 8607 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
8608
8609 unregister_netdev(dev);
8610
8333a46a 8611 del_timer_sync(&bp->timer);
cd634019 8612 cancel_work_sync(&bp->reset_task);
8333a46a 8613
c0357e97 8614 pci_iounmap(bp->pdev, bp->regview);
b6016b76 8615
354fcd77
MC
8616 kfree(bp->temp_stats_blk);
8617
4bb9ebc7 8618 if (bp->flags & BNX2_FLAG_AER_ENABLED) {
c239f279 8619 pci_disable_pcie_error_reporting(pdev);
4bb9ebc7
MC
8620 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8621 }
cd709aa9 8622
7880b72e 8623 bnx2_release_firmware(bp);
8624
c239f279 8625 free_netdev(dev);
cd709aa9 8626
b6016b76
MC
8627 pci_release_regions(pdev);
8628 pci_disable_device(pdev);
b6016b76
MC
8629}
8630
77d149c4 8631#ifdef CONFIG_PM_SLEEP
b6016b76 8632static int
28fb4eb4 8633bnx2_suspend(struct device *device)
b6016b76 8634{
28fb4eb4 8635 struct pci_dev *pdev = to_pci_dev(device);
b6016b76 8636 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 8637 struct bnx2 *bp = netdev_priv(dev);
b6016b76 8638
28fb4eb4
MC
8639 if (netif_running(dev)) {
8640 cancel_work_sync(&bp->reset_task);
8641 bnx2_netif_stop(bp, true);
8642 netif_device_detach(dev);
8643 del_timer_sync(&bp->timer);
8644 bnx2_shutdown_chip(bp);
8645 __bnx2_free_irq(bp);
8646 bnx2_free_skbs(bp);
8647 }
8648 bnx2_setup_wol(bp);
b6016b76
MC
8649 return 0;
8650}
8651
8652static int
28fb4eb4 8653bnx2_resume(struct device *device)
b6016b76 8654{
28fb4eb4 8655 struct pci_dev *pdev = to_pci_dev(device);
b6016b76 8656 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 8657 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
8658
8659 if (!netif_running(dev))
8660 return 0;
8661
829ca9a3 8662 bnx2_set_power_state(bp, PCI_D0);
b6016b76 8663 netif_device_attach(dev);
28fb4eb4 8664 bnx2_request_irq(bp);
9a120bc5 8665 bnx2_init_nic(bp, 1);
212f9934 8666 bnx2_netif_start(bp, true);
b6016b76
MC
8667 return 0;
8668}
8669
28fb4eb4
MC
8670static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8671#define BNX2_PM_OPS (&bnx2_pm_ops)
8672
8673#else
8674
8675#define BNX2_PM_OPS NULL
8676
8677#endif /* CONFIG_PM_SLEEP */
6ff2da49
WX
8678/**
8679 * bnx2_io_error_detected - called when PCI error is detected
8680 * @pdev: Pointer to PCI device
8681 * @state: The current pci connection state
8682 *
8683 * This function is called after a PCI bus error affecting
8684 * this device has been detected.
8685 */
8686static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8687 pci_channel_state_t state)
8688{
8689 struct net_device *dev = pci_get_drvdata(pdev);
8690 struct bnx2 *bp = netdev_priv(dev);
8691
8692 rtnl_lock();
8693 netif_device_detach(dev);
8694
2ec3de26
DN
8695 if (state == pci_channel_io_perm_failure) {
8696 rtnl_unlock();
8697 return PCI_ERS_RESULT_DISCONNECT;
8698 }
8699
6ff2da49 8700 if (netif_running(dev)) {
212f9934 8701 bnx2_netif_stop(bp, true);
6ff2da49
WX
8702 del_timer_sync(&bp->timer);
8703 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8704 }
8705
8706 pci_disable_device(pdev);
8707 rtnl_unlock();
8708
 8709 /* Request a slot reset. */
8710 return PCI_ERS_RESULT_NEED_RESET;
8711}
8712
8713/**
8714 * bnx2_io_slot_reset - called after the pci bus has been reset.
8715 * @pdev: Pointer to PCI device
8716 *
8717 * Restart the card from scratch, as if from a cold-boot.
8718 */
8719static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8720{
8721 struct net_device *dev = pci_get_drvdata(pdev);
8722 struct bnx2 *bp = netdev_priv(dev);
02481bc6
MC
8723 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8724 int err = 0;
6ff2da49
WX
8725
8726 rtnl_lock();
8727 if (pci_enable_device(pdev)) {
8728 dev_err(&pdev->dev,
3a9c6a49 8729 "Cannot re-enable PCI device after reset\n");
cd709aa9
JF
8730 } else {
8731 pci_set_master(pdev);
8732 pci_restore_state(pdev);
8733 pci_save_state(pdev);
8734
25bfb1dd 8735 if (netif_running(dev))
02481bc6 8736 err = bnx2_init_nic(bp, 1);
25bfb1dd 8737
02481bc6
MC
8738 if (!err)
8739 result = PCI_ERS_RESULT_RECOVERED;
8740 }
8741
8742 if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8743 bnx2_napi_enable(bp);
8744 dev_close(dev);
6ff2da49 8745 }
cd709aa9 8746 rtnl_unlock();
6ff2da49 8747
4bb9ebc7 8748 if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
c239f279
MC
8749 return result;
8750
cd709aa9
JF
8751 err = pci_cleanup_aer_uncorrect_error_status(pdev);
8752 if (err) {
8753 dev_err(&pdev->dev,
8754 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8755 err); /* non-fatal, continue */
6ff2da49
WX
8756 }
8757
cd709aa9 8758 return result;
6ff2da49
WX
8759}
8760
8761/**
8762 * bnx2_io_resume - called when traffic can start flowing again.
8763 * @pdev: Pointer to PCI device
8764 *
8765 * This callback is called when the error recovery driver tells us that
 8766 * it's OK to resume normal operation.
8767 */
8768static void bnx2_io_resume(struct pci_dev *pdev)
8769{
8770 struct net_device *dev = pci_get_drvdata(pdev);
8771 struct bnx2 *bp = netdev_priv(dev);
8772
8773 rtnl_lock();
8774 if (netif_running(dev))
212f9934 8775 bnx2_netif_start(bp, true);
6ff2da49
WX
8776
8777 netif_device_attach(dev);
8778 rtnl_unlock();
8779}
8780
25bfb1dd
MC
8781static void bnx2_shutdown(struct pci_dev *pdev)
8782{
8783 struct net_device *dev = pci_get_drvdata(pdev);
8784 struct bnx2 *bp;
8785
8786 if (!dev)
8787 return;
8788
8789 bp = netdev_priv(dev);
8790 if (!bp)
8791 return;
8792
8793 rtnl_lock();
8794 if (netif_running(dev))
8795 dev_close(bp->dev);
8796
8797 if (system_state == SYSTEM_POWER_OFF)
8798 bnx2_set_power_state(bp, PCI_D3hot);
8799
8800 rtnl_unlock();
8801}
8802
fda4d85d 8803static const struct pci_error_handlers bnx2_err_handler = {
6ff2da49
WX
8804 .error_detected = bnx2_io_error_detected,
8805 .slot_reset = bnx2_io_slot_reset,
8806 .resume = bnx2_io_resume,
8807};
8808
b6016b76 8809static struct pci_driver bnx2_pci_driver = {
14ab9b86
PH
8810 .name = DRV_MODULE_NAME,
8811 .id_table = bnx2_pci_tbl,
8812 .probe = bnx2_init_one,
cfd95a63 8813 .remove = bnx2_remove_one,
28fb4eb4 8814 .driver.pm = BNX2_PM_OPS,
6ff2da49 8815 .err_handler = &bnx2_err_handler,
25bfb1dd 8816 .shutdown = bnx2_shutdown,
b6016b76
MC
8817};
8818
5a4123f3 8819module_pci_driver(bnx2_pci_driver);