bnx2x: Prefetch the page containing the BD descriptor
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.48.114-1"
#define DRV_MODULE_RELDATE	"2009/07/29"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

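/* Note (added for clarity, not part of the original source): the two helpers
 * below reach device registers indirectly through the PCI config-space "GRC
 * window": the target GRC address is written to PCICFG_GRC_ADDRESS, the data
 * then flows through PCICFG_GRC_DATA, and the window is parked back at
 * PCICFG_VENDOR_ID_OFFSET so stray config reads stay harmless. Rough usage:
 *
 *	bnx2x_reg_wr_ind(bp, reg_offset, val);	 write while BARs are unusable
 *	val = bnx2x_reg_rd_ind(bp, reg_offset);	 read through the same window
 */
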
/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;
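	/* Completion handshake (clarifying note, not in the original source):
	 * wb_comp is a DMA-coherent "write-back" word in host memory. It is
	 * zeroed here, the command is posted, and the DMAE engine writes
	 * DMAE_COMP_VAL to it (via comp_addr_lo/hi) when the copy finishes;
	 * the polling loop below simply waits for that sentinel to appear.
	 */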

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
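	/* Worked example (illustrative note, not in the original source):
	 * the "& ~0x3" mask rounds mark up to the next 4-byte boundary,
	 * e.g. a raw mark of 0x08f10d becomes 0x08f110, so the scratchpad
	 * reads below always start on a dword-aligned offset.
	 */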
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
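	/* Layout note (added for clarity, not in the original source):
	 * sb_id_and_flags packs the status-block id, storm id, index-update
	 * flag and interrupt mode into one field, so together with
	 * status_block_index the whole igu_ack is delivered to the IGU with
	 * the single 32-bit register write below, i.e. atomically.
	 */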

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
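	/* BD accounting sketch (clarifying note, not in the original source):
	 * tx_start_bd->nbd counts every BD of the packet. One is consumed
	 * here for the start BD itself; the parse BD (and, for TSO, the
	 * split-header BD) are skipped below because they carry no DMA
	 * mapping. E.g. start + parse + TSO split-header + 3 data BDs
	 * (6 total) leaves nbd == 3 iterations for the unmapping loop.
	 */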
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
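	/* Worked example (illustrative, not in the original source):
	 * SUB_S16() yields a signed ring distance, so with prod = 5 and
	 * cons = 0xFFFB (the producer has wrapped) the distance is 10.
	 * Adding NUM_TX_RINGS charges one BD per "next-page" link so those
	 * entries are never handed out as usable descriptors.
	 */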

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped(). Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
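	/* Sketch of the mask walk (clarifying note, not in the original
	 * source): the SGE ring is shadowed by a bitmask grouped into
	 * 64-bit elements, so first_elem/last_elem are element indices,
	 * e.g. with RX_SGE_MASK_ELEM_SHIFT == 6 an SGE index of 130 falls
	 * into element 2. The loop below advances the producer only over
	 * elements whose bits are all clear, i.e. fully consumed, and
	 * re-arms each such element to all ones.
	 */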

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

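	/* Clarifying note (not in the original source): the aggregated skb
	 * must advertise a gso_size so the stack can re-segment it if the
	 * frame is forwarded. The min/max clamp picks a plausible MSS: at
	 * least len_on_bd, but never more than one SGE page. E.g. with
	 * len_on_bd = 1448 and frag_size = 2896 this yields gso_size = 2896,
	 * which is below the SGE_PAGE_SIZE cap.
	 */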
#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

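	/* Clarifying note (not in the original source): the three producers
	 * live in one small struct so they can be copied dword-by-dword into
	 * the USTORM internal memory at this client's producers offset; the
	 * firmware then picks up all three values together.
	 */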
	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

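		/* Address-math sketch (clarifying note, not in the original
		 * source): for a descriptor that is not page-aligned, say at
		 * 0x...1040 with 4K pages, PAGE_ALIGN() rounds up to
		 * 0x...2000 and subtracting PAGE_SIZE - 1 gives 0x...1001,
		 * i.e. a byte inside the very page that holds the BD; that
		 * page is thus warmed before the allocation path below
		 * touches it.
		 */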
		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is the size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
							  len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif
	/* Handle Rx or Tx according to MSI-X vector */
	if (fp->is_rx_queue) {
		prefetch(fp->rx_cons_sb);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	} else {
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);

		bnx2x_update_fpsb_idx(fp);
		rmb();
		bnx2x_tx_int(fp);

		/* Re-enable interrupts */
		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
1760 if (status & mask) {
1761 /* Handle Rx or Tx according to SB id */
1762 if (fp->is_rx_queue) {
1763 prefetch(fp->rx_cons_sb);
1764 prefetch(&fp->status_blk->u_status_block.
1765 status_block_index);
a2fbb9ea 1766
ca00392c 1767 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1768
ca00392c
EG
1769 } else {
1770 prefetch(fp->tx_cons_sb);
1771 prefetch(&fp->status_blk->c_status_block.
1772 status_block_index);
1773
1774 bnx2x_update_fpsb_idx(fp);
1775 rmb();
1776 bnx2x_tx_int(fp);
1777
1778 /* Re-enable interrupts */
1779 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1780 le16_to_cpu(fp->fp_u_idx),
1781 IGU_INT_NOP, 1);
1782 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1783 le16_to_cpu(fp->fp_c_idx),
1784 IGU_INT_ENABLE, 1);
1785 }
1786 status &= ~mask;
1787 }
a2fbb9ea
ET
1788 }
1789
a2fbb9ea 1790
34f80b04 1791 if (unlikely(status & 0x1)) {
1cf167f2 1792 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1793
1794 status &= ~0x1;
1795 if (!status)
1796 return IRQ_HANDLED;
1797 }
1798
34f80b04
EG
1799 if (status)
1800 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1801 status);
a2fbb9ea 1802
c18487ee 1803 return IRQ_HANDLED;
a2fbb9ea
ET
1804}
1805
c18487ee 1806/* end of fast path */
a2fbb9ea 1807
bb2a0f7a 1808static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1809
c18487ee
YR
1810/* Link */
1811
1812/*
1813 * General service functions
1814 */
a2fbb9ea 1815
4a37fb66 1816static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1817{
1818 u32 lock_status;
1819 u32 resource_bit = (1 << resource);
4a37fb66
YG
1820 int func = BP_FUNC(bp);
1821 u32 hw_lock_control_reg;
c18487ee 1822 int cnt;
a2fbb9ea 1823
c18487ee
YR
1824 /* Validating that the resource is within range */
1825 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1826 DP(NETIF_MSG_HW,
1827 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1828 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1829 return -EINVAL;
1830 }
a2fbb9ea 1831
4a37fb66
YG
1832 if (func <= 5) {
1833 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1834 } else {
1835 hw_lock_control_reg =
1836 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1837 }
1838
c18487ee 1839 /* Validating that the resource is not already taken */
4a37fb66 1840 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1841 if (lock_status & resource_bit) {
1842 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1843 lock_status, resource_bit);
1844 return -EEXIST;
1845 }
a2fbb9ea 1846
46230476
EG
1847	/* Try for 5 seconds every 5ms */
1848 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1849 /* Try to acquire the lock */
4a37fb66
YG
1850 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1851 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1852 if (lock_status & resource_bit)
1853 return 0;
a2fbb9ea 1854
c18487ee 1855 msleep(5);
a2fbb9ea 1856 }
c18487ee
YR
1857 DP(NETIF_MSG_HW, "Timeout\n");
1858 return -EAGAIN;
1859}
a2fbb9ea 1860
4a37fb66 1861static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1862{
1863 u32 lock_status;
1864 u32 resource_bit = (1 << resource);
4a37fb66
YG
1865 int func = BP_FUNC(bp);
1866 u32 hw_lock_control_reg;
a2fbb9ea 1867
c18487ee
YR
1868 /* Validating that the resource is within range */
1869 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1870 DP(NETIF_MSG_HW,
1871 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1872 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1873 return -EINVAL;
1874 }
1875
4a37fb66
YG
1876 if (func <= 5) {
1877 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1878 } else {
1879 hw_lock_control_reg =
1880 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1881 }
1882
c18487ee 1883 /* Validating that the resource is currently taken */
4a37fb66 1884 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1885 if (!(lock_status & resource_bit)) {
1886 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1887 lock_status, resource_bit);
1888 return -EFAULT;
a2fbb9ea
ET
1889 }
1890
4a37fb66 1891 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1892 return 0;
1893}
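
/*
 * Illustrative sketch, not driver code: the two helpers above drive a
 * simple set/clear register pair -- writing the resource bit at offset +4
 * tries to take the lock, writing it at the base offset releases it. A
 * minimal (hypothetical) caller pairing them around a critical section:
 */
static int bnx2x_with_hw_lock(struct bnx2x *bp, u32 resource,
			      void (*critical)(struct bnx2x *))
{
	int rc = bnx2x_acquire_hw_lock(bp, resource);

	if (rc)		/* -EINVAL, -EEXIST or -EAGAIN from above */
		return rc;
	critical(bp);
	return bnx2x_release_hw_lock(bp, resource);
}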
1894
1895/* HW Lock for shared dual port PHYs */
4a37fb66 1896static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1897{
34f80b04 1898 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1899
46c6a674
EG
1900 if (bp->port.need_hw_lock)
1901 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1902}
a2fbb9ea 1903
4a37fb66 1904static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1905{
46c6a674
EG
1906 if (bp->port.need_hw_lock)
1907 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1908
34f80b04 1909 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1910}
a2fbb9ea 1911
4acac6a5
EG
1912int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1913{
1914 /* The GPIO should be swapped if swap register is set and active */
1915 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1916 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1917 int gpio_shift = gpio_num +
1918 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1919 u32 gpio_mask = (1 << gpio_shift);
1920 u32 gpio_reg;
1921 int value;
1922
1923 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1924 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1925 return -EINVAL;
1926 }
1927
1928 /* read GPIO value */
1929 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1930
1931 /* get the requested pin value */
1932 if ((gpio_reg & gpio_mask) == gpio_mask)
1933 value = 1;
1934 else
1935 value = 0;
1936
1937 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1938
1939 return value;
1940}
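
/*
 * Worked example (annotation only): the swap straps flip which GPIO bank
 * a port sees, so for gpio_num = 1 on port 0 with both NIG_REG_PORT_SWAP
 * and NIG_REG_STRAP_OVERRIDE set:
 *   gpio_port  = (1 && 1) ^ 0 = 1
 *   gpio_shift = 1 + MISC_REGISTERS_GPIO_PORT_SHIFT
 *   gpio_mask  = 1 << gpio_shift
 * and the function returns 1 iff that bit is set in MISC_REG_GPIO.
 */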
1941
17de50b7 1942int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1943{
1944 /* The GPIO should be swapped if swap register is set and active */
1945 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1946 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1947 int gpio_shift = gpio_num +
1948 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1949 u32 gpio_mask = (1 << gpio_shift);
1950 u32 gpio_reg;
a2fbb9ea 1951
c18487ee
YR
1952 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1953 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1954 return -EINVAL;
1955 }
a2fbb9ea 1956
4a37fb66 1957 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1958 /* read GPIO and mask except the float bits */
1959 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1960
c18487ee
YR
1961 switch (mode) {
1962 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1963 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1964 gpio_num, gpio_shift);
1965 /* clear FLOAT and set CLR */
1966 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1967 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1968 break;
a2fbb9ea 1969
c18487ee
YR
1970 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1971 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1972 gpio_num, gpio_shift);
1973 /* clear FLOAT and set SET */
1974 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1975 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1976 break;
a2fbb9ea 1977
17de50b7 1978 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1979 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1980 gpio_num, gpio_shift);
1981 /* set FLOAT */
1982 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1983 break;
a2fbb9ea 1984
c18487ee
YR
1985 default:
1986 break;
a2fbb9ea
ET
1987 }
1988
c18487ee 1989 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1990 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1991
c18487ee 1992 return 0;
a2fbb9ea
ET
1993}
1994
4acac6a5
EG
1995int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1996{
1997 /* The GPIO should be swapped if swap register is set and active */
1998 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1999 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2000 int gpio_shift = gpio_num +
2001 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2002 u32 gpio_mask = (1 << gpio_shift);
2003 u32 gpio_reg;
2004
2005 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2006 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2007 return -EINVAL;
2008 }
2009
2010 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2011 /* read GPIO int */
2012 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2013
2014 switch (mode) {
2015 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2016 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2017 "output low\n", gpio_num, gpio_shift);
2018 /* clear SET and set CLR */
2019 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2020 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2021 break;
2022
2023 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2024 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2025 "output high\n", gpio_num, gpio_shift);
2026 /* clear CLR and set SET */
2027 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2028 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2029 break;
2030
2031 default:
2032 break;
2033 }
2034
2035 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2036 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2037
2038 return 0;
2039}
2040
c18487ee 2041static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2042{
c18487ee
YR
2043 u32 spio_mask = (1 << spio_num);
2044 u32 spio_reg;
a2fbb9ea 2045
c18487ee
YR
2046 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2047 (spio_num > MISC_REGISTERS_SPIO_7)) {
2048 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2049 return -EINVAL;
a2fbb9ea
ET
2050 }
2051
4a37fb66 2052 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2053 /* read SPIO and mask except the float bits */
2054 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2055
c18487ee 2056 switch (mode) {
6378c025 2057 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2058 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2059 /* clear FLOAT and set CLR */
2060 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2061 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2062 break;
a2fbb9ea 2063
6378c025 2064 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2065 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2066 /* clear FLOAT and set SET */
2067 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2068 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2069 break;
a2fbb9ea 2070
c18487ee
YR
2071 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2072 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2073 /* set FLOAT */
2074 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2075 break;
a2fbb9ea 2076
c18487ee
YR
2077 default:
2078 break;
a2fbb9ea
ET
2079 }
2080
c18487ee 2081 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2082 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2083
a2fbb9ea
ET
2084 return 0;
2085}
2086
c18487ee 2087static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2088{
ad33ea3a
EG
2089 switch (bp->link_vars.ieee_fc &
2090 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2091 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2092 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2093 ADVERTISED_Pause);
2094 break;
356e2385 2095
c18487ee 2096 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2097 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2098 ADVERTISED_Pause);
2099 break;
356e2385 2100
c18487ee 2101 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2102 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2103 break;
356e2385 2104
c18487ee 2105 default:
34f80b04 2106 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2107 ADVERTISED_Pause);
2108 break;
2109 }
2110}
f1410647 2111
c18487ee
YR
2112static void bnx2x_link_report(struct bnx2x *bp)
2113{
2691d51d
EG
2114 if (bp->state == BNX2X_STATE_DISABLED) {
2115 netif_carrier_off(bp->dev);
2116 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2117 return;
2118 }
2119
c18487ee
YR
2120 if (bp->link_vars.link_up) {
2121 if (bp->state == BNX2X_STATE_OPEN)
2122 netif_carrier_on(bp->dev);
2123 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2124
c18487ee 2125 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 2126
c18487ee
YR
2127 if (bp->link_vars.duplex == DUPLEX_FULL)
2128 printk("full duplex");
2129 else
2130 printk("half duplex");
f1410647 2131
c0700f90
DM
2132 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2133 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2134 printk(", receive ");
356e2385
EG
2135 if (bp->link_vars.flow_ctrl &
2136 BNX2X_FLOW_CTRL_TX)
c18487ee
YR
2137 printk("& transmit ");
2138 } else {
2139 printk(", transmit ");
2140 }
2141 printk("flow control ON");
2142 }
2143 printk("\n");
f1410647 2144
c18487ee
YR
2145 } else { /* link_down */
2146 netif_carrier_off(bp->dev);
2147 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2148 }
c18487ee
YR
2149}
2150
b5bf9068 2151static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2152{
19680c48
EG
2153 if (!BP_NOMCP(bp)) {
2154 u8 rc;
a2fbb9ea 2155
19680c48 2156 /* Initialize link parameters structure variables */
8c99e7b0
YR
2157 /* It is recommended to turn off RX FC for jumbo frames
2158 for better performance */
0c593270 2159 if (bp->dev->mtu > 5000)
c0700f90 2160 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2161 else
c0700f90 2162 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2163
4a37fb66 2164 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2165
2166 if (load_mode == LOAD_DIAG)
2167 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2168
19680c48 2169 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2170
4a37fb66 2171 bnx2x_release_phy_lock(bp);
a2fbb9ea 2172
3c96c68b
EG
2173 bnx2x_calc_fc_adv(bp);
2174
b5bf9068
EG
2175 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2176 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2177 bnx2x_link_report(bp);
b5bf9068 2178 }
34f80b04 2179
19680c48
EG
2180 return rc;
2181 }
f5372251 2182 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2183 return -EINVAL;
a2fbb9ea
ET
2184}
2185
c18487ee 2186static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2187{
19680c48 2188 if (!BP_NOMCP(bp)) {
4a37fb66 2189 bnx2x_acquire_phy_lock(bp);
19680c48 2190 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2191 bnx2x_release_phy_lock(bp);
a2fbb9ea 2192
19680c48
EG
2193 bnx2x_calc_fc_adv(bp);
2194 } else
f5372251 2195 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2196}
a2fbb9ea 2197
c18487ee
YR
2198static void bnx2x__link_reset(struct bnx2x *bp)
2199{
19680c48 2200 if (!BP_NOMCP(bp)) {
4a37fb66 2201 bnx2x_acquire_phy_lock(bp);
589abe3a 2202 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2203 bnx2x_release_phy_lock(bp);
19680c48 2204 } else
f5372251 2205 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2206}
a2fbb9ea 2207
c18487ee
YR
2208static u8 bnx2x_link_test(struct bnx2x *bp)
2209{
2210 u8 rc;
a2fbb9ea 2211
4a37fb66 2212 bnx2x_acquire_phy_lock(bp);
c18487ee 2213 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2214 bnx2x_release_phy_lock(bp);
a2fbb9ea 2215
c18487ee
YR
2216 return rc;
2217}
a2fbb9ea 2218
8a1c38d1 2219static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2220{
8a1c38d1
EG
2221 u32 r_param = bp->link_vars.line_speed / 8;
2222 u32 fair_periodic_timeout_usec;
2223 u32 t_fair;
34f80b04 2224
8a1c38d1
EG
2225 memset(&(bp->cmng.rs_vars), 0,
2226 sizeof(struct rate_shaping_vars_per_port));
2227 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2228
8a1c38d1
EG
2229 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2230 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2231
8a1c38d1
EG
 2232	/* this is the threshold below which no timer arming will occur;
 2233	   the 1.25 coefficient makes the threshold a little bigger
 2234	   than the real time, to compensate for timer inaccuracy */
2235 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2236 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2237
8a1c38d1
EG
2238 /* resolution of fairness timer */
2239 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2240 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2241 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2242
8a1c38d1
EG
2243 /* this is the threshold below which we won't arm the timer anymore */
2244 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2245
8a1c38d1
EG
 2246	/* we multiply by 1e3/8 to get bytes/msec.
 2247	   We don't want the credits to exceed
 2248	   t_fair*FAIR_MEM (the algorithm resolution) */
2249 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2250 /* since each tick is 4 usec */
2251 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2252}
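
/*
 * Worked example (annotation only), plugging a 10G link into the formulas
 * above and the header values implied by their comments
 * (RS_PERIODIC_TIMEOUT_USEC = 100, T_FAIR_COEF = 10000000):
 *   r_param      = 10000 / 8        = 1250 bytes/usec
 *   rs_threshold = 100 * 1250 * 5/4 = 156250 bytes  (the 1.25 coefficient)
 *   t_fair       = 10000000 / 10000 = 1000 usec
 * and on a 1G link t_fair becomes 10000 usec, matching the comment above.
 */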
2253
2691d51d
EG
2254/* Calculates the sum of vn_min_rates.
2255 It's needed for further normalizing of the min_rates.
2256 Returns:
2257 sum of vn_min_rates.
2258 or
2259 0 - if all the min_rates are 0.
 2260	In the latter case the fairness algorithm should be deactivated.
 2261	If not all min_rates are zero then those that are zero will be set to 1.
2262 */
2263static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2264{
2265 int all_zero = 1;
2266 int port = BP_PORT(bp);
2267 int vn;
2268
2269 bp->vn_weight_sum = 0;
2270 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2271 int func = 2*vn + port;
2272 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2273 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2274 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2275
2276 /* Skip hidden vns */
2277 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2278 continue;
2279
2280 /* If min rate is zero - set it to 1 */
2281 if (!vn_min_rate)
2282 vn_min_rate = DEF_MIN_RATE;
2283 else
2284 all_zero = 0;
2285
2286 bp->vn_weight_sum += vn_min_rate;
2287 }
2288
 2289	/* ... only if all min rates are zero - disable fairness */
2290 if (all_zero)
2291 bp->vn_weight_sum = 0;
2292}
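
/*
 * Example (annotation only): with three visible vns whose mf_cfg min
 * rates decode to {0, 3000, 7000}, the zero entry is promoted to
 * DEF_MIN_RATE and vn_weight_sum becomes DEF_MIN_RATE + 3000 + 7000;
 * only when every visible vn decodes to 0 does vn_weight_sum stay 0 and
 * fairness get switched off.
 */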
2293
8a1c38d1 2294static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2295{
2296 struct rate_shaping_vars_per_vn m_rs_vn;
2297 struct fairness_vars_per_vn m_fair_vn;
2298 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2299 u16 vn_min_rate, vn_max_rate;
2300 int i;
2301
2302 /* If function is hidden - set min and max to zeroes */
2303 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2304 vn_min_rate = 0;
2305 vn_max_rate = 0;
2306
2307 } else {
2308 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2309 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
8a1c38d1 2310		/* If fairness is enabled (not all min rates are zero) and
34f80b04 2311 if current min rate is zero - set it to 1.
33471629 2312 This is a requirement of the algorithm. */
8a1c38d1 2313 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
2314 vn_min_rate = DEF_MIN_RATE;
2315 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2316 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2317 }
2318
8a1c38d1
EG
2319 DP(NETIF_MSG_IFUP,
2320 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2321 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2322
2323 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2324 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2325
2326 /* global vn counter - maximal Mbps for this vn */
2327 m_rs_vn.vn_counter.rate = vn_max_rate;
2328
2329 /* quota - number of bytes transmitted in this period */
2330 m_rs_vn.vn_counter.quota =
2331 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2332
8a1c38d1 2333 if (bp->vn_weight_sum) {
34f80b04
EG
2334 /* credit for each period of the fairness algorithm:
 2335	   number of bytes in T_FAIR (the vns share the port rate).
8a1c38d1
EG
2336 vn_weight_sum should not be larger than 10000, thus
2337 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2338 than zero */
34f80b04 2339 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2340 max((u32)(vn_min_rate * (T_FAIR_COEF /
2341 (8 * bp->vn_weight_sum))),
2342 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2343 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2344 m_fair_vn.vn_credit_delta);
2345 }
2346
34f80b04
EG
2347 /* Store it to internal memory */
2348 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2349 REG_WR(bp, BAR_XSTRORM_INTMEM +
2350 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2351 ((u32 *)(&m_rs_vn))[i]);
2352
2353 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2354 REG_WR(bp, BAR_XSTRORM_INTMEM +
2355 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2356 ((u32 *)(&m_fair_vn))[i]);
2357}
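
/*
 * Worked example (annotation only), taking vn_weight_sum at its 10000
 * ceiling and T_FAIR_COEF = 10000000 as derived above:
 *   T_FAIR_COEF / (8 * vn_weight_sum) = 10000000 / 80000 = 125
 * so a vn with vn_min_rate = 2500 is granted
 *   vn_credit_delta = max(2500 * 125, 2 * fair_threshold)
 *                   = max(312500, 2 * QM_ARB_BYTES) bytes per period.
 */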
2358
8a1c38d1 2359
c18487ee
YR
2360/* This function is called upon link interrupt */
2361static void bnx2x_link_attn(struct bnx2x *bp)
2362{
bb2a0f7a
YG
2363 /* Make sure that we are synced with the current statistics */
2364 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2365
c18487ee 2366 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2367
bb2a0f7a
YG
2368 if (bp->link_vars.link_up) {
2369
1c06328c
EG
2370 /* dropless flow control */
2371 if (CHIP_IS_E1H(bp)) {
2372 int port = BP_PORT(bp);
2373 u32 pause_enabled = 0;
2374
2375 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2376 pause_enabled = 1;
2377
2378 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2379 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2380 pause_enabled);
2381 }
2382
bb2a0f7a
YG
2383 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2384 struct host_port_stats *pstats;
2385
2386 pstats = bnx2x_sp(bp, port_stats);
2387 /* reset old bmac stats */
2388 memset(&(pstats->mac_stx[0]), 0,
2389 sizeof(struct mac_stx));
2390 }
2391 if ((bp->state == BNX2X_STATE_OPEN) ||
2392 (bp->state == BNX2X_STATE_DISABLED))
2393 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2394 }
2395
c18487ee
YR
2396 /* indicate link status */
2397 bnx2x_link_report(bp);
34f80b04
EG
2398
2399 if (IS_E1HMF(bp)) {
8a1c38d1 2400 int port = BP_PORT(bp);
34f80b04 2401 int func;
8a1c38d1 2402 int vn;
34f80b04
EG
2403
2404 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2405 if (vn == BP_E1HVN(bp))
2406 continue;
2407
8a1c38d1 2408 func = ((vn << 1) | port);
34f80b04
EG
2409
2410 /* Set the attention towards other drivers
2411 on the same port */
2412 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2413 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2414 }
34f80b04 2415
8a1c38d1
EG
2416 if (bp->link_vars.link_up) {
2417 int i;
2418
2419 /* Init rate shaping and fairness contexts */
2420 bnx2x_init_port_minmax(bp);
34f80b04 2421
34f80b04 2422 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2423 bnx2x_init_vn_minmax(bp, 2*vn + port);
2424
2425 /* Store it to internal memory */
2426 for (i = 0;
2427 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2428 REG_WR(bp, BAR_XSTRORM_INTMEM +
2429 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2430 ((u32 *)(&bp->cmng))[i]);
2431 }
34f80b04 2432 }
c18487ee 2433}
a2fbb9ea 2434
c18487ee
YR
2435static void bnx2x__link_status_update(struct bnx2x *bp)
2436{
2691d51d
EG
2437 int func = BP_FUNC(bp);
2438
c18487ee
YR
2439 if (bp->state != BNX2X_STATE_OPEN)
2440 return;
a2fbb9ea 2441
c18487ee 2442 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2443
bb2a0f7a
YG
2444 if (bp->link_vars.link_up)
2445 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2446 else
2447 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2448
2691d51d
EG
2449 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2450 bnx2x_calc_vn_weight_sum(bp);
2451
c18487ee
YR
2452 /* indicate link status */
2453 bnx2x_link_report(bp);
a2fbb9ea 2454}
a2fbb9ea 2455
34f80b04
EG
2456static void bnx2x_pmf_update(struct bnx2x *bp)
2457{
2458 int port = BP_PORT(bp);
2459 u32 val;
2460
2461 bp->port.pmf = 1;
2462 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2463
2464 /* enable nig attention */
2465 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2466 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2467 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2468
2469 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2470}
2471
c18487ee 2472/* end of Link */
a2fbb9ea
ET
2473
2474/* slow path */
2475
2476/*
2477 * General service functions
2478 */
2479
2691d51d
EG
2480/* send the MCP a request, block until there is a reply */
2481u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2482{
2483 int func = BP_FUNC(bp);
2484 u32 seq = ++bp->fw_seq;
2485 u32 rc = 0;
2486 u32 cnt = 1;
2487 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2488
2489 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2490 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2491
2492 do {
 2493		/* let the FW do its magic ... */
2494 msleep(delay);
2495
2496 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2497
 2498	/* Give the FW up to 2 seconds (200*10ms) */
2499 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2500
2501 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2502 cnt*delay, rc, seq);
2503
2504 /* is this a reply to our command? */
2505 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2506 rc &= FW_MSG_CODE_MASK;
2507 else {
2508 /* FW BUG! */
2509 BNX2X_ERR("FW failed to respond!\n");
2510 bnx2x_fw_dump(bp);
2511 rc = 0;
2512 }
2513
2514 return rc;
2515}
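
/*
 * Minimal usage sketch, not driver code: callers compare the masked
 * return value against the FW_MSG_CODE_* reply they expect; 0 means the
 * MCP never echoed our sequence number. The helper name is hypothetical.
 */
static bool bnx2x_fw_replied(struct bnx2x *bp, u32 command, u32 expected)
{
	/* bnx2x_fw_command() already masked the reply with FW_MSG_CODE_MASK */
	return bnx2x_fw_command(bp, command) == expected;
}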
2516
2517static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2518static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
2519static void bnx2x_set_rx_mode(struct net_device *dev);
2520
2521static void bnx2x_e1h_disable(struct bnx2x *bp)
2522{
2523 int port = BP_PORT(bp);
2524 int i;
2525
2526 bp->rx_mode = BNX2X_RX_MODE_NONE;
2527 bnx2x_set_storm_rx_mode(bp);
2528
2529 netif_tx_disable(bp->dev);
2530 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2531
2532 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2533
2534 bnx2x_set_mac_addr_e1h(bp, 0);
2535
2536 for (i = 0; i < MC_HASH_SIZE; i++)
2537 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2538
2539 netif_carrier_off(bp->dev);
2540}
2541
2542static void bnx2x_e1h_enable(struct bnx2x *bp)
2543{
2544 int port = BP_PORT(bp);
2545
2546 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2547
2548 bnx2x_set_mac_addr_e1h(bp, 1);
2549
 2550	/* Tx queues should only be re-enabled */
2551 netif_tx_wake_all_queues(bp->dev);
2552
2553 /* Initialize the receive filter. */
2554 bnx2x_set_rx_mode(bp->dev);
2555}
2556
2557static void bnx2x_update_min_max(struct bnx2x *bp)
2558{
2559 int port = BP_PORT(bp);
2560 int vn, i;
2561
2562 /* Init rate shaping and fairness contexts */
2563 bnx2x_init_port_minmax(bp);
2564
2565 bnx2x_calc_vn_weight_sum(bp);
2566
2567 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2568 bnx2x_init_vn_minmax(bp, 2*vn + port);
2569
2570 if (bp->port.pmf) {
2571 int func;
2572
2573 /* Set the attention towards other drivers on the same port */
2574 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2575 if (vn == BP_E1HVN(bp))
2576 continue;
2577
2578 func = ((vn << 1) | port);
2579 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2580 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2581 }
2582
2583 /* Store it to internal memory */
2584 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2585 REG_WR(bp, BAR_XSTRORM_INTMEM +
2586 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2587 ((u32 *)(&bp->cmng))[i]);
2588 }
2589}
2590
2591static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2592{
2593 int func = BP_FUNC(bp);
2594
2595 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2596 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2597
2598 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2599
2600 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2601 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2602 bp->state = BNX2X_STATE_DISABLED;
2603
2604 bnx2x_e1h_disable(bp);
2605 } else {
2606 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2607 bp->state = BNX2X_STATE_OPEN;
2608
2609 bnx2x_e1h_enable(bp);
2610 }
2611 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2612 }
2613 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2614
2615 bnx2x_update_min_max(bp);
2616 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2617 }
2618
2619 /* Report results to MCP */
2620 if (dcc_event)
2621 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2622 else
2623 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2624}
2625
a2fbb9ea
ET
2626/* the slow path queue is odd since completions arrive on the fastpath ring */
2627static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2628 u32 data_hi, u32 data_lo, int common)
2629{
34f80b04 2630 int func = BP_FUNC(bp);
a2fbb9ea 2631
34f80b04
EG
2632 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2633 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2634 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2635 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2636 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2637
2638#ifdef BNX2X_STOP_ON_ERROR
2639 if (unlikely(bp->panic))
2640 return -EIO;
2641#endif
2642
34f80b04 2643 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2644
2645 if (!bp->spq_left) {
2646 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2647 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2648 bnx2x_panic();
2649 return -EBUSY;
2650 }
f1410647 2651
a2fbb9ea
ET
 2652	/* CID needs the port number to be encoded in it */
2653 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2654 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2655 HW_CID(bp, cid)));
2656 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2657 if (common)
2658 bp->spq_prod_bd->hdr.type |=
2659 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2660
2661 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2662 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2663
2664 bp->spq_left--;
2665
2666 if (bp->spq_prod_bd == bp->spq_last_bd) {
2667 bp->spq_prod_bd = bp->spq;
2668 bp->spq_prod_idx = 0;
2669 DP(NETIF_MSG_TIMER, "end of spq\n");
2670
2671 } else {
2672 bp->spq_prod_bd++;
2673 bp->spq_prod_idx++;
2674 }
2675
37dbbf32
EG
2676 /* Make sure that BD data is updated before writing the producer */
2677 wmb();
2678
34f80b04 2679 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2680 bp->spq_prod_idx);
2681
37dbbf32
EG
2682 mmiowb();
2683
34f80b04 2684 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2685 return 0;
2686}
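
/*
 * Annotation, not driver code: the producer update above is a ring with
 * an explicit last-element sentinel. A minimal model of the same wrap
 * logic (names hypothetical), for a ring of n_slots elements:
 */
struct spq_model {
	int slot;	/* current element, 0 .. n_slots-1 */
	int prod_idx;	/* producer index written to the chip */
};

static void spq_model_advance(struct spq_model *q, int n_slots)
{
	if (q->slot == n_slots - 1) {
		q->slot = 0;		/* like spq_prod_bd = bp->spq */
		q->prod_idx = 0;	/* like spq_prod_idx = 0 */
	} else {
		q->slot++;
		q->prod_idx++;
	}
}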
2687
2688/* acquire split MCP access lock register */
4a37fb66 2689static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2690{
a2fbb9ea 2691 u32 i, j, val;
34f80b04 2692 int rc = 0;
a2fbb9ea
ET
2693
2694 might_sleep();
2695 i = 100;
2696 for (j = 0; j < i*10; j++) {
2697 val = (1UL << 31);
2698 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2699 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2700 if (val & (1L << 31))
2701 break;
2702
2703 msleep(5);
2704 }
a2fbb9ea 2705 if (!(val & (1L << 31))) {
19680c48 2706 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2707 rc = -EBUSY;
2708 }
2709
2710 return rc;
2711}
2712
4a37fb66
YG
2713/* release split MCP access lock register */
2714static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2715{
2716 u32 val = 0;
2717
2718 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2719}
2720
2721static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2722{
2723 struct host_def_status_block *def_sb = bp->def_status_blk;
2724 u16 rc = 0;
2725
2726 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2727 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2728 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2729 rc |= 1;
2730 }
2731 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2732 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2733 rc |= 2;
2734 }
2735 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2736 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2737 rc |= 4;
2738 }
2739 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2740 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2741 rc |= 8;
2742 }
2743 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2744 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2745 rc |= 16;
2746 }
2747 return rc;
2748}
2749
2750/*
2751 * slow path service functions
2752 */
2753
2754static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2755{
34f80b04 2756 int port = BP_PORT(bp);
5c862848
EG
2757 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2758 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2759 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2760 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2761 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2762 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2763 u32 aeu_mask;
87942b46 2764 u32 nig_mask = 0;
a2fbb9ea 2765
a2fbb9ea
ET
2766 if (bp->attn_state & asserted)
2767 BNX2X_ERR("IGU ERROR\n");
2768
3fcaf2e5
EG
2769 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2770 aeu_mask = REG_RD(bp, aeu_addr);
2771
a2fbb9ea 2772 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2773 aeu_mask, asserted);
2774 aeu_mask &= ~(asserted & 0xff);
2775 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2776
3fcaf2e5
EG
2777 REG_WR(bp, aeu_addr, aeu_mask);
2778 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2779
3fcaf2e5 2780 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2781 bp->attn_state |= asserted;
3fcaf2e5 2782 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2783
2784 if (asserted & ATTN_HARD_WIRED_MASK) {
2785 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2786
a5e9a7cf
EG
2787 bnx2x_acquire_phy_lock(bp);
2788
877e9aa4 2789 /* save nig interrupt mask */
87942b46 2790 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2791 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2792
c18487ee 2793 bnx2x_link_attn(bp);
a2fbb9ea
ET
2794
2795 /* handle unicore attn? */
2796 }
2797 if (asserted & ATTN_SW_TIMER_4_FUNC)
2798 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2799
2800 if (asserted & GPIO_2_FUNC)
2801 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2802
2803 if (asserted & GPIO_3_FUNC)
2804 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2805
2806 if (asserted & GPIO_4_FUNC)
2807 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2808
2809 if (port == 0) {
2810 if (asserted & ATTN_GENERAL_ATTN_1) {
2811 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2812 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2813 }
2814 if (asserted & ATTN_GENERAL_ATTN_2) {
2815 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2816 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2817 }
2818 if (asserted & ATTN_GENERAL_ATTN_3) {
2819 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2820 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2821 }
2822 } else {
2823 if (asserted & ATTN_GENERAL_ATTN_4) {
2824 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2825 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2826 }
2827 if (asserted & ATTN_GENERAL_ATTN_5) {
2828 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2829 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2830 }
2831 if (asserted & ATTN_GENERAL_ATTN_6) {
2832 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2833 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2834 }
2835 }
2836
2837 } /* if hardwired */
2838
5c862848
EG
2839 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2840 asserted, hc_addr);
2841 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2842
2843 /* now set back the mask */
a5e9a7cf 2844 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2845 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2846 bnx2x_release_phy_lock(bp);
2847 }
a2fbb9ea
ET
2848}
2849
fd4ef40d
EG
2850static inline void bnx2x_fan_failure(struct bnx2x *bp)
2851{
2852 int port = BP_PORT(bp);
2853
2854 /* mark the failure */
2855 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2856 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2857 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2858 bp->link_params.ext_phy_config);
2859
2860 /* log the failure */
2861 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2862 " the driver to shutdown the card to prevent permanent"
2863 " damage. Please contact Dell Support for assistance\n",
2864 bp->dev->name);
2865}
877e9aa4 2866static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2867{
34f80b04 2868 int port = BP_PORT(bp);
877e9aa4 2869 int reg_offset;
4d295db0 2870 u32 val, swap_val, swap_override;
877e9aa4 2871
34f80b04
EG
2872 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2873 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2874
34f80b04 2875 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2876
2877 val = REG_RD(bp, reg_offset);
2878 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2879 REG_WR(bp, reg_offset, val);
2880
2881 BNX2X_ERR("SPIO5 hw attention\n");
2882
fd4ef40d 2883 /* Fan failure attention */
35b19ba5
EG
2884 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2885 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2886 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2887 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2888 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
2889 /* The PHY reset is controlled by GPIO 1 */
2890 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2891 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2892 break;
2893
4d295db0
EG
2894 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2895 /* The PHY reset is controlled by GPIO 1 */
2896 /* fake the port number to cancel the swap done in
2897 set_gpio() */
2898 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2899 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2900 port = (swap_val && swap_override) ^ 1;
2901 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2902 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2903 break;
2904
877e9aa4
ET
2905 default:
2906 break;
2907 }
fd4ef40d 2908 bnx2x_fan_failure(bp);
877e9aa4 2909 }
34f80b04 2910
589abe3a
EG
2911 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2912 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2913 bnx2x_acquire_phy_lock(bp);
2914 bnx2x_handle_module_detect_int(&bp->link_params);
2915 bnx2x_release_phy_lock(bp);
2916 }
2917
34f80b04
EG
2918 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2919
2920 val = REG_RD(bp, reg_offset);
2921 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2922 REG_WR(bp, reg_offset, val);
2923
2924 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2925 (attn & HW_INTERRUT_ASSERT_SET_0));
2926 bnx2x_panic();
2927 }
877e9aa4
ET
2928}
2929
2930static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2931{
2932 u32 val;
2933
0626b899 2934 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2935
2936 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2937 BNX2X_ERR("DB hw attention 0x%x\n", val);
2938 /* DORQ discard attention */
2939 if (val & 0x2)
2940 BNX2X_ERR("FATAL error from DORQ\n");
2941 }
34f80b04
EG
2942
2943 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2944
2945 int port = BP_PORT(bp);
2946 int reg_offset;
2947
2948 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2949 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2950
2951 val = REG_RD(bp, reg_offset);
2952 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2953 REG_WR(bp, reg_offset, val);
2954
2955 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2956 (attn & HW_INTERRUT_ASSERT_SET_1));
2957 bnx2x_panic();
2958 }
877e9aa4
ET
2959}
2960
2961static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2962{
2963 u32 val;
2964
2965 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2966
2967 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2968 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2969 /* CFC error attention */
2970 if (val & 0x2)
2971 BNX2X_ERR("FATAL error from CFC\n");
2972 }
2973
2974 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2975
2976 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2977 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2978 /* RQ_USDMDP_FIFO_OVERFLOW */
2979 if (val & 0x18000)
2980 BNX2X_ERR("FATAL error from PXP\n");
2981 }
34f80b04
EG
2982
2983 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2984
2985 int port = BP_PORT(bp);
2986 int reg_offset;
2987
2988 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2989 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2990
2991 val = REG_RD(bp, reg_offset);
2992 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2993 REG_WR(bp, reg_offset, val);
2994
2995 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2996 (attn & HW_INTERRUT_ASSERT_SET_2));
2997 bnx2x_panic();
2998 }
877e9aa4
ET
2999}
3000
3001static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3002{
34f80b04
EG
3003 u32 val;
3004
877e9aa4
ET
3005 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3006
34f80b04
EG
3007 if (attn & BNX2X_PMF_LINK_ASSERT) {
3008 int func = BP_FUNC(bp);
3009
3010 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2691d51d
EG
3011 val = SHMEM_RD(bp, func_mb[func].drv_status);
3012 if (val & DRV_STATUS_DCC_EVENT_MASK)
3013 bnx2x_dcc_event(bp,
3014 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3015 bnx2x__link_status_update(bp);
2691d51d 3016 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3017 bnx2x_pmf_update(bp);
3018
3019 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3020
3021 BNX2X_ERR("MC assert!\n");
3022 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3023 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3024 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3025 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3026 bnx2x_panic();
3027
3028 } else if (attn & BNX2X_MCP_ASSERT) {
3029
3030 BNX2X_ERR("MCP assert!\n");
3031 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3032 bnx2x_fw_dump(bp);
877e9aa4
ET
3033
3034 } else
3035 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3036 }
3037
3038 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3039 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3040 if (attn & BNX2X_GRC_TIMEOUT) {
3041 val = CHIP_IS_E1H(bp) ?
3042 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3043 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3044 }
3045 if (attn & BNX2X_GRC_RSV) {
3046 val = CHIP_IS_E1H(bp) ?
3047 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3048 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3049 }
877e9aa4 3050 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3051 }
3052}
3053
3054static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3055{
a2fbb9ea
ET
3056 struct attn_route attn;
3057 struct attn_route group_mask;
34f80b04 3058 int port = BP_PORT(bp);
877e9aa4 3059 int index;
a2fbb9ea
ET
3060 u32 reg_addr;
3061 u32 val;
3fcaf2e5 3062 u32 aeu_mask;
a2fbb9ea
ET
3063
3064 /* need to take HW lock because MCP or other port might also
3065 try to handle this event */
4a37fb66 3066 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
3067
3068 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3069 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3070 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3071 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
3072 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3073 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
3074
3075 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3076 if (deasserted & (1 << index)) {
3077 group_mask = bp->attn_group[index];
3078
34f80b04
EG
3079 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3080 index, group_mask.sig[0], group_mask.sig[1],
3081 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 3082
877e9aa4
ET
3083 bnx2x_attn_int_deasserted3(bp,
3084 attn.sig[3] & group_mask.sig[3]);
3085 bnx2x_attn_int_deasserted1(bp,
3086 attn.sig[1] & group_mask.sig[1]);
3087 bnx2x_attn_int_deasserted2(bp,
3088 attn.sig[2] & group_mask.sig[2]);
3089 bnx2x_attn_int_deasserted0(bp,
3090 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 3091
a2fbb9ea
ET
3092 if ((attn.sig[0] & group_mask.sig[0] &
3093 HW_PRTY_ASSERT_SET_0) ||
3094 (attn.sig[1] & group_mask.sig[1] &
3095 HW_PRTY_ASSERT_SET_1) ||
3096 (attn.sig[2] & group_mask.sig[2] &
3097 HW_PRTY_ASSERT_SET_2))
6378c025 3098 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
3099 }
3100 }
3101
4a37fb66 3102 bnx2x_release_alr(bp);
a2fbb9ea 3103
5c862848 3104 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
3105
3106 val = ~deasserted;
3fcaf2e5
EG
3107 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3108 val, reg_addr);
5c862848 3109 REG_WR(bp, reg_addr, val);
a2fbb9ea 3110
a2fbb9ea 3111 if (~bp->attn_state & deasserted)
3fcaf2e5 3112 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
3113
3114 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3115 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3116
3fcaf2e5
EG
3117 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3118 aeu_mask = REG_RD(bp, reg_addr);
3119
3120 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3121 aeu_mask, deasserted);
3122 aeu_mask |= (deasserted & 0xff);
3123 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3124
3fcaf2e5
EG
3125 REG_WR(bp, reg_addr, aeu_mask);
3126 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
3127
3128 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3129 bp->attn_state &= ~deasserted;
3130 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3131}
3132
3133static void bnx2x_attn_int(struct bnx2x *bp)
3134{
3135 /* read local copy of bits */
68d59484
EG
3136 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3137 attn_bits);
3138 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3139 attn_bits_ack);
a2fbb9ea
ET
3140 u32 attn_state = bp->attn_state;
3141
3142 /* look for changed bits */
3143 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3144 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3145
3146 DP(NETIF_MSG_HW,
3147 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3148 attn_bits, attn_ack, asserted, deasserted);
3149
3150 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3151 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
3152
3153 /* handle bits that were raised */
3154 if (asserted)
3155 bnx2x_attn_int_asserted(bp, asserted);
3156
3157 if (deasserted)
3158 bnx2x_attn_int_deasserted(bp, deasserted);
3159}
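
/*
 * Annotation: per attention line, with (bit, ack, state) as inputs,
 *   asserted   = attn_bits & ~attn_ack & ~attn_state
 * picks up lines newly raised by HW that we have neither acked nor
 * recorded, while
 *   deasserted = ~attn_bits & attn_ack & attn_state
 * picks up lines HW has dropped that we are still holding; the BAD-state
 * check above flags lines where attn_bits and attn_ack agree but
 * attn_state disagrees.
 */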
3160
3161static void bnx2x_sp_task(struct work_struct *work)
3162{
1cf167f2 3163 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3164 u16 status;
3165
34f80b04 3166
a2fbb9ea
ET
3167 /* Return here if interrupt is disabled */
3168 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3169 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3170 return;
3171 }
3172
3173 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3174/* if (status == 0) */
3175/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3176
3196a88a 3177 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 3178
877e9aa4
ET
3179 /* HW attentions */
3180 if (status & 0x1)
a2fbb9ea 3181 bnx2x_attn_int(bp);
a2fbb9ea 3182
68d59484 3183 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
3184 IGU_INT_NOP, 1);
3185 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3186 IGU_INT_NOP, 1);
3187 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3188 IGU_INT_NOP, 1);
3189 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3190 IGU_INT_NOP, 1);
3191 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3192 IGU_INT_ENABLE, 1);
877e9aa4 3193
a2fbb9ea
ET
3194}
3195
3196static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3197{
3198 struct net_device *dev = dev_instance;
3199 struct bnx2x *bp = netdev_priv(dev);
3200
3201 /* Return here if interrupt is disabled */
3202 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3203 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3204 return IRQ_HANDLED;
3205 }
3206
8d9c5f34 3207 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3208
3209#ifdef BNX2X_STOP_ON_ERROR
3210 if (unlikely(bp->panic))
3211 return IRQ_HANDLED;
3212#endif
3213
1cf167f2 3214 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3215
3216 return IRQ_HANDLED;
3217}
3218
3219/* end of slow path */
3220
3221/* Statistics */
3222
3223/****************************************************************************
3224* Macros
3225****************************************************************************/
3226
a2fbb9ea
ET
3227/* sum[hi:lo] += add[hi:lo] */
3228#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3229 do { \
3230 s_lo += a_lo; \
f5ba6772 3231 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
3232 } while (0)
3233
3234/* difference = minuend - subtrahend */
3235#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3236 do { \
bb2a0f7a
YG
3237 if (m_lo < s_lo) { \
3238 /* underflow */ \
a2fbb9ea 3239 d_hi = m_hi - s_hi; \
bb2a0f7a 3240 if (d_hi > 0) { \
6378c025 3241 /* we can 'loan' 1 */ \
a2fbb9ea
ET
3242 d_hi--; \
3243 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3244 } else { \
6378c025 3245 /* m_hi <= s_hi */ \
a2fbb9ea
ET
3246 d_hi = 0; \
3247 d_lo = 0; \
3248 } \
bb2a0f7a
YG
3249 } else { \
3250 /* m_lo >= s_lo */ \
a2fbb9ea 3251 if (m_hi < s_hi) { \
bb2a0f7a
YG
3252 d_hi = 0; \
3253 d_lo = 0; \
3254 } else { \
6378c025 3255 /* m_hi >= s_hi */ \
bb2a0f7a
YG
3256 d_hi = m_hi - s_hi; \
3257 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
3258 } \
3259 } \
3260 } while (0)
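
/*
 * Standalone sanity check for the carry/borrow logic above (userspace
 * sketch, not built with the driver; UINT_MAX from <limits.h>):
 *
 *	unsigned int s_hi = 0, s_lo = UINT_MAX, d_hi, d_lo;
 *
 *	ADD_64(s_hi, 0, s_lo, 1);
 *	assert(s_hi == 1 && s_lo == 0);		// lo wrapped, hi carried
 *
 *	DIFF_64(d_hi, 1, 0, d_lo, 0, 1);	// 1:0 minus 0:1 borrows
 *	assert(d_hi == 0 && d_lo == UINT_MAX);
 */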
3261
bb2a0f7a 3262#define UPDATE_STAT64(s, t) \
a2fbb9ea 3263 do { \
bb2a0f7a
YG
3264 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3265 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3266 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3267 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3268 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3269 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
3270 } while (0)
3271
bb2a0f7a 3272#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3273 do { \
bb2a0f7a
YG
3274 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3275 diff.lo, new->s##_lo, old->s##_lo); \
3276 ADD_64(estats->t##_hi, diff.hi, \
3277 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
3278 } while (0)
3279
3280/* sum[hi:lo] += add */
3281#define ADD_EXTEND_64(s_hi, s_lo, a) \
3282 do { \
3283 s_lo += a; \
3284 s_hi += (s_lo < a) ? 1 : 0; \
3285 } while (0)
3286
bb2a0f7a 3287#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3288 do { \
bb2a0f7a
YG
3289 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3290 pstats->mac_stx[1].s##_lo, \
3291 new->s); \
a2fbb9ea
ET
3292 } while (0)
3293
bb2a0f7a 3294#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3295 do { \
4781bfad
EG
3296 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3297 old_tclient->s = tclient->s; \
de832a55
EG
3298 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3299 } while (0)
3300
3301#define UPDATE_EXTEND_USTAT(s, t) \
3302 do { \
3303 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3304 old_uclient->s = uclient->s; \
3305 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3306 } while (0)
3307
3308#define UPDATE_EXTEND_XSTAT(s, t) \
3309 do { \
4781bfad
EG
3310 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3311 old_xclient->s = xclient->s; \
de832a55
EG
3312 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3313 } while (0)
3314
3315/* minuend -= subtrahend */
3316#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3317 do { \
3318 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3319 } while (0)
3320
3321/* minuend[hi:lo] -= subtrahend */
3322#define SUB_EXTEND_64(m_hi, m_lo, s) \
3323 do { \
3324 SUB_64(m_hi, 0, m_lo, s); \
3325 } while (0)
3326
3327#define SUB_EXTEND_USTAT(s, t) \
3328 do { \
3329 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3330 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3331 } while (0)
3332
3333/*
3334 * General service functions
3335 */
3336
3337static inline long bnx2x_hilo(u32 *hiref)
3338{
3339 u32 lo = *(hiref + 1);
3340#if (BITS_PER_LONG == 64)
3341 u32 hi = *hiref;
3342
3343 return HILO_U64(hi, lo);
3344#else
3345 return lo;
3346#endif
3347}
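
/*
 * Annotation: on 64-bit builds this folds the split counter back into a
 * single value, e.g. hi = 0x1, lo = 0x2 yields 0x0000000100000002; on
 * 32-bit builds only the low word is reported since a long cannot hold
 * the full 64-bit count.
 */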
3348
3349/*
3350 * Init service functions
3351 */
3352
bb2a0f7a
YG
3353static void bnx2x_storm_stats_post(struct bnx2x *bp)
3354{
3355 if (!bp->stats_pending) {
3356 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3357 int i, rc;
bb2a0f7a
YG
3358
3359 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3360 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3361 for_each_queue(bp, i)
3362 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3363
3364 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3365 ((u32 *)&ramrod_data)[1],
3366 ((u32 *)&ramrod_data)[0], 0);
3367 if (rc == 0) {
 3368			/* stats ramrod has its own slot on the spq */
3369 bp->spq_left++;
3370 bp->stats_pending = 1;
3371 }
3372 }
3373}
3374
bb2a0f7a
YG
3375static void bnx2x_hw_stats_post(struct bnx2x *bp)
3376{
3377 struct dmae_command *dmae = &bp->stats_dmae;
3378 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3379
3380 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3381 if (CHIP_REV_IS_SLOW(bp))
3382 return;
bb2a0f7a
YG
3383
3384 /* loader */
3385 if (bp->executer_idx) {
3386 int loader_idx = PMF_DMAE_C(bp);
3387
3388 memset(dmae, 0, sizeof(struct dmae_command));
3389
3390 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3391 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3392 DMAE_CMD_DST_RESET |
3393#ifdef __BIG_ENDIAN
3394 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3395#else
3396 DMAE_CMD_ENDIANITY_DW_SWAP |
3397#endif
3398 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3399 DMAE_CMD_PORT_0) |
3400 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3401 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3402 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3403 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3404 sizeof(struct dmae_command) *
3405 (loader_idx + 1)) >> 2;
3406 dmae->dst_addr_hi = 0;
3407 dmae->len = sizeof(struct dmae_command) >> 2;
3408 if (CHIP_IS_E1(bp))
3409 dmae->len--;
3410 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3411 dmae->comp_addr_hi = 0;
3412 dmae->comp_val = 1;
3413
3414 *stats_comp = 0;
3415 bnx2x_post_dmae(bp, dmae, loader_idx);
3416
3417 } else if (bp->func_stx) {
3418 *stats_comp = 0;
3419 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3420 }
3421}
3422
3423static int bnx2x_stats_comp(struct bnx2x *bp)
3424{
3425 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3426 int cnt = 10;
3427
3428 might_sleep();
3429 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3430 if (!cnt) {
3431 BNX2X_ERR("timeout waiting for stats finished\n");
3432 break;
3433 }
3434 cnt--;
12469401 3435 msleep(1);
bb2a0f7a
YG
3436 }
3437 return 1;
3438}
3439
3440/*
3441 * Statistics service functions
3442 */
3443
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
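
/*
 * Editorial note: the port statistics are fetched with two DMAE reads
 * because a single DMAE read transaction is capped at DMAE_LEN32_RD_MAX
 * 32-bit words.  The first command copies the maximum-sized chunk, the
 * second copies the remainder (hence the "* 4" byte offset into
 * port_stats) and carries the real completion, DMAE_COMP_VAL, back to the
 * stats_comp word polled by bnx2x_stats_comp().
 */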

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
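
/*
 * Editorial note: bnx2x_port_stats_init() only assembles the chain; it
 * does not run it.  Every command except the last completes to a GO
 * register (comp_val = 1) so the DMAE engine immediately starts the next
 * command in the chain; only the final NIG read completes to host memory
 * with DMAE_COMP_VAL.  The chain itself is posted later from
 * bnx2x_hw_stats_post(), typically once per statistics tick.
 */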

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}
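
/*
 * Editorial note (a sketch of the macro, not its definition): the BMAC
 * counters fetched by DMAE are free-running and narrower than 64 bits, so
 * UPDATE_STAT64() appears to extend them in software -- it takes the
 * wrap-aware difference between the fresh value and the previous snapshot
 * (the local "diff" hi/lo struct above exists for exactly this), refreshes
 * the snapshot and adds the difference into a 64-bit hi/lo accumulator in
 * mac_stx[1].  ADD_64(), used below, is the matching add on hi/lo pairs.
 */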

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
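
/*
 * Editorial note: the "are storm stats valid?" checks implement a simple
 * sequence-number handshake.  Each statistics ramrod carries
 * bp->stats_counter (incremented in bnx2x_storm_stats_post()); every storm
 * stamps the snapshot it writes back with the counter it served.  A
 * snapshot is consumed only if stamped_counter + 1 == stats_counter, i.e.
 * it answers the most recent query; otherwise the update is retried on the
 * next tick, and -1/-2/-4 identify which storm lagged behind.
 */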

static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_rx_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_rx_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct bnx2x_fastpath *fp0_rx = bp->fp;
		struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
			     fp0_rx->rx_comp_cons),
		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
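
/*
 * Editorial sketch of the state machine above -- bnx2x_stats_handle() is
 * the only entry point, e.g.:
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 *		DISABLED --> bnx2x_stats_start()  --> ENABLED
 *	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
 *		ENABLED  --> bnx2x_stats_update() --> ENABLED
 *	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 *		ENABLED  --> bnx2x_stats_stop()   --> DISABLED
 *
 * Note that a PMF event while DISABLED only refreshes the port stats base
 * (bnx2x_stats_pmf_update); periodic updates start only once the link
 * comes up.
 */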

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}

static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}
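
/*
 * Editorial note: at init time the PMF (port management function) writes
 * the current port/function statistics out to shared memory so the MCP
 * starts from a clean base, while a non-PMF function instead DMAE-reads
 * its base snapshot back from shared memory (func_stats_base), which
 * bnx2x_storm_stats_update() later uses as the starting point for the
 * accumulated function statistics.
 */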

static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
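
/*
 * Editorial note: a fastpath status block is split into a USTORM and a
 * CSTORM section; for each one the host DMA address is programmed into the
 * storm's fast memory and every HC index is initially marked disabled (the
 * 1 written by REG_WR16 above) until bnx2x_update_coalesce() re-enables
 * the indices it actually configures.  The final bnx2x_ack_sb() enables
 * the IGU interrupt line for this sb_id.
 */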

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/12) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/12) ? 0 : 1);
	}
}
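
/*
 * Editorial note: rx_ticks/tx_ticks are configured in microseconds while
 * the status-block timeout fields apparently count in 12 us units, hence
 * the division by 12.  A resulting timeout of 0 would be meaningless, so
 * in that case the matching HC_DISABLE flag is written as 1 and interrupt
 * coalescing is simply turned off for that status-block index.
 */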

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
4885
a2fbb9ea
ET
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_rx_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* Mark queue as Rx */
		fp->is_rx_queue = 1;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}
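/*
 * Layout note for bnx2x_init_rx_rings() above: each ring (BD, SGE, CQ) is a
 * chain of page-sized segments, and the tail of every page holds a "next
 * page" pointer instead of a real descriptor.  That is why the loops write
 * at RX_DESC_CNT * i - 2 (an eth_rx_bd is two 32-bit words, so the last two
 * slots form one 64-bit pointer) and at RCQ_DESC_CNT * i - 1 (one CQE-sized
 * slot is enough).  As a rough example, with a 4KB BCM_PAGE_SIZE and an
 * 8-byte eth_rx_bd that is 512 slots per page of which 510 carry buffers -
 * see the ring macros in bnx2x.h for the authoritative values.
 */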

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}

	/* clean tx statistics */
	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, tx_pkt) = 0;
}
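/*
 * The Tx descriptor rings are chained the same way as the Rx rings: the last
 * slot of each page is an eth_tx_next_bd pointing at the following page.
 * The tx_db doorbell data zeroed here is, presumably, what the transmit path
 * later fills with the new producer index before ringing the queue doorbell.
 */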

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
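/*
 * The slowpath (SP) queue is a single BCM_PAGE_SIZE ring of command BDs
 * shared by all ramrods.  spq_left throttles posting to at most
 * MAX_SPQ_PENDING outstanding commands, and the producer index is published
 * to XSTORM internal memory (as above) so the firmware can fetch new
 * commands.
 */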

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}
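/*
 * Context split used above: Rx clients fill the USTORM side of the context
 * (BD, SGE and CQ page bases, buffer sizes, TPA limits) while Tx clients
 * fill the CSTORM/XSTORM side (Tx BD page base, status block binding).  The
 * Tx loop indexes context[i - bp->num_rx_queues] because an Rx and a Tx
 * fastpath with the same cl_id share a single eth_context entry.
 */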

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_rx_queues));
}
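/*
 * The indirection table simply round-robins RSS hash results over the Rx
 * clients: entry i gets client (bp->fp->cl_id + i % num_rx_queues).  E.g.
 * with a leading cl_id of 0 and 4 Rx queues the TSTORM table reads
 * 0 1 2 3 0 1 2 3 ... across all TSTORM_INDIRECTION_TABLE_SIZE entries.
 */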

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
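/*
 * tstorm_eth_client_config is handed to the chip as two raw 32-bit words per
 * client through the TSTORM internal memory window; the misspelled
 * ..._STATSITICS_ENABLE flag above matches the firmware HSI header as-is.
 */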

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
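/*
 * Filter semantics above, per l2-client bit in "mask":
 *	NONE     - drop all unicast, multicast and broadcast;
 *	NORMAL   - ordinary CAM-based filtering plus accept-all broadcast;
 *	ALLMULTI - additionally accept-all multicast;
 *	PROMISC  - accept-all unicast/multicast/broadcast, and llh_mask also
 *		   lets management-bound unicast through to the host.
 */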

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_rx_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables =
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (bp->vn_weight_sum)
			bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		else
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");
	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode  minmax will be disabled\n");
	}

	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
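/*
 * Rate shaping / fairness note: until a link is established there is no real
 * line rate, so the min-max setup above is seeded with 10Gbps and is
 * presumably refreshed once the actual link speed is known.  Fairness is
 * only enabled when at least one VN has a nonzero MIN bandwidth
 * (vn_weight_sum != 0); otherwise only per-VN rate shaping stays active.
 */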

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
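/*
 * The missing breaks above are intentional: the MCP load codes form a
 * hierarchy, so a COMMON load also performs the PORT and FUNCTION init, and
 * a PORT load also performs the FUNCTION init.
 */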

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		/* Suitable Rx and Tx SBs are served by the same client */
		if (i >= bp->num_rx_queues)
			fp->cl_id -= bp->num_rx_queues;
		DP(NETIF_MSG_IFUP,
		   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
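/*
 * Ordering matters in bnx2x_nic_init(): every ring and context is set up and
 * flushed (mb()/mmiowb()) before bnx2x_int_enable(), since status block
 * updates may arrive as soon as interrupts are unmasked.  The final SPIO5
 * check catches a fan-failure attention that may have been latched while
 * interrupts were still disabled.
 */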

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " un-compression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
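/*
 * Header handling sketch for bnx2x_gunzip() above, per RFC 1952: a gzip
 * stream starts with the 10-byte fixed header 1f 8b 08 <flg> ..., and if the
 * FNAME flag (bit 3 of byte 3, 0x8) is set a NUL-terminated file name
 * follows it.  The while loop skips that name, then zlib_inflateInit2() is
 * called with -MAX_WBITS so zlib consumes the raw deflate stream without
 * expecting a zlib wrapper of its own.
 */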

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* Some of the internal memories are not directly readable from the
 * driver, so to test them we send debug packets.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
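/*
 * Summary of the self test above: part1 pushes one loopback packet with the
 * CFC search credits at 0 and checks that both NIG and PRS counted it
 * (returns -1/-2 on timeout); part2 pushes ten more, restores one CFC
 * credit, verifies the parser drains, and checks that the NIG EOP FIFO is
 * empty again (returns -3/-4), leaving BRB/PRS/NIG reinitialized on exit.
 */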

static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	u32 val;
	u8 port;
	u8 is_required = 0;

	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
#ifdef BCM_ISCSI
	/* Port0  1
	 * Port1  385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  2
	 * Port1  386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  3
	 * Port1  387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
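	/*
	 * A worked example of the jumbo-MTU branch above: low approximates
	 * (24*1024 + mtu*4)/256 BRB blocks.  For mtu = 9000 that is
	 * (24576 + 36000)/256 = 236.6, and the integer form gives
	 * 96 + 9000/64 + 1 = 237, i.e. the division by 64 plus the remainder
	 * test rounds the /256 result up.  high then adds 56 blocks (14KB).
	 */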
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* support pause requests from USDM, TSDM and BRB */
		REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* The physical address is shifted right 12 bits with a 1 = valid bit
 * added at the 53rd bit;
 * then, since this is a wide register(TM),
 * we split it into two 32-bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#define CNIC_ILT_LINES		0

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}
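/*
 * Worked example of the ONCHIP_ADDR split above: the DMA address is first
 * shifted right 12 bits (page granularity); the low 32 bits of that result
 * go into the first write and the remaining high bits, plus the valid bit
 * at position 20, into the second.  For addr = 0x123456789000:
 *	ONCHIP_ADDR1(addr) = 0x23456789
 *	ONCHIP_ADDR2(addr) = 0x00100001	(valid bit | address bits 44..63)
 */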

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_blocks[i], FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}
6640
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16 - 8) = bp->t2_mapping;
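
	/*
	 * T2 is now a physically linked list: the last 8 bytes of each
	 * 64-byte element hold the DMA address of the next element, and
	 * the final element wraps back to the base of the table so the
	 * searcher always follows a valid chain.
	 */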

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

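		/*
		 * Walk the packet-level producer/consumer indices rather
		 * than the BD ring itself: bnx2x_free_tx_pkt() unmaps and
		 * frees all BDs of one packet and returns the new bd_cons.
		 */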
		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}
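
	/*
	 * msix_table layout: entry 0 carries the slowpath (default status
	 * block) vector and entries 1..BNX2X_NUM_QUEUES(bp) carry one
	 * vector per fastpath queue - hence the "offset = 1" used
	 * wherever the table is indexed by queue number.
	 */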

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (i < bp->num_rx_queues)
			sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
		else
			sprintf(fp->name, "%s-tx-%d",
				bp->dev->name, i - bp->num_rx_queues);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
	       " ... fp[%d] %d\n",
	       bp->dev->name, bp->msix_table[0].vector,
	       0, bp->msix_table[offset].vector,
	       i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}

static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

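	/*
	 * An MSI vector is exclusive to this device, so IRQF_SHARED is
	 * only needed when we are falling back to legacy INTx, where the
	 * interrupt line may be shared with other devices.
	 */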
	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

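	/*
	 * bp->intr_sem is a disable count: it is raised wherever interrupt
	 * handling must be fenced off, and only the caller that brings it
	 * back to zero here may re-enable NAPI, HW interrupts and the Tx
	 * queues.
	 */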
	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies;	/* prevent tx timeout */
}

/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
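	/* Each CAM field carries one byte pair of the MAC address;
	 * swab16() arranges each pair in the order the CAM expects. */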
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
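
	/*
	 * In poll mode interrupts are not serviced, so the awaited state
	 * change can only happen if we drive the RX completion processing
	 * ourselves - hence the explicit bnx2x_rx_int() calls below.
	 */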
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
				    int *num_tx_queues_out)
{
	int _num_rx_queues = 0, _num_tx_queues = 0;

	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		_num_rx_queues = 1;
		_num_tx_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_rx_queues)
			_num_rx_queues = min_t(u32, num_rx_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			_num_rx_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));

		if (num_tx_queues)
			_num_tx_queues = min_t(u32, num_tx_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			_num_tx_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));

		/* There must not be more Tx queues than Rx queues */
		if (_num_tx_queues > _num_rx_queues) {
			BNX2X_ERR("number of tx queues (%d) > "
				  "number of rx queues (%d)"
				  " defaulting to %d\n",
				  _num_tx_queues, _num_rx_queues,
				  _num_rx_queues);
			_num_tx_queues = _num_rx_queues;
		}
		break;

	default:
		_num_rx_queues = 1;
		_num_tx_queues = 1;
		break;
	}

	*num_rx_queues_out = _num_rx_queues;
	*num_tx_queues_out = _num_tx_queues;
}

static int bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_rx_queues = 1;
		bp->num_tx_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;

	case INT_MODE_MSIX:
	default:
		/* Set interrupt mode according to bp->multi_mode value */
		bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
					&bp->num_tx_queues);

		DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X (rx %d tx %d), "
					  "set number of queues to 1\n",
					  bp->num_rx_queues,
					  bp->num_tx_queues);
			bp->num_rx_queues = 1;
			bp->num_tx_queues = 1;
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
	return rc;
}

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	rc = bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		/* Fall to INTx if failed to enable MSI-X due to lack of
		   memory (in bnx2x_set_int_mode()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
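		/*
		 * No MCP: do the load book-keeping ourselves.
		 * load_count[0] counts loaded functions on the whole chip,
		 * load_count[1 + port] counts them per port; whoever loads
		 * first at each level runs the COMMON/PORT init stage.
		 */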
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queue should be only reenabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;
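
	/* Snapshot the default-SB slowpath producer: the PORT_DEL
	 * completion will advance it, which is what the polling loop
	 * below waits for. */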

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

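	/*
	 * Remove our addresses from the chip: unicast entries go away via
	 * the SET_MAC ramrod with set == 0, and the multicast state (E1
	 * CAM entries / E1H MC hash) is invalidated explicitly below.
	 */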
	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
		  " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 8446 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 8447 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 8448 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8449
c18487ee 8450 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 8451 " advertising 0x%x\n",
c18487ee
YR
8452 bp->link_params.req_line_speed,
8453 bp->link_params.req_duplex,
34f80b04 8454 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
8455}
8456
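/*
 * Read the per-port hardware configuration (lane config, external PHY
 * type, speed capability mask, link config and the port MAC address)
 * from the shared memory region that the management firmware fills in
 * from NVRAM.
 */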
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;
	u32 ext_phy_type;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
		       " speed_cap_mask 0x%08x link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->link_params.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			(bp->link_params.ext_phy_config &
			 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
			PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

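/*
 * Gather chip- and function-level configuration: detect E1H
 * multi-function mode from the outer-VLAN (E1HOV) tag, read the
 * firmware sequence number and, in multi-function mode, the
 * per-function MAC address.
 */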
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERR("!!! No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!! VN %d in single function mode,"
					  " aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

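/*
 * One-time driver-state initialization at probe time: work items,
 * locks, module-parameter derived settings (multi queue, TPA/LRO,
 * MRRS), default ring sizes, coalescing values and the periodic timer.
 */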
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

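/* Report the current speed/duplex (or the requested values while the
 * link is down); in E1H multi-function mode the reported speed is
 * capped by the per-VN maximum bandwidth from the MF configuration.
 */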
static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

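/* Validate the requested autoneg/speed/duplex against the port
 * capabilities, store it in link_params, and restart the link if the
 * interface is running.
 */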
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}

#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	static u32 regdump_len;
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	if (regdump_len)
		return regdump_len;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}

static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32 bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

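/*
 * Both ports share a single NVRAM interface, so access is arbitrated
 * through the MCPR_NVM_SW_ARB register: set the request bit for this
 * port and poll until the arbiter grants it. The timeout is scaled up
 * by 100 on emulation/FPGA platforms, where everything runs slower.
 */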
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

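/*
 * Read one dword from NVRAM: latch the address, issue a DOIT command
 * and poll for the DONE bit. The result is converted to big-endian
 * since ethtool presents the EEPROM contents as a byte array.
 */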
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}

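/*
 * Read a dword-aligned buffer from NVRAM, flagging the first and last
 * dwords of the burst with the FIRST/LAST command bits.
 */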
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

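/*
 * Single-byte write, implemented as a read-modify-write of the
 * containing dword. BYTE_OFFSET converts the byte address into a bit
 * shift within that dword: e.g. offset 0x102 maps to aligned dword
 * 0x100 with shift 8 * (0x102 & 3) = 16, so only bits 23:16 change.
 */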
#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

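/*
 * Multi-dword write: the FIRST/LAST command bits must be raised not
 * only at the buffer boundaries but also around NVRAM page boundaries,
 * which is why the flags are recomputed from the offset on every
 * iteration of the loop below.
 */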
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

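/*
 * Besides plain NVRAM writes, this entry point multiplexes PHY
 * firmware upgrade commands encoded in eeprom->magic: one magic
 * prepares the PHY for the upgrade, one re-initializes the link
 * afterwards, and one marks the upgrade as completed (SFX7101 only).
 * Magics in the PHY range are honoured only for the PMF.
 */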
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED)) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
				(bp->link_params.ext_phy_config &
				 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
					PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

#define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALES_TOUT;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALES_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

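/*
 * Offline register test: each register in reg_tbl is written with 0
 * and then with 0xffffffff and must read back the written value under
 * its read/write mask; the original value is restored after each
 * check. offset1 is the per-port stride added to offset0 for port 1.
 */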
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify value is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

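/*
 * Build a single frame (DA = our own MAC, ascending payload bytes),
 * transmit it on the first Tx queue with the chip in PHY or MAC (BMAC)
 * loopback, then verify that exactly one packet arrives on the Rx
 * queue with the expected length and payload.
 */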
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

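/*
 * Each NVRAM section in nvram_tbl below ends with a CRC32. Running
 * CRC32 over a block that includes its own checksum yields a constant
 * residual (0xdebb20e3) when the block is intact, so every section is
 * validated by comparing against CRC32_RESIDUAL.
 */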
#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

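/* ethtool self-test: the offline tests (registers, memory, loopback)
 * require reloading the NIC in diagnostic mode and are skipped in E1H
 * multi-function mode; NVRAM, interrupt and link tests run either way.
 */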
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_rx_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	if (is_multi(bp)) {
		num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
		if (!IS_E1HMF_MODE_STAT(bp))
			num_stats += BNX2X_NUM_STATS;
	} else {
		if (IS_E1HMF_MODE_STAT(bp)) {
			num_stats = 0;
			for (i = 0; i < BNX2X_NUM_STATS; i++)
				if (IS_FUNC_STAT(i))
					num_stats++;
		} else
			num_stats = BNX2X_NUM_STATS;
	}

	return num_stats;
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_rx_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}

static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

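/* Note: the last entry of each RCQ page is a "next page" link rather than
 * a real completion, so when the FW consumer index from the status block
 * lands on such an entry it is bumped past it before the comparison.
 */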
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_rx_work(fp)) {
		work_done = bnx2x_rx_int(fp, budget);

		/* must not complete if we consumed full budget */
		if (work_done >= budget)
			goto poll_again;
	}

	/* bnx2x_has_rx_work() reads the status block, thus we need to
	 * ensure that status block indices have been actually read
	 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
	 * so that we won't write the "newer" value of the status block to IGU
	 * (if there was a DMA right after bnx2x_has_rx_work and
	 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
	 * may be postponed to right before bnx2x_ack_sb). In this case
	 * there will never be another interrupt until there is another update
	 * of the status block, while there is still unhandled work.
	 */
	rmb();

	if (!bnx2x_has_rx_work(fp)) {
#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

poll_again:
	return work_done;
}

/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}

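/* Adjust a partial checksum when the stack's checksum start does not match
 * the transport header: fold the bytes in between into (or out of) the sum,
 * then byte-swap the result into the form the parsing BD expects.
 */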
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}

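/* FW can fetch only a limited number of BDs (MAX_FETCH_BD) per packet, so
 * for an LSO frame every window of (MAX_FETCH_BD - 3) consecutive data BDs
 * must carry at least one full MSS; bnx2x_pkt_req_lin() below slides such
 * a window over the frags and requests skb_linearize() on any violation.
 * E.g. with a 10-BD window and an MSS of 1460, ten consecutive frags that
 * sum to fewer than 1460 bytes force linearization.
 */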
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* Check if the packet requires linearization (i.e. it is too fragmented).
   There is no need to check fragmentation if the page size > 8K (there
   will be no violation of the FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp, *fp_stat;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index + bp->num_rx_queues];
	fp_stat = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp_stat->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
	   "  gso type %x  xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation of FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (UNICAST_ADDRESS <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d  fix %d  csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		fp_stat->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp_stat->tx_pkt++;

	return NETDEV_TX_OK;
}

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

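/* Rx multicast filtering differs per chip: on E1 the (small) multicast
 * list is programmed into the MAC CAM via a SET_MAC ramrod, while on E1H
 * each multicast MAC is hashed (top byte of its crc32c) into a 256-bit
 * filter spread over the MC_HASH registers.
 */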
/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;
	u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
			     devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
			      devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

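/* PCI bring-up for a new device: enable it, map BAR0 (register space) and
 * the start of BAR2 (doorbells), pick a 64- or 32-bit DMA mask, and wire
 * up the net_device ops, ethtool ops and offload feature flags.
 */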
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}

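/* Validate the firmware image before using it: every section's
 * (offset, len) pair must lie inside the file, every init_ops offset must
 * point at a valid op, and the embedded version must match the driver.
 */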
static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u16 *ops_offsets;
	u32 offset, len, num_ops;
	int i;
	const struct firmware *firmware = bp->firmware;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
		       " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	u32 i, j, tmp;
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	u16 *target = (u16 *)_target;
	const __be16 *source = (const __be16 *)_source;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + \
		     be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)

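/* Request bnx2x-e1{,h}-<maj>.<min>.<rev>.<eng>.fw and unpack its big-endian
 * sections into host-order arrays (init_data, init_ops, init_ops_offsets)
 * plus pointers to the per-STORM microcode blobs.
 */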
11785static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11786{
11787 char fw_file_name[40] = {0};
11788 int rc, offset;
11789 struct bnx2x_fw_file_hdr *fw_hdr;
11790
11791 /* Create a FW file name */
11792 if (CHIP_IS_E1(bp))
11793 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11794 else
11795 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11796
11797 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11798 BCM_5710_FW_MAJOR_VERSION,
11799 BCM_5710_FW_MINOR_VERSION,
11800 BCM_5710_FW_REVISION_VERSION,
11801 BCM_5710_FW_ENGINEERING_VERSION);
11802
11803 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11804
11805 rc = request_firmware(&bp->firmware, fw_file_name, dev);
11806 if (rc) {
11807 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11808 goto request_firmware_exit;
11809 }
11810
11811 rc = bnx2x_check_firmware(bp);
11812 if (rc) {
11813 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11814 goto request_firmware_exit;
11815 }
11816
11817 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11818
11819 /* Initialize the pointers to the init arrays */
11820 /* Blob */
11821 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11822
11823 /* Opcodes */
11824 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11825
11826 /* Offsets */
11827 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11828
11829 /* STORMs firmware */
11830 bp->tsem_int_table_data = bp->firmware->data +
11831 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11832 bp->tsem_pram_data = bp->firmware->data +
11833 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11834 bp->usem_int_table_data = bp->firmware->data +
11835 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11836 bp->usem_pram_data = bp->firmware->data +
11837 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11838 bp->xsem_int_table_data = bp->firmware->data +
11839 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11840 bp->xsem_pram_data = bp->firmware->data +
11841 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11842 bp->csem_int_table_data = bp->firmware->data +
11843 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11844 bp->csem_pram_data = bp->firmware->data +
11845 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11846
11847 return 0;
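
/*
 * Error unwinding: each label frees only what was successfully
 * allocated before the failing step, in reverse order of allocation.
 */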
init_offsets_alloc_err:
        kfree(bp->init_ops);
init_ops_alloc_err:
        kfree(bp->init_data);
request_firmware_exit:
        release_firmware(bp->firmware);

        return rc;
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *ent)
{
        static int version_printed;
        struct net_device *dev = NULL;
        struct bnx2x *bp;
        int rc;

        if (version_printed++ == 0)
                printk(KERN_INFO "%s", version);

        /* dev zeroed in init_etherdev */
        dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
        if (!dev) {
                printk(KERN_ERR PFX "Cannot allocate net device\n");
                return -ENOMEM;
        }

        bp = netdev_priv(dev);
        bp->msglevel = debug;

        rc = bnx2x_init_dev(pdev, dev);
        if (rc < 0) {
                free_netdev(dev);
                return rc;
        }

        pci_set_drvdata(pdev, dev);

        rc = bnx2x_init_bp(bp);
        if (rc)
                goto init_one_exit;

        /* Set init arrays */
        rc = bnx2x_init_firmware(bp, &pdev->dev);
        if (rc) {
                printk(KERN_ERR PFX "Error loading firmware\n");
                goto init_one_exit;
        }

        rc = register_netdev(dev);
        if (rc) {
                dev_err(&pdev->dev, "Cannot register net device\n");
                goto init_one_exit;
        }

        printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
               " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
               (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
               bnx2x_get_pcie_width(bp),
               (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
               dev->base_addr, bp->pdev->irq);
        printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

        return 0;

init_one_exit:
        if (bp->regview)
                iounmap(bp->regview);

        if (bp->doorbells)
                iounmap(bp->doorbells);

        free_netdev(dev);

        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);

        return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;

        if (!dev) {
                printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
                return;
        }
        bp = netdev_priv(dev);

        unregister_netdev(dev);

        kfree(bp->init_ops_offsets);
        kfree(bp->init_ops);
        kfree(bp->init_data);
        release_firmware(bp->firmware);

        if (bp->regview)
                iounmap(bp->regview);

        if (bp->doorbells)
                iounmap(bp->doorbells);

        free_netdev(dev);

        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;

        if (!dev) {
                printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
                return -ENODEV;
        }
        bp = netdev_priv(dev);

        rtnl_lock();

        pci_save_state(pdev);

        if (!netif_running(dev)) {
                rtnl_unlock();
                return 0;
        }

        netif_device_detach(dev);

        bnx2x_nic_unload(bp, UNLOAD_CLOSE);

        bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

        rtnl_unlock();

        return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;
        int rc;

        if (!dev) {
                printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
                return -ENODEV;
        }
        bp = netdev_priv(dev);

        rtnl_lock();

        pci_restore_state(pdev);

        if (!netif_running(dev)) {
                rtnl_unlock();
                return 0;
        }

        bnx2x_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);

        rc = bnx2x_nic_load(bp, LOAD_OPEN);

        rtnl_unlock();

        return rc;
}
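
/*
 * Lightweight unload used on EEH errors: the device may no longer be
 * accessible, so only driver-side resources (IRQs, SKBs, SGEs, NAPI
 * contexts, memory) are released; there is no handshake with the chip.
 */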
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
        int i;

        bp->state = BNX2X_STATE_ERROR;

        bp->rx_mode = BNX2X_RX_MODE_NONE;

        bnx2x_netif_stop(bp, 0);

        del_timer_sync(&bp->timer);
        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        /* Release IRQs */
        bnx2x_free_irq(bp);

        if (CHIP_IS_E1(bp)) {
                struct mac_configuration_cmd *config =
                                        bnx2x_sp(bp, mcast_config);

                for (i = 0; i < config->hdr.length; i++)
                        CAM_INVALIDATE(config->config_table[i]);
        }

        /* Free SKBs, SGEs, TPA pool and driver internals */
        bnx2x_free_skbs(bp);
        for_each_rx_queue(bp, i)
                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
        for_each_rx_queue(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
        bnx2x_free_mem(bp);

        bp->state = BNX2X_STATE_CLOSED;

        netif_carrier_off(bp->dev);

        return 0;
}

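/*
 * Re-read the shared-memory base and MCP state after a slot reset;
 * values cached at probe time cannot be trusted once the device has
 * been reset.
 */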
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
        u32 val;

        mutex_init(&bp->port.phy_mutex);

        bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
        bp->link_params.shmem_base = bp->common.shmem_base;
        BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

        if (!bp->common.shmem_base ||
            (bp->common.shmem_base < 0xA0000) ||
            (bp->common.shmem_base >= 0xC0000)) {
                BNX2X_DEV_INFO("MCP not active\n");
                bp->flags |= NO_MCP_FLAG;
                return;
        }

        val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
        if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                BNX2X_ERR("BAD MCP validity signature\n");

        if (!BP_NOMCP(bp)) {
                bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
                              & DRV_MSG_SEQ_NUMBER_MASK);
                BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
        }
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        netif_device_detach(dev);

        if (state == pci_channel_io_perm_failure) {
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        if (netif_running(dev))
                bnx2x_eeh_nic_unload(bp);

        pci_disable_device(pdev);

        rtnl_unlock();

        /* Request a slot reset */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset\n");
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        pci_set_master(pdev);
        pci_restore_state(pdev);

        if (netif_running(dev))
                bnx2x_set_power_state(bp, PCI_D0);

        rtnl_unlock();

        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        bnx2x_eeh_recover(bp);

        if (netif_running(dev))
                bnx2x_nic_load(bp, LOAD_NORMAL);

        netif_device_attach(dev);

        rtnl_unlock();
}

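/*
 * PCI error recovery (EEH/AER) flow: error_detected() quiesces the
 * device and asks for a slot reset, slot_reset() re-enables it after
 * the bus reset, and resume() brings traffic back up.
 */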
static struct pci_error_handlers bnx2x_err_handler = {
        .error_detected = bnx2x_io_error_detected,
        .slot_reset     = bnx2x_io_slot_reset,
        .resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
        .name        = DRV_MODULE_NAME,
        .id_table    = bnx2x_pci_tbl,
        .probe       = bnx2x_init_one,
        .remove      = __devexit_p(bnx2x_remove_one),
        .suspend     = bnx2x_suspend,
        .resume      = bnx2x_resume,
        .err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
        int ret;

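        /*
         * Create the slow-path workqueue before registering the driver:
         * once pci_register_driver() returns, probed devices may start
         * queueing work on bnx2x_wq.
         */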
        bnx2x_wq = create_singlethread_workqueue("bnx2x");
        if (bnx2x_wq == NULL) {
                printk(KERN_ERR PFX "Cannot create workqueue\n");
                return -ENOMEM;
        }

        ret = pci_register_driver(&bnx2x_pci_driver);
        if (ret) {
                printk(KERN_ERR PFX "Cannot register driver\n");
                destroy_workqueue(bnx2x_wq);
        }
        return ret;
}

static void __exit bnx2x_cleanup(void)
{
        pci_unregister_driver(&bnx2x_pci_driver);

        destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);