be2net: disable RSS when number of RXQs is reduced to 1 via set-channels
drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

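	/* Make the just-written RX descriptors visible in memory before the
	 * doorbell write below tells the device how many were posted.
	 */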
	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
		DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
				pport_stats->rx_address_filtered +
				pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

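/* Fold a 16-bit HW counter into a 32-bit driver accumulator. Worked example
 * (values assumed): with *acc == 0x0003fff0 and val == 0x0005, val is below
 * the low half of *acc, so the HW counter must have wrapped; the new value
 * is 0x00030005 + 65536 = 0x00040005, i.e. the high half counts wraps.
 */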
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo,
			       u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

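	/* The fetch_begin/fetch_retry pairs below re-read a queue's counters
	 * if a concurrent writer bumped the stats seqcount mid-read, so the
	 * pkts/bytes snapshot stays consistent without taking a lock.
	 */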
	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

b236916a 645void be_link_status_update(struct be_adapter *adapter, u8 link_status)
6b7c5b94 646{
6b7c5b94
SP
647 struct net_device *netdev = adapter->netdev;
648
b236916a 649 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
ea172a01 650 netif_carrier_off(netdev);
b236916a 651 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
6b7c5b94 652 }
b236916a
AK
653
654 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
655 netif_carrier_on(netdev);
656 else
657 netif_carrier_off(netdev);
6b7c5b94
SP
658}
659
3c8def97 660static void be_tx_stats_update(struct be_tx_obj *txo,
91992e44 661 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
6b7c5b94 662{
3c8def97
SP
663 struct be_tx_stats *stats = tx_stats(txo);
664
ab1594e9 665 u64_stats_update_begin(&stats->sync);
ac124ff9
SP
666 stats->tx_reqs++;
667 stats->tx_wrbs += wrb_cnt;
668 stats->tx_bytes += copied;
669 stats->tx_pkts += (gso_segs ? gso_segs : 1);
6b7c5b94 670 if (stopped)
ac124ff9 671 stats->tx_stops++;
ab1594e9 672 u64_stats_update_end(&stats->sync);
6b7c5b94
SP
673}
674
675/* Determine number of WRB entries needed to xmit data in an skb */
fe6d2a38
SP
676static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
677 bool *dummy)
6b7c5b94 678{
ebc8d2ab
DM
679 int cnt = (skb->len > skb->data_len);
680
681 cnt += skb_shinfo(skb)->nr_frags;
682
6b7c5b94
SP
683 /* to account for hdr wrb */
684 cnt++;
fe6d2a38
SP
685 if (lancer_chip(adapter) || !(cnt & 1)) {
686 *dummy = false;
687 } else {
6b7c5b94
SP
688 /* add a dummy to make it an even num */
689 cnt++;
690 *dummy = true;
fe6d2a38 691 }
6b7c5b94
SP
692 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
693 return cnt;
694}
695
696static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
697{
698 wrb->frag_pa_hi = upper_32_bits(addr);
699 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
700 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
89b1f496 701 wrb->rsvd0 = 0;
6b7c5b94
SP
702}
703
1ded132d
AK
704static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
705 struct sk_buff *skb)
706{
707 u8 vlan_prio;
708 u16 vlan_tag;
709
710 vlan_tag = vlan_tx_tag_get(skb);
711 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
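	/* The PCP sits in bits 15:13 of the TCI: e.g. (assumed tag) 0xa005
	 * carries priority 5 and VID 5; only the priority bits may be
	 * rewritten below, the VID is preserved.
	 */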
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
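	/* Un-map every fragment mapped so far, walking forward from the
	 * saved head; only the linear part (if any) was mapped with
	 * dma_map_single(), the page frags with skb_frag_dma_map().
	 */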
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround: setting skip_hw_vlan = 1 informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *) (skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
				struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto tx_drop;
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
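		/* i.e. stop once fewer than BE_MAX_TX_FRAG_COUNT free entries
		 * remain, since a following skb may need up to that many WRBs.
		 */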
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
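	/* The upper bound leaves room for the Ethernet header and FCS: e.g.
	 * assuming BE_MAX_JUMBO_FRAME_SIZE is 9018, the largest accepted MTU
	 * works out to 9018 - (14 + 4) = 9000 bytes.
	 */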
	if (new_mtu < BE_MIN_MTU ||
	    new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
		       (ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			 "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU,
			 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
				dev_info(&adapter->pdev->dev,
					 "Re-Enabling HW VLAN filtering\n");
			}
		}
	}

	return status;

set_vlan_promisc:
	dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= be_max_vlans(adapter))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			vf_cfg->vlan_tag = vlan;
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		vf_cfg->vlan_tag = 0;
		vlan = vf_cfg->def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					       vf_cfg->if_handle, 0);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	if (lancer_chip(adapter))
		status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
	else
		status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
			"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}

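/* Adaptive interrupt coalescing: derive each EQ's interrupt delay from the
 * rx+tx packet rate observed since the last sample and, if it changed,
 * program the new value via be_cmd_modify_eqd().
 */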
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

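		/* Map the combined rx+tx rate to an EQ delay, e.g. (assumed
		 * rate) 150000 pps gives eqd = (150000 / 15000) << 2 = 40;
		 * small values round down to 0 and the result is clamped to
		 * this EQ's [min_eqd, max_eqd] range below.
		 */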
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
10ef9ab4
SP
1467static void be_rx_compl_discard(struct be_rx_obj *rxo,
1468 struct be_rx_compl_info *rxcp)
6b7c5b94 1469{
3abcdeda 1470 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1471 struct be_rx_page_info *page_info;
2e588f84 1472 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1473
e80d9da6 1474 for (i = 0; i < num_rcvd; i++) {
10ef9ab4 1475 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
e80d9da6
PR
1476 put_page(page_info->page);
1477 memset(page_info, 0, sizeof(*page_info));
2e588f84 1478 index_inc(&rxcp->rxq_idx, rxq->len);
6b7c5b94
SP
1479 }
1480}
1481
1482/*
1483 * skb_fill_rx_data forms a complete skb for an ether frame
1484 * indicated by rxcp.
1485 */
10ef9ab4
SP
1486static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1487 struct be_rx_compl_info *rxcp)
6b7c5b94 1488{
3abcdeda 1489 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1490 struct be_rx_page_info *page_info;
2e588f84
SP
1491 u16 i, j;
1492 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1493 u8 *start;
6b7c5b94 1494
10ef9ab4 1495 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
6b7c5b94
SP
1496 start = page_address(page_info->page) + page_info->page_offset;
1497 prefetch(start);
1498
1499 /* Copy data in the first descriptor of this completion */
2e588f84 1500 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 1501
6b7c5b94
SP
1502 skb->len = curr_frag_len;
1503 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 1504 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
1505 /* Complete packet has now been moved to data */
1506 put_page(page_info->page);
1507 skb->data_len = 0;
1508 skb->tail += curr_frag_len;
1509 } else {
ac1ae5f3
ED
1510 hdr_len = ETH_HLEN;
1511 memcpy(skb->data, start, hdr_len);
6b7c5b94 1512 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1513 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1514 skb_shinfo(skb)->frags[0].page_offset =
1515 page_info->page_offset + hdr_len;
9e903e08 1516 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
6b7c5b94 1517 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1518 skb->truesize += rx_frag_size;
6b7c5b94
SP
1519 skb->tail += hdr_len;
1520 }
205859a2 1521 page_info->page = NULL;
6b7c5b94 1522
2e588f84
SP
1523 if (rxcp->pkt_size <= rx_frag_size) {
1524 BUG_ON(rxcp->num_rcvd != 1);
1525 return;
6b7c5b94
SP
1526 }
1527
1528 /* More frags present for this completion */
2e588f84
SP
1529 index_inc(&rxcp->rxq_idx, rxq->len);
1530 remaining = rxcp->pkt_size - curr_frag_len;
1531 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
10ef9ab4 1532 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
2e588f84 1533 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1534
bd46cb6c
AK
1535 /* Coalesce all frags from the same physical page in one slot */
1536 if (page_info->page_offset == 0) {
1537 /* Fresh page */
1538 j++;
b061b39e 1539 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1540 skb_shinfo(skb)->frags[j].page_offset =
1541 page_info->page_offset;
9e903e08 1542 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1543 skb_shinfo(skb)->nr_frags++;
1544 } else {
1545 put_page(page_info->page);
1546 }
1547
9e903e08 1548 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1549 skb->len += curr_frag_len;
1550 skb->data_len += curr_frag_len;
bdb28a97 1551 skb->truesize += rx_frag_size;
2e588f84
SP
1552 remaining -= curr_frag_len;
1553 index_inc(&rxcp->rxq_idx, rxq->len);
205859a2 1554 page_info->page = NULL;
6b7c5b94 1555 }
bd46cb6c 1556 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1557}
1558
5be93b9a 1559/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 1560static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 1561 struct be_rx_compl_info *rxcp)
6b7c5b94 1562{
10ef9ab4 1563 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1564 struct net_device *netdev = adapter->netdev;
6b7c5b94 1565 struct sk_buff *skb;
89420424 1566
bb349bb4 1567 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1568 if (unlikely(!skb)) {
ac124ff9 1569 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1570 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1571 return;
1572 }
1573
10ef9ab4 1574 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1575
6332c8d3 1576 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1577 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1578 else
1579 skb_checksum_none_assert(skb);
6b7c5b94 1580
6332c8d3 1581 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1582 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1583 if (netdev->features & NETIF_F_RXHASH)
4b972914 1584 skb->rxhash = rxcp->rss_hash;
6384a4d0 1585 skb_mark_napi_id(skb, napi);
6b7c5b94 1586
343e43c0 1587 if (rxcp->vlanf)
86a9bad3 1588 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
1589
1590 netif_receive_skb(skb);
6b7c5b94
SP
1591}
1592
5be93b9a 1593/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1594static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1595 struct napi_struct *napi,
1596 struct be_rx_compl_info *rxcp)
6b7c5b94 1597{
10ef9ab4 1598 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1599 struct be_rx_page_info *page_info;
5be93b9a 1600 struct sk_buff *skb = NULL;
3abcdeda 1601 struct be_queue_info *rxq = &rxo->q;
2e588f84
SP
1602 u16 remaining, curr_frag_len;
1603 u16 i, j;
3968fa1e 1604
10ef9ab4 1605 skb = napi_get_frags(napi);
5be93b9a 1606 if (!skb) {
10ef9ab4 1607 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1608 return;
1609 }
1610
2e588f84
SP
1611 remaining = rxcp->pkt_size;
1612 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
10ef9ab4 1613 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
6b7c5b94
SP
1614
1615 curr_frag_len = min(remaining, rx_frag_size);
1616
bd46cb6c
AK
1617 /* Coalesce all frags from the same physical page in one slot */
1618 if (i == 0 || page_info->page_offset == 0) {
1619 /* First frag or Fresh page */
1620 j++;
b061b39e 1621 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1622 skb_shinfo(skb)->frags[j].page_offset =
1623 page_info->page_offset;
9e903e08 1624 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1625 } else {
1626 put_page(page_info->page);
1627 }
9e903e08 1628 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1629 skb->truesize += rx_frag_size;
bd46cb6c 1630 remaining -= curr_frag_len;
2e588f84 1631 index_inc(&rxcp->rxq_idx, rxq->len);
6b7c5b94
SP
1632 memset(page_info, 0, sizeof(*page_info));
1633 }
bd46cb6c 1634 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1635
5be93b9a 1636 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1637 skb->len = rxcp->pkt_size;
1638 skb->data_len = rxcp->pkt_size;
5be93b9a 1639 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1640 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914
AK
1641 if (adapter->netdev->features & NETIF_F_RXHASH)
1642 skb->rxhash = rxcp->rss_hash;
6384a4d0 1643 skb_mark_napi_id(skb, napi);
5be93b9a 1644
343e43c0 1645 if (rxcp->vlanf)
86a9bad3 1646 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1647
10ef9ab4 1648 napi_gro_frags(napi);
2e588f84
SP
1649}
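/* Note on the coalescing test above: buffers carved from one big page
 * are posted back-to-back, so a non-zero page_offset means this frag
 * sits on the same physical page as the previous one and is folded into
 * the current skb frag slot, while page_offset == 0 signals a fresh
 * page and hence a new slot. E.g. assuming 4K pages and the default
 * rx_frag_size of 2048, a 6K packet spans offsets A:0, A:2048, B:0 and
 * ends up in just two frag slots.
 */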
1650
10ef9ab4
SP
1651static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1652 struct be_rx_compl_info *rxcp)
2e588f84
SP
1653{
1654 rxcp->pkt_size =
1655 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1656 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1657 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1658 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
9ecb42fd 1659 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
2e588f84
SP
1660 rxcp->ip_csum =
1661 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1662 rxcp->l4_csum =
1663 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1664 rxcp->ipv6 =
1665 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1666 rxcp->rxq_idx =
1667 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1668 rxcp->num_rcvd =
1669 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1670 rxcp->pkt_type =
1671 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
4b972914 1672 rxcp->rss_hash =
c297977e 1673 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
15d72184
SP
1674 if (rxcp->vlanf) {
1675 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
3c709f8f
DM
1676 compl);
1677 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1678 compl);
15d72184 1679 }
12004ae9 1680 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
2e588f84
SP
1681}
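/* The parsers above lean on AMAP_GET_BITS() to pull bit-fields out of
 * the (already byte-swapped) completion descriptor. A rough sketch of
 * the underlying dword/shift/mask arithmetic, assuming a field narrower
 * than 32 bits that does not straddle a dword boundary (example_get_bits
 * is hypothetical; the real macro derives offset and width from the
 * amap struct layout):
 */
static inline u32 example_get_bits(const u32 *dw, int bit_off, int width)
{
	u32 val = dw[bit_off / 32] >> (bit_off % 32);

	return val & ((1U << width) - 1);	/* mask down to the field */
}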
1682
10ef9ab4
SP
1683static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1684 struct be_rx_compl_info *rxcp)
2e588f84
SP
1685{
1686 rxcp->pkt_size =
1687 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1688 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1689 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1690 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
9ecb42fd 1691 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
2e588f84
SP
1692 rxcp->ip_csum =
1693 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1694 rxcp->l4_csum =
1695 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1696 rxcp->ipv6 =
1697 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1698 rxcp->rxq_idx =
1699 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1700 rxcp->num_rcvd =
1701 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1702 rxcp->pkt_type =
1703 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
4b972914 1704 rxcp->rss_hash =
c297977e 1705 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
15d72184
SP
1706 if (rxcp->vlanf) {
1707 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
3c709f8f
DM
1708 compl);
1709 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1710 compl);
15d72184 1711 }
12004ae9 1712 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
e38b1706
SK
1713 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1714 ip_frag, compl);
2e588f84
SP
1715}
1716
1717static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1718{
1719 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1720 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1721 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1722
2e588f84
SP
1723 /* For checking the valid bit it is OK to use either definition as the
1724 * valid bit is at the same position in both v0 and v1 Rx compl */
1725 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1726 return NULL;
6b7c5b94 1727
2e588f84
SP
1728 rmb();
1729 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1730
2e588f84 1731 if (adapter->be3_native)
10ef9ab4 1732 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1733 else
10ef9ab4 1734 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1735
e38b1706
SK
1736 if (rxcp->ip_frag)
1737 rxcp->l4_csum = 0;
1738
15d72184
SP
1739 if (rxcp->vlanf) {
1740 /* vlanf could be wrongly set in some cards.
1741 * ignore if vtm is not set */
752961a1 1742 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
15d72184 1743 rxcp->vlanf = 0;
6b7c5b94 1744
15d72184 1745 if (!lancer_chip(adapter))
3c709f8f 1746 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1747
939cf306 1748 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
3c709f8f 1749 !adapter->vlan_tag[rxcp->vlan_tag])
15d72184
SP
1750 rxcp->vlanf = 0;
1751 }
2e588f84
SP
1752
1753 /* As the compl has been parsed, reset it; we won't touch it again */
1754 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1755
3abcdeda 1756 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1757 return rxcp;
1758}
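/* Ordering note for be_rx_compl_get(): the valid bit is tested first,
 * and rmb() then orders that read before the payload reads, so a
 * completion is never parsed from a descriptor the HW has not finished
 * writing. be_tx_compl_get() below uses the same pattern.
 */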
1759
1829b086 1760static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1761{
6b7c5b94 1762 u32 order = get_order(size);
1829b086 1763
6b7c5b94 1764 if (order > 0)
1829b086
ED
1765 gfp |= __GFP_COMP;
1766 return alloc_pages(gfp, order);
6b7c5b94
SP
1767}
1768
1769/*
1770 * Allocate a page, split it into fragments of size rx_frag_size and post as
1771 * receive buffers to BE
1772 */
1829b086 1773static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1774{
3abcdeda 1775 struct be_adapter *adapter = rxo->adapter;
26d92f92 1776 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1777 struct be_queue_info *rxq = &rxo->q;
6b7c5b94
SP
1778 struct page *pagep = NULL;
1779 struct be_eth_rx_d *rxd;
1780 u64 page_dmaaddr = 0, frag_dmaaddr;
1781 u32 posted, page_offset = 0;
1782
3abcdeda 1783 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1784 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1785 if (!pagep) {
1829b086 1786 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1787 if (unlikely(!pagep)) {
ac124ff9 1788 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1789 break;
1790 }
2b7bcebf
IV
1791 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1792 0, adapter->big_page_size,
1793 DMA_FROM_DEVICE);
6b7c5b94
SP
1794 page_info->page_offset = 0;
1795 } else {
1796 get_page(pagep);
1797 page_info->page_offset = page_offset + rx_frag_size;
1798 }
1799 page_offset = page_info->page_offset;
1800 page_info->page = pagep;
fac6da5b 1801 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
6b7c5b94
SP
1802 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1803
1804 rxd = queue_head_node(rxq);
1805 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1806 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1807
1808 /* Any space left in the current big page for another frag? */
1809 if ((page_offset + rx_frag_size + rx_frag_size) >
1810 adapter->big_page_size) {
1811 pagep = NULL;
1812 page_info->last_page_user = true;
1813 }
26d92f92
SP
1814
1815 prev_page_info = page_info;
1816 queue_head_inc(rxq);
10ef9ab4 1817 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1818 }
1819 if (pagep)
26d92f92 1820 prev_page_info->last_page_user = true;
6b7c5b94
SP
1821
1822 if (posted) {
6b7c5b94 1823 atomic_add(posted, &rxq->used);
6384a4d0
SP
1824 if (rxo->rx_post_starved)
1825 rxo->rx_post_starved = false;
8788fdc2 1826 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1827 } else if (atomic_read(&rxq->used) == 0) {
1828 /* Let be_worker replenish when memory is available */
3abcdeda 1829 rxo->rx_post_starved = true;
6b7c5b94 1830 }
6b7c5b94
SP
1831}
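/* Sizing note for be_post_rx_frags(): one "big page" of big_page_size
 * bytes is shared (via get_page()) by big_page_size / rx_frag_size
 * receive buffers. A sketch of that arithmetic (hypothetical helper,
 * shown only to make the split explicit):
 */
static inline u32 example_frags_per_page(u32 big_page_size, u32 frag_size)
{
	return big_page_size / frag_size;	/* e.g. 4096 / 2048 = 2 */
}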
1832
5fb379ee 1833static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1834{
6b7c5b94
SP
1835 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1836
1837 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1838 return NULL;
1839
f3eb62d2 1840 rmb();
6b7c5b94
SP
1841 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1842
1843 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1844
1845 queue_tail_inc(tx_cq);
1846 return txcp;
1847}
1848
3c8def97
SP
1849static u16 be_tx_compl_process(struct be_adapter *adapter,
1850 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1851{
3c8def97 1852 struct be_queue_info *txq = &txo->q;
a73b796e 1853 struct be_eth_wrb *wrb;
3c8def97 1854 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1855 struct sk_buff *sent_skb;
ec43b1a6
SP
1856 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1857 bool unmap_skb_hdr = true;
6b7c5b94 1858
ec43b1a6 1859 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1860 BUG_ON(!sent_skb);
ec43b1a6
SP
1861 sent_skbs[txq->tail] = NULL;
1862
1863 /* skip header wrb */
a73b796e 1864 queue_tail_inc(txq);
6b7c5b94 1865
ec43b1a6 1866 do {
6b7c5b94 1867 cur_index = txq->tail;
a73b796e 1868 wrb = queue_tail_node(txq);
2b7bcebf
IV
1869 unmap_tx_frag(&adapter->pdev->dev, wrb,
1870 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1871 unmap_skb_hdr = false;
1872
6b7c5b94
SP
1873 num_wrbs++;
1874 queue_tail_inc(txq);
ec43b1a6 1875 } while (cur_index != last_index);
6b7c5b94 1876
6b7c5b94 1877 kfree_skb(sent_skb);
4d586b82 1878 return num_wrbs;
6b7c5b94
SP
1879}
1880
10ef9ab4
SP
1881/* Return the number of events in the event queue */
1882static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1883{
10ef9ab4
SP
1884 struct be_eq_entry *eqe;
1885 int num = 0;
859b1e4e 1886
10ef9ab4
SP
1887 do {
1888 eqe = queue_tail_node(&eqo->q);
1889 if (eqe->evt == 0)
1890 break;
859b1e4e 1891
10ef9ab4
SP
1892 rmb();
1893 eqe->evt = 0;
1894 num++;
1895 queue_tail_inc(&eqo->q);
1896 } while (true);
1897
1898 return num;
859b1e4e
SP
1899}
1900
10ef9ab4
SP
1901 /* Leaves the EQ in disarmed state */
1902static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1903{
10ef9ab4 1904 int num = events_get(eqo);
859b1e4e 1905
10ef9ab4 1906 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1907}
1908
10ef9ab4 1909static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
1910{
1911 struct be_rx_page_info *page_info;
3abcdeda
SP
1912 struct be_queue_info *rxq = &rxo->q;
1913 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1914 struct be_rx_compl_info *rxcp;
d23e946c
SP
1915 struct be_adapter *adapter = rxo->adapter;
1916 int flush_wait = 0;
6b7c5b94
SP
1917 u16 tail;
1918
d23e946c
SP
1919 /* Consume pending rx completions.
1920 * Wait for the flush completion (identified by zero num_rcvd)
1921 * to arrive. Notify CQ even when there are no more CQ entries
1922 * for HW to flush partially coalesced CQ entries.
1923 * In Lancer, there is no need to wait for flush compl.
1924 */
1925 for (;;) {
1926 rxcp = be_rx_compl_get(rxo);
1927 if (rxcp == NULL) {
1928 if (lancer_chip(adapter))
1929 break;
1930
1931 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1932 dev_warn(&adapter->pdev->dev,
1933 "did not receive flush compl\n");
1934 break;
1935 }
1936 be_cq_notify(adapter, rx_cq->id, true, 0);
1937 mdelay(1);
1938 } else {
1939 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 1940 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
1941 if (rxcp->num_rcvd == 0)
1942 break;
1943 }
6b7c5b94
SP
1944 }
1945
d23e946c
SP
1946 /* After cleanup, leave the CQ in unarmed state */
1947 be_cq_notify(adapter, rx_cq->id, false, 0);
1948
1949 /* Then free posted rx buffers that were not used */
6b7c5b94 1950 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
cdab23b7 1951 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
10ef9ab4 1952 page_info = get_rx_page_info(rxo, tail);
6b7c5b94
SP
1953 put_page(page_info->page);
1954 memset(page_info, 0, sizeof(*page_info));
1955 }
1956 BUG_ON(atomic_read(&rxq->used));
482c9e79 1957 rxq->tail = rxq->head = 0;
6b7c5b94
SP
1958}
1959
0ae57bb3 1960static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 1961{
0ae57bb3
SP
1962 struct be_tx_obj *txo;
1963 struct be_queue_info *txq;
a8e9179a 1964 struct be_eth_tx_compl *txcp;
4d586b82 1965 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
1966 struct sk_buff *sent_skb;
1967 bool dummy_wrb;
0ae57bb3 1968 int i, pending_txqs;
a8e9179a
SP
1969
1970 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1971 do {
0ae57bb3
SP
1972 pending_txqs = adapter->num_tx_qs;
1973
1974 for_all_tx_queues(adapter, txo, i) {
1975 txq = &txo->q;
1976 while ((txcp = be_tx_compl_get(&txo->cq))) {
1977 end_idx =
1978 AMAP_GET_BITS(struct amap_eth_tx_compl,
1979 wrb_index, txcp);
1980 num_wrbs += be_tx_compl_process(adapter, txo,
1981 end_idx);
1982 cmpl++;
1983 }
1984 if (cmpl) {
1985 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1986 atomic_sub(num_wrbs, &txq->used);
1987 cmpl = 0;
1988 num_wrbs = 0;
1989 }
1990 if (atomic_read(&txq->used) == 0)
1991 pending_txqs--;
a8e9179a
SP
1992 }
1993
0ae57bb3 1994 if (pending_txqs == 0 || ++timeo > 200)
a8e9179a
SP
1995 break;
1996
1997 mdelay(1);
1998 } while (true);
1999
0ae57bb3
SP
2000 for_all_tx_queues(adapter, txo, i) {
2001 txq = &txo->q;
2002 if (atomic_read(&txq->used))
2003 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2004 atomic_read(&txq->used));
2005
2006 /* free posted tx for which compls will never arrive */
2007 while (atomic_read(&txq->used)) {
2008 sent_skb = txo->sent_skb_list[txq->tail];
2009 end_idx = txq->tail;
2010 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2011 &dummy_wrb);
2012 index_adv(&end_idx, num_wrbs - 1, txq->len);
2013 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2014 atomic_sub(num_wrbs, &txq->used);
2015 }
b03388d6 2016 }
6b7c5b94
SP
2017}
2018
10ef9ab4
SP
2019static void be_evt_queues_destroy(struct be_adapter *adapter)
2020{
2021 struct be_eq_obj *eqo;
2022 int i;
2023
2024 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2025 if (eqo->q.created) {
2026 be_eq_clean(eqo);
10ef9ab4 2027 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2028 napi_hash_del(&eqo->napi);
68d7bdcb 2029 netif_napi_del(&eqo->napi);
19d59aa7 2030 }
10ef9ab4
SP
2031 be_queue_free(adapter, &eqo->q);
2032 }
2033}
2034
2035static int be_evt_queues_create(struct be_adapter *adapter)
2036{
2037 struct be_queue_info *eq;
2038 struct be_eq_obj *eqo;
2632bafd 2039 struct be_aic_obj *aic;
10ef9ab4
SP
2040 int i, rc;
2041
92bf14ab
SP
2042 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2043 adapter->cfg_num_qs);
10ef9ab4
SP
2044
2045 for_all_evt_queues(adapter, eqo, i) {
68d7bdcb
SP
2046 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2047 BE_NAPI_WEIGHT);
6384a4d0 2048 napi_hash_add(&eqo->napi);
2632bafd 2049 aic = &adapter->aic_obj[i];
10ef9ab4
SP
2050 eqo->adapter = adapter;
2051 eqo->tx_budget = BE_TX_BUDGET;
2052 eqo->idx = i;
2632bafd
SP
2053 aic->max_eqd = BE_MAX_EQD;
2054 aic->enable = true;
10ef9ab4
SP
2055
2056 eq = &eqo->q;
2057 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2058 sizeof(struct be_eq_entry));
2059 if (rc)
2060 return rc;
2061
f2f781a7 2062 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2063 if (rc)
2064 return rc;
2065 }
1cfafab9 2066 return 0;
10ef9ab4
SP
2067}
2068
5fb379ee
SP
2069static void be_mcc_queues_destroy(struct be_adapter *adapter)
2070{
2071 struct be_queue_info *q;
5fb379ee 2072
8788fdc2 2073 q = &adapter->mcc_obj.q;
5fb379ee 2074 if (q->created)
8788fdc2 2075 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2076 be_queue_free(adapter, q);
2077
8788fdc2 2078 q = &adapter->mcc_obj.cq;
5fb379ee 2079 if (q->created)
8788fdc2 2080 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2081 be_queue_free(adapter, q);
2082}
2083
2084/* Must be called only after TX qs are created as MCC shares TX EQ */
2085static int be_mcc_queues_create(struct be_adapter *adapter)
2086{
2087 struct be_queue_info *q, *cq;
5fb379ee 2088
8788fdc2 2089 cq = &adapter->mcc_obj.cq;
5fb379ee 2090 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 2091 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2092 goto err;
2093
10ef9ab4
SP
2094 /* Use the default EQ for MCC completions */
2095 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2096 goto mcc_cq_free;
2097
8788fdc2 2098 q = &adapter->mcc_obj.q;
5fb379ee
SP
2099 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2100 goto mcc_cq_destroy;
2101
8788fdc2 2102 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2103 goto mcc_q_free;
2104
2105 return 0;
2106
2107mcc_q_free:
2108 be_queue_free(adapter, q);
2109mcc_cq_destroy:
8788fdc2 2110 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2111mcc_cq_free:
2112 be_queue_free(adapter, cq);
2113err:
2114 return -1;
2115}
2116
6b7c5b94
SP
2117static void be_tx_queues_destroy(struct be_adapter *adapter)
2118{
2119 struct be_queue_info *q;
3c8def97
SP
2120 struct be_tx_obj *txo;
2121 u8 i;
6b7c5b94 2122
3c8def97
SP
2123 for_all_tx_queues(adapter, txo, i) {
2124 q = &txo->q;
2125 if (q->created)
2126 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2127 be_queue_free(adapter, q);
6b7c5b94 2128
3c8def97
SP
2129 q = &txo->cq;
2130 if (q->created)
2131 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2132 be_queue_free(adapter, q);
2133 }
6b7c5b94
SP
2134}
2135
7707133c 2136static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2137{
10ef9ab4 2138 struct be_queue_info *cq, *eq;
3c8def97 2139 struct be_tx_obj *txo;
92bf14ab 2140 int status, i;
6b7c5b94 2141
92bf14ab 2142 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2143
10ef9ab4
SP
2144 for_all_tx_queues(adapter, txo, i) {
2145 cq = &txo->cq;
2146 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2147 sizeof(struct be_eth_tx_compl));
2148 if (status)
2149 return status;
3c8def97 2150
827da44c
JS
2151 u64_stats_init(&txo->stats.sync);
2152 u64_stats_init(&txo->stats.sync_compl);
2153
10ef9ab4
SP
2154 /* If num_evt_qs is less than num_tx_qs, then more than
2155 * one txq share an eq
2156 */
2157 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2158 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2159 if (status)
2160 return status;
6b7c5b94 2161
10ef9ab4
SP
2162 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2163 sizeof(struct be_eth_wrb));
2164 if (status)
2165 return status;
6b7c5b94 2166
94d73aaa 2167 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2168 if (status)
2169 return status;
3c8def97 2170 }
6b7c5b94 2171
d379142b
SP
2172 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2173 adapter->num_tx_qs);
10ef9ab4 2174 return 0;
6b7c5b94
SP
2175}
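/* Queue-to-EQ mapping used above: TXQ i is attached to EQ (i %
 * num_evt_qs), so with more TXQs than EQs several TXQs share one event
 * queue. be_poll() later services the same mapping by walking the TXQs
 * from eqo->idx with a stride of num_evt_qs.
 */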
2176
10ef9ab4 2177static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2178{
2179 struct be_queue_info *q;
3abcdeda
SP
2180 struct be_rx_obj *rxo;
2181 int i;
2182
2183 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2184 q = &rxo->cq;
2185 if (q->created)
2186 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2187 be_queue_free(adapter, q);
ac6a0c4a
SP
2188 }
2189}
2190
10ef9ab4 2191static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2192{
10ef9ab4 2193 struct be_queue_info *eq, *cq;
3abcdeda
SP
2194 struct be_rx_obj *rxo;
2195 int rc, i;
6b7c5b94 2196
92bf14ab
SP
2197 /* We can create as many RSS rings as there are EQs. */
2198 adapter->num_rx_qs = adapter->num_evt_qs;
2199
2200 /* We'll use RSS only if at least 2 RSS rings are supported.
2201 * When RSS is used, we'll need a default RXQ for non-IP traffic.
10ef9ab4 2202 */
92bf14ab
SP
2203 if (adapter->num_rx_qs > 1)
2204 adapter->num_rx_qs++;
2205
6b7c5b94 2206 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2207 for_all_rx_queues(adapter, rxo, i) {
2208 rxo->adapter = adapter;
3abcdeda
SP
2209 cq = &rxo->cq;
2210 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2211 sizeof(struct be_eth_rx_compl));
2212 if (rc)
10ef9ab4 2213 return rc;
3abcdeda 2214
827da44c 2215 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2216 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2217 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2218 if (rc)
10ef9ab4 2219 return rc;
3abcdeda 2220 }
6b7c5b94 2221
d379142b
SP
2222 dev_info(&adapter->pdev->dev,
2223 "created %d RSS queue(s) and 1 default RX queue\n",
2224 adapter->num_rx_qs - 1);
10ef9ab4 2225 return 0;
b628bde2
SP
2226}
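/* RXQ count rule used above: with N event queues the driver creates N
 * RSS rings plus one default RXQ (num_rx_qs = N + 1) whenever N > 1;
 * with a single EQ only the default RXQ exists and RSS stays disabled
 * (see be_rx_qs_create()). The rule as a hypothetical helper:
 */
static inline u16 example_num_rx_qs(u16 num_evt_qs)
{
	return num_evt_qs > 1 ? num_evt_qs + 1 : 1;
}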
2227
6b7c5b94
SP
2228static irqreturn_t be_intx(int irq, void *dev)
2229{
e49cc34f
SP
2230 struct be_eq_obj *eqo = dev;
2231 struct be_adapter *adapter = eqo->adapter;
2232 int num_evts = 0;
6b7c5b94 2233
d0b9cec3
SP
2234 /* IRQ is not expected when NAPI is scheduled as the EQ
2235 * will not be armed.
2236 * But, this can happen on Lancer INTx where it takes
2237 * a while to de-assert INTx or in BE2 where occasionally
2238 * an interrupt may be raised even when EQ is unarmed.
2239 * If NAPI is already scheduled, then counting & notifying
2240 * events will orphan them.
e49cc34f 2241 */
d0b9cec3 2242 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2243 num_evts = events_get(eqo);
d0b9cec3
SP
2244 __napi_schedule(&eqo->napi);
2245 if (num_evts)
2246 eqo->spurious_intr = 0;
2247 }
2248 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2249
d0b9cec3
SP
2250 /* Return IRQ_HANDLED only for the first spurious intr
2251 * after a valid intr to stop the kernel from branding
2252 * this irq as a bad one!
e49cc34f 2253 */
d0b9cec3
SP
2254 if (num_evts || eqo->spurious_intr++ == 0)
2255 return IRQ_HANDLED;
2256 else
2257 return IRQ_NONE;
6b7c5b94
SP
2258}
2259
10ef9ab4 2260static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2261{
10ef9ab4 2262 struct be_eq_obj *eqo = dev;
6b7c5b94 2263
0b545a62
SP
2264 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2265 napi_schedule(&eqo->napi);
6b7c5b94
SP
2266 return IRQ_HANDLED;
2267}
2268
2e588f84 2269static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2270{
e38b1706 2271 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2272}
2273
10ef9ab4 2274static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
6384a4d0 2275 int budget, int polling)
6b7c5b94 2276{
3abcdeda
SP
2277 struct be_adapter *adapter = rxo->adapter;
2278 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2279 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
2280 u32 work_done;
2281
2282 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2283 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2284 if (!rxcp)
2285 break;
2286
12004ae9
SP
2287 /* Is it a flush compl that has no data */
2288 if (unlikely(rxcp->num_rcvd == 0))
2289 goto loop_continue;
2290
2291 /* Discard compl with partial DMA (Lancer B0) */
2292 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2293 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2294 goto loop_continue;
2295 }
2296
2297 /* On BE drop pkts that arrive due to imperfect filtering in
2298 * promiscuous mode on some SKUs
2299 */
2300 if (unlikely(rxcp->port != adapter->port_num &&
2301 !lancer_chip(adapter))) {
10ef9ab4 2302 be_rx_compl_discard(rxo, rxcp);
12004ae9 2303 goto loop_continue;
64642811 2304 }
009dd872 2305
6384a4d0
SP
2306 /* Don't do gro when we're busy_polling */
2307 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2308 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2309 else
6384a4d0
SP
2310 be_rx_compl_process(rxo, napi, rxcp);
2311
12004ae9 2312loop_continue:
2e588f84 2313 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2314 }
2315
10ef9ab4
SP
2316 if (work_done) {
2317 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2318
6384a4d0
SP
2319 /* When an rx-obj gets into post_starved state, just
2320 * let be_worker do the posting.
2321 */
2322 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2323 !rxo->rx_post_starved)
10ef9ab4 2324 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 2325 }
10ef9ab4 2326
6b7c5b94
SP
2327 return work_done;
2328}
2329
10ef9ab4
SP
2330static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2331 int budget, int idx)
6b7c5b94 2332{
6b7c5b94 2333 struct be_eth_tx_compl *txcp;
10ef9ab4 2334 int num_wrbs = 0, work_done;
3c8def97 2335
10ef9ab4
SP
2336 for (work_done = 0; work_done < budget; work_done++) {
2337 txcp = be_tx_compl_get(&txo->cq);
2338 if (!txcp)
2339 break;
2340 num_wrbs += be_tx_compl_process(adapter, txo,
3c8def97
SP
2341 AMAP_GET_BITS(struct amap_eth_tx_compl,
2342 wrb_index, txcp));
10ef9ab4 2343 }
6b7c5b94 2344
10ef9ab4
SP
2345 if (work_done) {
2346 be_cq_notify(adapter, txo->cq.id, true, work_done);
2347 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2348
10ef9ab4
SP
2349 /* As Tx wrbs have been freed up, wake up netdev queue
2350 * if it was stopped due to lack of tx wrbs. */
2351 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2352 atomic_read(&txo->q.used) < txo->q.len / 2) {
2353 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2354 }
10ef9ab4
SP
2355
2356 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2357 tx_stats(txo)->tx_compl += work_done;
2358 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2359 }
10ef9ab4
SP
2360 return (work_done < budget); /* Done */
2361}
6b7c5b94 2362
68d7bdcb 2363int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2364{
2365 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2366 struct be_adapter *adapter = eqo->adapter;
0b545a62 2367 int max_work = 0, work, i, num_evts;
6384a4d0 2368 struct be_rx_obj *rxo;
10ef9ab4 2369 bool tx_done;
f31e50a8 2370
0b545a62
SP
2371 num_evts = events_get(eqo);
2372
10ef9ab4
SP
2373 /* Process all TXQs serviced by this EQ */
2374 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2375 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2376 eqo->tx_budget, i);
2377 if (!tx_done)
2378 max_work = budget;
f31e50a8
SP
2379 }
2380
6384a4d0
SP
2381 if (be_lock_napi(eqo)) {
2382 /* This loop will iterate twice for EQ0 in which
2383 * completions of the last RXQ (default one) are also processed.
2384 * For other EQs the loop iterates only once
2385 */
2386 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2387 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2388 max_work = max(work, max_work);
2389 }
2390 be_unlock_napi(eqo);
2391 } else {
2392 max_work = budget;
10ef9ab4 2393 }
6b7c5b94 2394
10ef9ab4
SP
2395 if (is_mcc_eqo(eqo))
2396 be_process_mcc(adapter);
93c86700 2397
10ef9ab4
SP
2398 if (max_work < budget) {
2399 napi_complete(napi);
0b545a62 2400 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2401 } else {
2402 /* As we'll continue in polling mode, count and clear events */
0b545a62 2403 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2404 }
10ef9ab4 2405 return max_work;
6b7c5b94
SP
2406}
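/* Arm/rearm note for be_poll(): events are counted up front and only
 * acknowledged at the end. If the budget was not exhausted the EQ is
 * re-armed together with the ack (third argument true); otherwise it is
 * left unarmed so the next be_poll() round continues in pure polling
 * mode without another interrupt.
 */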
2407
6384a4d0
SP
2408#ifdef CONFIG_NET_RX_BUSY_POLL
2409static int be_busy_poll(struct napi_struct *napi)
2410{
2411 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2412 struct be_adapter *adapter = eqo->adapter;
2413 struct be_rx_obj *rxo;
2414 int i, work = 0;
2415
2416 if (!be_lock_busy_poll(eqo))
2417 return LL_FLUSH_BUSY;
2418
2419 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2420 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2421 if (work)
2422 break;
2423 }
2424
2425 be_unlock_busy_poll(eqo);
2426 return work;
2427}
2428#endif
2429
f67ef7ba 2430void be_detect_error(struct be_adapter *adapter)
7c185276 2431{
e1cfb67a
PR
2432 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2433 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276
AK
2434 u32 i;
2435
d23e946c 2436 if (be_hw_error(adapter))
72f02485
SP
2437 return;
2438
e1cfb67a
PR
2439 if (lancer_chip(adapter)) {
2440 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2441 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2442 sliport_err1 = ioread32(adapter->db +
2443 SLIPORT_ERROR1_OFFSET);
2444 sliport_err2 = ioread32(adapter->db +
2445 SLIPORT_ERROR2_OFFSET);
2446 }
2447 } else {
2448 pci_read_config_dword(adapter->pdev,
2449 PCICFG_UE_STATUS_LOW, &ue_lo);
2450 pci_read_config_dword(adapter->pdev,
2451 PCICFG_UE_STATUS_HIGH, &ue_hi);
2452 pci_read_config_dword(adapter->pdev,
2453 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2454 pci_read_config_dword(adapter->pdev,
2455 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2456
f67ef7ba
PR
2457 ue_lo = (ue_lo & ~ue_lo_mask);
2458 ue_hi = (ue_hi & ~ue_hi_mask);
e1cfb67a 2459 }
7c185276 2460
1451ae6e
AK
2461 /* On certain platforms BE hardware can indicate spurious UEs.
2462 * Allow the h/w to stop working completely in case of a real UE.
2463 * Hence not setting the hw_error for UE detection.
2464 */
2465 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
f67ef7ba 2466 adapter->hw_error = true;
4bebb56a
SK
2467 /* Do not log error messages if its a FW reset */
2468 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2469 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2470 dev_info(&adapter->pdev->dev,
2471 "Firmware update in progress\n");
2472 return;
2473 } else {
2474 dev_err(&adapter->pdev->dev,
2475 "Error detected in the card\n");
2476 }
f67ef7ba
PR
2477 }
2478
2479 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2480 dev_err(&adapter->pdev->dev,
2481 "ERR: sliport status 0x%x\n", sliport_status);
2482 dev_err(&adapter->pdev->dev,
2483 "ERR: sliport error1 0x%x\n", sliport_err1);
2484 dev_err(&adapter->pdev->dev,
2485 "ERR: sliport error2 0x%x\n", sliport_err2);
d053de91
AK
2486 }
2487
e1cfb67a
PR
2488 if (ue_lo) {
2489 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2490 if (ue_lo & 1)
7c185276
AK
2491 dev_err(&adapter->pdev->dev,
2492 "UE: %s bit set\n", ue_status_low_desc[i]);
2493 }
2494 }
f67ef7ba 2495
e1cfb67a
PR
2496 if (ue_hi) {
2497 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2498 if (ue_hi & 1)
7c185276
AK
2499 dev_err(&adapter->pdev->dev,
2500 "UE: %s bit set\n", ue_status_hi_desc[i]);
2501 }
2502 }
2503
2504}
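/* Decode note for be_detect_error(): each set bit in the unmasked UE
 * words maps positionally to a name in ue_status_low_desc[] /
 * ue_status_hi_desc[]. The loops above use the usual walk-and-shift
 * idiom; condensed into a hypothetical helper:
 */
static inline void example_log_ue_bits(u32 ue, const char * const *desc)
{
	int i;

	for (i = 0; ue; ue >>= 1, i++)
		if (ue & 1)
			pr_err("UE: %s bit set\n", desc[i]);
}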
2505
8d56ff11
SP
2506static void be_msix_disable(struct be_adapter *adapter)
2507{
ac6a0c4a 2508 if (msix_enabled(adapter)) {
8d56ff11 2509 pci_disable_msix(adapter->pdev);
ac6a0c4a 2510 adapter->num_msix_vec = 0;
68d7bdcb 2511 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2512 }
2513}
2514
c2bba3df 2515static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2516{
92bf14ab 2517 int i, status, num_vec;
d379142b 2518 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2519
92bf14ab
SP
2520 /* If RoCE is supported, program the max number of NIC vectors that
2521 * may be configured via set-channels, along with vectors needed for
2522 * RoCE. Else, just program the number we'll use initially.
2523 */
2524 if (be_roce_supported(adapter))
2525 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2526 2 * num_online_cpus());
2527 else
2528 num_vec = adapter->cfg_num_qs;
3abcdeda 2529
ac6a0c4a 2530 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2531 adapter->msix_entries[i].entry = i;
2532
ac6a0c4a 2533 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
3abcdeda
SP
2534 if (status == 0) {
2535 goto done;
92bf14ab 2536 } else if (status >= MIN_MSIX_VECTORS) {
ac6a0c4a 2537 num_vec = status;
c2bba3df
SK
2538 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2539 num_vec);
2540 if (!status)
3abcdeda 2541 goto done;
3abcdeda 2542 }
d379142b
SP
2543
2544 dev_warn(dev, "MSIx enable failed\n");
92bf14ab 2545
c2bba3df
SK
2546 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2547 if (!be_physfn(adapter))
2548 return status;
2549 return 0;
3abcdeda 2550done:
92bf14ab
SP
2551 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2552 adapter->num_msix_roce_vec = num_vec / 2;
2553 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2554 adapter->num_msix_roce_vec);
2555 }
2556
2557 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2558
2559 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2560 adapter->num_msix_vec);
c2bba3df 2561 return 0;
6b7c5b94
SP
2562}
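/* The retry above relies on the (old-style) pci_enable_msix() contract:
 * 0 means all requested vectors were granted, a negative value is an
 * error, and a positive value is the number of vectors that could have
 * been allocated, so the driver retries with exactly that many as long
 * as it is at least MIN_MSIX_VECTORS.
 */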
2563
fe6d2a38 2564static inline int be_msix_vec_get(struct be_adapter *adapter,
10ef9ab4 2565 struct be_eq_obj *eqo)
b628bde2 2566{
f2f781a7 2567 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2568}
6b7c5b94 2569
b628bde2
SP
2570static int be_msix_register(struct be_adapter *adapter)
2571{
10ef9ab4
SP
2572 struct net_device *netdev = adapter->netdev;
2573 struct be_eq_obj *eqo;
2574 int status, i, vec;
6b7c5b94 2575
10ef9ab4
SP
2576 for_all_evt_queues(adapter, eqo, i) {
2577 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2578 vec = be_msix_vec_get(adapter, eqo);
2579 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2580 if (status)
2581 goto err_msix;
2582 }
b628bde2 2583
6b7c5b94 2584 return 0;
3abcdeda 2585err_msix:
10ef9ab4
SP
2586 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2587 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2588 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2589 status);
ac6a0c4a 2590 be_msix_disable(adapter);
6b7c5b94
SP
2591 return status;
2592}
2593
2594static int be_irq_register(struct be_adapter *adapter)
2595{
2596 struct net_device *netdev = adapter->netdev;
2597 int status;
2598
ac6a0c4a 2599 if (msix_enabled(adapter)) {
6b7c5b94
SP
2600 status = be_msix_register(adapter);
2601 if (status == 0)
2602 goto done;
ba343c77
SB
2603 /* INTx is not supported for VF */
2604 if (!be_physfn(adapter))
2605 return status;
6b7c5b94
SP
2606 }
2607
e49cc34f 2608 /* INTx: only the first EQ is used */
6b7c5b94
SP
2609 netdev->irq = adapter->pdev->irq;
2610 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2611 &adapter->eq_obj[0]);
6b7c5b94
SP
2612 if (status) {
2613 dev_err(&adapter->pdev->dev,
2614 "INTx request IRQ failed - err %d\n", status);
2615 return status;
2616 }
2617done:
2618 adapter->isr_registered = true;
2619 return 0;
2620}
2621
2622static void be_irq_unregister(struct be_adapter *adapter)
2623{
2624 struct net_device *netdev = adapter->netdev;
10ef9ab4 2625 struct be_eq_obj *eqo;
3abcdeda 2626 int i;
6b7c5b94
SP
2627
2628 if (!adapter->isr_registered)
2629 return;
2630
2631 /* INTx */
ac6a0c4a 2632 if (!msix_enabled(adapter)) {
e49cc34f 2633 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2634 goto done;
2635 }
2636
2637 /* MSIx */
10ef9ab4
SP
2638 for_all_evt_queues(adapter, eqo, i)
2639 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2640
6b7c5b94
SP
2641done:
2642 adapter->isr_registered = false;
6b7c5b94
SP
2643}
2644
10ef9ab4 2645static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2646{
2647 struct be_queue_info *q;
2648 struct be_rx_obj *rxo;
2649 int i;
2650
2651 for_all_rx_queues(adapter, rxo, i) {
2652 q = &rxo->q;
2653 if (q->created) {
2654 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2655 be_rx_cq_clean(rxo);
482c9e79 2656 }
10ef9ab4 2657 be_queue_free(adapter, q);
482c9e79
SP
2658 }
2659}
2660
889cd4b2
SP
2661static int be_close(struct net_device *netdev)
2662{
2663 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2664 struct be_eq_obj *eqo;
2665 int i;
889cd4b2 2666
045508a8
PP
2667 be_roce_dev_close(adapter);
2668
dff345c5
IV
2669 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2670 for_all_evt_queues(adapter, eqo, i) {
04d3d624 2671 napi_disable(&eqo->napi);
6384a4d0
SP
2672 be_disable_busy_poll(eqo);
2673 }
71237b6f 2674 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 2675 }
a323d9bf
SP
2676
2677 be_async_mcc_disable(adapter);
2678
2679 /* Wait for all pending tx completions to arrive so that
2680 * all tx skbs are freed.
2681 */
fba87559 2682 netif_tx_disable(netdev);
6e1f9975 2683 be_tx_compl_clean(adapter);
a323d9bf
SP
2684
2685 be_rx_qs_destroy(adapter);
2686
d11a347d
AK
2687 for (i = 1; i < (adapter->uc_macs + 1); i++)
2688 be_cmd_pmac_del(adapter, adapter->if_handle,
2689 adapter->pmac_id[i], 0);
2690 adapter->uc_macs = 0;
2691
a323d9bf 2692 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2693 if (msix_enabled(adapter))
2694 synchronize_irq(be_msix_vec_get(adapter, eqo));
2695 else
2696 synchronize_irq(netdev->irq);
2697 be_eq_clean(eqo);
63fcb27f
PR
2698 }
2699
889cd4b2
SP
2700 be_irq_unregister(adapter);
2701
482c9e79
SP
2702 return 0;
2703}
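/* Teardown order in be_close() matters: NAPI and busy-poll are quiesced
 * first, TX is drained before the RX queues are destroyed, and the IRQs
 * are unregistered only after every EQ has been synchronized, consumed
 * and left unarmed, so no handler can run against freed queues.
 */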
2704
10ef9ab4 2705static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2706{
2707 struct be_rx_obj *rxo;
e9008ee9
PR
2708 int rc, i, j;
2709 u8 rsstable[128];
482c9e79
SP
2710
2711 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2712 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2713 sizeof(struct be_eth_rx_d));
2714 if (rc)
2715 return rc;
2716 }
2717
2718 /* The FW would like the default RXQ to be created first */
2719 rxo = default_rxo(adapter);
2720 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2721 adapter->if_handle, false, &rxo->rss_id);
2722 if (rc)
2723 return rc;
2724
2725 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2726 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2727 rx_frag_size, adapter->if_handle,
2728 true, &rxo->rss_id);
482c9e79
SP
2729 if (rc)
2730 return rc;
2731 }
2732
2733 if (be_multi_rxq(adapter)) {
e9008ee9
PR
2734 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2735 for_all_rss_queues(adapter, rxo, i) {
2736 if ((j + i) >= 128)
2737 break;
2738 rsstable[j + i] = rxo->rss_id;
2739 }
2740 }
594ad54a
SR
2741 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2742 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2743
2744 if (!BEx_chip(adapter))
2745 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2746 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
2747 } else {
2748 /* Disable RSS, if only default RX Q is created */
2749 adapter->rss_flags = RSS_ENABLE_NONE;
2750 }
594ad54a 2751
da1388d6
VV
2752 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2753 128);
2754 if (rc) {
2755 adapter->rss_flags = RSS_ENABLE_NONE;
2756 return rc;
482c9e79
SP
2757 }
2758
2759 /* First time posting */
10ef9ab4 2760 for_all_rx_queues(adapter, rxo, i)
482c9e79 2761 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2762 return 0;
2763}
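/* Indirection-table note: the nested loop in be_rx_qs_create() fills
 * all 128 entries round-robin with the rss_ids of the RSS rings, i.e.
 * entry k ends up pointing at ring (k % number-of-RSS-rings). An
 * equivalent flat sketch (hypothetical helper):
 */
static inline void example_fill_rsstable(u8 *table, int table_sz,
					 const u8 *rss_ids, int num_rss)
{
	int k;

	for (k = 0; k < table_sz; k++)
		table[k] = rss_ids[k % num_rss];
}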
2764
6b7c5b94
SP
2765static int be_open(struct net_device *netdev)
2766{
2767 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2768 struct be_eq_obj *eqo;
3abcdeda 2769 struct be_rx_obj *rxo;
10ef9ab4 2770 struct be_tx_obj *txo;
b236916a 2771 u8 link_status;
3abcdeda 2772 int status, i;
5fb379ee 2773
10ef9ab4 2774 status = be_rx_qs_create(adapter);
482c9e79
SP
2775 if (status)
2776 goto err;
2777
c2bba3df
SK
2778 status = be_irq_register(adapter);
2779 if (status)
2780 goto err;
5fb379ee 2781
10ef9ab4 2782 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2783 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2784
10ef9ab4
SP
2785 for_all_tx_queues(adapter, txo, i)
2786 be_cq_notify(adapter, txo->cq.id, true, 0);
2787
7a1e9b20
SP
2788 be_async_mcc_enable(adapter);
2789
10ef9ab4
SP
2790 for_all_evt_queues(adapter, eqo, i) {
2791 napi_enable(&eqo->napi);
6384a4d0 2792 be_enable_busy_poll(eqo);
10ef9ab4
SP
2793 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2794 }
04d3d624 2795 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 2796
323ff71e 2797 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
2798 if (!status)
2799 be_link_status_update(adapter, link_status);
2800
fba87559 2801 netif_tx_start_all_queues(netdev);
045508a8 2802 be_roce_dev_open(adapter);
889cd4b2
SP
2803 return 0;
2804err:
2805 be_close(adapter->netdev);
2806 return -EIO;
5fb379ee
SP
2807}
2808
71d8d1b5
AK
2809static int be_setup_wol(struct be_adapter *adapter, bool enable)
2810{
2811 struct be_dma_mem cmd;
2812 int status = 0;
2813 u8 mac[ETH_ALEN];
2814
2815 memset(mac, 0, ETH_ALEN);
2816
2817 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
2818 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2819 GFP_KERNEL);
71d8d1b5
AK
2820 if (cmd.va == NULL)
2821 return -1;
71d8d1b5
AK
2822
2823 if (enable) {
2824 status = pci_write_config_dword(adapter->pdev,
2825 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2826 if (status) {
2827 dev_err(&adapter->pdev->dev,
2381a55c 2828 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2829 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2830 cmd.dma);
71d8d1b5
AK
2831 return status;
2832 }
2833 status = be_cmd_enable_magic_wol(adapter,
2834 adapter->netdev->dev_addr, &cmd);
2835 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2836 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2837 } else {
2838 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2839 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2840 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2841 }
2842
2b7bcebf 2843 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2844 return status;
2845}
2846
6d87f5c3
AK
2847/*
2848 * Generate a seed MAC address from the PF MAC Address using jhash.
2849 * MAC addresses for VFs are assigned incrementally, starting from the seed.
2850 * These addresses are programmed in the ASIC by the PF and the VF driver
2851 * queries for the MAC address during its probe.
2852 */
4c876616 2853static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 2854{
f9449ab7 2855 u32 vf;
3abcdeda 2856 int status = 0;
6d87f5c3 2857 u8 mac[ETH_ALEN];
11ac75ed 2858 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2859
2860 be_vf_eth_addr_generate(adapter, mac);
2861
11ac75ed 2862 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 2863 if (BEx_chip(adapter))
590c391d 2864 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
2865 vf_cfg->if_handle,
2866 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
2867 else
2868 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2869 vf + 1);
590c391d 2870
6d87f5c3
AK
2871 if (status)
2872 dev_err(&adapter->pdev->dev,
590c391d 2873 "Mac address assignment failed for VF %d\n", vf);
6d87f5c3 2874 else
11ac75ed 2875 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
2876
2877 mac[5] += 1;
2878 }
2879 return status;
2880}
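/* Addressing note: be_vf_eth_addr_config() derives one jhash-based seed
 * MAC and then bumps only the last octet (mac[5] += 1) per VF, so
 * consecutive VFs get consecutive addresses. Note the increment does
 * not carry into mac[4]; a seed ending close to 0xff would simply wrap
 * within the last octet.
 */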
2881
4c876616
SP
2882static int be_vfs_mac_query(struct be_adapter *adapter)
2883{
2884 int status, vf;
2885 u8 mac[ETH_ALEN];
2886 struct be_vf_cfg *vf_cfg;
95046b92 2887 bool active = false;
4c876616
SP
2888
2889 for_all_vfs(adapter, vf_cfg, vf) {
2890 be_cmd_get_mac_from_list(adapter, mac, &active,
2891 &vf_cfg->pmac_id, 0);
2892
2893 status = be_cmd_mac_addr_query(adapter, mac, false,
2894 vf_cfg->if_handle, 0);
2895 if (status)
2896 return status;
2897 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2898 }
2899 return 0;
2900}
2901
f9449ab7 2902static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 2903{
11ac75ed 2904 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2905 u32 vf;
2906
257a3feb 2907 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
2908 dev_warn(&adapter->pdev->dev,
2909 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
2910 goto done;
2911 }
2912
b4c1df93
SP
2913 pci_disable_sriov(adapter->pdev);
2914
11ac75ed 2915 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 2916 if (BEx_chip(adapter))
11ac75ed
SP
2917 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2918 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
2919 else
2920 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2921 vf + 1);
f9449ab7 2922
11ac75ed
SP
2923 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2924 }
39f1d94d
SP
2925done:
2926 kfree(adapter->vf_cfg);
2927 adapter->num_vfs = 0;
6d87f5c3
AK
2928}
2929
7707133c
SP
2930static void be_clear_queues(struct be_adapter *adapter)
2931{
2932 be_mcc_queues_destroy(adapter);
2933 be_rx_cqs_destroy(adapter);
2934 be_tx_queues_destroy(adapter);
2935 be_evt_queues_destroy(adapter);
2936}
2937
68d7bdcb 2938static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 2939{
191eb756
SP
2940 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2941 cancel_delayed_work_sync(&adapter->work);
2942 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2943 }
68d7bdcb
SP
2944}
2945
b05004ad 2946static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb
SP
2947{
2948 int i;
2949
b05004ad
SK
2950 if (adapter->pmac_id) {
2951 for (i = 0; i < (adapter->uc_macs + 1); i++)
2952 be_cmd_pmac_del(adapter, adapter->if_handle,
2953 adapter->pmac_id[i], 0);
2954 adapter->uc_macs = 0;
2955
2956 kfree(adapter->pmac_id);
2957 adapter->pmac_id = NULL;
2958 }
2959}
2960
2961static int be_clear(struct be_adapter *adapter)
2962{
68d7bdcb 2963 be_cancel_worker(adapter);
191eb756 2964
11ac75ed 2965 if (sriov_enabled(adapter))
f9449ab7
SP
2966 be_vf_clear(adapter);
2967
2d17f403 2968 /* delete the primary mac along with the uc-mac list */
b05004ad 2969 be_mac_clear(adapter);
fbc13f01 2970
f9449ab7 2971 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 2972
7707133c 2973 be_clear_queues(adapter);
a54769f5 2974
10ef9ab4 2975 be_msix_disable(adapter);
a54769f5
SP
2976 return 0;
2977}
2978
4c876616 2979static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 2980{
92bf14ab 2981 struct be_resources res = {0};
4c876616
SP
2982 struct be_vf_cfg *vf_cfg;
2983 u32 cap_flags, en_flags, vf;
922bbe88 2984 int status = 0;
abb93951 2985
4c876616
SP
2986 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2987 BE_IF_FLAGS_MULTICAST;
abb93951 2988
4c876616 2989 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
2990 if (!BE3_chip(adapter)) {
2991 status = be_cmd_get_profile_config(adapter, &res,
2992 vf + 1);
2993 if (!status)
2994 cap_flags = res.if_cap_flags;
2995 }
4c876616
SP
2996
2997 /* If a FW profile exists, then cap_flags are updated */
2998 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2999 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
3000 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3001 &vf_cfg->if_handle, vf + 1);
3002 if (status)
3003 goto err;
3004 }
3005err:
3006 return status;
abb93951
PR
3007}
3008
39f1d94d 3009static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3010{
11ac75ed 3011 struct be_vf_cfg *vf_cfg;
30128031
SP
3012 int vf;
3013
39f1d94d
SP
3014 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3015 GFP_KERNEL);
3016 if (!adapter->vf_cfg)
3017 return -ENOMEM;
3018
11ac75ed
SP
3019 for_all_vfs(adapter, vf_cfg, vf) {
3020 vf_cfg->if_handle = -1;
3021 vf_cfg->pmac_id = -1;
30128031 3022 }
39f1d94d 3023 return 0;
30128031
SP
3024}
3025
f9449ab7
SP
3026static int be_vf_setup(struct be_adapter *adapter)
3027{
11ac75ed 3028 struct be_vf_cfg *vf_cfg;
f1f3ee1b 3029 u16 def_vlan, lnk_speed;
4c876616
SP
3030 int status, old_vfs, vf;
3031 struct device *dev = &adapter->pdev->dev;
04a06028 3032 u32 privileges;
39f1d94d 3033
257a3feb 3034 old_vfs = pci_num_vf(adapter->pdev);
4c876616
SP
3035 if (old_vfs) {
3036 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3037 if (old_vfs != num_vfs)
3038 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3039 adapter->num_vfs = old_vfs;
39f1d94d 3040 } else {
92bf14ab 3041 if (num_vfs > be_max_vfs(adapter))
4c876616 3042 dev_info(dev, "Device supports %d VFs and not %d\n",
92bf14ab
SP
3043 be_max_vfs(adapter), num_vfs);
3044 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
b4c1df93 3045 if (!adapter->num_vfs)
4c876616 3046 return 0;
39f1d94d
SP
3047 }
3048
3049 status = be_vf_setup_init(adapter);
3050 if (status)
3051 goto err;
30128031 3052
4c876616
SP
3053 if (old_vfs) {
3054 for_all_vfs(adapter, vf_cfg, vf) {
3055 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3056 if (status)
3057 goto err;
3058 }
3059 } else {
3060 status = be_vfs_if_create(adapter);
f9449ab7
SP
3061 if (status)
3062 goto err;
f9449ab7
SP
3063 }
3064
4c876616
SP
3065 if (old_vfs) {
3066 status = be_vfs_mac_query(adapter);
3067 if (status)
3068 goto err;
3069 } else {
39f1d94d
SP
3070 status = be_vf_eth_addr_config(adapter);
3071 if (status)
3072 goto err;
3073 }
f9449ab7 3074
11ac75ed 3075 for_all_vfs(adapter, vf_cfg, vf) {
04a06028
SP
3076 /* Allow VFs to program MAC/VLAN filters */
3077 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3078 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3079 status = be_cmd_set_fn_privileges(adapter,
3080 privileges |
3081 BE_PRIV_FILTMGMT,
3082 vf + 1);
3083 if (!status)
3084 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3085 vf);
3086 }
3087
4c876616
SP
3088 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
3089 * Allow full available bandwidth
3090 */
3091 if (BE3_chip(adapter) && !old_vfs)
3092 be_cmd_set_qos(adapter, 1000, vf+1);
3093
3094 status = be_cmd_link_status_query(adapter, &lnk_speed,
3095 NULL, vf + 1);
3096 if (!status)
3097 vf_cfg->tx_rate = lnk_speed;
f1f3ee1b
AK
3098
3099 status = be_cmd_get_hsw_config(adapter, &def_vlan,
a77dcb8c 3100 vf + 1, vf_cfg->if_handle, NULL);
f1f3ee1b
AK
3101 if (status)
3102 goto err;
3103 vf_cfg->def_vid = def_vlan;
dcf7ebba 3104
0599863d
VV
3105 if (!old_vfs)
3106 be_cmd_enable_vf(adapter, vf + 1);
f9449ab7 3107 }
b4c1df93
SP
3108
3109 if (!old_vfs) {
3110 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3111 if (status) {
3112 dev_err(dev, "SRIOV enable failed\n");
3113 adapter->num_vfs = 0;
3114 goto err;
3115 }
3116 }
f9449ab7
SP
3117 return 0;
3118err:
4c876616
SP
3119 dev_err(dev, "VF setup failed\n");
3120 be_vf_clear(adapter);
f9449ab7
SP
3121 return status;
3122}
3123
92bf14ab
SP
3124/* On BE2/BE3 FW does not report the supported limits */
3125static void BEx_get_resources(struct be_adapter *adapter,
3126 struct be_resources *res)
3127{
3128 struct pci_dev *pdev = adapter->pdev;
3129 bool use_sriov = false;
3130
b905b5d4 3131 if (BE3_chip(adapter) && sriov_want(adapter)) {
92bf14ab
SP
3132 int max_vfs;
3133
3134 max_vfs = pci_sriov_get_totalvfs(pdev);
3135 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
b905b5d4 3136 use_sriov = res->max_vfs;
92bf14ab
SP
3137 }
3138
3139 if (be_physfn(adapter))
3140 res->max_uc_mac = BE_UC_PMAC_COUNT;
3141 else
3142 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3143
3144 if (adapter->function_mode & FLEX10_MODE)
3145 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
1aa9673c
AK
3146 else if (adapter->function_mode & UMC_ENABLED)
3147 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
92bf14ab
SP
3148 else
3149 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3150 res->max_mcast_mac = BE_MAX_MC;
3151
30f3fe45 3152 /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
92bf14ab 3153 if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
30f3fe45 3154 !be_physfn(adapter) || (adapter->port_num > 1))
92bf14ab
SP
3155 res->max_tx_qs = 1;
3156 else
3157 res->max_tx_qs = BE3_MAX_TX_QS;
3158
3159 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3160 !use_sriov && be_physfn(adapter))
3161 res->max_rss_qs = (adapter->be3_native) ?
3162 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3163 res->max_rx_qs = res->max_rss_qs + 1;
3164
68d7bdcb 3165 res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;
92bf14ab
SP
3166
3167 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3168 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3169 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3170}
3171
30128031
SP
3172static void be_setup_init(struct be_adapter *adapter)
3173{
3174 adapter->vlan_prio_bmap = 0xff;
42f11cf2 3175 adapter->phy.link_speed = -1;
30128031
SP
3176 adapter->if_handle = -1;
3177 adapter->be3_native = false;
3178 adapter->promiscuous = false;
f25b119c
PR
3179 if (be_physfn(adapter))
3180 adapter->cmd_privileges = MAX_PRIVILEGES;
3181 else
3182 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
3183}
3184
92bf14ab 3185static int be_get_resources(struct be_adapter *adapter)
abb93951 3186{
92bf14ab
SP
3187 struct device *dev = &adapter->pdev->dev;
3188 struct be_resources res = {0};
3189 int status;
abb93951 3190
92bf14ab
SP
3191 if (BEx_chip(adapter)) {
3192 BEx_get_resources(adapter, &res);
3193 adapter->res = res;
abb93951
PR
3194 }
3195
92bf14ab
SP
3196 /* For Lancer, SH etc read per-function resource limits from FW.
3197 * GET_FUNC_CONFIG returns per function guaranteed limits.
3198 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits
3199 */
3200 if (!BEx_chip(adapter)) {
3201 status = be_cmd_get_func_config(adapter, &res);
3202 if (status)
3203 return status;
abb93951 3204
92bf14ab
SP
3205 /* If RoCE may be enabled stash away half the EQs for RoCE */
3206 if (be_roce_supported(adapter))
3207 res.max_evt_qs /= 2;
3208 adapter->res = res;
abb93951 3209
92bf14ab
SP
3210 if (be_physfn(adapter)) {
3211 status = be_cmd_get_profile_config(adapter, &res, 0);
3212 if (status)
3213 return status;
3214 adapter->res.max_vfs = res.max_vfs;
3215 }
abb93951 3216
92bf14ab
SP
3217 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3218 be_max_txqs(adapter), be_max_rxqs(adapter),
3219 be_max_rss(adapter), be_max_eqs(adapter),
3220 be_max_vfs(adapter));
3221 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3222 be_max_uc(adapter), be_max_mc(adapter),
3223 be_max_vlans(adapter));
abb93951 3224 }
4c876616 3225
92bf14ab 3226 return 0;
abb93951
PR
3227}
3228
39f1d94d
SP
3229/* Routine to query per function resource limits */
3230static int be_get_config(struct be_adapter *adapter)
3231{
4c876616 3232 int status;
39f1d94d 3233
abb93951
PR
3234 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3235 &adapter->function_mode,
0ad3157e
VV
3236 &adapter->function_caps,
3237 &adapter->asic_rev);
abb93951 3238 if (status)
92bf14ab 3239 return status;
abb93951 3240
92bf14ab
SP
3241 status = be_get_resources(adapter);
3242 if (status)
3243 return status;
abb93951
PR
3244
3245 /* primary mac needs 1 pmac entry */
92bf14ab
SP
3246 adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3247 GFP_KERNEL);
3248 if (!adapter->pmac_id)
3249 return -ENOMEM;
abb93951 3250
92bf14ab
SP
3251 /* Sanitize cfg_num_qs based on HW and platform limits */
3252 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3253
3254 return 0;
39f1d94d
SP
3255}
3256
95046b92
SP
3257static int be_mac_setup(struct be_adapter *adapter)
3258{
3259 u8 mac[ETH_ALEN];
3260 int status;
3261
3262 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3263 status = be_cmd_get_perm_mac(adapter, mac);
3264 if (status)
3265 return status;
3266
3267 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3268 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3269 } else {
3270 /* Maybe the HW was reset; dev_addr must be re-programmed */
3271 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3272 }
3273
2c7a9dc1
AK
3274 /* For BE3-R VFs, the PF programs the initial MAC address */
3275 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3276 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3277 &adapter->pmac_id[0], 0);
95046b92
SP
3278 return 0;
3279}
3280
68d7bdcb
SP
3281static void be_schedule_worker(struct be_adapter *adapter)
3282{
3283 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3284 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3285}
3286
7707133c 3287static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 3288{
68d7bdcb 3289 struct net_device *netdev = adapter->netdev;
10ef9ab4 3290 int status;
ba343c77 3291
7707133c 3292 status = be_evt_queues_create(adapter);
abb93951
PR
3293 if (status)
3294 goto err;
73d540f2 3295
7707133c 3296 status = be_tx_qs_create(adapter);
c2bba3df
SK
3297 if (status)
3298 goto err;
10ef9ab4 3299
7707133c 3300 status = be_rx_cqs_create(adapter);
10ef9ab4 3301 if (status)
a54769f5 3302 goto err;
6b7c5b94 3303
7707133c 3304 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
3305 if (status)
3306 goto err;
3307
68d7bdcb
SP
3308 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3309 if (status)
3310 goto err;
3311
3312 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3313 if (status)
3314 goto err;
3315
7707133c
SP
3316 return 0;
3317err:
3318 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3319 return status;
3320}
3321
68d7bdcb
SP
3322int be_update_queues(struct be_adapter *adapter)
3323{
3324 struct net_device *netdev = adapter->netdev;
3325 int status;
3326
3327 if (netif_running(netdev))
3328 be_close(netdev);
3329
3330 be_cancel_worker(adapter);
3331
3332 /* If any vectors have been shared with RoCE, we cannot re-program
3333 * the MSIx table.
3334 */
3335 if (!adapter->num_msix_roce_vec)
3336 be_msix_disable(adapter);
3337
3338 be_clear_queues(adapter);
3339
3340 if (!msix_enabled(adapter)) {
3341 status = be_msix_enable(adapter);
3342 if (status)
3343 return status;
3344 }
3345
3346 status = be_setup_queues(adapter);
3347 if (status)
3348 return status;
3349
3350 be_schedule_worker(adapter);
3351
3352 if (netif_running(netdev))
3353 status = be_open(netdev);
3354
3355 return status;
3356}
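/*
 * Editor's sketch: be_update_queues() is the path taken when queue counts
 * change via ethtool set-channels. RSS spreading is only meaningful with
 * more than one RX queue, so a reconfig down to a single RX queue should
 * leave RSS off. A minimal standalone illustration of that gating
 * (rss_should_be_enabled is a hypothetical helper, not driver API):
 */
#include <stdio.h>

static int rss_should_be_enabled(unsigned int num_rx_qs)
{
	/* with one RX queue there are no flows to spread */
	return num_rx_qs > 1;
}

int main(void)
{
	printf("rxqs=4 -> RSS %s\n", rss_should_be_enabled(4) ? "on" : "off");
	printf("rxqs=1 -> RSS %s\n", rss_should_be_enabled(1) ? "on" : "off");
	return 0;
}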
3357
7707133c
SP
3358static int be_setup(struct be_adapter *adapter)
3359{
3360 struct device *dev = &adapter->pdev->dev;
3361 u32 tx_fc, rx_fc, en_flags;
3362 int status;
3363
3364 be_setup_init(adapter);
3365
3366 if (!lancer_chip(adapter))
3367 be_cmd_req_native_mode(adapter);
3368
3369 status = be_get_config(adapter);
10ef9ab4 3370 if (status)
a54769f5 3371 goto err;
6b7c5b94 3372
7707133c 3373 status = be_msix_enable(adapter);
10ef9ab4 3374 if (status)
a54769f5 3375 goto err;
6b7c5b94 3376
f9449ab7 3377 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
7707133c 3378 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
abb93951 3379 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
f9449ab7 3380 en_flags |= BE_IF_FLAGS_RSS;
92bf14ab
SP
3381 en_flags = en_flags & be_if_cap_flags(adapter);
3382 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
1578e777 3383 &adapter->if_handle, 0);
7707133c 3384 if (status)
a54769f5 3385 goto err;
6b7c5b94 3386
68d7bdcb
SP
3387 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3388 rtnl_lock();
7707133c 3389 status = be_setup_queues(adapter);
68d7bdcb 3390 rtnl_unlock();
95046b92 3391 if (status)
1578e777
PR
3392 goto err;
3393
7707133c
SP
3394 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3395 /* In UMC mode the FW does not return the right privileges.
3396 * Override with the correct privilege, equivalent to a PF's.
3397 */
3398 if (be_is_mc(adapter))
3399 adapter->cmd_privileges = MAX_PRIVILEGES;
3400
3401 status = be_mac_setup(adapter);
10ef9ab4
SP
3402 if (status)
3403 goto err;
3404
eeb65ced 3405 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
5a56eb10 3406
e9e2a904
SK
3407 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3408 dev_err(dev, "Firmware on card is old (%s), IRQs may not work.\n",
3409 adapter->fw_ver);
3410 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3411 }
3412
1d1e9a46 3413 if (adapter->vlans_added)
10329df8 3414 be_vid_config(adapter);
7ab8b0b4 3415
a54769f5 3416 be_set_rx_mode(adapter->netdev);
5fb379ee 3417
ddc3f5cb 3418 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 3419
ddc3f5cb
AK
3420 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3421 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 3422 adapter->rx_fc);
2dc1deb6 3423
b905b5d4 3424 if (sriov_want(adapter)) {
92bf14ab 3425 if (be_max_vfs(adapter))
39f1d94d
SP
3426 be_vf_setup(adapter);
3427 else
3428 dev_warn(dev, "device doesn't support SRIOV\n");
f9449ab7
SP
3429 }
3430
f25b119c
PR
3431 status = be_cmd_get_phy_info(adapter);
3432 if (!status && be_pause_supported(adapter))
42f11cf2
AK
3433 adapter->phy.fc_autoneg = 1;
3434
68d7bdcb 3435 be_schedule_worker(adapter);
f9449ab7 3436 return 0;
a54769f5
SP
3437err:
3438 be_clear(adapter);
3439 return status;
3440}
6b7c5b94 3441
66268739
IV
3442#ifdef CONFIG_NET_POLL_CONTROLLER
3443static void be_netpoll(struct net_device *netdev)
3444{
3445 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3446 struct be_eq_obj *eqo;
66268739
IV
3447 int i;
3448
e49cc34f
SP
3449 for_all_evt_queues(adapter, eqo, i) {
3450 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3451 napi_schedule(&eqo->napi);
3452 }
10ef9ab4
SP
66268739
IV
3455}
3456#endif
3457
84517482 3458#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
4188e7df 3459static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
c165541e 3460
fa9a6fed 3461static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
3462 const u8 *p, u32 img_start, int image_size,
3463 int hdr_size)
fa9a6fed
SB
3464{
3465 u32 crc_offset;
3466 u8 flashed_crc[4];
3467 int status;
3f0d4560
AK
3468
3469 crc_offset = hdr_size + img_start + image_size - 4;
3470
fa9a6fed 3471 p += crc_offset;
3f0d4560
AK
3472
3473 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 3474 (image_size - 4));
fa9a6fed
SB
3475 if (status) {
3476 dev_err(&adapter->pdev->dev,
3477 "could not get crc from flash, not flashing redboot\n");
3478 return false;
3479 }
3480
3481 /* update redboot only if CRC does not match */
3482 return memcmp(flashed_crc, p, 4) != 0;
fa9a6fed
SB
3486}
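/*
 * Editor's sketch: the redboot check above reads the CRC trailer already
 * on flash and compares it with the last 4 bytes of the new image; the
 * section is reflashed only on mismatch. Standalone model (names are
 * illustrative):
 */
#include <string.h>

static int image_needs_flashing(const unsigned char flashed_crc[4],
				const unsigned char new_image_crc[4])
{
	/* flash only when the stored CRC differs from the new image's CRC */
	return memcmp(flashed_crc, new_image_crc, 4) != 0;
}

int main(void)
{
	const unsigned char a[4] = { 0xde, 0xad, 0xbe, 0xef };
	const unsigned char b[4] = { 0xde, 0xad, 0xbe, 0xef };

	return image_needs_flashing(a, b);	/* 0: match, skip flashing */
}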
3487
306f1348
SP
3488static bool phy_flashing_required(struct be_adapter *adapter)
3489{
42f11cf2
AK
3490 return (adapter->phy.phy_type == TN_8022 &&
3491 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3492}
3493
c165541e
PR
3494static bool is_comp_in_ufi(struct be_adapter *adapter,
3495 struct flash_section_info *fsec, int type)
3496{
3497 int i = 0, img_type = 0;
3498 struct flash_section_info_g2 *fsec_g2 = NULL;
3499
ca34fe38 3500 if (BE2_chip(adapter))
c165541e
PR
3501 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3502
3503 for (i = 0; i < MAX_FLASH_COMP; i++) {
3504 if (fsec_g2)
3505 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3506 else
3507 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3508
3509 if (img_type == type)
3510 return true;
3511 }
3512 return false;
3514}
3515
4188e7df 3516static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
c165541e
PR
3517 int header_size,
3518 const struct firmware *fw)
3519{
3520 struct flash_section_info *fsec = NULL;
3521 const u8 *p = fw->data;
3522
3523 p += header_size;
3524 while (p < (fw->data + fw->size)) {
3525 fsec = (struct flash_section_info *)p;
3526 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3527 return fsec;
3528 p += 32;
3529 }
3530 return NULL;
3531}
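/*
 * Editor's sketch: get_fsec_info() scans the UFI payload in 32-byte steps
 * until the flash-directory cookie matches. Standalone version of that
 * scan (buffer contents and cookie bytes are illustrative only):
 */
#include <stddef.h>
#include <string.h>

static const unsigned char *find_cookie(const unsigned char *buf, size_t len,
					const unsigned char *cookie,
					size_t cookie_len)
{
	const unsigned char *p;

	for (p = buf; p + cookie_len <= buf + len; p += 32)
		if (!memcmp(p, cookie, cookie_len))
			return p;	/* section header begins here */
	return NULL;
}

int main(void)
{
	unsigned char buf[96] = { 0 };
	static const unsigned char cookie[4] = { '*', '*', '*', ' ' };

	memcpy(buf + 32, cookie, sizeof(cookie));
	return find_cookie(buf, sizeof(buf), cookie, sizeof(cookie)) ? 0 : 1;
}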
3532
773a2d7c
PR
3533static int be_flash(struct be_adapter *adapter, const u8 *img,
3534 struct be_dma_mem *flash_cmd, int optype, int img_size)
3535{
3536 u32 total_bytes = 0, flash_op, num_bytes = 0;
3537 int status = 0;
3538 struct be_cmd_write_flashrom *req = flash_cmd->va;
3539
3540 total_bytes = img_size;
3541 while (total_bytes) {
3542 num_bytes = min_t(u32, 32*1024, total_bytes);
3543
3544 total_bytes -= num_bytes;
3545
3546 if (!total_bytes) {
3547 if (optype == OPTYPE_PHY_FW)
3548 flash_op = FLASHROM_OPER_PHY_FLASH;
3549 else
3550 flash_op = FLASHROM_OPER_FLASH;
3551 } else {
3552 if (optype == OPTYPE_PHY_FW)
3553 flash_op = FLASHROM_OPER_PHY_SAVE;
3554 else
3555 flash_op = FLASHROM_OPER_SAVE;
3556 }
3557
be716446 3558 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
3559 img += num_bytes;
3560 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3561 flash_op, num_bytes);
3562 if (status) {
3563 if (status == ILLEGAL_IOCTL_REQ &&
3564 optype == OPTYPE_PHY_FW)
3565 break;
3566 dev_err(&adapter->pdev->dev,
3567 "cmd to write to flash rom failed.\n");
3568 return status;
3569 }
3570 }
3571 return 0;
3572}
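/*
 * Editor's sketch: be_flash() streams an image in 32KB chunks, issuing
 * SAVE for intermediate chunks and a final FLASH (commit) on the last.
 * Standalone model of the chunking loop (write_chunk is a stub):
 */
#include <stdio.h>

#define CHUNK (32 * 1024)

static void write_chunk(unsigned int len, int is_last)
{
	printf("%s %u bytes\n", is_last ? "FLASH" : "SAVE", len);
}

static void flash_image(unsigned int total)
{
	while (total) {
		unsigned int n = total < CHUNK ? total : CHUNK; /* min_t() */

		total -= n;
		write_chunk(n, total == 0);	/* last chunk commits */
	}
}

int main(void)
{
	flash_image(70 * 1024);	/* SAVE 32768, SAVE 32768, FLASH 6144 */
	return 0;
}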
3573
0ad3157e 3574/* For BE2, BE3 and BE3-R */
ca34fe38 3575static int be_flash_BEx(struct be_adapter *adapter,
c165541e
PR
3576 const struct firmware *fw,
3577 struct be_dma_mem *flash_cmd,
3578 int num_of_images)
84517482 3580{
3f0d4560 3581 int status = 0, i, filehdr_size = 0;
c165541e 3582 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
84517482 3583 const u8 *p = fw->data;
215faf9c 3584 const struct flash_comp *pflashcomp;
773a2d7c 3585 int num_comp, redboot;
c165541e
PR
3586 struct flash_section_info *fsec = NULL;
3587
3588 struct flash_comp gen3_flash_types[] = {
3589 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3590 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3591 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3592 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3593 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3594 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3595 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3596 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3597 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3598 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3599 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3600 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3601 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3602 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3603 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3604 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3605 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3606 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3607 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3608 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 3609 };
c165541e
PR
3610
3611 struct flash_comp gen2_flash_types[] = {
3612 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3613 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3614 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3615 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3616 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3617 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3618 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3619 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3620 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3621 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3622 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3623 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3624 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3625 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3626 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3627 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
3628 };
3629
ca34fe38 3630 if (BE3_chip(adapter)) {
3f0d4560
AK
3631 pflashcomp = gen3_flash_types;
3632 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 3633 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
3634 } else {
3635 pflashcomp = gen2_flash_types;
3636 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 3637 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 3638 }
ca34fe38 3639
c165541e
PR
3640 /* Get flash section info */
3641 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3642 if (!fsec) {
3643 dev_err(&adapter->pdev->dev,
3644 "Invalid Cookie. UFI corrupted ?\n");
3645 return -1;
3646 }
9fe96934 3647 for (i = 0; i < num_comp; i++) {
c165541e 3648 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 3649 continue;
c165541e
PR
3650
3651 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3652 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3653 continue;
3654
773a2d7c
PR
3655 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3656 !phy_flashing_required(adapter))
306f1348 3657 continue;
c165541e 3658
773a2d7c
PR
3659 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3660 redboot = be_flash_redboot(adapter, fw->data,
3661 pflashcomp[i].offset, pflashcomp[i].size,
3662 filehdr_size + img_hdrs_size);
3663 if (!redboot)
3664 continue;
3665 }
c165541e 3666
3f0d4560 3667 p = fw->data;
c165541e 3668 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
306f1348
SP
3669 if (p + pflashcomp[i].size > fw->data + fw->size)
3670 return -1;
773a2d7c
PR
3671
3672 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3673 pflashcomp[i].size);
3674 if (status) {
3675 dev_err(&adapter->pdev->dev,
3676 "Flashing section type %d failed.\n",
3677 pflashcomp[i].img_type);
3678 return status;
84517482 3679 }
84517482 3680 }
84517482
AK
3681 return 0;
3682}
3683
773a2d7c
PR
3684static int be_flash_skyhawk(struct be_adapter *adapter,
3685 const struct firmware *fw,
3686 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 3687{
773a2d7c
PR
3688 int status = 0, i, filehdr_size = 0;
3689 int img_offset, img_size, img_optype, redboot;
3690 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3691 const u8 *p = fw->data;
3692 struct flash_section_info *fsec = NULL;
3693
3694 filehdr_size = sizeof(struct flash_file_hdr_g3);
3695 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3696 if (!fsec) {
3697 dev_err(&adapter->pdev->dev,
3698 "Invalid Cookie. UFI corrupted ?\n");
3699 return -1;
3700 }
3701
3702 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3703 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3704 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3705
3706 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3707 case IMAGE_FIRMWARE_iSCSI:
3708 img_optype = OPTYPE_ISCSI_ACTIVE;
3709 break;
3710 case IMAGE_BOOT_CODE:
3711 img_optype = OPTYPE_REDBOOT;
3712 break;
3713 case IMAGE_OPTION_ROM_ISCSI:
3714 img_optype = OPTYPE_BIOS;
3715 break;
3716 case IMAGE_OPTION_ROM_PXE:
3717 img_optype = OPTYPE_PXE_BIOS;
3718 break;
3719 case IMAGE_OPTION_ROM_FCoE:
3720 img_optype = OPTYPE_FCOE_BIOS;
3721 break;
3722 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3723 img_optype = OPTYPE_ISCSI_BACKUP;
3724 break;
3725 case IMAGE_NCSI:
3726 img_optype = OPTYPE_NCSI_FW;
3727 break;
3728 default:
3729 continue;
3730 }
3731
3732 if (img_optype == OPTYPE_REDBOOT) {
3733 redboot = be_flash_redboot(adapter, fw->data,
3734 img_offset, img_size,
3735 filehdr_size + img_hdrs_size);
3736 if (!redboot)
3737 continue;
3738 }
3739
3740 p = fw->data;
3741 p += filehdr_size + img_offset + img_hdrs_size;
3742 if (p + img_size > fw->data + fw->size)
3743 return -1;
3744
3745 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3746 if (status) {
3747 dev_err(&adapter->pdev->dev,
3748 "Flashing section type %d failed.\n",
3749 fsec->fsec_entry[i].type);
3750 return status;
3751 }
3752 }
3753 return 0;
3f0d4560
AK
3754}
3755
485bf569
SN
3756static int lancer_fw_download(struct be_adapter *adapter,
3757 const struct firmware *fw)
84517482 3758{
485bf569
SN
3759#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3760#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 3761 struct be_dma_mem flash_cmd;
485bf569
SN
3762 const u8 *data_ptr = NULL;
3763 u8 *dest_image_ptr = NULL;
3764 size_t image_size = 0;
3765 u32 chunk_size = 0;
3766 u32 data_written = 0;
3767 u32 offset = 0;
3768 int status = 0;
3769 u8 add_status = 0;
f67ef7ba 3770 u8 change_status;
84517482 3771
485bf569 3772 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 3773 dev_err(&adapter->pdev->dev,
485bf569
SN
3774 "FW Image not properly aligned. "
3775 "Length must be 4 byte aligned.\n");
3776 status = -EINVAL;
3777 goto lancer_fw_exit;
d9efd2af
SB
3778 }
3779
485bf569
SN
3780 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3781 + LANCER_FW_DOWNLOAD_CHUNK;
3782 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
d0320f75 3783 &flash_cmd.dma, GFP_KERNEL);
485bf569
SN
3784 if (!flash_cmd.va) {
3785 status = -ENOMEM;
485bf569
SN
3786 goto lancer_fw_exit;
3787 }
84517482 3788
485bf569
SN
3789 dest_image_ptr = flash_cmd.va +
3790 sizeof(struct lancer_cmd_req_write_object);
3791 image_size = fw->size;
3792 data_ptr = fw->data;
3793
3794 while (image_size) {
3795 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3796
3797 /* Copy the image chunk content. */
3798 memcpy(dest_image_ptr, data_ptr, chunk_size);
3799
3800 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3801 chunk_size, offset,
3802 LANCER_FW_DOWNLOAD_LOCATION,
3803 &data_written, &change_status,
3804 &add_status);
485bf569
SN
3805 if (status)
3806 break;
3807
3808 offset += data_written;
3809 data_ptr += data_written;
3810 image_size -= data_written;
3811 }
3812
3813 if (!status) {
3814 /* Commit the FW written */
3815 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3816 0, offset,
3817 LANCER_FW_DOWNLOAD_LOCATION,
3818 &data_written, &change_status,
3819 &add_status);
485bf569
SN
3820 }
3821
3822 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3823 flash_cmd.dma);
3824 if (status) {
3825 dev_err(&adapter->pdev->dev,
3826 "Firmware load error. "
3827 "Status code: 0x%x Additional Status: 0x%x\n",
3828 status, add_status);
3829 goto lancer_fw_exit;
3830 }
3831
f67ef7ba 3832 if (change_status == LANCER_FW_RESET_NEEDED) {
4bebb56a
SK
3833 dev_info(&adapter->pdev->dev,
3834 "Resetting adapter to activate new FW\n");
5c510811
SK
3835 status = lancer_physdev_ctrl(adapter,
3836 PHYSDEV_CONTROL_FW_RESET_MASK);
f67ef7ba
PR
3837 if (status) {
3838 dev_err(&adapter->pdev->dev,
3839 "Adapter busy for FW reset.\n"
3840 "New FW will not be active.\n");
3841 goto lancer_fw_exit;
3842 }
3843 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3844 dev_err(&adapter->pdev->dev,
3845 "System reboot required for new FW"
3846 " to be active\n");
3847 }
3848
485bf569
SN
3849 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3850lancer_fw_exit:
3851 return status;
3852}
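/*
 * Editor's sketch: the IS_ALIGNED(fw->size, sizeof(u32)) guard above
 * rejects images whose length is not a multiple of 4. For a power-of-two
 * alignment this reduces to a mask test, shown standalone:
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int fw_size_ok(size_t size)
{
	/* IS_ALIGNED(x, a) with power-of-two a is (x & (a - 1)) == 0 */
	return (size & (sizeof(uint32_t) - 1)) == 0;
}

int main(void)
{
	printf("1024: %d, 1027: %d\n", fw_size_ok(1024), fw_size_ok(1027));
	return 0;
}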
3853
ca34fe38
SP
3854#define UFI_TYPE2 2
3855#define UFI_TYPE3 3
0ad3157e 3856#define UFI_TYPE3R 10
ca34fe38
SP
3857#define UFI_TYPE4 4
3858static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 3859 struct flash_file_hdr_g3 *fhdr)
773a2d7c
PR
3860{
3861 if (fhdr == NULL)
3862 goto be_get_ufi_exit;
3863
ca34fe38
SP
3864 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3865 return UFI_TYPE4;
0ad3157e
VV
3866 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3867 if (fhdr->asic_type_rev == 0x10)
3868 return UFI_TYPE3R;
3869 else
3870 return UFI_TYPE3;
3871 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
ca34fe38 3872 return UFI_TYPE2;
773a2d7c
PR
3873
3874be_get_ufi_exit:
3875 dev_err(&adapter->pdev->dev,
3876 "UFI and Interface are not compatible for flashing\n");
3877 return -1;
3878}
3879
485bf569
SN
3880static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3881{
485bf569
SN
3882 struct flash_file_hdr_g3 *fhdr3;
3883 struct image_hdr *img_hdr_ptr = NULL;
3884 struct be_dma_mem flash_cmd;
3885 const u8 *p;
773a2d7c 3886 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
84517482 3887
be716446 3888 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
2b7bcebf
IV
3889 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3890 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
3891 if (!flash_cmd.va) {
3892 status = -ENOMEM;
485bf569 3893 goto be_fw_exit;
84517482
AK
3894 }
3895
773a2d7c 3896 p = fw->data;
0ad3157e 3897 fhdr3 = (struct flash_file_hdr_g3 *)p;
773a2d7c 3898
0ad3157e 3899 ufi_type = be_get_ufi_type(adapter, fhdr3);
773a2d7c 3900
773a2d7c
PR
3901 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3902 for (i = 0; i < num_imgs; i++) {
3903 img_hdr_ptr = (struct image_hdr *)(fw->data +
3904 (sizeof(struct flash_file_hdr_g3) +
3905 i * sizeof(struct image_hdr)));
3906 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
0ad3157e
VV
3907 switch (ufi_type) {
3908 case UFI_TYPE4:
773a2d7c
PR
3909 status = be_flash_skyhawk(adapter, fw,
3910 &flash_cmd, num_imgs);
0ad3157e
VV
3911 break;
3912 case UFI_TYPE3R:
ca34fe38
SP
3913 status = be_flash_BEx(adapter, fw, &flash_cmd,
3914 num_imgs);
0ad3157e
VV
3915 break;
3916 case UFI_TYPE3:
3917 /* Do not flash this ufi on BE3-R cards */
3918 if (adapter->asic_rev < 0x10)
3919 status = be_flash_BEx(adapter, fw,
3920 &flash_cmd,
3921 num_imgs);
3922 else {
3923 status = -1;
3924 dev_err(&adapter->pdev->dev,
3925 "Can't load BE3 UFI on BE3R\n");
3926 }
3927 }
3f0d4560 3928 }
773a2d7c
PR
3929 }
3930
ca34fe38
SP
3931 if (ufi_type == UFI_TYPE2)
3932 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
773a2d7c 3933 else if (ufi_type == -1)
3f0d4560 3934 status = -1;
84517482 3935
2b7bcebf
IV
3936 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3937 flash_cmd.dma);
84517482
AK
3938 if (status) {
3939 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 3940 goto be_fw_exit;
84517482
AK
3941 }
3942
af901ca1 3943 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 3944
485bf569
SN
3945be_fw_exit:
3946 return status;
3947}
3948
3949int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3950{
3951 const struct firmware *fw;
3952 int status;
3953
3954 if (!netif_running(adapter->netdev)) {
3955 dev_err(&adapter->pdev->dev,
3956 "Firmware load not allowed (interface is down)\n");
3957 return -1;
3958 }
3959
3960 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3961 if (status)
3962 goto fw_exit;
3963
3964 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3965
3966 if (lancer_chip(adapter))
3967 status = lancer_fw_download(adapter, fw);
3968 else
3969 status = be_fw_download(adapter, fw);
3970
eeb65ced
SK
3971 if (!status)
3972 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3973 adapter->fw_on_flash);
3974
84517482
AK
3975fw_exit:
3976 release_firmware(fw);
3977 return status;
3978}
3979
a77dcb8c
AK
3980static int be_ndo_bridge_setlink(struct net_device *dev,
3981 struct nlmsghdr *nlh)
3982{
3983 struct be_adapter *adapter = netdev_priv(dev);
3984 struct nlattr *attr, *br_spec;
3985 int rem;
3986 int status = 0;
3987 u16 mode = 0;
3988
3989 if (!sriov_enabled(adapter))
3990 return -EOPNOTSUPP;
3991
3992 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	/* guard added by editor: nlmsg_find_attr() may return NULL */
	if (!br_spec)
		return -EINVAL;
3993
3994 nla_for_each_nested(attr, br_spec, rem) {
3995 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3996 continue;
3997
3998 mode = nla_get_u16(attr);
3999 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4000 return -EINVAL;
4001
4002 status = be_cmd_set_hsw_config(adapter, 0, 0,
4003 adapter->if_handle,
4004 mode == BRIDGE_MODE_VEPA ?
4005 PORT_FWD_TYPE_VEPA :
4006 PORT_FWD_TYPE_VEB);
4007 if (status)
4008 goto err;
4009
4010 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4011 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4012
4013 return status;
4014 }
4015err:
4016 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4017 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4018
4019 return status;
4020}
4021
4022static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4023 struct net_device *dev,
4024 u32 filter_mask)
4025{
4026 struct be_adapter *adapter = netdev_priv(dev);
4027 int status = 0;
4028 u8 hsw_mode;
4029
4030 if (!sriov_enabled(adapter))
4031 return 0;
4032
4033 /* BE and Lancer chips support VEB mode only */
4034 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4035 hsw_mode = PORT_FWD_TYPE_VEB;
4036 } else {
4037 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4038 adapter->if_handle, &hsw_mode);
4039 if (status)
4040 return 0;
4041 }
4042
4043 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4044 hsw_mode == PORT_FWD_TYPE_VEPA ?
4045 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4046}
4047
e5686ad8 4048static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
4049 .ndo_open = be_open,
4050 .ndo_stop = be_close,
4051 .ndo_start_xmit = be_xmit,
a54769f5 4052 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
4053 .ndo_set_mac_address = be_mac_addr_set,
4054 .ndo_change_mtu = be_change_mtu,
ab1594e9 4055 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 4056 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
4057 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4058 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 4059 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 4060 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 4061 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
66268739
IV
4062 .ndo_get_vf_config = be_get_vf_config,
4063#ifdef CONFIG_NET_POLL_CONTROLLER
4064 .ndo_poll_controller = be_netpoll,
4065#endif
a77dcb8c
AK
4066 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4067 .ndo_bridge_getlink = be_ndo_bridge_getlink,
6384a4d0
SP
4068#ifdef CONFIG_NET_RX_BUSY_POLL
4069 .ndo_busy_poll = be_busy_poll
4070#endif
6b7c5b94
SP
4071};
4072
4073static void be_netdev_init(struct net_device *netdev)
4074{
4075 struct be_adapter *adapter = netdev_priv(netdev);
4076
6332c8d3 4077 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68 4078 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
f646968f 4079 NETIF_F_HW_VLAN_CTAG_TX;
8b8ddc68
MM
4080 if (be_multi_rxq(adapter))
4081 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
4082
4083 netdev->features |= netdev->hw_features |
f646968f 4084 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4b972914 4085
eb8a50d9 4086 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 4087 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 4088
fbc13f01
AK
4089 netdev->priv_flags |= IFF_UNICAST_FLT;
4090
6b7c5b94
SP
4091 netdev->flags |= IFF_MULTICAST;
4092
b7e5887e 4093 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 4094
10ef9ab4 4095 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94
SP
4096
4097 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
6b7c5b94
SP
4098}
4099
4100static void be_unmap_pci_bars(struct be_adapter *adapter)
4101{
c5b3ad4c
SP
4102 if (adapter->csr)
4103 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 4104 if (adapter->db)
ce66f781 4105 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
4106}
4107
ce66f781
SP
4108static int db_bar(struct be_adapter *adapter)
4109{
4110 if (lancer_chip(adapter) || !be_physfn(adapter))
4111 return 0;
4112 else
4113 return 4;
4114}
4115
4116static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 4117{
dbf0f2a7 4118 if (skyhawk_chip(adapter)) {
ce66f781
SP
4119 adapter->roce_db.size = 4096;
4120 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4121 db_bar(adapter));
4122 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4123 db_bar(adapter));
4124 }
045508a8 4125 return 0;
6b7c5b94
SP
4126}
4127
4128static int be_map_pci_bars(struct be_adapter *adapter)
4129{
4130 u8 __iomem *addr;
fe6d2a38 4131
c5b3ad4c
SP
4132 if (BEx_chip(adapter) && be_physfn(adapter)) {
4133 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4134 if (adapter->csr == NULL)
4135 return -ENOMEM;
4136 }
4137
ce66f781 4138 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
6b7c5b94
SP
4139 if (addr == NULL)
4140 goto pci_map_err;
ba343c77 4141 adapter->db = addr;
ce66f781
SP
4142
4143 be_roce_map_pci_bars(adapter);
6b7c5b94 4144 return 0;
ce66f781 4145
6b7c5b94
SP
4146pci_map_err:
4147 be_unmap_pci_bars(adapter);
4148 return -ENOMEM;
4149}
4150
6b7c5b94
SP
4151static void be_ctrl_cleanup(struct be_adapter *adapter)
4152{
8788fdc2 4153 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
4154
4155 be_unmap_pci_bars(adapter);
4156
4157 if (mem->va)
2b7bcebf
IV
4158 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4159 mem->dma);
e7b909a6 4160
5b8821b7 4161 mem = &adapter->rx_filter;
e7b909a6 4162 if (mem->va)
2b7bcebf
IV
4163 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4164 mem->dma);
6b7c5b94
SP
4165}
4166
6b7c5b94
SP
4167static int be_ctrl_init(struct be_adapter *adapter)
4168{
8788fdc2
SP
4169 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4170 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 4171 struct be_dma_mem *rx_filter = &adapter->rx_filter;
ce66f781 4172 u32 sli_intf;
6b7c5b94 4173 int status;
6b7c5b94 4174
ce66f781
SP
4175 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4176 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4177 SLI_INTF_FAMILY_SHIFT;
4178 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4179
6b7c5b94
SP
4180 status = be_map_pci_bars(adapter);
4181 if (status)
e7b909a6 4182 goto done;
6b7c5b94
SP
4183
4184 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
4185 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4186 mbox_mem_alloc->size,
4187 &mbox_mem_alloc->dma,
4188 GFP_KERNEL);
6b7c5b94 4189 if (!mbox_mem_alloc->va) {
e7b909a6
SP
4190 status = -ENOMEM;
4191 goto unmap_pci_bars;
6b7c5b94
SP
4192 }
4193 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4194 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4195 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4196 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 4197
5b8821b7 4198 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
ede23fa8
JP
4199 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4200 rx_filter->size, &rx_filter->dma,
4201 GFP_KERNEL);
5b8821b7 4202 if (rx_filter->va == NULL) {
e7b909a6
SP
4203 status = -ENOMEM;
4204 goto free_mbox;
4205 }
1f9061d2 4206
2984961c 4207 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
4208 spin_lock_init(&adapter->mcc_lock);
4209 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 4210
dd131e76 4211 init_completion(&adapter->flash_compl);
cf588477 4212 pci_save_state(adapter->pdev);
6b7c5b94 4213 return 0;
e7b909a6
SP
4214
4215free_mbox:
2b7bcebf
IV
4216 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4217 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
4218
4219unmap_pci_bars:
4220 be_unmap_pci_bars(adapter);
4221
4222done:
4223 return status;
6b7c5b94
SP
4224}
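/*
 * Editor's sketch: be_ctrl_init() over-allocates the mailbox by 16 bytes
 * and rounds the VA/DMA addresses up with PTR_ALIGN so the mailbox is
 * 16-byte aligned. Standalone model of the round-up:
 */
#include <stdint.h>
#include <stdio.h>

static void *ptr_align16(void *p)
{
	/* PTR_ALIGN(p, 16): round up to the next multiple of 16 */
	return (void *)(((uintptr_t)p + 15) & ~(uintptr_t)15);
}

int main(void)
{
	char buf[64 + 16];	/* +16 slack, as in mbox_mem_alloc->size */

	printf("%p -> %p\n", (void *)buf, ptr_align16(buf));
	return 0;
}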
4225
4226static void be_stats_cleanup(struct be_adapter *adapter)
4227{
3abcdeda 4228 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
4229
4230 if (cmd->va)
2b7bcebf
IV
4231 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4232 cmd->va, cmd->dma);
6b7c5b94
SP
4233}
4234
4235static int be_stats_init(struct be_adapter *adapter)
4236{
3abcdeda 4237 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 4238
ca34fe38
SP
4239 if (lancer_chip(adapter))
4240 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4241 else if (BE2_chip(adapter))
89a88ab8 4242 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
61000861 4243 else if (BE3_chip(adapter))
ca34fe38 4244 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
61000861
AK
4245 else
4246 /* ALL non-BE ASICs */
4247 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
ca34fe38 4248
ede23fa8
JP
4249 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4250 GFP_KERNEL);
6b7c5b94
SP
4251 if (cmd->va == NULL)
4252 return -1;
4253 return 0;
4254}
4255
3bc6b06c 4256static void be_remove(struct pci_dev *pdev)
6b7c5b94
SP
4257{
4258 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 4259
6b7c5b94
SP
4260 if (!adapter)
4261 return;
4262
045508a8 4263 be_roce_dev_remove(adapter);
8cef7a78 4264 be_intr_set(adapter, false);
045508a8 4265
f67ef7ba
PR
4266 cancel_delayed_work_sync(&adapter->func_recovery_work);
4267
6b7c5b94
SP
4268 unregister_netdev(adapter->netdev);
4269
5fb379ee
SP
4270 be_clear(adapter);
4271
bf99e50d
PR
4272 /* tell fw we're done with firing cmds */
4273 be_cmd_fw_clean(adapter);
4274
6b7c5b94
SP
4275 be_stats_cleanup(adapter);
4276
4277 be_ctrl_cleanup(adapter);
4278
d6b6d987
SP
4279 pci_disable_pcie_error_reporting(pdev);
4280
6b7c5b94
SP
4281 pci_release_regions(pdev);
4282 pci_disable_device(pdev);
4283
4284 free_netdev(adapter->netdev);
4285}
4286
4762f6ce
AK
4287bool be_is_wol_supported(struct be_adapter *adapter)
4288{
4289 return (adapter->wol_cap & BE_WOL_CAP) &&
4290 !be_is_wol_excluded(adapter);
4291}
4292
941a77d5
SK
4293u32 be_get_fw_log_level(struct be_adapter *adapter)
4294{
4295 struct be_dma_mem extfat_cmd;
4296 struct be_fat_conf_params *cfgs;
4297 int status;
4298 u32 level = 0;
4299 int j;
4300
f25b119c
PR
4301 if (lancer_chip(adapter))
4302 return 0;
4303
941a77d5
SK
4304 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4305 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4306 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4307 &extfat_cmd.dma);
4308
4309 if (!extfat_cmd.va) {
4310 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4311 __func__);
4312 goto err;
4313 }
4314
4315 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4316 if (!status) {
4317 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4318 sizeof(struct be_cmd_resp_hdr));
ac46a462 4319 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
941a77d5
SK
4320 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4321 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4322 }
4323 }
4324 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4325 extfat_cmd.dma);
4326err:
4327 return level;
4328}
abb93951 4329
39f1d94d 4330static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 4331{
6b7c5b94 4332 int status;
941a77d5 4333 u32 level;
6b7c5b94 4334
9e1453c5
AK
4335 status = be_cmd_get_cntl_attributes(adapter);
4336 if (status)
4337 return status;
4338
4762f6ce
AK
4339 status = be_cmd_get_acpi_wol_cap(adapter);
4340 if (status) {
4341 /* In case of a failure to get WoL capabilities,
4342 * check the exclusion list to determine WOL capability */
4343 if (!be_is_wol_excluded(adapter))
4344 adapter->wol_cap |= BE_WOL_CAP;
4345 }
4346
4347 if (be_is_wol_supported(adapter))
4348 adapter->wol = true;
4349
7aeb2156
PR
4350 /* Must be a power of 2 or else MODULO will BUG_ON */
4351 adapter->be_get_temp_freq = 64;
4352
941a77d5
SK
4353 level = be_get_fw_log_level(adapter);
4354 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4355
92bf14ab 4356 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
2243e2e9 4357 return 0;
6b7c5b94
SP
4358}
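/*
 * Editor's sketch: the "power of 2" comment above exists because, per
 * that comment, the driver's MODULO() of the work counter masks with
 * (limit - 1), which is only a valid remainder when limit is a power of
 * two (hence be_get_temp_freq = 64). Standalone illustration:
 */
#include <stdio.h>

static unsigned int modulo_pow2(unsigned int x, unsigned int limit)
{
	/* valid only for power-of-two limit; else the mask is wrong */
	return x & (limit - 1);
}

int main(void)
{
	printf("%u\n", modulo_pow2(130, 64));	/* 130 % 64 == 2 */
	return 0;
}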
4359
f67ef7ba 4360static int lancer_recover_func(struct be_adapter *adapter)
d8110f62 4361{
01e5b2c4 4362 struct device *dev = &adapter->pdev->dev;
d8110f62 4363 int status;
d8110f62 4364
f67ef7ba
PR
4365 status = lancer_test_and_set_rdy_state(adapter);
4366 if (status)
4367 goto err;
d8110f62 4368
f67ef7ba
PR
4369 if (netif_running(adapter->netdev))
4370 be_close(adapter->netdev);
d8110f62 4371
f67ef7ba
PR
4372 be_clear(adapter);
4373
01e5b2c4 4374 be_clear_all_error(adapter);
f67ef7ba
PR
4375
4376 status = be_setup(adapter);
4377 if (status)
4378 goto err;
d8110f62 4379
f67ef7ba
PR
4380 if (netif_running(adapter->netdev)) {
4381 status = be_open(adapter->netdev);
d8110f62
PR
4382 if (status)
4383 goto err;
f67ef7ba 4384 }
d8110f62 4385
4bebb56a 4386 dev_err(dev, "Adapter recovery successful\n");
f67ef7ba
PR
4387 return 0;
4388err:
01e5b2c4
SK
4389 if (status == -EAGAIN)
4390 dev_err(dev, "Waiting for resource provisioning\n");
4391 else
4bebb56a 4392 dev_err(dev, "Adapter recovery failed\n");
d8110f62 4393
f67ef7ba
PR
4394 return status;
4395}
4396
4397static void be_func_recovery_task(struct work_struct *work)
4398{
4399 struct be_adapter *adapter =
4400 container_of(work, struct be_adapter, func_recovery_work.work);
01e5b2c4 4401 int status = 0;
d8110f62 4402
f67ef7ba 4403 be_detect_error(adapter);
d8110f62 4404
f67ef7ba 4405 if (adapter->hw_error && lancer_chip(adapter)) {
d8110f62 4406
f67ef7ba
PR
4407 rtnl_lock();
4408 netif_device_detach(adapter->netdev);
4409 rtnl_unlock();
d8110f62 4410
f67ef7ba 4411 status = lancer_recover_func(adapter);
f67ef7ba
PR
4412 if (!status)
4413 netif_device_attach(adapter->netdev);
d8110f62 4414 }
f67ef7ba 4415
01e5b2c4
SK
4416 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4417 * there is no need to attempt further recovery.
4418 */
4419 if (!status || status == -EAGAIN)
4420 schedule_delayed_work(&adapter->func_recovery_work,
4421 msecs_to_jiffies(1000));
d8110f62
PR
4422}
4423
4424static void be_worker(struct work_struct *work)
4425{
4426 struct be_adapter *adapter =
4427 container_of(work, struct be_adapter, work.work);
4428 struct be_rx_obj *rxo;
4429 int i;
4430
d8110f62
PR
4431 /* When interrupts are not yet enabled, just reap any pending
4432 * MCC completions */
4433 if (!netif_running(adapter->netdev)) {
072a9c48 4434 local_bh_disable();
10ef9ab4 4435 be_process_mcc(adapter);
072a9c48 4436 local_bh_enable();
d8110f62
PR
4437 goto reschedule;
4438 }
4439
4440 if (!adapter->stats_cmd_sent) {
4441 if (lancer_chip(adapter))
4442 lancer_cmd_get_pport_stats(adapter,
4443 &adapter->stats_cmd);
4444 else
4445 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4446 }
4447
d696b5e2
VV
4448 if (be_physfn(adapter) &&
4449 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
7aeb2156
PR
4450 be_cmd_get_die_temperature(adapter);
4451
d8110f62 4452 for_all_rx_queues(adapter, rxo, i) {
6384a4d0
SP
4453 /* Replenish RX-queues starved due to memory
4454 * allocation failures.
4455 */
4456 if (rxo->rx_post_starved)
d8110f62 4457 be_post_rx_frags(rxo, GFP_KERNEL);
d8110f62
PR
4458 }
4459
2632bafd 4460 be_eqd_update(adapter);
10ef9ab4 4461
d8110f62
PR
4462reschedule:
4463 adapter->work_counter++;
4464 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4465}
4466
257a3feb 4467/* If any VFs are already enabled, don't FLR the PF */
39f1d94d
SP
4468static bool be_reset_required(struct be_adapter *adapter)
4469{
257a3feb 4470 return pci_num_vf(adapter->pdev) ? false : true;
39f1d94d
SP
4471}
4472
d379142b
SP
4473static char *mc_name(struct be_adapter *adapter)
4474{
4475 if (adapter->function_mode & FLEX10_MODE)
4476 return "FLEX10";
4477 else if (adapter->function_mode & VNIC_MODE)
4478 return "vNIC";
4479 else if (adapter->function_mode & UMC_ENABLED)
4480 return "UMC";
4481 else
4482 return "";
4483}
4484
4485static inline char *func_name(struct be_adapter *adapter)
4486{
4487 return be_physfn(adapter) ? "PF" : "VF";
4488}
4489
1dd06ae8 4490static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
6b7c5b94
SP
4491{
4492 int status = 0;
4493 struct be_adapter *adapter;
4494 struct net_device *netdev;
b4e32a71 4495 char port_name;
6b7c5b94
SP
4496
4497 status = pci_enable_device(pdev);
4498 if (status)
4499 goto do_none;
4500
4501 status = pci_request_regions(pdev, DRV_NAME);
4502 if (status)
4503 goto disable_dev;
4504 pci_set_master(pdev);
4505
7f640062 4506 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
6b7c5b94
SP
4507 if (netdev == NULL) {
4508 status = -ENOMEM;
4509 goto rel_reg;
4510 }
4511 adapter = netdev_priv(netdev);
4512 adapter->pdev = pdev;
4513 pci_set_drvdata(pdev, adapter);
4514 adapter->netdev = netdev;
2243e2e9 4515 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 4516
4c15c243 4517 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
4518 if (!status) {
4519 netdev->features |= NETIF_F_HIGHDMA;
4520 } else {
4c15c243 4521 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
4522 if (status) {
4523 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4524 goto free_netdev;
4525 }
4526 }
4527
ea58c180
AK
4528 if (be_physfn(adapter)) {
4529 status = pci_enable_pcie_error_reporting(pdev);
4530 if (!status)
4531 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4532 }
d6b6d987 4533
6b7c5b94
SP
4534 status = be_ctrl_init(adapter);
4535 if (status)
39f1d94d 4536 goto free_netdev;
6b7c5b94 4537
2243e2e9 4538 /* sync up with fw's ready state */
ba343c77 4539 if (be_physfn(adapter)) {
bf99e50d 4540 status = be_fw_wait_ready(adapter);
ba343c77
SB
4541 if (status)
4542 goto ctrl_clean;
ba343c77 4543 }
6b7c5b94 4544
39f1d94d
SP
4545 if (be_reset_required(adapter)) {
4546 status = be_cmd_reset_function(adapter);
4547 if (status)
4548 goto ctrl_clean;
556ae191 4549
2d177be8
KA
4550 /* Wait for interrupts to quiesce after an FLR */
4551 msleep(100);
4552 }
8cef7a78
SK
4553
4554 /* Allow interrupts for other ULPs running on NIC function */
4555 be_intr_set(adapter, true);
10ef9ab4 4556
2d177be8
KA
4557 /* tell fw we're ready to fire cmds */
4558 status = be_cmd_fw_init(adapter);
4559 if (status)
4560 goto ctrl_clean;
4561
2243e2e9
SP
4562 status = be_stats_init(adapter);
4563 if (status)
4564 goto ctrl_clean;
4565
39f1d94d 4566 status = be_get_initial_config(adapter);
6b7c5b94
SP
4567 if (status)
4568 goto stats_clean;
6b7c5b94
SP
4569
4570 INIT_DELAYED_WORK(&adapter->work, be_worker);
f67ef7ba 4571 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
a54769f5 4572 adapter->rx_fc = adapter->tx_fc = true;
6b7c5b94 4573
5fb379ee
SP
4574 status = be_setup(adapter);
4575 if (status)
55f5c3c5 4576 goto stats_clean;
2243e2e9 4577
3abcdeda 4578 be_netdev_init(netdev);
6b7c5b94
SP
4579 status = register_netdev(netdev);
4580 if (status != 0)
5fb379ee 4581 goto unsetup;
6b7c5b94 4582
045508a8
PP
4583 be_roce_dev_add(adapter);
4584
f67ef7ba
PR
4585 schedule_delayed_work(&adapter->func_recovery_work,
4586 msecs_to_jiffies(1000));
b4e32a71
PR
4587
4588 be_cmd_query_port_name(adapter, &port_name);
4589
d379142b
SP
4590 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4591 func_name(adapter), mc_name(adapter), port_name);
34b1ef04 4592
6b7c5b94
SP
4593 return 0;
4594
5fb379ee
SP
4595unsetup:
4596 be_clear(adapter);
6b7c5b94
SP
4597stats_clean:
4598 be_stats_cleanup(adapter);
4599ctrl_clean:
4600 be_ctrl_cleanup(adapter);
f9449ab7 4601free_netdev:
fe6d2a38 4602 free_netdev(netdev);
6b7c5b94
SP
4603rel_reg:
4604 pci_release_regions(pdev);
4605disable_dev:
4606 pci_disable_device(pdev);
4607do_none:
c4ca2374 4608 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
4609 return status;
4610}
4611
4612static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4613{
4614 struct be_adapter *adapter = pci_get_drvdata(pdev);
4615 struct net_device *netdev = adapter->netdev;
4616
71d8d1b5
AK
4617 if (adapter->wol)
4618 be_setup_wol(adapter, true);
4619
d4360d6f 4620 be_intr_set(adapter, false);
f67ef7ba
PR
4621 cancel_delayed_work_sync(&adapter->func_recovery_work);
4622
6b7c5b94
SP
4623 netif_device_detach(netdev);
4624 if (netif_running(netdev)) {
4625 rtnl_lock();
4626 be_close(netdev);
4627 rtnl_unlock();
4628 }
9b0365f1 4629 be_clear(adapter);
6b7c5b94
SP
4630
4631 pci_save_state(pdev);
4632 pci_disable_device(pdev);
4633 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4634 return 0;
4635}
4636
4637static int be_resume(struct pci_dev *pdev)
4638{
4639 int status = 0;
4640 struct be_adapter *adapter = pci_get_drvdata(pdev);
4641 struct net_device *netdev = adapter->netdev;
4642
4643 netif_device_detach(netdev);
4644
4645 status = pci_enable_device(pdev);
4646 if (status)
4647 return status;
4648
1ca01512 4649 pci_set_power_state(pdev, PCI_D0);
6b7c5b94
SP
4650 pci_restore_state(pdev);
4651
dd5746bf
SB
4652 status = be_fw_wait_ready(adapter);
4653 if (status)
4654 return status;
4655
d4360d6f 4656 be_intr_set(adapter, true);
2243e2e9
SP
4657 /* tell fw we're ready to fire cmds */
4658 status = be_cmd_fw_init(adapter);
4659 if (status)
4660 return status;
4661
9b0365f1 4662 be_setup(adapter);
6b7c5b94
SP
4663 if (netif_running(netdev)) {
4664 rtnl_lock();
4665 be_open(netdev);
4666 rtnl_unlock();
4667 }
f67ef7ba
PR
4668
4669 schedule_delayed_work(&adapter->func_recovery_work,
4670 msecs_to_jiffies(1000));
6b7c5b94 4671 netif_device_attach(netdev);
71d8d1b5
AK
4672
4673 if (adapter->wol)
4674 be_setup_wol(adapter, false);
a4ca055f 4675
6b7c5b94
SP
4676 return 0;
4677}
4678
82456b03
SP
4679/*
4680 * An FLR will stop BE from DMAing any data.
4681 */
4682static void be_shutdown(struct pci_dev *pdev)
4683{
4684 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 4685
2d5d4154
AK
4686 if (!adapter)
4687 return;
82456b03 4688
0f4a6828 4689 cancel_delayed_work_sync(&adapter->work);
f67ef7ba 4690 cancel_delayed_work_sync(&adapter->func_recovery_work);
a4ca055f 4691
2d5d4154 4692 netif_device_detach(adapter->netdev);
82456b03 4693
57841869
AK
4694 be_cmd_reset_function(adapter);
4695
82456b03 4696 pci_disable_device(pdev);
82456b03
SP
4697}
4698
cf588477
SP
4699static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4700 pci_channel_state_t state)
4701{
4702 struct be_adapter *adapter = pci_get_drvdata(pdev);
4703 struct net_device *netdev = adapter->netdev;
4704
4705 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4706
01e5b2c4
SK
4707 if (!adapter->eeh_error) {
4708 adapter->eeh_error = true;
cf588477 4709
01e5b2c4 4710 cancel_delayed_work_sync(&adapter->func_recovery_work);
cf588477 4711
cf588477 4712 rtnl_lock();
01e5b2c4
SK
4713 netif_device_detach(netdev);
4714 if (netif_running(netdev))
4715 be_close(netdev);
cf588477 4716 rtnl_unlock();
01e5b2c4
SK
4717
4718 be_clear(adapter);
cf588477 4719 }
cf588477
SP
4720
4721 if (state == pci_channel_io_perm_failure)
4722 return PCI_ERS_RESULT_DISCONNECT;
4723
4724 pci_disable_device(pdev);
4725
eeb7fc7b
SK
4726 /* The error could cause the FW to trigger a flash debug dump.
4727 * Resetting the card while flash dump is in progress
c8a54163
PR
4728 * can cause it not to recover; wait for it to finish.
4729 * Wait only for the first function, as it is needed only once per
4730 * adapter.
eeb7fc7b 4731 */
c8a54163
PR
4732 if (pdev->devfn == 0)
4733 ssleep(30);
4734
cf588477
SP
4735 return PCI_ERS_RESULT_NEED_RESET;
4736}
4737
4738static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4739{
4740 struct be_adapter *adapter = pci_get_drvdata(pdev);
4741 int status;
4742
4743 dev_info(&adapter->pdev->dev, "EEH reset\n");
cf588477
SP
4744
4745 status = pci_enable_device(pdev);
4746 if (status)
4747 return PCI_ERS_RESULT_DISCONNECT;
4748
4749 pci_set_master(pdev);
1ca01512 4750 pci_set_power_state(pdev, PCI_D0);
cf588477
SP
4751 pci_restore_state(pdev);
4752
4753 /* Check if card is ok and fw is ready */
c5b3ad4c
SP
4754 dev_info(&adapter->pdev->dev,
4755 "Waiting for FW to be ready after EEH reset\n");
bf99e50d 4756 status = be_fw_wait_ready(adapter);
cf588477
SP
4757 if (status)
4758 return PCI_ERS_RESULT_DISCONNECT;
4759
d6b6d987 4760 pci_cleanup_aer_uncorrect_error_status(pdev);
01e5b2c4 4761 be_clear_all_error(adapter);
cf588477
SP
4762 return PCI_ERS_RESULT_RECOVERED;
4763}
4764
4765static void be_eeh_resume(struct pci_dev *pdev)
4766{
4767 int status = 0;
4768 struct be_adapter *adapter = pci_get_drvdata(pdev);
4769 struct net_device *netdev = adapter->netdev;
4770
4771 dev_info(&adapter->pdev->dev, "EEH resume\n");
4772
4773 pci_save_state(pdev);
4774
2d177be8 4775 status = be_cmd_reset_function(adapter);
cf588477
SP
4776 if (status)
4777 goto err;
4778
2d177be8
KA
4779 /* tell fw we're ready to fire cmds */
4780 status = be_cmd_fw_init(adapter);
bf99e50d
PR
4781 if (status)
4782 goto err;
4783
cf588477
SP
4784 status = be_setup(adapter);
4785 if (status)
4786 goto err;
4787
4788 if (netif_running(netdev)) {
4789 status = be_open(netdev);
4790 if (status)
4791 goto err;
4792 }
f67ef7ba
PR
4793
4794 schedule_delayed_work(&adapter->func_recovery_work,
4795 msecs_to_jiffies(1000));
cf588477
SP
4796 netif_device_attach(netdev);
4797 return;
4798err:
4799 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
4800}
4801
3646f0e5 4802static const struct pci_error_handlers be_eeh_handlers = {
cf588477
SP
4803 .error_detected = be_eeh_err_detected,
4804 .slot_reset = be_eeh_reset,
4805 .resume = be_eeh_resume,
4806};
4807
6b7c5b94
SP
4808static struct pci_driver be_driver = {
4809 .name = DRV_NAME,
4810 .id_table = be_dev_ids,
4811 .probe = be_probe,
4812 .remove = be_remove,
4813 .suspend = be_suspend,
cf588477 4814 .resume = be_resume,
82456b03 4815 .shutdown = be_shutdown,
cf588477 4816 .err_handler = &be_eeh_handlers
6b7c5b94
SP
4817};
4818
4819static int __init be_init_module(void)
4820{
8e95a202
JP
4821 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4822 rx_frag_size != 2048) {
6b7c5b94
SP
4823 printk(KERN_WARNING DRV_NAME
4824 " : Module param rx_frag_size must be 2048/4096/8192."
4825 " Using 2048\n");
4826 rx_frag_size = 2048;
4827 }
6b7c5b94
SP
4828
4829 return pci_register_driver(&be_driver);
4830}
4831module_init(be_init_module);
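/*
 * Editor's sketch: module init above falls back to 2048 when the
 * rx_frag_size parameter is not one of the three supported fragment
 * sizes. Standalone equivalent of that validation:
 */
#include <stdio.h>

static unsigned short sanitize_rx_frag_size(unsigned short v)
{
	if (v != 2048 && v != 4096 && v != 8192) {
		fprintf(stderr,
			"rx_frag_size must be 2048/4096/8192, using 2048\n");
		return 2048;
	}
	return v;
}

int main(void)
{
	printf("%u\n", sanitize_rx_frag_size(1500));	/* prints 2048 */
	return 0;
}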
4832
4833static void __exit be_exit_module(void)
4834{
4835 pci_unregister_driver(&be_driver);
4836}
4837module_exit(be_exit_module);