be2net: Add TX completion error statistics in ethtool
drivers/net/ethernet/emulex/benet/be_main.c

/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "ERX2",
        "SPARE",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "ECRC",
        "Poison TLP",
        "NETC",
        "PERIPH",
        "LLTXULP",
        "D2P",
        "RCON",
        "LDMA",
        "LLTXP",
        "LLTXPB",
        "Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;

        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                          u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                      GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                              &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                               PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status = 0;

        /* On lancer interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (adapter->eeh_error)
                return;

        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

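/*
 * Doorbell helpers: each queue's producer index is posted to the device
 * through a doorbell register. The wmb() ensures that all ring
 * descriptor writes are visible to the device before the doorbell is
 * rung.
 */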
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;

        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;

        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                         bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;
        struct sockaddr *addr = p;
        int status;
        u8 mac[ETH_ALEN];
        u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* Proceed further only if the user-provided MAC is different
         * from the active MAC
         */
        if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
                return 0;

        /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
         * privilege or if the PF did not provision the new MAC address.
         * On BE3, this cmd will always fail if the VF doesn't have the
         * FILTMGMT privilege. This failure is OK only if the PF programmed
         * the MAC for the VF.
         */
        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle, &adapter->pmac_id[0], 0);
        if (!status) {
                curr_pmac_id = adapter->pmac_id[0];

                /* Delete the old programmed MAC. This call may fail if the
                 * old MAC was already deleted by the PF driver.
                 */
                if (adapter->pmac_id[0] != old_pmac_id)
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        old_pmac_id, 0);
        }

        /* Decide if the new MAC is successfully activated only after
         * querying the FW
         */
        status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
                                       adapter->if_handle, true, 0);
        if (status)
                goto err;

        /* The MAC change did not happen, either due to lack of privilege
         * or because the PF didn't pre-provision it.
         */
        if (!ether_addr_equal(addr->sa_data, mac)) {
                status = -EPERM;
                goto err;
        }

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        dev_info(dev, "MAC address changed to %pM\n", mac);
        return 0;
err:
        dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
        return status;
}

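/*
 * The FW GET_STATS response layout is versioned by ASIC generation:
 * v0 for BE2, v1 for BE3 and v2 for later chips. The helpers below
 * return the right sub-structure from the DMA'd stats buffer.
 */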
/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else if (BE3_chip(adapter)) {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else if (BE3_chip(adapter)) {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_filtered =
                port_stats->rx_address_filtered +
                port_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v2 *port_stats =
                &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
        if (be_roce_supported(adapter)) {
                drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
                drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
                drvs->rx_roce_frames = port_stats->roce_frames_received;
                drvs->roce_drops_crc = port_stats->roce_drops_crc;
                drvs->roce_drops_payload_len =
                        port_stats->roce_drops_payload_len;
        }
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_filtered =
                pport_stats->rx_address_filtered +
                pport_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                pport_stats->rx_drops_too_many_frags_lo;
}

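/*
 * Accumulate a 16-bit HW counter into a 32-bit SW accumulator while
 * detecting wrap-around: e.g. if the accumulator's low 16 bits read
 * 0xFFF0 and the HW now reports 0x0005, the counter wrapped once and
 * 65536 must be added on top of the new low-order value.
 */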
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
                               struct be_rx_obj *rxo, u32 erx_stat)
{
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
        else
                /* below erx HW counter can actually wrap around after
                 * 65535. Driver accumulates a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;
        u32 erx_stat;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else if (BE3_chip(adapter))
                        /* for BE3 */
                        populate_be_v1_stats(adapter);
                else
                        populate_be_v2_stats(adapter);

                /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
                }
        }
}

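/*
 * Per-queue SW counters are 64-bit and protected by a u64_stats
 * seqcount; readers loop until they obtain a sample no writer raced
 * with, so 64-bit values are never read torn on 32-bit hosts.
 */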
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                                struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);

                do {
                        start = u64_stats_fetch_begin_irq(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                     rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);

                do {
                        start = u64_stats_fetch_begin_irq(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f; it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if (link_status)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                               u32 wrb_cnt, u32 copied, u32 gso_segs,
                               bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

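/*
 * A TX request consumes one WRB for the header, one for the linear part
 * of the skb (if any) and one per page fragment; on non-Lancer chips an
 * extra dummy WRB is added whenever the total would otherwise be odd.
 * E.g. a linear skb with one fragment needs 1 + 1 + 1 = 3 WRBs and is
 * padded to 4 with a dummy.
 */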
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                           bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                     struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                           adapter->recommended_prio;

        return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
        return (inner_ip_hdr(skb)->version == 4) ?
                inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
        return (ip_hdr(skb)->version == 4) ?
                ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                         struct sk_buff *skb, u32 wrb_cnt, u32 len,
                         bool skip_hw_vlan)
{
        u16 vlan_tag, proto;

        memset(hdr, 0, sizeof(*hdr));

        SET_TX_WRB_HDR_BITS(crc, hdr, 1);

        if (skb_is_gso(skb)) {
                SET_TX_WRB_HDR_BITS(lso, hdr, 1);
                SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (skb->encapsulation) {
                        SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
                        proto = skb_inner_ip_proto(skb);
                } else {
                        proto = skb_ip_proto(skb);
                }
                if (proto == IPPROTO_TCP)
                        SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
                else if (proto == IPPROTO_UDP)
                        SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
        }

        /* To skip HW VLAN tagging: evt = 1, compl = 0 */
        SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan);
        SET_TX_WRB_HDR_BITS(event, hdr, 1);
        SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
        SET_TX_WRB_HDR_BITS(len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                          bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                        struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
                        bool skip_hw_vlan)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);

                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             bool *skip_hw_vlan)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);

        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
                if (!vlan_tag)
                        vlan_tag = adapter->pvid;
                /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
                 * skip VLAN insertion
                 */
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        if (vlan_tag) {
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                skb->vlan_tci = 0;
        }

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;
        u16 offset = ETH_HLEN;

        if (eh->h_proto == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

                offset += sizeof(struct ipv6hdr);
                if (ip6h->nexthdr != NEXTHDR_TCP &&
                    ip6h->nexthdr != NEXTHDR_UDP) {
                        struct ipv6_opt_hdr *ehdr =
                                (struct ipv6_opt_hdr *)(skb->data + offset);

                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
                        if (ehdr->hdrlen == 0xff)
                                return true;
                }
        }
        return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
                                                  struct sk_buff *skb,
                                                  bool *skip_hw_vlan)
{
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
        unsigned int eth_hdr_len;
        struct iphdr *ip;

        /* For padded packets, BE HW modifies tot_len field in IP header
         * incorrectly when VLAN tag is inserted by HW.
         * For padded packets, Lancer computes incorrect checksum.
         */
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                                                VLAN_ETH_HLEN : ETH_HLEN;
        if (skb->len <= 60 &&
            (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
            is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* If vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in pvid-tagging mode
         */
        if (be_pvid_tagging_enabled(adapter) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                *skip_hw_vlan = true;

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
            vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto err;
        }

        /* HW may lockup when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
                     (adapter->pvid || adapter->qnq_vid) &&
                     !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;

        /* Manual VLAN tag insertion to prevent:
         * ASIC lockup when the ASIC inserts VLAN tag into
         * certain ipv6 packets. Insert VLAN tags in driver,
         * and set event, completion, vlan bits accordingly
         * in the Tx WRB.
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto err;
        }

        return skb;
tx_drop:
        dev_kfree_skb_any(skb);
err:
        return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
                                           struct sk_buff *skb,
                                           bool *skip_hw_vlan)
{
        /* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or
         * less may cause a transmit stall on that port. So the work-around is
         * to pad short packets (<= 32 bytes) to a 36-byte length.
         */
        if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
                if (skb_padto(skb, 36))
                        return NULL;
                skb->len = 36;
        }

        if (BEx_chip(adapter) || lancer_chip(adapter)) {
                skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
                if (!skb)
                        return NULL;
        }

        return skb;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        bool dummy_wrb, stopped = false;
        u32 wrb_cnt = 0, copied = 0;
        bool skip_hw_vlan = false;
        u32 start = txq->head;

        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
        if (!skb) {
                tx_stats(txo)->tx_drv_drops++;
                return NETDEV_TX_OK;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
                              skip_hw_vlan);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                    txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                tx_stats(txo)->tx_drv_drops++;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
            new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                         "MTU must be between %d and %d bytes\n",
                         BE_MIN_MTU,
                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                 netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i = 0;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > be_max_vlans(adapter))
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for_each_set_bit(i, adapter->vids, VLAN_N_VID)
                vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
        if (status) {
                /* Set to VLAN promisc mode as setting VLAN filter failed */
                if (addl_status(status) ==
                    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
                        goto set_vlan_promisc;
                dev_err(&adapter->pdev->dev,
                        "Setting HW VLAN filtering failed\n");
        } else {
                if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
                        /* hw VLAN filtering re-enabled */
                        status = be_cmd_rx_filter(adapter,
                                                  BE_FLAGS_VLAN_PROMISC, OFF);
                        if (!status) {
                                dev_info(&adapter->pdev->dev,
                                         "Disabling VLAN promiscuous mode\n");
                                adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
                        }
                }
        }

        return status;

set_vlan_promisc:
        if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
                return 0;

        status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
        if (!status) {
                dev_info(&adapter->pdev->dev,
                         "Enabling VLAN promiscuous mode\n");
                adapter->flags |= BE_FLAGS_VLAN_PROMISC;
        } else {
                dev_err(&adapter->pdev->dev,
                        "Failed to enable VLAN promiscuous mode\n");
        }
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                return status;

        if (test_bit(vid, adapter->vids))
                return status;

        set_bit(vid, adapter->vids);
        adapter->vlans_added++;

        status = be_vid_config(adapter);
        if (status) {
                adapter->vlans_added--;
                clear_bit(vid, adapter->vids);
        }

        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                return 0;

        clear_bit(vid, adapter->vids);
        adapter->vlans_added--;

        return be_vid_config(adapter);
}

static void be_clear_promisc(struct be_adapter *adapter)
{
        adapter->promiscuous = false;
        adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);

        be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}

static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                be_clear_promisc(adapter);
                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > be_max_mc(adapter))
                goto set_mcast_promisc;

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
        if (!status) {
                if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
                        adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
                goto done;
        }

set_mcast_promisc:
        if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
                return;

        /* Set to MCAST promisc mode if setting MULTICAST address fails
         * or if num configured exceeds what we support
         */
        status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        if (!status)
                adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        /* Proceed further only if the user-provided MAC is different
         * from the active MAC
         */
        if (ether_addr_equal(mac, vf_cfg->mac_addr))
                return 0;

        if (BEx_chip(adapter)) {
                be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
                                vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        } else {
                status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
                                        vf + 1);
        }

        if (status) {
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
                        mac, vf, status);
                return be_cmd_status(status);
        }

        ether_addr_copy(vf_cfg->mac_addr, mac);

        return 0;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                            struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->max_tx_rate = vf_cfg->tx_rate;
        vi->min_tx_rate = 0;
        vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
        vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
        vi->linkstate = adapter->vf_cfg[vf].plink_tracking;

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
                return -EINVAL;

        if (vlan || qos) {
                vlan |= qos << VLAN_PRIO_SHIFT;
                if (vf_cfg->vlan_tag != vlan)
                        status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                                                       vf_cfg->if_handle, 0);
        } else {
                /* Reset Transparent Vlan Tagging. */
                status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
                                               vf + 1, vf_cfg->if_handle, 0);
        }

        if (status) {
                dev_err(&adapter->pdev->dev,
                        "VLAN %d config on VF %d failed : %#x\n", vlan,
                        vf, status);
                return be_cmd_status(status);
        }

        vf_cfg->vlan_tag = vlan;

        return 0;
}

static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
                             int min_tx_rate, int max_tx_rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;
        int percent_rate, status = 0;
        u16 link_speed = 0;
        u8 link_status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (min_tx_rate)
                return -EINVAL;

        if (!max_tx_rate)
                goto config_qos;

        status = be_cmd_link_status_query(adapter, &link_speed,
                                          &link_status, 0);
        if (status)
                goto err;

        if (!link_status) {
                dev_err(dev, "TX-rate setting not allowed when link is down\n");
                status = -ENETDOWN;
                goto err;
        }

        if (max_tx_rate < 100 || max_tx_rate > link_speed) {
                dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
                        link_speed);
                status = -EINVAL;
                goto err;
        }

        /* On Skyhawk the QOS setting must be done only as a % value */
        percent_rate = link_speed / 100;
        if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
                dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
                        percent_rate);
                status = -EINVAL;
                goto err;
        }

config_qos:
        status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
        if (status)
                goto err;

        adapter->vf_cfg[vf].tx_rate = max_tx_rate;
        return 0;

err:
        dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
                max_tx_rate, vf);
        return be_cmd_status(status);
}

static int be_set_vf_link_state(struct net_device *netdev, int vf,
                                int link_state)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        status = be_cmd_set_logical_link_config(adapter, link_state, vf + 1);
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "Link state change on VF %d failed: %#x\n", vf, status);
                return be_cmd_status(status);
        }

        adapter->vf_cfg[vf].plink_tracking = link_state;

        return 0;
}

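/*
 * Adaptive interrupt coalescing: sample each queue-pair's rx+tx packet
 * rate since the last visit and derive an EQ delay from it (roughly 4
 * delay units per 15K pkts/sec, clamped to the configured min/max).
 * Samples that wrapped around, or the first sample for a queue, are
 * skipped.
 */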
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
                          ulong now)
{
        aic->rx_pkts_prev = rx_pkts;
        aic->tx_reqs_prev = tx_pkts;
        aic->jiffies = now;
}

static void be_eqd_update(struct be_adapter *adapter)
{
        struct be_set_eqd set_eqd[MAX_EVT_QS];
        int eqd, i, num = 0, start;
        struct be_aic_obj *aic;
        struct be_eq_obj *eqo;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 rx_pkts, tx_pkts;
        ulong now;
        u32 pps, delta;

        for_all_evt_queues(adapter, eqo, i) {
                aic = &adapter->aic_obj[eqo->idx];
                if (!aic->enable) {
                        if (aic->jiffies)
                                aic->jiffies = 0;
                        eqd = aic->et_eqd;
                        goto modify_eqd;
                }

                rxo = &adapter->rx_obj[eqo->idx];
                do {
                        start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
                        rx_pkts = rxo->stats.rx_pkts;
                } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

                txo = &adapter->tx_obj[eqo->idx];
                do {
                        start = u64_stats_fetch_begin_irq(&txo->stats.sync);
                        tx_pkts = txo->stats.tx_reqs;
                } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

                /* Skip, if wrapped around or first calculation */
                now = jiffies;
                if (!aic->jiffies || time_before(now, aic->jiffies) ||
                    rx_pkts < aic->rx_pkts_prev ||
                    tx_pkts < aic->tx_reqs_prev) {
                        be_aic_update(aic, rx_pkts, tx_pkts, now);
                        continue;
                }

                delta = jiffies_to_msecs(now - aic->jiffies);
                pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
                        (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
                eqd = (pps / 15000) << 2;

                if (eqd < 8)
                        eqd = 0;
                eqd = min_t(u32, eqd, aic->max_eqd);
                eqd = max_t(u32, eqd, aic->min_eqd);

                be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
                if (eqd != aic->prev_eqd) {
                        set_eqd[num].delay_multiplier = (eqd * 65) / 100;
                        set_eqd[num].eq_id = eqo->q.id;
                        aic->prev_eqd = eqd;
                        num++;
                }
        }

        if (num)
                be_cmd_modify_eqd(adapter, set_eqd, num);
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                               struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts
         */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
}

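/*
 * RX buffers are carved out of large pages in rx_frag_size chunks.
 * The chunk posted last from a page is flagged last_frag; consuming it
 * unmaps the whole page, while earlier chunks of the same page are
 * merely synced for CPU access.
 */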
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;
        u16 frag_idx = rxq->tail;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_frag) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_frag = false;
        } else {
                dma_sync_single_for_cpu(&adapter->pdev->dev,
                                        dma_unmap_addr(rx_page_info, bus),
                                        rx_frag_size, DMA_FROM_DEVICE);
        }

        queue_tail_inc(rxq);
        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throwaway the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(rxo);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
}

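/*
 * While assembling the skb below, RX frags that come from the same
 * physical page are coalesced into a single skb fragment slot; a new
 * slot is started only when a frag begins at page offset 0 (a fresh
 * page).
 */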
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                             struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(rxo);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                memcpy(skb->data, start, curr_frag_len);
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                hdr_len = ETH_HLEN;
                memcpy(skb->data, start, hdr_len);
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0],
                                  curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
                                struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        skb_fill_rx_data(rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->protocol = eth_type_trans(skb, netdev);
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (netdev->features & NETIF_F_RXHASH)
                skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

        skb->csum_level = rxcp->tunneled;
        skb_mark_napi_id(skb, napi);

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

        netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
5be93b9a 1694/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1695static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1696 struct napi_struct *napi,
1697 struct be_rx_compl_info *rxcp)
6b7c5b94 1698{
10ef9ab4 1699 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1700 struct be_rx_page_info *page_info;
5be93b9a 1701 struct sk_buff *skb = NULL;
2e588f84
SP
1702 u16 remaining, curr_frag_len;
1703 u16 i, j;
3968fa1e 1704
10ef9ab4 1705 skb = napi_get_frags(napi);
5be93b9a 1706 if (!skb) {
10ef9ab4 1707 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1708 return;
1709 }
1710
2e588f84
SP
1711 remaining = rxcp->pkt_size;
1712 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1713 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1714
1715 curr_frag_len = min(remaining, rx_frag_size);
1716
bd46cb6c
AK
1717 /* Coalesce all frags from the same physical page in one slot */
1718 if (i == 0 || page_info->page_offset == 0) {
1719 /* First frag or Fresh page */
1720 j++;
b061b39e 1721 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1722 skb_shinfo(skb)->frags[j].page_offset =
1723 page_info->page_offset;
9e903e08 1724 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1725 } else {
1726 put_page(page_info->page);
1727 }
9e903e08 1728 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1729 skb->truesize += rx_frag_size;
bd46cb6c 1730 remaining -= curr_frag_len;
6b7c5b94
SP
1731 memset(page_info, 0, sizeof(*page_info));
1732 }
bd46cb6c 1733 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1734
5be93b9a 1735 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1736 skb->len = rxcp->pkt_size;
1737 skb->data_len = rxcp->pkt_size;
5be93b9a 1738 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1739 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 1740 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 1741 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1742
b6c0e89d 1743 skb->csum_level = rxcp->tunneled;
6384a4d0 1744 skb_mark_napi_id(skb, napi);
5be93b9a 1745
343e43c0 1746 if (rxcp->vlanf)
86a9bad3 1747 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1748
10ef9ab4 1749 napi_gro_frags(napi);
2e588f84
SP
1750}
1751
10ef9ab4
SP
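/* Unpack a v1 RX completion into the driver's be_rx_compl_info;
 * each GET_RX_COMPL_V1_BITS() call extracts one named bit-field
 * from the DMA'ed completion descriptor.
 */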
1752static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1753 struct be_rx_compl_info *rxcp)
2e588f84 1754{
c3c18bc1
SP
1755 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
1756 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
1757 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
1758 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
1759 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
1760 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
1761 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
1762 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
1763 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
1764 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
1765 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 1766 if (rxcp->vlanf) {
c3c18bc1
SP
1767 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
1768 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 1769 }
c3c18bc1 1770 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 1771 rxcp->tunneled =
c3c18bc1 1772 GET_RX_COMPL_V1_BITS(tunneled, compl);
2e588f84
SP
1773}
1774
10ef9ab4
SP
1775static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1776 struct be_rx_compl_info *rxcp)
2e588f84 1777{
c3c18bc1
SP
1778 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
1779 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
1780 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
1781 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
1782 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
1783 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
1784 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
1785 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
1786 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
1787 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
1788 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 1789 if (rxcp->vlanf) {
c3c18bc1
SP
1790 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
1791 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 1792 }
c3c18bc1
SP
1793 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
1794 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2e588f84
SP
1795}
1796
1797static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1798{
1799 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1800 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1801 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1802
2e588f84
SP
 1803 /* For checking the valid bit it is OK to use either definition, as the
 1804 * valid bit is at the same position in both v0 and v1 Rx compl */
1805 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1806 return NULL;
6b7c5b94 1807
2e588f84
SP
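 /* Read the rest of the completion only after seeing the valid bit */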
1808 rmb();
1809 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1810
2e588f84 1811 if (adapter->be3_native)
10ef9ab4 1812 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1813 else
10ef9ab4 1814 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1815
e38b1706
SK
1816 if (rxcp->ip_frag)
1817 rxcp->l4_csum = 0;
1818
15d72184 1819 if (rxcp->vlanf) {
f93f160b
VV
1820 /* In QNQ modes, if qnq bit is not set, then the packet was
1821 * tagged only with the transparent outer vlan-tag and must
1822 * not be treated as a vlan packet by host
1823 */
1824 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 1825 rxcp->vlanf = 0;
6b7c5b94 1826
15d72184 1827 if (!lancer_chip(adapter))
3c709f8f 1828 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1829
939cf306 1830 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 1831 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
1832 rxcp->vlanf = 0;
1833 }
2e588f84
SP
1834
 1835 /* As the compl has been parsed, reset it; we won't touch it again */
1836 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1837
3abcdeda 1838 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1839 return rxcp;
1840}
1841
1829b086 1842static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1843{
6b7c5b94 1844 u32 order = get_order(size);
1829b086 1845
6b7c5b94 1846 if (order > 0)
1829b086
ED
1847 gfp |= __GFP_COMP;
1848 return alloc_pages(gfp, order);
6b7c5b94
SP
1849}
1850
1851/*
1852 * Allocate a page, split it to fragments of size rx_frag_size and post as
1853 * receive buffers to BE
1854 */
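/* With the default rx_frag_size of 2048 on a 4K-page system,
 * big_page_size works out to a single page, i.e. two fragments
 * per allocated page (worked example; the actual split depends on
 * the rx_frag_size module parameter).
 */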
1829b086 1855static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1856{
3abcdeda 1857 struct be_adapter *adapter = rxo->adapter;
26d92f92 1858 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1859 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1860 struct page *pagep = NULL;
ba42fad0 1861 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
1862 struct be_eth_rx_d *rxd;
1863 u64 page_dmaaddr = 0, frag_dmaaddr;
1864 u32 posted, page_offset = 0;
1865
3abcdeda 1866 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1867 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1868 if (!pagep) {
1829b086 1869 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1870 if (unlikely(!pagep)) {
ac124ff9 1871 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1872 break;
1873 }
ba42fad0
IV
1874 page_dmaaddr = dma_map_page(dev, pagep, 0,
1875 adapter->big_page_size,
2b7bcebf 1876 DMA_FROM_DEVICE);
ba42fad0
IV
1877 if (dma_mapping_error(dev, page_dmaaddr)) {
1878 put_page(pagep);
1879 pagep = NULL;
1880 rx_stats(rxo)->rx_post_fail++;
1881 break;
1882 }
e50287be 1883 page_offset = 0;
6b7c5b94
SP
1884 } else {
1885 get_page(pagep);
e50287be 1886 page_offset += rx_frag_size;
6b7c5b94 1887 }
e50287be 1888 page_info->page_offset = page_offset;
6b7c5b94 1889 page_info->page = pagep;
6b7c5b94
SP
1890
1891 rxd = queue_head_node(rxq);
e50287be 1892 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
1893 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1894 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1895
1896 /* Any space left in the current big page for another frag? */
1897 if ((page_offset + rx_frag_size + rx_frag_size) >
1898 adapter->big_page_size) {
1899 pagep = NULL;
e50287be
SP
1900 page_info->last_frag = true;
1901 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1902 } else {
1903 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 1904 }
26d92f92
SP
1905
1906 prev_page_info = page_info;
1907 queue_head_inc(rxq);
10ef9ab4 1908 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 1909 }
e50287be
SP
1910
1911 /* Mark the last frag of a page when we break out of the above loop
1912 * with no more slots available in the RXQ
1913 */
1914 if (pagep) {
1915 prev_page_info->last_frag = true;
1916 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
1917 }
6b7c5b94
SP
1918
1919 if (posted) {
6b7c5b94 1920 atomic_add(posted, &rxq->used);
6384a4d0
SP
1921 if (rxo->rx_post_starved)
1922 rxo->rx_post_starved = false;
8788fdc2 1923 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1924 } else if (atomic_read(&rxq->used) == 0) {
1925 /* Let be_worker replenish when memory is available */
3abcdeda 1926 rxo->rx_post_starved = true;
6b7c5b94 1927 }
6b7c5b94
SP
1928}
1929
5fb379ee 1930static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1931{
6b7c5b94
SP
1932 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1933
1934 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1935 return NULL;
1936
f3eb62d2 1937 rmb();
6b7c5b94
SP
1938 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1939
1940 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1941
1942 queue_tail_inc(tx_cq);
1943 return txcp;
1944}
1945
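/* Free the skb whose wrbs span txq->tail..last_index: skip the
 * header wrb, DMA-unmap every fragment wrb, and return the total
 * wrb count (header included) so the caller can credit the queue.
 */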
3c8def97 1946static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 1947 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1948{
3c8def97 1949 struct be_queue_info *txq = &txo->q;
a73b796e 1950 struct be_eth_wrb *wrb;
3c8def97 1951 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1952 struct sk_buff *sent_skb;
ec43b1a6
SP
1953 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1954 bool unmap_skb_hdr = true;
6b7c5b94 1955
ec43b1a6 1956 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1957 BUG_ON(!sent_skb);
ec43b1a6
SP
1958 sent_skbs[txq->tail] = NULL;
1959
1960 /* skip header wrb */
a73b796e 1961 queue_tail_inc(txq);
6b7c5b94 1962
ec43b1a6 1963 do {
6b7c5b94 1964 cur_index = txq->tail;
a73b796e 1965 wrb = queue_tail_node(txq);
2b7bcebf
IV
1966 unmap_tx_frag(&adapter->pdev->dev, wrb,
1967 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1968 unmap_skb_hdr = false;
1969
6b7c5b94
SP
1970 num_wrbs++;
1971 queue_tail_inc(txq);
ec43b1a6 1972 } while (cur_index != last_index);
6b7c5b94 1973
96d49225 1974 dev_consume_skb_any(sent_skb);
4d586b82 1975 return num_wrbs;
6b7c5b94
SP
1976}
1977
10ef9ab4
SP
1978/* Return the number of events in the event queue */
1979static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1980{
10ef9ab4
SP
1981 struct be_eq_entry *eqe;
1982 int num = 0;
859b1e4e 1983
10ef9ab4
SP
1984 do {
1985 eqe = queue_tail_node(&eqo->q);
1986 if (eqe->evt == 0)
1987 break;
859b1e4e 1988
10ef9ab4
SP
1989 rmb();
1990 eqe->evt = 0;
1991 num++;
1992 queue_tail_inc(&eqo->q);
1993 } while (true);
1994
1995 return num;
859b1e4e
SP
1996}
1997
10ef9ab4
SP
 1998 /* Leaves the EQ in disarmed state */
1999static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2000{
10ef9ab4 2001 int num = events_get(eqo);
859b1e4e 2002
10ef9ab4 2003 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
2004}
2005
10ef9ab4 2006static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
2007{
2008 struct be_rx_page_info *page_info;
3abcdeda
SP
2009 struct be_queue_info *rxq = &rxo->q;
2010 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2011 struct be_rx_compl_info *rxcp;
d23e946c
SP
2012 struct be_adapter *adapter = rxo->adapter;
2013 int flush_wait = 0;
6b7c5b94 2014
d23e946c
SP
2015 /* Consume pending rx completions.
2016 * Wait for the flush completion (identified by zero num_rcvd)
2017 * to arrive. Notify CQ even when there are no more CQ entries
2018 * for HW to flush partially coalesced CQ entries.
2019 * In Lancer, there is no need to wait for flush compl.
2020 */
2021 for (;;) {
2022 rxcp = be_rx_compl_get(rxo);
ddf1169f 2023 if (!rxcp) {
d23e946c
SP
2024 if (lancer_chip(adapter))
2025 break;
2026
2027 if (flush_wait++ > 10 || be_hw_error(adapter)) {
2028 dev_warn(&adapter->pdev->dev,
2029 "did not receive flush compl\n");
2030 break;
2031 }
2032 be_cq_notify(adapter, rx_cq->id, true, 0);
2033 mdelay(1);
2034 } else {
2035 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2036 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2037 if (rxcp->num_rcvd == 0)
2038 break;
2039 }
6b7c5b94
SP
2040 }
2041
d23e946c
SP
2042 /* After cleanup, leave the CQ in unarmed state */
2043 be_cq_notify(adapter, rx_cq->id, false, 0);
2044
2045 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
2046 while (atomic_read(&rxq->used) > 0) {
2047 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2048 put_page(page_info->page);
2049 memset(page_info, 0, sizeof(*page_info));
2050 }
2051 BUG_ON(atomic_read(&rxq->used));
482c9e79 2052 rxq->tail = rxq->head = 0;
6b7c5b94
SP
2053}
2054
0ae57bb3 2055static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2056{
0ae57bb3
SP
2057 struct be_tx_obj *txo;
2058 struct be_queue_info *txq;
a8e9179a 2059 struct be_eth_tx_compl *txcp;
4d586b82 2060 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
2061 struct sk_buff *sent_skb;
2062 bool dummy_wrb;
0ae57bb3 2063 int i, pending_txqs;
a8e9179a 2064
1a3d0717 2065 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2066 do {
0ae57bb3
SP
2067 pending_txqs = adapter->num_tx_qs;
2068
2069 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2070 cmpl = 0;
2071 num_wrbs = 0;
0ae57bb3
SP
2072 txq = &txo->q;
2073 while ((txcp = be_tx_compl_get(&txo->cq))) {
c3c18bc1 2074 end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
0ae57bb3
SP
2075 num_wrbs += be_tx_compl_process(adapter, txo,
2076 end_idx);
2077 cmpl++;
2078 }
2079 if (cmpl) {
2080 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2081 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2082 timeo = 0;
0ae57bb3
SP
2083 }
2084 if (atomic_read(&txq->used) == 0)
2085 pending_txqs--;
a8e9179a
SP
2086 }
2087
1a3d0717 2088 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
a8e9179a
SP
2089 break;
2090
2091 mdelay(1);
2092 } while (true);
2093
0ae57bb3
SP
2094 for_all_tx_queues(adapter, txo, i) {
2095 txq = &txo->q;
2096 if (atomic_read(&txq->used))
2097 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2098 atomic_read(&txq->used));
2099
2100 /* free posted tx for which compls will never arrive */
2101 while (atomic_read(&txq->used)) {
2102 sent_skb = txo->sent_skb_list[txq->tail];
2103 end_idx = txq->tail;
2104 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2105 &dummy_wrb);
2106 index_adv(&end_idx, num_wrbs - 1, txq->len);
2107 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2108 atomic_sub(num_wrbs, &txq->used);
2109 }
b03388d6 2110 }
6b7c5b94
SP
2111}
2112
10ef9ab4
SP
2113static void be_evt_queues_destroy(struct be_adapter *adapter)
2114{
2115 struct be_eq_obj *eqo;
2116 int i;
2117
2118 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2119 if (eqo->q.created) {
2120 be_eq_clean(eqo);
10ef9ab4 2121 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2122 napi_hash_del(&eqo->napi);
68d7bdcb 2123 netif_napi_del(&eqo->napi);
19d59aa7 2124 }
10ef9ab4
SP
2125 be_queue_free(adapter, &eqo->q);
2126 }
2127}
2128
2129static int be_evt_queues_create(struct be_adapter *adapter)
2130{
2131 struct be_queue_info *eq;
2132 struct be_eq_obj *eqo;
2632bafd 2133 struct be_aic_obj *aic;
10ef9ab4
SP
2134 int i, rc;
2135
92bf14ab
SP
2136 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2137 adapter->cfg_num_qs);
10ef9ab4
SP
2138
2139 for_all_evt_queues(adapter, eqo, i) {
68d7bdcb
SP
2140 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2141 BE_NAPI_WEIGHT);
6384a4d0 2142 napi_hash_add(&eqo->napi);
2632bafd 2143 aic = &adapter->aic_obj[i];
10ef9ab4
SP
2144 eqo->adapter = adapter;
2145 eqo->tx_budget = BE_TX_BUDGET;
2146 eqo->idx = i;
2632bafd
SP
2147 aic->max_eqd = BE_MAX_EQD;
2148 aic->enable = true;
10ef9ab4
SP
2149
2150 eq = &eqo->q;
2151 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2152 sizeof(struct be_eq_entry));
10ef9ab4
SP
2153 if (rc)
2154 return rc;
2155
f2f781a7 2156 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2157 if (rc)
2158 return rc;
2159 }
1cfafab9 2160 return 0;
10ef9ab4
SP
2161}
2162
5fb379ee
SP
2163static void be_mcc_queues_destroy(struct be_adapter *adapter)
2164{
2165 struct be_queue_info *q;
5fb379ee 2166
8788fdc2 2167 q = &adapter->mcc_obj.q;
5fb379ee 2168 if (q->created)
8788fdc2 2169 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2170 be_queue_free(adapter, q);
2171
8788fdc2 2172 q = &adapter->mcc_obj.cq;
5fb379ee 2173 if (q->created)
8788fdc2 2174 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2175 be_queue_free(adapter, q);
2176}
2177
2178/* Must be called only after TX qs are created as MCC shares TX EQ */
2179static int be_mcc_queues_create(struct be_adapter *adapter)
2180{
2181 struct be_queue_info *q, *cq;
5fb379ee 2182
8788fdc2 2183 cq = &adapter->mcc_obj.cq;
5fb379ee 2184 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2185 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2186 goto err;
2187
10ef9ab4
SP
2188 /* Use the default EQ for MCC completions */
2189 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2190 goto mcc_cq_free;
2191
8788fdc2 2192 q = &adapter->mcc_obj.q;
5fb379ee
SP
2193 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2194 goto mcc_cq_destroy;
2195
8788fdc2 2196 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2197 goto mcc_q_free;
2198
2199 return 0;
2200
2201mcc_q_free:
2202 be_queue_free(adapter, q);
2203mcc_cq_destroy:
8788fdc2 2204 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2205mcc_cq_free:
2206 be_queue_free(adapter, cq);
2207err:
2208 return -1;
2209}
2210
6b7c5b94
SP
2211static void be_tx_queues_destroy(struct be_adapter *adapter)
2212{
2213 struct be_queue_info *q;
3c8def97
SP
2214 struct be_tx_obj *txo;
2215 u8 i;
6b7c5b94 2216
3c8def97
SP
2217 for_all_tx_queues(adapter, txo, i) {
2218 q = &txo->q;
2219 if (q->created)
2220 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2221 be_queue_free(adapter, q);
6b7c5b94 2222
3c8def97
SP
2223 q = &txo->cq;
2224 if (q->created)
2225 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2226 be_queue_free(adapter, q);
2227 }
6b7c5b94
SP
2228}
2229
7707133c 2230static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2231{
10ef9ab4 2232 struct be_queue_info *cq, *eq;
3c8def97 2233 struct be_tx_obj *txo;
92bf14ab 2234 int status, i;
6b7c5b94 2235
92bf14ab 2236 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2237
10ef9ab4
SP
2238 for_all_tx_queues(adapter, txo, i) {
2239 cq = &txo->cq;
2240 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2241 sizeof(struct be_eth_tx_compl));
2242 if (status)
2243 return status;
3c8def97 2244
827da44c
JS
2245 u64_stats_init(&txo->stats.sync);
2246 u64_stats_init(&txo->stats.sync_compl);
2247
10ef9ab4
SP
2248 /* If num_evt_qs is less than num_tx_qs, then more than
 2249 * one txq shares an eq
2250 */
2251 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2252 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2253 if (status)
2254 return status;
6b7c5b94 2255
10ef9ab4
SP
2256 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2257 sizeof(struct be_eth_wrb));
2258 if (status)
2259 return status;
6b7c5b94 2260
94d73aaa 2261 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2262 if (status)
2263 return status;
3c8def97 2264 }
6b7c5b94 2265
d379142b
SP
2266 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2267 adapter->num_tx_qs);
10ef9ab4 2268 return 0;
6b7c5b94
SP
2269}
2270
10ef9ab4 2271static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2272{
2273 struct be_queue_info *q;
3abcdeda
SP
2274 struct be_rx_obj *rxo;
2275 int i;
2276
2277 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2278 q = &rxo->cq;
2279 if (q->created)
2280 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2281 be_queue_free(adapter, q);
ac6a0c4a
SP
2282 }
2283}
2284
10ef9ab4 2285static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2286{
10ef9ab4 2287 struct be_queue_info *eq, *cq;
3abcdeda
SP
2288 struct be_rx_obj *rxo;
2289 int rc, i;
6b7c5b94 2290
92bf14ab
SP
2291 /* We can create as many RSS rings as there are EQs. */
2292 adapter->num_rx_qs = adapter->num_evt_qs;
2293
 2294 /* We'll use RSS only if at least 2 RSS rings are supported.
2295 * When RSS is used, we'll need a default RXQ for non-IP traffic.
10ef9ab4 2296 */
92bf14ab
SP
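 /* E.g. with 4 EQs this gives 4 RSS rings plus the default RXQ
 * created below, i.e. num_rx_qs = 5 (illustration only).
 */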
2297 if (adapter->num_rx_qs > 1)
2298 adapter->num_rx_qs++;
2299
6b7c5b94 2300 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2301 for_all_rx_queues(adapter, rxo, i) {
2302 rxo->adapter = adapter;
3abcdeda
SP
2303 cq = &rxo->cq;
2304 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2305 sizeof(struct be_eth_rx_compl));
3abcdeda 2306 if (rc)
10ef9ab4 2307 return rc;
3abcdeda 2308
827da44c 2309 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2310 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2311 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2312 if (rc)
10ef9ab4 2313 return rc;
3abcdeda 2314 }
6b7c5b94 2315
d379142b
SP
2316 dev_info(&adapter->pdev->dev,
2317 "created %d RSS queue(s) and 1 default RX queue\n",
2318 adapter->num_rx_qs - 1);
10ef9ab4 2319 return 0;
b628bde2
SP
2320}
2321
6b7c5b94
SP
2322static irqreturn_t be_intx(int irq, void *dev)
2323{
e49cc34f
SP
2324 struct be_eq_obj *eqo = dev;
2325 struct be_adapter *adapter = eqo->adapter;
2326 int num_evts = 0;
6b7c5b94 2327
d0b9cec3
SP
2328 /* IRQ is not expected when NAPI is scheduled as the EQ
2329 * will not be armed.
2330 * But, this can happen on Lancer INTx where it takes
 2331 * a while to de-assert INTx or in BE2 where occasionally
2332 * an interrupt may be raised even when EQ is unarmed.
2333 * If NAPI is already scheduled, then counting & notifying
2334 * events will orphan them.
e49cc34f 2335 */
d0b9cec3 2336 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2337 num_evts = events_get(eqo);
d0b9cec3
SP
2338 __napi_schedule(&eqo->napi);
2339 if (num_evts)
2340 eqo->spurious_intr = 0;
2341 }
2342 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2343
d0b9cec3
SP
 2344 /* Return IRQ_HANDLED only for the first spurious intr
2345 * after a valid intr to stop the kernel from branding
2346 * this irq as a bad one!
e49cc34f 2347 */
d0b9cec3
SP
2348 if (num_evts || eqo->spurious_intr++ == 0)
2349 return IRQ_HANDLED;
2350 else
2351 return IRQ_NONE;
6b7c5b94
SP
2352}
2353
10ef9ab4 2354static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2355{
10ef9ab4 2356 struct be_eq_obj *eqo = dev;
6b7c5b94 2357
0b545a62
SP
2358 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2359 napi_schedule(&eqo->napi);
6b7c5b94
SP
2360 return IRQ_HANDLED;
2361}
2362
2e588f84 2363static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2364{
e38b1706 2365 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2366}
2367
10ef9ab4 2368static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2369 int budget, int polling)
6b7c5b94 2370{
3abcdeda
SP
2371 struct be_adapter *adapter = rxo->adapter;
2372 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2373 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
2374 u32 work_done;
2375
2376 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2377 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2378 if (!rxcp)
2379 break;
2380
12004ae9
SP
 2381 /* Is it a flush compl that has no data? */
2382 if (unlikely(rxcp->num_rcvd == 0))
2383 goto loop_continue;
2384
2385 /* Discard compl with partial DMA Lancer B0 */
2386 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2387 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2388 goto loop_continue;
2389 }
2390
2391 /* On BE drop pkts that arrive due to imperfect filtering in
 2392 * promiscuous mode on some SKUs
2393 */
2394 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2395 !lancer_chip(adapter))) {
10ef9ab4 2396 be_rx_compl_discard(rxo, rxcp);
12004ae9 2397 goto loop_continue;
64642811 2398 }
009dd872 2399
6384a4d0
SP
 2400 /* Don't do GRO when we're busy_polling */
2401 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2402 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2403 else
6384a4d0
SP
2404 be_rx_compl_process(rxo, napi, rxcp);
2405
12004ae9 2406loop_continue:
2e588f84 2407 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2408 }
2409
10ef9ab4
SP
2410 if (work_done) {
2411 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2412
6384a4d0
SP
2413 /* When an rx-obj gets into post_starved state, just
2414 * let be_worker do the posting.
2415 */
2416 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2417 !rxo->rx_post_starved)
10ef9ab4 2418 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 2419 }
10ef9ab4 2420
6b7c5b94
SP
2421 return work_done;
2422}
2423
512bb8a2
KA
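/* Map a BE TX completion error status onto the matching per-queue
 * ethtool error counter; lancer_update_tx_err() below does the
 * same for Lancer status codes.
 */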
2424static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2425{
2426 switch (status) {
2427 case BE_TX_COMP_HDR_PARSE_ERR:
2428 tx_stats(txo)->tx_hdr_parse_err++;
2429 break;
2430 case BE_TX_COMP_NDMA_ERR:
2431 tx_stats(txo)->tx_dma_err++;
2432 break;
2433 case BE_TX_COMP_ACL_ERR:
2434 tx_stats(txo)->tx_spoof_check_err++;
2435 break;
2436 }
2437}
2438
2439static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2440{
2441 switch (status) {
2442 case LANCER_TX_COMP_LSO_ERR:
2443 tx_stats(txo)->tx_tso_err++;
2444 break;
2445 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2446 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2447 tx_stats(txo)->tx_spoof_check_err++;
2448 break;
2449 case LANCER_TX_COMP_QINQ_ERR:
2450 tx_stats(txo)->tx_qinq_err++;
2451 break;
2452 case LANCER_TX_COMP_PARITY_ERR:
2453 tx_stats(txo)->tx_internal_parity_err++;
2454 break;
2455 case LANCER_TX_COMP_DMA_ERR:
2456 tx_stats(txo)->tx_dma_err++;
2457 break;
2458 }
2459}
2460
10ef9ab4
SP
2461static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2462 int budget, int idx)
6b7c5b94 2463{
6b7c5b94 2464 struct be_eth_tx_compl *txcp;
10ef9ab4 2465 int num_wrbs = 0, work_done;
512bb8a2 2466 u32 compl_status;
3c8def97 2467
10ef9ab4
SP
2468 for (work_done = 0; work_done < budget; work_done++) {
2469 txcp = be_tx_compl_get(&txo->cq);
2470 if (!txcp)
2471 break;
2472 num_wrbs += be_tx_compl_process(adapter, txo,
c3c18bc1
SP
2473 GET_TX_COMPL_BITS(wrb_index,
2474 txcp));
512bb8a2
KA
2475 compl_status = GET_TX_COMPL_BITS(status, txcp);
2476 if (compl_status) {
2477 if (lancer_chip(adapter))
2478 lancer_update_tx_err(txo, compl_status);
2479 else
2480 be_update_tx_err(txo, compl_status);
2481 }
10ef9ab4 2482 }
6b7c5b94 2483
10ef9ab4
SP
2484 if (work_done) {
2485 be_cq_notify(adapter, txo->cq.id, true, work_done);
2486 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2487
10ef9ab4
SP
2488 /* As Tx wrbs have been freed up, wake up netdev queue
2489 * if it was stopped due to lack of tx wrbs. */
2490 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
748b539a 2491 atomic_read(&txo->q.used) < txo->q.len / 2) {
10ef9ab4 2492 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2493 }
10ef9ab4
SP
2494
2495 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2496 tx_stats(txo)->tx_compl += work_done;
2497 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2498 }
10ef9ab4
SP
2499 return (work_done < budget); /* Done */
2500}
6b7c5b94 2501
68d7bdcb 2502int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2503{
2504 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2505 struct be_adapter *adapter = eqo->adapter;
0b545a62 2506 int max_work = 0, work, i, num_evts;
6384a4d0 2507 struct be_rx_obj *rxo;
10ef9ab4 2508 bool tx_done;
f31e50a8 2509
0b545a62
SP
2510 num_evts = events_get(eqo);
2511
10ef9ab4
SP
2512 /* Process all TXQs serviced by this EQ */
2513 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2514 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2515 eqo->tx_budget, i);
2516 if (!tx_done)
2517 max_work = budget;
f31e50a8
SP
2518 }
2519
6384a4d0
SP
2520 if (be_lock_napi(eqo)) {
2521 /* This loop will iterate twice for EQ0 in which
2522 * completions of the last RXQ (default one) are also processed
2523 * For other EQs the loop iterates only once
2524 */
2525 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2526 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2527 max_work = max(work, max_work);
2528 }
2529 be_unlock_napi(eqo);
2530 } else {
2531 max_work = budget;
10ef9ab4 2532 }
6b7c5b94 2533
10ef9ab4
SP
2534 if (is_mcc_eqo(eqo))
2535 be_process_mcc(adapter);
93c86700 2536
10ef9ab4
SP
2537 if (max_work < budget) {
2538 napi_complete(napi);
0b545a62 2539 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2540 } else {
2541 /* As we'll continue in polling mode, count and clear events */
0b545a62 2542 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2543 }
10ef9ab4 2544 return max_work;
6b7c5b94
SP
2545}
2546
6384a4d0
SP
2547#ifdef CONFIG_NET_RX_BUSY_POLL
2548static int be_busy_poll(struct napi_struct *napi)
2549{
2550 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2551 struct be_adapter *adapter = eqo->adapter;
2552 struct be_rx_obj *rxo;
2553 int i, work = 0;
2554
2555 if (!be_lock_busy_poll(eqo))
2556 return LL_FLUSH_BUSY;
2557
2558 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2559 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2560 if (work)
2561 break;
2562 }
2563
2564 be_unlock_busy_poll(eqo);
2565 return work;
2566}
2567#endif
2568
f67ef7ba 2569void be_detect_error(struct be_adapter *adapter)
7c185276 2570{
e1cfb67a
PR
2571 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2572 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 2573 u32 i;
eb0eecc1
SK
2574 bool error_detected = false;
2575 struct device *dev = &adapter->pdev->dev;
2576 struct net_device *netdev = adapter->netdev;
7c185276 2577
d23e946c 2578 if (be_hw_error(adapter))
72f02485
SP
2579 return;
2580
e1cfb67a
PR
2581 if (lancer_chip(adapter)) {
2582 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2583 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2584 sliport_err1 = ioread32(adapter->db +
748b539a 2585 SLIPORT_ERROR1_OFFSET);
e1cfb67a 2586 sliport_err2 = ioread32(adapter->db +
748b539a 2587 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
2588 adapter->hw_error = true;
2589 /* Do not log error messages if its a FW reset */
2590 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2591 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2592 dev_info(dev, "Firmware update in progress\n");
2593 } else {
2594 error_detected = true;
2595 dev_err(dev, "Error detected in the card\n");
2596 dev_err(dev, "ERR: sliport status 0x%x\n",
2597 sliport_status);
2598 dev_err(dev, "ERR: sliport error1 0x%x\n",
2599 sliport_err1);
2600 dev_err(dev, "ERR: sliport error2 0x%x\n",
2601 sliport_err2);
2602 }
e1cfb67a
PR
2603 }
2604 } else {
2605 pci_read_config_dword(adapter->pdev,
748b539a 2606 PCICFG_UE_STATUS_LOW, &ue_lo);
e1cfb67a 2607 pci_read_config_dword(adapter->pdev,
748b539a 2608 PCICFG_UE_STATUS_HIGH, &ue_hi);
e1cfb67a 2609 pci_read_config_dword(adapter->pdev,
748b539a 2610 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
e1cfb67a 2611 pci_read_config_dword(adapter->pdev,
748b539a 2612 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
e1cfb67a 2613
f67ef7ba
PR
2614 ue_lo = (ue_lo & ~ue_lo_mask);
2615 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 2616
eb0eecc1
SK
2617 /* On certain platforms BE hardware can indicate spurious UEs.
 2618 * In case of a real UE the HW will stop working on its own anyway,
 2619 * so hw_error is not set here on UE detection.
2620 */
f67ef7ba 2621
eb0eecc1
SK
2622 if (ue_lo || ue_hi) {
2623 error_detected = true;
2624 dev_err(dev,
2625 "Unrecoverable Error detected in the adapter");
2626 dev_err(dev, "Please reboot server to recover");
2627 if (skyhawk_chip(adapter))
2628 adapter->hw_error = true;
2629 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2630 if (ue_lo & 1)
2631 dev_err(dev, "UE: %s bit set\n",
2632 ue_status_low_desc[i]);
2633 }
2634 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2635 if (ue_hi & 1)
2636 dev_err(dev, "UE: %s bit set\n",
2637 ue_status_hi_desc[i]);
2638 }
7c185276
AK
2639 }
2640 }
eb0eecc1
SK
2641 if (error_detected)
2642 netif_carrier_off(netdev);
7c185276
AK
2643}
2644
8d56ff11
SP
2645static void be_msix_disable(struct be_adapter *adapter)
2646{
ac6a0c4a 2647 if (msix_enabled(adapter)) {
8d56ff11 2648 pci_disable_msix(adapter->pdev);
ac6a0c4a 2649 adapter->num_msix_vec = 0;
68d7bdcb 2650 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2651 }
2652}
2653
c2bba3df 2654static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2655{
7dc4c064 2656 int i, num_vec;
d379142b 2657 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2658
92bf14ab
SP
2659 /* If RoCE is supported, program the max number of NIC vectors that
2660 * may be configured via set-channels, along with vectors needed for
 2661 * RoCE. Else, just program the number we'll use initially.
2662 */
2663 if (be_roce_supported(adapter))
2664 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2665 2 * num_online_cpus());
2666 else
2667 num_vec = adapter->cfg_num_qs;
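 /* Worked example (illustrative): with 8 online CPUs and
 * be_max_eqs() == 16, num_vec = min(32, 16) = 16, later split
 * evenly between NIC and RoCE vectors.
 */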
3abcdeda 2668
ac6a0c4a 2669 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2670 adapter->msix_entries[i].entry = i;
2671
7dc4c064
AG
2672 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2673 MIN_MSIX_VECTORS, num_vec);
2674 if (num_vec < 0)
2675 goto fail;
92bf14ab 2676
92bf14ab
SP
2677 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2678 adapter->num_msix_roce_vec = num_vec / 2;
2679 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2680 adapter->num_msix_roce_vec);
2681 }
2682
2683 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2684
2685 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2686 adapter->num_msix_vec);
c2bba3df 2687 return 0;
7dc4c064
AG
2688
2689fail:
2690 dev_warn(dev, "MSIx enable failed\n");
2691
2692 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2693 if (!be_physfn(adapter))
2694 return num_vec;
2695 return 0;
6b7c5b94
SP
2696}
2697
fe6d2a38 2698static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 2699 struct be_eq_obj *eqo)
b628bde2 2700{
f2f781a7 2701 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2702}
6b7c5b94 2703
b628bde2
SP
2704static int be_msix_register(struct be_adapter *adapter)
2705{
10ef9ab4
SP
2706 struct net_device *netdev = adapter->netdev;
2707 struct be_eq_obj *eqo;
2708 int status, i, vec;
6b7c5b94 2709
10ef9ab4
SP
2710 for_all_evt_queues(adapter, eqo, i) {
2711 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2712 vec = be_msix_vec_get(adapter, eqo);
2713 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2714 if (status)
2715 goto err_msix;
2716 }
b628bde2 2717
6b7c5b94 2718 return 0;
3abcdeda 2719err_msix:
10ef9ab4
SP
2720 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2721 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2722 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 2723 status);
ac6a0c4a 2724 be_msix_disable(adapter);
6b7c5b94
SP
2725 return status;
2726}
2727
2728static int be_irq_register(struct be_adapter *adapter)
2729{
2730 struct net_device *netdev = adapter->netdev;
2731 int status;
2732
ac6a0c4a 2733 if (msix_enabled(adapter)) {
6b7c5b94
SP
2734 status = be_msix_register(adapter);
2735 if (status == 0)
2736 goto done;
ba343c77
SB
2737 /* INTx is not supported for VF */
2738 if (!be_physfn(adapter))
2739 return status;
6b7c5b94
SP
2740 }
2741
e49cc34f 2742 /* INTx: only the first EQ is used */
6b7c5b94
SP
2743 netdev->irq = adapter->pdev->irq;
2744 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2745 &adapter->eq_obj[0]);
6b7c5b94
SP
2746 if (status) {
2747 dev_err(&adapter->pdev->dev,
2748 "INTx request IRQ failed - err %d\n", status);
2749 return status;
2750 }
2751done:
2752 adapter->isr_registered = true;
2753 return 0;
2754}
2755
2756static void be_irq_unregister(struct be_adapter *adapter)
2757{
2758 struct net_device *netdev = adapter->netdev;
10ef9ab4 2759 struct be_eq_obj *eqo;
3abcdeda 2760 int i;
6b7c5b94
SP
2761
2762 if (!adapter->isr_registered)
2763 return;
2764
2765 /* INTx */
ac6a0c4a 2766 if (!msix_enabled(adapter)) {
e49cc34f 2767 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2768 goto done;
2769 }
2770
2771 /* MSIx */
10ef9ab4
SP
2772 for_all_evt_queues(adapter, eqo, i)
2773 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2774
6b7c5b94
SP
2775done:
2776 adapter->isr_registered = false;
6b7c5b94
SP
2777}
2778
10ef9ab4 2779static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2780{
2781 struct be_queue_info *q;
2782 struct be_rx_obj *rxo;
2783 int i;
2784
2785 for_all_rx_queues(adapter, rxo, i) {
2786 q = &rxo->q;
2787 if (q->created) {
2788 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2789 be_rx_cq_clean(rxo);
482c9e79 2790 }
10ef9ab4 2791 be_queue_free(adapter, q);
482c9e79
SP
2792 }
2793}
2794
889cd4b2
SP
2795static int be_close(struct net_device *netdev)
2796{
2797 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2798 struct be_eq_obj *eqo;
2799 int i;
889cd4b2 2800
e1ad8e33
KA
2801 /* This protection is needed as be_close() may be called even when the
 2802 * adapter is in cleared state (after EEH permanent failure)
2803 */
2804 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
2805 return 0;
2806
045508a8
PP
2807 be_roce_dev_close(adapter);
2808
dff345c5
IV
2809 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2810 for_all_evt_queues(adapter, eqo, i) {
04d3d624 2811 napi_disable(&eqo->napi);
6384a4d0
SP
2812 be_disable_busy_poll(eqo);
2813 }
71237b6f 2814 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 2815 }
a323d9bf
SP
2816
2817 be_async_mcc_disable(adapter);
2818
2819 /* Wait for all pending tx completions to arrive so that
2820 * all tx skbs are freed.
2821 */
fba87559 2822 netif_tx_disable(netdev);
6e1f9975 2823 be_tx_compl_clean(adapter);
a323d9bf
SP
2824
2825 be_rx_qs_destroy(adapter);
2826
d11a347d
AK
2827 for (i = 1; i < (adapter->uc_macs + 1); i++)
2828 be_cmd_pmac_del(adapter, adapter->if_handle,
2829 adapter->pmac_id[i], 0);
2830 adapter->uc_macs = 0;
2831
a323d9bf 2832 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2833 if (msix_enabled(adapter))
2834 synchronize_irq(be_msix_vec_get(adapter, eqo));
2835 else
2836 synchronize_irq(netdev->irq);
2837 be_eq_clean(eqo);
63fcb27f
PR
2838 }
2839
889cd4b2
SP
2840 be_irq_unregister(adapter);
2841
482c9e79
SP
2842 return 0;
2843}
2844
10ef9ab4 2845static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2846{
2847 struct be_rx_obj *rxo;
e9008ee9 2848 int rc, i, j;
e2557877
VD
2849 u8 rss_hkey[RSS_HASH_KEY_LEN];
2850 struct rss_info *rss = &adapter->rss_info;
482c9e79
SP
2851
2852 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2853 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2854 sizeof(struct be_eth_rx_d));
2855 if (rc)
2856 return rc;
2857 }
2858
2859 /* The FW would like the default RXQ to be created first */
2860 rxo = default_rxo(adapter);
2861 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2862 adapter->if_handle, false, &rxo->rss_id);
2863 if (rc)
2864 return rc;
2865
2866 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2867 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2868 rx_frag_size, adapter->if_handle,
2869 true, &rxo->rss_id);
482c9e79
SP
2870 if (rc)
2871 return rc;
2872 }
2873
2874 if (be_multi_rxq(adapter)) {
e2557877
VD
2875 for (j = 0; j < RSS_INDIR_TABLE_LEN;
2876 j += adapter->num_rx_qs - 1) {
e9008ee9 2877 for_all_rss_queues(adapter, rxo, i) {
e2557877 2878 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 2879 break;
e2557877
VD
2880 rss->rsstable[j + i] = rxo->rss_id;
2881 rss->rss_queue[j + i] = i;
e9008ee9
PR
2882 }
2883 }
e2557877
VD
2884 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2885 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
2886
2887 if (!BEx_chip(adapter))
e2557877
VD
2888 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2889 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
2890 } else {
2891 /* Disable RSS, if only default RX Q is created */
e2557877 2892 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 2893 }
594ad54a 2894
e2557877 2895 get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
748b539a 2896 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
e2557877 2897 128, rss_hkey);
da1388d6 2898 if (rc) {
e2557877 2899 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 2900 return rc;
482c9e79
SP
2901 }
2902
e2557877
VD
2903 memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
2904
482c9e79 2905 /* First time posting */
10ef9ab4 2906 for_all_rx_queues(adapter, rxo, i)
482c9e79 2907 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2908 return 0;
2909}
2910
6b7c5b94
SP
2911static int be_open(struct net_device *netdev)
2912{
2913 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2914 struct be_eq_obj *eqo;
3abcdeda 2915 struct be_rx_obj *rxo;
10ef9ab4 2916 struct be_tx_obj *txo;
b236916a 2917 u8 link_status;
3abcdeda 2918 int status, i;
5fb379ee 2919
10ef9ab4 2920 status = be_rx_qs_create(adapter);
482c9e79
SP
2921 if (status)
2922 goto err;
2923
c2bba3df
SK
2924 status = be_irq_register(adapter);
2925 if (status)
2926 goto err;
5fb379ee 2927
10ef9ab4 2928 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2929 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2930
10ef9ab4
SP
2931 for_all_tx_queues(adapter, txo, i)
2932 be_cq_notify(adapter, txo->cq.id, true, 0);
2933
7a1e9b20
SP
2934 be_async_mcc_enable(adapter);
2935
10ef9ab4
SP
2936 for_all_evt_queues(adapter, eqo, i) {
2937 napi_enable(&eqo->napi);
6384a4d0 2938 be_enable_busy_poll(eqo);
4cad9f3b 2939 be_eq_notify(adapter, eqo->q.id, true, true, 0);
10ef9ab4 2940 }
04d3d624 2941 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 2942
323ff71e 2943 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
2944 if (!status)
2945 be_link_status_update(adapter, link_status);
2946
fba87559 2947 netif_tx_start_all_queues(netdev);
045508a8 2948 be_roce_dev_open(adapter);
c9c47142 2949
c5abe7c0 2950#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
2951 if (skyhawk_chip(adapter))
2952 vxlan_get_rx_port(netdev);
c5abe7c0
SP
2953#endif
2954
889cd4b2
SP
2955 return 0;
2956err:
2957 be_close(adapter->netdev);
2958 return -EIO;
5fb379ee
SP
2959}
2960
71d8d1b5
AK
2961static int be_setup_wol(struct be_adapter *adapter, bool enable)
2962{
2963 struct be_dma_mem cmd;
2964 int status = 0;
2965 u8 mac[ETH_ALEN];
2966
2967 memset(mac, 0, ETH_ALEN);
2968
2969 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
2970 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2971 GFP_KERNEL);
ddf1169f 2972 if (!cmd.va)
6b568689 2973 return -ENOMEM;
71d8d1b5
AK
2974
2975 if (enable) {
2976 status = pci_write_config_dword(adapter->pdev,
748b539a
SP
2977 PCICFG_PM_CONTROL_OFFSET,
2978 PCICFG_PM_CONTROL_MASK);
71d8d1b5
AK
2979 if (status) {
2980 dev_err(&adapter->pdev->dev,
2381a55c 2981 "Could not enable Wake-on-LAN\n");
2b7bcebf
IV
2982 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2983 cmd.dma);
71d8d1b5
AK
2984 return status;
2985 }
2986 status = be_cmd_enable_magic_wol(adapter,
748b539a
SP
2987 adapter->netdev->dev_addr,
2988 &cmd);
71d8d1b5
AK
2989 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2990 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2991 } else {
2992 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2993 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2994 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2995 }
2996
2b7bcebf 2997 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2998 return status;
2999}
3000
6d87f5c3
AK
3001/*
3002 * Generate a seed MAC address from the PF MAC Address using jhash.
 3003 * MAC addresses for VFs are assigned incrementally starting from the seed.
3004 * These addresses are programmed in the ASIC by the PF and the VF driver
3005 * queries for the MAC address during its probe.
3006 */
4c876616 3007static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3008{
f9449ab7 3009 u32 vf;
3abcdeda 3010 int status = 0;
6d87f5c3 3011 u8 mac[ETH_ALEN];
11ac75ed 3012 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3013
3014 be_vf_eth_addr_generate(adapter, mac);
3015
11ac75ed 3016 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3017 if (BEx_chip(adapter))
590c391d 3018 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3019 vf_cfg->if_handle,
3020 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3021 else
3022 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3023 vf + 1);
590c391d 3024
6d87f5c3
AK
3025 if (status)
3026 dev_err(&adapter->pdev->dev,
748b539a
SP
3027 "Mac address assignment failed for VF %d\n",
3028 vf);
6d87f5c3 3029 else
11ac75ed 3030 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3031
3032 mac[5] += 1;
3033 }
3034 return status;
3035}
3036
4c876616
SP
3037static int be_vfs_mac_query(struct be_adapter *adapter)
3038{
3039 int status, vf;
3040 u8 mac[ETH_ALEN];
3041 struct be_vf_cfg *vf_cfg;
4c876616
SP
3042
3043 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3044 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3045 mac, vf_cfg->if_handle,
3046 false, vf+1);
4c876616
SP
3047 if (status)
3048 return status;
3049 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3050 }
3051 return 0;
3052}
3053
f9449ab7 3054static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3055{
11ac75ed 3056 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3057 u32 vf;
3058
257a3feb 3059 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
3060 dev_warn(&adapter->pdev->dev,
3061 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
3062 goto done;
3063 }
3064
b4c1df93
SP
3065 pci_disable_sriov(adapter->pdev);
3066
11ac75ed 3067 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3068 if (BEx_chip(adapter))
11ac75ed
SP
3069 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3070 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3071 else
3072 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3073 vf + 1);
f9449ab7 3074
11ac75ed
SP
3075 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3076 }
39f1d94d
SP
3077done:
3078 kfree(adapter->vf_cfg);
3079 adapter->num_vfs = 0;
f174c7ec 3080 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
6d87f5c3
AK
3081}
3082
7707133c
SP
3083static void be_clear_queues(struct be_adapter *adapter)
3084{
3085 be_mcc_queues_destroy(adapter);
3086 be_rx_cqs_destroy(adapter);
3087 be_tx_queues_destroy(adapter);
3088 be_evt_queues_destroy(adapter);
3089}
3090
68d7bdcb 3091static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3092{
191eb756
SP
3093 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3094 cancel_delayed_work_sync(&adapter->work);
3095 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3096 }
68d7bdcb
SP
3097}
3098
b05004ad 3099static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb
SP
3100{
3101 int i;
3102
b05004ad
SK
3103 if (adapter->pmac_id) {
3104 for (i = 0; i < (adapter->uc_macs + 1); i++)
3105 be_cmd_pmac_del(adapter, adapter->if_handle,
3106 adapter->pmac_id[i], 0);
3107 adapter->uc_macs = 0;
3108
3109 kfree(adapter->pmac_id);
3110 adapter->pmac_id = NULL;
3111 }
3112}
3113
c5abe7c0 3114#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3115static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3116{
3117 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3118 be_cmd_manage_iface(adapter, adapter->if_handle,
3119 OP_CONVERT_TUNNEL_TO_NORMAL);
3120
3121 if (adapter->vxlan_port)
3122 be_cmd_set_vxlan_port(adapter, 0);
3123
3124 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3125 adapter->vxlan_port = 0;
3126}
c5abe7c0 3127#endif
c9c47142 3128
b05004ad
SK
3129static int be_clear(struct be_adapter *adapter)
3130{
68d7bdcb 3131 be_cancel_worker(adapter);
191eb756 3132
11ac75ed 3133 if (sriov_enabled(adapter))
f9449ab7
SP
3134 be_vf_clear(adapter);
3135
bec84e6b
VV
3136 /* Re-configure FW to distribute resources evenly across max-supported
3137 * number of VFs, only when VFs are not already enabled.
3138 */
3139 if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
3140 be_cmd_set_sriov_config(adapter, adapter->pool_res,
3141 pci_sriov_get_totalvfs(adapter->pdev));
3142
c5abe7c0 3143#ifdef CONFIG_BE2NET_VXLAN
c9c47142 3144 be_disable_vxlan_offloads(adapter);
c5abe7c0 3145#endif
2d17f403 3146 /* delete the primary mac along with the uc-mac list */
b05004ad 3147 be_mac_clear(adapter);
fbc13f01 3148
f9449ab7 3149 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 3150
7707133c 3151 be_clear_queues(adapter);
a54769f5 3152
10ef9ab4 3153 be_msix_disable(adapter);
e1ad8e33 3154 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
a54769f5
SP
3155 return 0;
3156}
3157
4c876616 3158static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3159{
92bf14ab 3160 struct be_resources res = {0};
4c876616
SP
3161 struct be_vf_cfg *vf_cfg;
3162 u32 cap_flags, en_flags, vf;
922bbe88 3163 int status = 0;
abb93951 3164
4c876616
SP
3165 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3166 BE_IF_FLAGS_MULTICAST;
abb93951 3167
4c876616 3168 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
3169 if (!BE3_chip(adapter)) {
3170 status = be_cmd_get_profile_config(adapter, &res,
3171 vf + 1);
3172 if (!status)
3173 cap_flags = res.if_cap_flags;
3174 }
4c876616
SP
3175
3176 /* If a FW profile exists, then cap_flags are updated */
3177 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
748b539a
SP
3178 BE_IF_FLAGS_BROADCAST |
3179 BE_IF_FLAGS_MULTICAST);
3180 status =
3181 be_cmd_if_create(adapter, cap_flags, en_flags,
3182 &vf_cfg->if_handle, vf + 1);
4c876616
SP
3183 if (status)
3184 goto err;
3185 }
3186err:
3187 return status;
abb93951
PR
3188}
3189
39f1d94d 3190static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3191{
11ac75ed 3192 struct be_vf_cfg *vf_cfg;
30128031
SP
3193 int vf;
3194
39f1d94d
SP
3195 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3196 GFP_KERNEL);
3197 if (!adapter->vf_cfg)
3198 return -ENOMEM;
3199
11ac75ed
SP
3200 for_all_vfs(adapter, vf_cfg, vf) {
3201 vf_cfg->if_handle = -1;
3202 vf_cfg->pmac_id = -1;
30128031 3203 }
39f1d94d 3204 return 0;
30128031
SP
3205}
3206
f9449ab7
SP
3207static int be_vf_setup(struct be_adapter *adapter)
3208{
c502224e 3209 struct device *dev = &adapter->pdev->dev;
11ac75ed 3210 struct be_vf_cfg *vf_cfg;
4c876616 3211 int status, old_vfs, vf;
04a06028 3212 u32 privileges;
39f1d94d 3213
257a3feb 3214 old_vfs = pci_num_vf(adapter->pdev);
39f1d94d
SP
3215
3216 status = be_vf_setup_init(adapter);
3217 if (status)
3218 goto err;
30128031 3219
4c876616
SP
3220 if (old_vfs) {
3221 for_all_vfs(adapter, vf_cfg, vf) {
3222 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3223 if (status)
3224 goto err;
3225 }
f9449ab7 3226
4c876616
SP
3227 status = be_vfs_mac_query(adapter);
3228 if (status)
3229 goto err;
3230 } else {
bec84e6b
VV
3231 status = be_vfs_if_create(adapter);
3232 if (status)
3233 goto err;
3234
39f1d94d
SP
3235 status = be_vf_eth_addr_config(adapter);
3236 if (status)
3237 goto err;
3238 }
f9449ab7 3239
11ac75ed 3240 for_all_vfs(adapter, vf_cfg, vf) {
04a06028
SP
 3241 /* Allow VFs to program MAC/VLAN filters */
3242 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3243 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3244 status = be_cmd_set_fn_privileges(adapter,
3245 privileges |
3246 BE_PRIV_FILTMGMT,
3247 vf + 1);
3248 if (!status)
3249 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3250 vf);
3251 }
3252
0f77ba73
RN
3253 /* Allow full available bandwidth */
3254 if (!old_vfs)
3255 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 3256
bdce2ad7 3257 if (!old_vfs) {
0599863d 3258 be_cmd_enable_vf(adapter, vf + 1);
bdce2ad7
SR
3259 be_cmd_set_logical_link_config(adapter,
3260 IFLA_VF_LINK_STATE_AUTO,
3261 vf+1);
3262 }
f9449ab7 3263 }
b4c1df93
SP
3264
3265 if (!old_vfs) {
3266 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3267 if (status) {
3268 dev_err(dev, "SRIOV enable failed\n");
3269 adapter->num_vfs = 0;
3270 goto err;
3271 }
3272 }
f174c7ec
VV
3273
3274 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
f9449ab7
SP
3275 return 0;
3276err:
4c876616
SP
3277 dev_err(dev, "VF setup failed\n");
3278 be_vf_clear(adapter);
f9449ab7
SP
3279 return status;
3280}

/* Convert the function_mode bits on BE3 to SH mc_type enums */
static u8 be_convert_mc_type(u32 function_mode)
{
	if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
		return vNIC1;
	else if (function_mode & QNQ_MODE)
		return FLEX10;
	else if (function_mode & VNIC_MODE)
		return vNIC2;
	else if (function_mode & UMC_ENABLED)
		return UMC;
	else
		return MC_NONE;
}

/* On BE2/BE3, the FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 *    *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
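
A worked example of the VLAN-filter budget computed above, assuming BE_NUM_VLANS_SUPPORTED is 64 purely for illustration (the real constant lives in be.h):

/* Editorial sketch: VLAN-filter entries available per mode, assuming
 * BE_NUM_VLANS_SUPPORTED == 64 for the sake of the arithmetic.
 *   qnq multi-channel:       64 / 8      = 8 entries
 *   non-qnq multi-channel:  (64 / 4) - 1 = 15 entries (pvid takes one)
 *   single channel:          64 entries
 */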

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}

static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	/* Some old versions of BE3 FW don't report the max_vfs value */
	be_cmd_get_profile_config(adapter, &res, 0);

	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "device doesn't support SRIOV\n");
		adapter->num_vfs = 0;
		return 0;
	}

	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* validate the num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev, "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}

static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per-function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	return 0;
}

static void be_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_get_sriov_config(adapter);
	if (status) {
		dev_err(dev, "Failed to query SR-IOV configuration\n");
		dev_err(dev, "SR-IOV cannot be enabled\n");
		return;
	}

	/* When the HW is in an SRIOV-capable configuration, the PF-pool
	 * resources are equally distributed across the max number of
	 * VFs. The user may request only a subset of the max-vfs to be
	 * enabled. Based on num_vfs, redistribute the resources across
	 * num_vfs so that each VF has access to more resources.
	 * This facility is not available in BE3 FW.
	 * Also, on the Lancer chip this is done by FW.
	 */
	if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
		status = be_cmd_set_sriov_config(adapter,
						 adapter->pool_res,
						 adapter->num_vfs);
		if (status)
			dev_err(dev, "Failed to optimize SR-IOV resources\n");
	}
}

static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}

static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}

static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}

static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}

int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}

static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif

static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};

static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}

static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (BE2_chip(adapter))
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}

static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
						int header_size,
						const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}
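
get_fsec_info() locates the flash directory by scanning the image in 32-byte steps for the two 16-byte cookie strings laid out back to back; a standalone sketch of that scan, with hypothetical image/len parameters:

/* Editorial sketch, not driver code: find the flash directory by its
 * 32-byte cookie ("*** SE FLAS" "H DIRECTORY *** ") in a raw image.
 * 'image', 'len' and 'hdr_size' are hypothetical names.
 */
#include <string.h>

static const char cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};

static const unsigned char *find_flash_dir(const unsigned char *image,
					   size_t len, size_t hdr_size)
{
	const unsigned char *p = image + hdr_size;

	for (; p + sizeof(cookie) <= image + len; p += 32)
		if (!memcmp(p, cookie, sizeof(cookie)))
			return p;	/* start of the section info */
	return NULL;
}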

static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
			      u32 img_offset, u32 img_size, int hdr_size,
			      u16 img_optype, bool *crc_match)
{
	u32 crc_offset;
	int status;
	u8 crc[4];

	status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
	if (status)
		return status;

	crc_offset = hdr_size + img_offset + img_size - 4;

	/* Skip flashing if the crc of the flashed region matches */
	if (!memcmp(crc, p + crc_offset, 4))
		*crc_match = true;
	else
		*crc_match = false;

	return status;
}

static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	u32 total_bytes, flash_op, num_bytes;
	int status;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32 * 1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;
	}
	return 0;
}
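
be_flash() streams each section in 32 KB chunks and marks only the final chunk with the commit opcode (FLASH rather than SAVE); a minimal sketch of that loop, with a hypothetical write_chunk() standing in for be_cmd_write_flashrom():

/* Editorial sketch, not driver code: chunked download where only the
 * final chunk carries the "commit" opcode. write_chunk() is hypothetical.
 */
enum chunk_op { OP_SAVE, OP_FLASH };

static int write_chunk(const unsigned char *img, unsigned int n,
		       enum chunk_op op);	/* hypothetical */

static int stream_image(const unsigned char *img, unsigned int len)
{
	while (len) {
		unsigned int n = len < 32 * 1024 ? len : 32 * 1024;
		enum chunk_op op;

		len -= n;
		op = len ? OP_SAVE : OP_FLASH;	/* last chunk commits */

		if (write_chunk(img, n, op))
			return -1;
		img += n;
	}
	return 0;
}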

/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}

static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
{
	u32 img_type = le32_to_cpu(fsec_entry.type);
	u16 img_optype = le16_to_cpu(fsec_entry.optype);

	if (img_optype != 0xFFFF)
		return img_optype;

	switch (img_type) {
	case IMAGE_FIRMWARE_iSCSI:
		img_optype = OPTYPE_ISCSI_ACTIVE;
		break;
	case IMAGE_BOOT_CODE:
		img_optype = OPTYPE_REDBOOT;
		break;
	case IMAGE_OPTION_ROM_ISCSI:
		img_optype = OPTYPE_BIOS;
		break;
	case IMAGE_OPTION_ROM_PXE:
		img_optype = OPTYPE_PXE_BIOS;
		break;
	case IMAGE_OPTION_ROM_FCoE:
		img_optype = OPTYPE_FCOE_BIOS;
		break;
	case IMAGE_FIRMWARE_BACKUP_iSCSI:
		img_optype = OPTYPE_ISCSI_BACKUP;
		break;
	case IMAGE_NCSI:
		img_optype = OPTYPE_NCSI_FW;
		break;
	case IMAGE_FLASHISM_JUMPVECTOR:
		img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
		break;
	case IMAGE_FIRMWARE_PHY:
		img_optype = OPTYPE_SH_PHY_FW;
		break;
	case IMAGE_REDBOOT_DIR:
		img_optype = OPTYPE_REDBOOT_DIR;
		break;
	case IMAGE_REDBOOT_CONFIG:
		img_optype = OPTYPE_REDBOOT_CONFIG;
		break;
	case IMAGE_UFI_DIR:
		img_optype = OPTYPE_UFI_DIR;
		break;
	default:
		break;
	}

	return img_optype;
}

static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	int status, i, filehdr_size;
	bool crc_match, old_fw_img;
	u16 img_optype;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		if (img_optype == 0xFFFF)
			continue;
		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, img_optype,
					    &crc_match);
		/* The current FW image on the card does not recognize the new
		 * FLASH op_type. The FW download is partially complete.
		 * Reboot the server now to enable the FW image to recognize
		 * the new FLASH op_type. To complete the remaining process,
		 * download the same FW again after the reboot.
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}

static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW image is not properly aligned. Length must be 4-byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			 + LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(&adapter->pdev->dev,
			 "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
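
The Lancer path writes the image to the "/prg" object in chunks, then issues a zero-length write at the final offset to commit the download; a sketch of that convention, with a hypothetical write_object():

/* Editorial sketch, not driver code: a zero-length write at the end of
 * the object signals the device that the download is complete.
 * write_object() is hypothetical; the device may accept fewer bytes
 * than offered, which is why the returned 'written' count drives the
 * offset.
 */
static int write_object(const unsigned char *buf, unsigned int n,
			unsigned int off, unsigned int *written);

static int download_object(const unsigned char *img, unsigned int len)
{
	unsigned int off = 0, written = 0;

	while (off < len) {
		unsigned int n = len - off < 32 * 1024 ? len - off : 32 * 1024;

		if (write_object(img + off, n, off, &written))
			return -1;
		off += written;
	}

	/* commit: zero-length write at the final offset */
	return write_object(NULL, 0, off, &written);
}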

#define UFI_TYPE2		2
#define UFI_TYPE3		3
#define UFI_TYPE3R		10
#define UFI_TYPE4		4
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (!fhdr)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return UFI_TYPE4;
	else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
		if (fhdr->asic_type_rev == 0x10)
			return UFI_TYPE3R;
		else
			return UFI_TYPE3;
	} else if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}
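
For reference, the chip/UFI compatibility that be_get_ufi_type() encodes, summarized from the checks above:

/* Editorial summary of the checks above:
 *   build[0] == '4' on Skyhawk              -> UFI_TYPE4
 *   build[0] == '3' on BE3, asic rev 0x10   -> UFI_TYPE3R
 *   build[0] == '3' on BE3, otherwise       -> UFI_TYPE3
 *   build[0] == '2' on BE2                  -> UFI_TYPE2
 *   anything else                           -> -1 (incompatible)
 */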

static int be_fw_download(struct be_adapter *adapter,
			  const struct firmware *fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -EINVAL;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -EINVAL;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	release_firmware(fw);
	return status;
}
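
be_load_fw() is normally reached through ethtool's flash operation; a usage note (the interface and file names below are examples):

/* Editorial note: firmware flashing is typically triggered with
 * ethtool's flash op, e.g.
 *
 *	ethtool -f eth0 be3_fw.ufi
 *
 * The kernel's request_firmware(), called from be_load_fw() above,
 * resolves the file name relative to /lib/firmware.
 */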

static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}

static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
}
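
These hooks service the IFLA_BRIDGE_MODE netlink attribute; a usage note showing how the mode is typically toggled from userspace with iproute2 (the device name is an example):

/* Editorial note: VEB switches VF-to-VF traffic inside the adapter,
 * while VEPA hairpins it through the external switch. With iproute2:
 *
 *	bridge link set dev eth0 hwmode vepa
 *	bridge link show
 */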

#ifdef CONFIG_BE2NET_VXLAN
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
			 be16_to_cpu(port));
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		return;
	}

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}

static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->vxlan_port != port)
		return;

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
}
#endif

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (skyhawk_chip(adapter)) {
		netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					   NETIF_F_TSO | NETIF_F_TSO6 |
					   NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	}
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				 SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
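
The mailbox setup above over-allocates by 16 bytes and rounds the address up with PTR_ALIGN(), guaranteeing a 16-byte-aligned view wherever the allocator placed the buffer; the same trick in isolation, with hypothetical names:

/* Editorial sketch, not driver code: allocate size + 16 (slack for the
 * worst-case misalignment, as the driver does) and round the pointer up
 * to the next multiple of 16. PTR_ALIGN() in the kernel performs the
 * same rounding.
 */
#include <stdint.h>
#include <stdlib.h>

static void *alloc_aligned16(size_t size, void **raw)
{
	*raw = malloc(size + 16);	/* keep for free() later */
	if (!*raw)
		return NULL;
	return (void *)(((uintptr_t)*raw + 15) & ~(uintptr_t)15);
}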

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		/* ALL non-BE ASICs */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v2);

	cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				      GFP_KERNEL);
	if (!cmd->va)
		return -ENOMEM;
	return 0;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_initial_config(struct be_adapter *adapter)
{
	int status, level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}
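
be_get_temp_freq must be a power of two because the driver's MODULO macro asserts it, which lets the remainder be computed with a mask; the equivalence in plain C:

/* Editorial sketch: for a power-of-two divisor m, x % m == x & (m - 1).
 * With be_get_temp_freq == 64, the worker issues the die-temperature
 * query on every 64th tick of work_counter.
 */
static inline unsigned int modulo_pow2(unsigned int x, unsigned int m)
{
	/* caller guarantees m is a power of two */
	return x & (m - 1);
}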

static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}

static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * there is no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* When interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

/* If any VFs are already enabled, don't FLR the PF */
static bool be_reset_required(struct be_adapter *adapter)
{
	return pci_num_vf(adapter->pdev) ? false : true;
}

static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}
4841
1dd06ae8 4842static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
6b7c5b94
SP
4843{
4844 int status = 0;
4845 struct be_adapter *adapter;
4846 struct net_device *netdev;
b4e32a71 4847 char port_name;
6b7c5b94 4848
acbafeb1
SP
4849 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
4850
6b7c5b94
SP
4851 status = pci_enable_device(pdev);
4852 if (status)
4853 goto do_none;
4854
4855 status = pci_request_regions(pdev, DRV_NAME);
4856 if (status)
4857 goto disable_dev;
4858 pci_set_master(pdev);
4859
7f640062 4860 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
ddf1169f 4861 if (!netdev) {
6b7c5b94
SP
4862 status = -ENOMEM;
4863 goto rel_reg;
4864 }
4865 adapter = netdev_priv(netdev);
4866 adapter->pdev = pdev;
4867 pci_set_drvdata(pdev, adapter);
4868 adapter->netdev = netdev;
2243e2e9 4869 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 4870
4c15c243 4871 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
4872 if (!status) {
4873 netdev->features |= NETIF_F_HIGHDMA;
4874 } else {
4c15c243 4875 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
4876 if (status) {
4877 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4878 goto free_netdev;
4879 }
4880 }
4881
ea58c180
AK
4882 if (be_physfn(adapter)) {
4883 status = pci_enable_pcie_error_reporting(pdev);
4884 if (!status)
4885 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4886 }
d6b6d987 4887
6b7c5b94
SP
4888 status = be_ctrl_init(adapter);
4889 if (status)
39f1d94d 4890 goto free_netdev;
6b7c5b94 4891
2243e2e9 4892 /* sync up with fw's ready state */
ba343c77 4893 if (be_physfn(adapter)) {
bf99e50d 4894 status = be_fw_wait_ready(adapter);
ba343c77
SB
4895 if (status)
4896 goto ctrl_clean;
ba343c77 4897 }
6b7c5b94 4898
39f1d94d
SP
4899 if (be_reset_required(adapter)) {
4900 status = be_cmd_reset_function(adapter);
4901 if (status)
4902 goto ctrl_clean;
556ae191 4903
2d177be8
KA
4904 /* Wait for interrupts to quiesce after an FLR */
4905 msleep(100);
4906 }
8cef7a78
SK
4907
4908 /* Allow interrupts for other ULPs running on NIC function */
4909 be_intr_set(adapter, true);
10ef9ab4 4910
2d177be8
KA
4911 /* tell fw we're ready to fire cmds */
4912 status = be_cmd_fw_init(adapter);
4913 if (status)
4914 goto ctrl_clean;
4915
2243e2e9
SP
4916 status = be_stats_init(adapter);
4917 if (status)
4918 goto ctrl_clean;
4919
39f1d94d 4920 status = be_get_initial_config(adapter);
6b7c5b94
SP
4921 if (status)
4922 goto stats_clean;
6b7c5b94
SP
4923
4924 INIT_DELAYED_WORK(&adapter->work, be_worker);
f67ef7ba 4925 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
a54769f5 4926 adapter->rx_fc = adapter->tx_fc = true;
6b7c5b94 4927
5fb379ee
SP
4928 status = be_setup(adapter);
4929 if (status)
55f5c3c5 4930 goto stats_clean;
2243e2e9 4931
3abcdeda 4932 be_netdev_init(netdev);
6b7c5b94
SP
4933 status = register_netdev(netdev);
4934 if (status != 0)
5fb379ee 4935 goto unsetup;
6b7c5b94 4936
045508a8
PP
4937 be_roce_dev_add(adapter);
4938
f67ef7ba
PR
4939 schedule_delayed_work(&adapter->func_recovery_work,
4940 msecs_to_jiffies(1000));
b4e32a71
PR
4941
4942 be_cmd_query_port_name(adapter, &port_name);
4943
d379142b
SP
4944 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4945 func_name(adapter), mc_name(adapter), port_name);
34b1ef04 4946
6b7c5b94
SP
4947 return 0;
4948
5fb379ee
SP
4949unsetup:
4950 be_clear(adapter);
6b7c5b94
SP
4951stats_clean:
4952 be_stats_cleanup(adapter);
4953ctrl_clean:
4954 be_ctrl_cleanup(adapter);
f9449ab7 4955free_netdev:
fe6d2a38 4956 free_netdev(netdev);
6b7c5b94
SP
4957rel_reg:
4958 pci_release_regions(pdev);
4959disable_dev:
4960 pci_disable_device(pdev);
4961do_none:
c4ca2374 4962 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
4963 return status;
4964}
4965
4966static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4967{
4968 struct be_adapter *adapter = pci_get_drvdata(pdev);
4969 struct net_device *netdev = adapter->netdev;
4970
76a9e08e 4971 if (adapter->wol_en)
71d8d1b5
AK
4972 be_setup_wol(adapter, true);
4973
d4360d6f 4974 be_intr_set(adapter, false);
f67ef7ba
PR
4975 cancel_delayed_work_sync(&adapter->func_recovery_work);
4976
6b7c5b94
SP
4977 netif_device_detach(netdev);
4978 if (netif_running(netdev)) {
4979 rtnl_lock();
4980 be_close(netdev);
4981 rtnl_unlock();
4982 }
9b0365f1 4983 be_clear(adapter);
6b7c5b94
SP
4984
4985 pci_save_state(pdev);
4986 pci_disable_device(pdev);
4987 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4988 return 0;
4989}
4990
4991static int be_resume(struct pci_dev *pdev)
4992{
4993 int status = 0;
4994 struct be_adapter *adapter = pci_get_drvdata(pdev);
4995 struct net_device *netdev = adapter->netdev;
4996
4997 netif_device_detach(netdev);
4998
4999 status = pci_enable_device(pdev);
5000 if (status)
5001 return status;
5002
1ca01512 5003 pci_set_power_state(pdev, PCI_D0);
6b7c5b94
SP
5004 pci_restore_state(pdev);
5005
dd5746bf
SB
5006 status = be_fw_wait_ready(adapter);
5007 if (status)
5008 return status;
5009
d4360d6f 5010 be_intr_set(adapter, true);
2243e2e9
SP
5011 /* tell fw we're ready to fire cmds */
5012 status = be_cmd_fw_init(adapter);
5013 if (status)
5014 return status;
5015
9b0365f1 5016 be_setup(adapter);
6b7c5b94
SP
5017 if (netif_running(netdev)) {
5018 rtnl_lock();
5019 be_open(netdev);
5020 rtnl_unlock();
5021 }
f67ef7ba
PR
5022
5023 schedule_delayed_work(&adapter->func_recovery_work,
5024 msecs_to_jiffies(1000));
6b7c5b94 5025 netif_device_attach(netdev);
71d8d1b5 5026
76a9e08e 5027 if (adapter->wol_en)
71d8d1b5 5028 be_setup_wol(adapter, false);
a4ca055f 5029
6b7c5b94
SP
5030 return 0;
5031}
5032
82456b03
SP
5033/*
5034 * An FLR will stop BE from DMAing any data.
5035 */
5036static void be_shutdown(struct pci_dev *pdev)
5037{
5038 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 5039
2d5d4154
AK
5040 if (!adapter)
5041 return;
82456b03 5042
d114f99a 5043 be_roce_dev_shutdown(adapter);
0f4a6828 5044 cancel_delayed_work_sync(&adapter->work);
f67ef7ba 5045 cancel_delayed_work_sync(&adapter->func_recovery_work);
a4ca055f 5046
2d5d4154 5047 netif_device_detach(adapter->netdev);
82456b03 5048
57841869
AK
5049 be_cmd_reset_function(adapter);
5050
82456b03 5051 pci_disable_device(pdev);
82456b03
SP
5052}
5053
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while the flash dump is in progress
	 * can cause it not to recover; wait for the dump to finish.
	 * Wait only for the first function, as this is needed only once
	 * per adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

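/* EEH slot_reset callback: the slot has been reset and the link is back
 * up; re-enable the device and poll the firmware before reporting the
 * slot as recovered.
 */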
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}

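/* EEH resume callback: I/O is allowed again; reset and re-initialize
 * the function from scratch before re-attaching the netdev.
 */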
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

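/* Hook the three recovery stages above into the PCI error-handling core */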
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

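/* Top-level PCI driver definition: probe/remove, legacy suspend/resume
 * PM hooks and the error-recovery handlers declared above.
 */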
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

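/* Module init: validate the rx_frag_size parameter (only 2048, 4096
 * and 8192 are accepted; anything else falls back to 2048) before
 * registering with the PCI core.
 */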
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
		       " : Module param rx_frag_size must be 2048/4096/8192."
		       " Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

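/* Module exit: unregister from the PCI core; per-device teardown
 * happens through be_remove().
 */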
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);