be2net: read VF's capabilities from GET_PROFILE_CONFIG cmd
drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

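/* Host interrupt enable/disable. be_intr_set() prefers the FW cmd
 * (be_cmd_intr_set); if that fails it falls back to toggling the HOSTINTR
 * bit in the PCI config-space MEMBAR control register via be_reg_intr_set().
 */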
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

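/* Doorbell helpers: each ring has a doorbell register in the BAR mapped at
 * adapter->db. The wmb() before the RQ/TXQ doorbell writes makes the ring
 * entries visible in memory before the device is told (via iowrite32) that
 * new entries were posted.
 */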
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

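/* ndo_set_mac_address handler. Programs the new MAC via PMAC_ADD first and
 * then reads back the currently active MAC from FW; the change is declared
 * successful only if the two match. See the inline comments for the BE3 VF
 * privilege quirks this ordering works around.
 */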
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

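/* The GET_STATS FW cmd comes in three layouts: BE2 understands only v0,
 * BE3 uses v1, and the remaining chips (e.g. Skyhawk) use v2. The helpers
 * below pick the right layout out of the DMA'ed stats buffer.
 */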
/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

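/* Fold a 16-bit HW counter that wraps at 65535 into a 32-bit accumulator.
 * The low 16 bits of *acc mirror the HW counter; a new value smaller than
 * that mirror means the HW wrapped, so 65536 is added to the high part.
 * E.g. *acc = 0x0001FFF0 and val = 0x0005 yields 0x00020005.
 */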
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

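/* ndo_get_stats64 handler. Per-queue SW counters are read under the
 * u64_stats seqcount: the fetch_begin/fetch_retry loop re-reads a queue's
 * counters if a writer updated them mid-read, so 64-bit values are never
 * torn even on 32-bit hosts. HW error counters come from drv_stats.
 */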
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			       u32 wrb_cnt, u32 copied, u32 gso_segs,
			       bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

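/* Build the VLAN TCI for a TX skb. If the 802.1p priority requested by the
 * stack is not set in the available priority bitmap (vlan_prio_bmap), it is
 * replaced with the adapter's recommended_prio.
 */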
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

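/* Fill the header WRB that precedes the data WRBs of a TX request: LSO
 * setup (mss), TCP/UDP checksum-offload hints (for tunnelled packets the
 * inner header's protocol is used), VLAN insertion, the event/complete
 * doorbell bits, the WRB count and the total frame length.
 */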
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			      hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

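/* DMA-map the skb head and frags and build the WRB chain for one TX request:
 * header WRB, one data WRB per mapped piece, plus an optional dummy WRB to
 * keep the count even (see wrb_cnt_for_skb). Returns the number of bytes
 * mapped, or 0 on a DMA mapping error after unwinding the partial chain.
 */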
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *) (skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	/* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			return NULL;
		skb->len = 36;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
		if (!skb)
			return NULL;
	}

	return skb;
}

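/* ndo_start_xmit handler: apply the chip-specific workarounds, build the
 * WRB chain, stop the subqueue if another max-fragment request would no
 * longer fit, and only then ring the TX doorbell so queue-stop and
 * completion processing stay serialized.
 */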
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
	    new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			 "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU,
			 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	clear_bit(vid, adapter->vids);
	status = be_vid_config(adapter);
	if (!status)
		adapter->vlans_added--;
	else
		set_bit(vid, adapter->vids);
ret:
	return status;
}

static void be_clear_promisc(struct be_adapter *adapter)
{
	adapter->promiscuous = false;
	adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);

	be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}

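/* ndo_set_rx_mode handler. Order matters: full promiscuous mode wins; if
 * it was just cleared, the VLAN table is re-programmed. More unicast MACs
 * or multicast groups than the HW can filter degrade the interface to
 * promiscuous or all-multi mode respectively.
 */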
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
	if (!status) {
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (!status)
		vf_cfg->vlan_tag = vlan;
	else
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -EPERM;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return status;
}

static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (!status)
		adapter->vf_cfg[vf].plink_tracking = link_state;

	return status;
}

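/* Adaptive interrupt coalescing (AIC): be_eqd_update() samples the RX/TX
 * packet counters of each event queue, converts the delta to packets per
 * second and derives a new event-queue delay (eqd), clamped to the
 * per-queue min/max. Only EQs whose delay actually changed are sent to FW
 * in one batched be_cmd_modify_eqd() call.
 */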
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}

static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

728a9972 1524{
19fad86f 1525 /* L4 checksum is not reliable for non TCP/UDP packets.
c9c47142
SP
1526 * Also ignore ipcksm for ipv6 pkts
1527 */
2e588f84 1528 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 1529 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
728a9972
AK
1530}
1531
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
}

1572/*
1573 * skb_fill_rx_data forms a complete skb for an ether frame
1574 * indicated by rxcp.
1575 */
10ef9ab4
SP
1576static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1577 struct be_rx_compl_info *rxcp)
6b7c5b94 1578{
6b7c5b94 1579 struct be_rx_page_info *page_info;
2e588f84
SP
1580 u16 i, j;
1581 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1582 u8 *start;
6b7c5b94 1583
0b0ef1d0 1584 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1585 start = page_address(page_info->page) + page_info->page_offset;
1586 prefetch(start);
1587
1588 /* Copy data in the first descriptor of this completion */
2e588f84 1589 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 1590
6b7c5b94
SP
1591 skb->len = curr_frag_len;
1592 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 1593 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
1594 /* Complete packet has now been moved to data */
1595 put_page(page_info->page);
1596 skb->data_len = 0;
1597 skb->tail += curr_frag_len;
1598 } else {
ac1ae5f3
ED
1599 hdr_len = ETH_HLEN;
1600 memcpy(skb->data, start, hdr_len);
6b7c5b94 1601 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1602 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1603 skb_shinfo(skb)->frags[0].page_offset =
1604 page_info->page_offset + hdr_len;
748b539a
SP
1605 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1606 curr_frag_len - hdr_len);
6b7c5b94 1607 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1608 skb->truesize += rx_frag_size;
6b7c5b94
SP
1609 skb->tail += hdr_len;
1610 }
205859a2 1611 page_info->page = NULL;
6b7c5b94 1612
2e588f84
SP
1613 if (rxcp->pkt_size <= rx_frag_size) {
1614 BUG_ON(rxcp->num_rcvd != 1);
1615 return;
6b7c5b94
SP
1616 }
1617
1618 /* More frags present for this completion */
2e588f84
SP
1619 remaining = rxcp->pkt_size - curr_frag_len;
1620 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1621 page_info = get_rx_page_info(rxo);
2e588f84 1622 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1623
bd46cb6c
AK
1624 /* Coalesce all frags from the same physical page in one slot */
1625 if (page_info->page_offset == 0) {
1626 /* Fresh page */
1627 j++;
b061b39e 1628 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1629 skb_shinfo(skb)->frags[j].page_offset =
1630 page_info->page_offset;
9e903e08 1631 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1632 skb_shinfo(skb)->nr_frags++;
1633 } else {
1634 put_page(page_info->page);
1635 }
1636
9e903e08 1637 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1638 skb->len += curr_frag_len;
1639 skb->data_len += curr_frag_len;
bdb28a97 1640 skb->truesize += rx_frag_size;
2e588f84 1641 remaining -= curr_frag_len;
205859a2 1642 page_info->page = NULL;
6b7c5b94 1643 }
bd46cb6c 1644 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1645}
1646
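/* Aside: skb_fill_rx_data() consumes rxcp->num_rcvd fragments of at most
 * rx_frag_size bytes each. The fragment count is just a ceiling division
 * of pkt_size by rx_frag_size; a standalone check of that arithmetic
 * (pure math, no skb involved):
 */
#include <stdio.h>

static unsigned int frags_needed(unsigned int pkt_size,
                                 unsigned int frag_size)
{
        return (pkt_size + frag_size - 1) / frag_size;  /* ceil division */
}

int main(void)
{
        printf("%u\n", frags_needed(60, 2048));         /* 1: tiny pkt  */
        printf("%u\n", frags_needed(2048, 2048));       /* 1: exact fit */
        printf("%u\n", frags_needed(9000, 2048));       /* 5: jumbo pkt */
        return 0;
}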
5be93b9a 1647/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 1648static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 1649 struct be_rx_compl_info *rxcp)
6b7c5b94 1650{
10ef9ab4 1651 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1652 struct net_device *netdev = adapter->netdev;
6b7c5b94 1653 struct sk_buff *skb;
89420424 1654
bb349bb4 1655 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1656 if (unlikely(!skb)) {
ac124ff9 1657 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1658 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1659 return;
1660 }
1661
10ef9ab4 1662 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1663
6332c8d3 1664 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1665 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1666 else
1667 skb_checksum_none_assert(skb);
6b7c5b94 1668
6332c8d3 1669 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1670 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1671 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 1672 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142
SP
1673
1674 skb->encapsulation = rxcp->tunneled;
6384a4d0 1675 skb_mark_napi_id(skb, napi);
6b7c5b94 1676
343e43c0 1677 if (rxcp->vlanf)
86a9bad3 1678 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
1679
1680 netif_receive_skb(skb);
6b7c5b94
SP
1681}
1682
5be93b9a 1683/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1684static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1685 struct napi_struct *napi,
1686 struct be_rx_compl_info *rxcp)
6b7c5b94 1687{
10ef9ab4 1688 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1689 struct be_rx_page_info *page_info;
5be93b9a 1690 struct sk_buff *skb = NULL;
2e588f84
SP
1691 u16 remaining, curr_frag_len;
1692 u16 i, j;
3968fa1e 1693
10ef9ab4 1694 skb = napi_get_frags(napi);
5be93b9a 1695 if (!skb) {
10ef9ab4 1696 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1697 return;
1698 }
1699
2e588f84
SP
1700 remaining = rxcp->pkt_size;
1701 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1702 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1703
1704 curr_frag_len = min(remaining, rx_frag_size);
1705
bd46cb6c
AK
1706 /* Coalesce all frags from the same physical page in one slot */
1707 if (i == 0 || page_info->page_offset == 0) {
1708 /* First frag or Fresh page */
1709 j++;
b061b39e 1710 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1711 skb_shinfo(skb)->frags[j].page_offset =
1712 page_info->page_offset;
9e903e08 1713 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1714 } else {
1715 put_page(page_info->page);
1716 }
9e903e08 1717 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1718 skb->truesize += rx_frag_size;
bd46cb6c 1719 remaining -= curr_frag_len;
6b7c5b94
SP
1720 memset(page_info, 0, sizeof(*page_info));
1721 }
bd46cb6c 1722 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1723
5be93b9a 1724 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1725 skb->len = rxcp->pkt_size;
1726 skb->data_len = rxcp->pkt_size;
5be93b9a 1727 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1728 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 1729 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 1730 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142
SP
1731
1732 skb->encapsulation = rxcp->tunneled;
6384a4d0 1733 skb_mark_napi_id(skb, napi);
5be93b9a 1734
343e43c0 1735 if (rxcp->vlanf)
86a9bad3 1736 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1737
10ef9ab4 1738 napi_gro_frags(napi);
2e588f84
SP
1739}
1740
10ef9ab4
SP
1741static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1742 struct be_rx_compl_info *rxcp)
2e588f84
SP
1743{
1744 rxcp->pkt_size =
1745 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1746 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1747 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1748 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
9ecb42fd 1749 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
2e588f84
SP
1750 rxcp->ip_csum =
1751 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1752 rxcp->l4_csum =
1753 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1754 rxcp->ipv6 =
1755 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
2e588f84
SP
1756 rxcp->num_rcvd =
1757 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1758 rxcp->pkt_type =
1759 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
4b972914 1760 rxcp->rss_hash =
c297977e 1761 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
15d72184 1762 if (rxcp->vlanf) {
f93f160b 1763 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
3c709f8f 1764 compl);
748b539a
SP
1765 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
1766 vlan_tag, compl);
15d72184 1767 }
12004ae9 1768 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
c9c47142
SP
1769 rxcp->tunneled =
1770 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
2e588f84
SP
1771}
1772
10ef9ab4
SP
1773static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1774 struct be_rx_compl_info *rxcp)
2e588f84
SP
1775{
1776 rxcp->pkt_size =
1777 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1778 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1779 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1780 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
9ecb42fd 1781 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
2e588f84
SP
1782 rxcp->ip_csum =
1783 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1784 rxcp->l4_csum =
1785 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1786 rxcp->ipv6 =
1787 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
2e588f84
SP
1788 rxcp->num_rcvd =
1789 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1790 rxcp->pkt_type =
1791 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
4b972914 1792 rxcp->rss_hash =
c297977e 1793 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
15d72184 1794 if (rxcp->vlanf) {
f93f160b 1795 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
3c709f8f 1796 compl);
748b539a
SP
1797 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1798 vlan_tag, compl);
15d72184 1799 }
12004ae9 1800 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
e38b1706
SK
1801 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1802 ip_frag, compl);
2e588f84
SP
1803}
1804
1805static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1806{
1807 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1808 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1809 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1810
2e588f84
SP
1811 /* For checking the valid bit it is OK to use either definition, as the
1812 * valid bit is at the same position in both v0 and v1 Rx compls */
1813 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1814 return NULL;
6b7c5b94 1815
2e588f84
SP
1816 rmb();
1817 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1818
2e588f84 1819 if (adapter->be3_native)
10ef9ab4 1820 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1821 else
10ef9ab4 1822 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1823
e38b1706
SK
1824 if (rxcp->ip_frag)
1825 rxcp->l4_csum = 0;
1826
15d72184 1827 if (rxcp->vlanf) {
f93f160b
VV
1828 /* In QNQ modes, if qnq bit is not set, then the packet was
1829 * tagged only with the transparent outer vlan-tag and must
1830 * not be treated as a vlan packet by the host
1831 */
1832 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 1833 rxcp->vlanf = 0;
6b7c5b94 1834
15d72184 1835 if (!lancer_chip(adapter))
3c709f8f 1836 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1837
939cf306 1838 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 1839 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
1840 rxcp->vlanf = 0;
1841 }
2e588f84
SP
1842
1843 /* As the compl has been parsed, reset it; we won't touch it again */
1844 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1845
3abcdeda 1846 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1847 return rxcp;
1848}
1849
1829b086 1850static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1851{
6b7c5b94 1852 u32 order = get_order(size);
1829b086 1853
6b7c5b94 1854 if (order > 0)
1829b086
ED
1855 gfp |= __GFP_COMP;
1856 return alloc_pages(gfp, order);
6b7c5b94
SP
1857}
1858
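/* Aside: be_alloc_pages() relies on get_order() to round the request up
 * to a power-of-two number of pages (and tags multi-page allocations as
 * compound via __GFP_COMP). A userspace model of get_order(), assuming a
 * 4K page; the kernel's macro is arch-dependent, this is only a sketch.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int get_order_model(unsigned long size)
{
        unsigned long pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        int order = 0;

        while ((1UL << order) < pages)
                order++;
        return order;
}

int main(void)
{
        printf("%d\n", get_order_model(2048));   /* 0: fits in one page */
        printf("%d\n", get_order_model(8192));   /* 1: two pages        */
        printf("%d\n", get_order_model(16384));  /* 2: four pages       */
        return 0;
}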
1859/*
1860 * Allocate a page, split it to fragments of size rx_frag_size and post as
1861 * receive buffers to BE
1862 */
1829b086 1863static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1864{
3abcdeda 1865 struct be_adapter *adapter = rxo->adapter;
26d92f92 1866 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1867 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1868 struct page *pagep = NULL;
ba42fad0 1869 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
1870 struct be_eth_rx_d *rxd;
1871 u64 page_dmaaddr = 0, frag_dmaaddr;
1872 u32 posted, page_offset = 0;
1873
3abcdeda 1874 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1875 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1876 if (!pagep) {
1829b086 1877 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1878 if (unlikely(!pagep)) {
ac124ff9 1879 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1880 break;
1881 }
ba42fad0
IV
1882 page_dmaaddr = dma_map_page(dev, pagep, 0,
1883 adapter->big_page_size,
2b7bcebf 1884 DMA_FROM_DEVICE);
ba42fad0
IV
1885 if (dma_mapping_error(dev, page_dmaaddr)) {
1886 put_page(pagep);
1887 pagep = NULL;
1888 rx_stats(rxo)->rx_post_fail++;
1889 break;
1890 }
e50287be 1891 page_offset = 0;
6b7c5b94
SP
1892 } else {
1893 get_page(pagep);
e50287be 1894 page_offset += rx_frag_size;
6b7c5b94 1895 }
e50287be 1896 page_info->page_offset = page_offset;
6b7c5b94 1897 page_info->page = pagep;
6b7c5b94
SP
1898
1899 rxd = queue_head_node(rxq);
e50287be 1900 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
1901 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1902 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1903
1904 /* Any space left in the current big page for another frag? */
1905 if ((page_offset + rx_frag_size + rx_frag_size) >
1906 adapter->big_page_size) {
1907 pagep = NULL;
e50287be
SP
1908 page_info->last_frag = true;
1909 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1910 } else {
1911 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 1912 }
26d92f92
SP
1913
1914 prev_page_info = page_info;
1915 queue_head_inc(rxq);
10ef9ab4 1916 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 1917 }
e50287be
SP
1918
1919 /* Mark the last frag of a page when we break out of the above loop
1920 * with no more slots available in the RXQ
1921 */
1922 if (pagep) {
1923 prev_page_info->last_frag = true;
1924 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
1925 }
6b7c5b94
SP
1926
1927 if (posted) {
6b7c5b94 1928 atomic_add(posted, &rxq->used);
6384a4d0
SP
1929 if (rxo->rx_post_starved)
1930 rxo->rx_post_starved = false;
8788fdc2 1931 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1932 } else if (atomic_read(&rxq->used) == 0) {
1933 /* Let be_worker replenish when memory is available */
3abcdeda 1934 rxo->rx_post_starved = true;
6b7c5b94 1935 }
6b7c5b94
SP
1936}
1937
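/* Aside: the posting loop above carves one "big page" into rx_frag_size
 * slices and flags the slice that exhausts the page as last_frag, so only
 * that descriptor carries the full-page unmap address. A standalone walk
 * of the same offset logic, assuming a big page that holds two frags:
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        const unsigned int frag_size = 2048, big_page = 2 * 2048;
        unsigned int offset = 0;
        bool last;
        int i;

        for (i = 0; i < 4; i++) {
                /* any space left in the current big page for another frag? */
                last = (offset + 2 * frag_size) > big_page;
                printf("frag %d: offset=%u last_frag=%d\n", i, offset, last);
                offset = last ? 0 : offset + frag_size; /* new page if last */
        }
        return 0;
}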
5fb379ee 1938static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1939{
6b7c5b94
SP
1940 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1941
1942 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1943 return NULL;
1944
f3eb62d2 1945 rmb();
6b7c5b94
SP
1946 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1947
1948 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1949
1950 queue_tail_inc(tx_cq);
1951 return txcp;
1952}
1953
3c8def97 1954static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 1955 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1956{
3c8def97 1957 struct be_queue_info *txq = &txo->q;
a73b796e 1958 struct be_eth_wrb *wrb;
3c8def97 1959 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1960 struct sk_buff *sent_skb;
ec43b1a6
SP
1961 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1962 bool unmap_skb_hdr = true;
6b7c5b94 1963
ec43b1a6 1964 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1965 BUG_ON(!sent_skb);
ec43b1a6
SP
1966 sent_skbs[txq->tail] = NULL;
1967
1968 /* skip header wrb */
a73b796e 1969 queue_tail_inc(txq);
6b7c5b94 1970
ec43b1a6 1971 do {
6b7c5b94 1972 cur_index = txq->tail;
a73b796e 1973 wrb = queue_tail_node(txq);
2b7bcebf
IV
1974 unmap_tx_frag(&adapter->pdev->dev, wrb,
1975 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1976 unmap_skb_hdr = false;
1977
6b7c5b94
SP
1978 num_wrbs++;
1979 queue_tail_inc(txq);
ec43b1a6 1980 } while (cur_index != last_index);
6b7c5b94 1981
d8ec2c02 1982 dev_kfree_skb_any(sent_skb);
4d586b82 1983 return num_wrbs;
6b7c5b94
SP
1984}
1985
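/* Aside: be_tx_compl_process() walks the TX ring tail from the header WRB
 * up to the completion's wrb_index, counting how many WRBs one completion
 * retires. A minimal model of that modular walk (the real ring uses
 * power-of-two wrapping; a plain modulus is used here for clarity):
 */
#include <stdio.h>

static unsigned int wrbs_retired(unsigned int tail, unsigned int last,
                                 unsigned int qlen)
{
        unsigned int num = 1;           /* account for the header WRB */

        tail = (tail + 1) % qlen;       /* skip the header WRB */
        for (;;) {
                unsigned int cur = tail;

                num++;                  /* one data WRB unmapped */
                tail = (tail + 1) % qlen;
                if (cur == last)
                        break;
        }
        return num;
}

int main(void)
{
        /* header at slot 10, data WRBs at 11 and 12 -> 3 WRBs retired */
        printf("%u\n", wrbs_retired(10, 12, 2048));
        return 0;
}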
10ef9ab4
SP
1986/* Return the number of events in the event queue */
1987static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1988{
10ef9ab4
SP
1989 struct be_eq_entry *eqe;
1990 int num = 0;
859b1e4e 1991
10ef9ab4
SP
1992 do {
1993 eqe = queue_tail_node(&eqo->q);
1994 if (eqe->evt == 0)
1995 break;
859b1e4e 1996
10ef9ab4
SP
1997 rmb();
1998 eqe->evt = 0;
1999 num++;
2000 queue_tail_inc(&eqo->q);
2001 } while (true);
2002
2003 return num;
859b1e4e
SP
2004}
2005
10ef9ab4
SP
2006/* Leaves the EQ in disarmed state */
2007static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2008{
10ef9ab4 2009 int num = events_get(eqo);
859b1e4e 2010
10ef9ab4 2011 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
2012}
2013
10ef9ab4 2014static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
2015{
2016 struct be_rx_page_info *page_info;
3abcdeda
SP
2017 struct be_queue_info *rxq = &rxo->q;
2018 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2019 struct be_rx_compl_info *rxcp;
d23e946c
SP
2020 struct be_adapter *adapter = rxo->adapter;
2021 int flush_wait = 0;
6b7c5b94 2022
d23e946c
SP
2023 /* Consume pending rx completions.
2024 * Wait for the flush completion (identified by zero num_rcvd)
2025 * to arrive. Notify CQ even when there are no more CQ entries
2026 * for HW to flush partially coalesced CQ entries.
2027 * In Lancer, there is no need to wait for flush compl.
2028 */
2029 for (;;) {
2030 rxcp = be_rx_compl_get(rxo);
2031 if (rxcp == NULL) {
2032 if (lancer_chip(adapter))
2033 break;
2034
2035 if (flush_wait++ > 10 || be_hw_error(adapter)) {
2036 dev_warn(&adapter->pdev->dev,
2037 "did not receive flush compl\n");
2038 break;
2039 }
2040 be_cq_notify(adapter, rx_cq->id, true, 0);
2041 mdelay(1);
2042 } else {
2043 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2044 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2045 if (rxcp->num_rcvd == 0)
2046 break;
2047 }
6b7c5b94
SP
2048 }
2049
d23e946c
SP
2050 /* After cleanup, leave the CQ in unarmed state */
2051 be_cq_notify(adapter, rx_cq->id, false, 0);
2052
2053 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
2054 while (atomic_read(&rxq->used) > 0) {
2055 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2056 put_page(page_info->page);
2057 memset(page_info, 0, sizeof(*page_info));
2058 }
2059 BUG_ON(atomic_read(&rxq->used));
482c9e79 2060 rxq->tail = rxq->head = 0;
6b7c5b94
SP
2061}
2062
0ae57bb3 2063static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2064{
0ae57bb3
SP
2065 struct be_tx_obj *txo;
2066 struct be_queue_info *txq;
a8e9179a 2067 struct be_eth_tx_compl *txcp;
4d586b82 2068 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
2069 struct sk_buff *sent_skb;
2070 bool dummy_wrb;
0ae57bb3 2071 int i, pending_txqs;
a8e9179a 2072
1a3d0717 2073 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2074 do {
0ae57bb3
SP
2075 pending_txqs = adapter->num_tx_qs;
2076
2077 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2078 cmpl = 0;
2079 num_wrbs = 0;
0ae57bb3
SP
2080 txq = &txo->q;
2081 while ((txcp = be_tx_compl_get(&txo->cq))) {
2082 end_idx =
2083 AMAP_GET_BITS(struct amap_eth_tx_compl,
2084 wrb_index, txcp);
2085 num_wrbs += be_tx_compl_process(adapter, txo,
2086 end_idx);
2087 cmpl++;
2088 }
2089 if (cmpl) {
2090 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2091 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2092 timeo = 0;
0ae57bb3
SP
2093 }
2094 if (atomic_read(&txq->used) == 0)
2095 pending_txqs--;
a8e9179a
SP
2096 }
2097
1a3d0717 2098 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
a8e9179a
SP
2099 break;
2100
2101 mdelay(1);
2102 } while (true);
2103
0ae57bb3
SP
2104 for_all_tx_queues(adapter, txo, i) {
2105 txq = &txo->q;
2106 if (atomic_read(&txq->used))
2107 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2108 atomic_read(&txq->used));
2109
2110 /* free posted tx for which compls will never arrive */
2111 while (atomic_read(&txq->used)) {
2112 sent_skb = txo->sent_skb_list[txq->tail];
2113 end_idx = txq->tail;
2114 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2115 &dummy_wrb);
2116 index_adv(&end_idx, num_wrbs - 1, txq->len);
2117 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2118 atomic_sub(num_wrbs, &txq->used);
2119 }
b03388d6 2120 }
6b7c5b94
SP
2121}
2122
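/* Aside: be_tx_compl_clean() polls until every TXQ drains but bounds the
 * wait: the timeout counter restarts whenever any completion arrives and
 * the loop gives up after ~10 silent 1ms passes. The same pattern reduced
 * to a sketch, with a made-up completion trace standing in for hardware:
 */
#include <stdio.h>

int main(void)
{
        int fake_compls[] = { 3, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
        unsigned int timeo = 0;
        int pending = 5, i = 0;

        for (;;) {
                int cmpl = fake_compls[i++];    /* compls seen this pass */

                pending -= cmpl;
                if (cmpl)
                        timeo = 0;              /* HW alive: restart clock */
                if (pending == 0 || ++timeo > 10)
                        break;                  /* drained, or HW silent */
        }
        printf("pending=%d after %d passes\n", pending, i);
        return 0;       /* prints "pending=0 after 4 passes" */
}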
10ef9ab4
SP
2123static void be_evt_queues_destroy(struct be_adapter *adapter)
2124{
2125 struct be_eq_obj *eqo;
2126 int i;
2127
2128 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2129 if (eqo->q.created) {
2130 be_eq_clean(eqo);
10ef9ab4 2131 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2132 napi_hash_del(&eqo->napi);
68d7bdcb 2133 netif_napi_del(&eqo->napi);
19d59aa7 2134 }
10ef9ab4
SP
2135 be_queue_free(adapter, &eqo->q);
2136 }
2137}
2138
2139static int be_evt_queues_create(struct be_adapter *adapter)
2140{
2141 struct be_queue_info *eq;
2142 struct be_eq_obj *eqo;
2632bafd 2143 struct be_aic_obj *aic;
10ef9ab4
SP
2144 int i, rc;
2145
92bf14ab
SP
2146 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2147 adapter->cfg_num_qs);
10ef9ab4
SP
2148
2149 for_all_evt_queues(adapter, eqo, i) {
68d7bdcb
SP
2150 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2151 BE_NAPI_WEIGHT);
6384a4d0 2152 napi_hash_add(&eqo->napi);
2632bafd 2153 aic = &adapter->aic_obj[i];
10ef9ab4
SP
2154 eqo->adapter = adapter;
2155 eqo->tx_budget = BE_TX_BUDGET;
2156 eqo->idx = i;
2632bafd
SP
2157 aic->max_eqd = BE_MAX_EQD;
2158 aic->enable = true;
10ef9ab4
SP
2159
2160 eq = &eqo->q;
2161 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2162 sizeof(struct be_eq_entry));
10ef9ab4
SP
2163 if (rc)
2164 return rc;
2165
f2f781a7 2166 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2167 if (rc)
2168 return rc;
2169 }
1cfafab9 2170 return 0;
10ef9ab4
SP
2171}
2172
5fb379ee
SP
2173static void be_mcc_queues_destroy(struct be_adapter *adapter)
2174{
2175 struct be_queue_info *q;
5fb379ee 2176
8788fdc2 2177 q = &adapter->mcc_obj.q;
5fb379ee 2178 if (q->created)
8788fdc2 2179 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2180 be_queue_free(adapter, q);
2181
8788fdc2 2182 q = &adapter->mcc_obj.cq;
5fb379ee 2183 if (q->created)
8788fdc2 2184 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2185 be_queue_free(adapter, q);
2186}
2187
2188/* Must be called only after TX qs are created as MCC shares TX EQ */
2189static int be_mcc_queues_create(struct be_adapter *adapter)
2190{
2191 struct be_queue_info *q, *cq;
5fb379ee 2192
8788fdc2 2193 cq = &adapter->mcc_obj.cq;
5fb379ee 2194 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2195 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2196 goto err;
2197
10ef9ab4
SP
2198 /* Use the default EQ for MCC completions */
2199 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2200 goto mcc_cq_free;
2201
8788fdc2 2202 q = &adapter->mcc_obj.q;
5fb379ee
SP
2203 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2204 goto mcc_cq_destroy;
2205
8788fdc2 2206 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2207 goto mcc_q_free;
2208
2209 return 0;
2210
2211mcc_q_free:
2212 be_queue_free(adapter, q);
2213mcc_cq_destroy:
8788fdc2 2214 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2215mcc_cq_free:
2216 be_queue_free(adapter, cq);
2217err:
2218 return -1;
2219}
2220
6b7c5b94
SP
2221static void be_tx_queues_destroy(struct be_adapter *adapter)
2222{
2223 struct be_queue_info *q;
3c8def97
SP
2224 struct be_tx_obj *txo;
2225 u8 i;
6b7c5b94 2226
3c8def97
SP
2227 for_all_tx_queues(adapter, txo, i) {
2228 q = &txo->q;
2229 if (q->created)
2230 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2231 be_queue_free(adapter, q);
6b7c5b94 2232
3c8def97
SP
2233 q = &txo->cq;
2234 if (q->created)
2235 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2236 be_queue_free(adapter, q);
2237 }
6b7c5b94
SP
2238}
2239
7707133c 2240static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2241{
10ef9ab4 2242 struct be_queue_info *cq, *eq;
3c8def97 2243 struct be_tx_obj *txo;
92bf14ab 2244 int status, i;
6b7c5b94 2245
92bf14ab 2246 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2247
10ef9ab4
SP
2248 for_all_tx_queues(adapter, txo, i) {
2249 cq = &txo->cq;
2250 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2251 sizeof(struct be_eth_tx_compl));
2252 if (status)
2253 return status;
3c8def97 2254
827da44c
JS
2255 u64_stats_init(&txo->stats.sync);
2256 u64_stats_init(&txo->stats.sync_compl);
2257
10ef9ab4
SP
2258 /* If num_evt_qs is less than num_tx_qs, then more than
2259 * one txq share an eq
2260 */
2261 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2262 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2263 if (status)
2264 return status;
6b7c5b94 2265
10ef9ab4
SP
2266 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2267 sizeof(struct be_eth_wrb));
2268 if (status)
2269 return status;
6b7c5b94 2270
94d73aaa 2271 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2272 if (status)
2273 return status;
3c8def97 2274 }
6b7c5b94 2275
d379142b
SP
2276 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2277 adapter->num_tx_qs);
10ef9ab4 2278 return 0;
6b7c5b94
SP
2279}
2280
10ef9ab4 2281static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2282{
2283 struct be_queue_info *q;
3abcdeda
SP
2284 struct be_rx_obj *rxo;
2285 int i;
2286
2287 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2288 q = &rxo->cq;
2289 if (q->created)
2290 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2291 be_queue_free(adapter, q);
ac6a0c4a
SP
2292 }
2293}
2294
10ef9ab4 2295static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2296{
10ef9ab4 2297 struct be_queue_info *eq, *cq;
3abcdeda
SP
2298 struct be_rx_obj *rxo;
2299 int rc, i;
6b7c5b94 2300
92bf14ab
SP
2301 /* We can create as many RSS rings as there are EQs. */
2302 adapter->num_rx_qs = adapter->num_evt_qs;
2303
2304 /* We'll use RSS only if at least 2 RSS rings are supported.
2305 * When RSS is used, we'll need a default RXQ for non-IP traffic.
10ef9ab4 2306 */
92bf14ab
SP
2307 if (adapter->num_rx_qs > 1)
2308 adapter->num_rx_qs++;
2309
6b7c5b94 2310 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2311 for_all_rx_queues(adapter, rxo, i) {
2312 rxo->adapter = adapter;
3abcdeda
SP
2313 cq = &rxo->cq;
2314 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2315 sizeof(struct be_eth_rx_compl));
3abcdeda 2316 if (rc)
10ef9ab4 2317 return rc;
3abcdeda 2318
827da44c 2319 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2320 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2321 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2322 if (rc)
10ef9ab4 2323 return rc;
3abcdeda 2324 }
6b7c5b94 2325
d379142b
SP
2326 dev_info(&adapter->pdev->dev,
2327 "created %d RSS queue(s) and 1 default RX queue\n",
2328 adapter->num_rx_qs - 1);
10ef9ab4 2329 return 0;
b628bde2
SP
2330}
2331
6b7c5b94
SP
2332static irqreturn_t be_intx(int irq, void *dev)
2333{
e49cc34f
SP
2334 struct be_eq_obj *eqo = dev;
2335 struct be_adapter *adapter = eqo->adapter;
2336 int num_evts = 0;
6b7c5b94 2337
d0b9cec3
SP
2338 /* IRQ is not expected when NAPI is scheduled as the EQ
2339 * will not be armed.
2340 * But, this can happen on Lancer INTx where it takes
2341 * a while to de-assert INTx or in BE2 where occasionally
2342 * an interrupt may be raised even when EQ is unarmed.
2343 * If NAPI is already scheduled, then counting & notifying
2344 * events will orphan them.
e49cc34f 2345 */
d0b9cec3 2346 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2347 num_evts = events_get(eqo);
d0b9cec3
SP
2348 __napi_schedule(&eqo->napi);
2349 if (num_evts)
2350 eqo->spurious_intr = 0;
2351 }
2352 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2353
d0b9cec3
SP
2354 /* Return IRQ_HANDLED only for the first spurious intr
2355 * after a valid intr to stop the kernel from branding
2356 * this irq as a bad one!
e49cc34f 2357 */
d0b9cec3
SP
2358 if (num_evts || eqo->spurious_intr++ == 0)
2359 return IRQ_HANDLED;
2360 else
2361 return IRQ_NONE;
6b7c5b94
SP
2362}
2363
10ef9ab4 2364static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2365{
10ef9ab4 2366 struct be_eq_obj *eqo = dev;
6b7c5b94 2367
0b545a62
SP
2368 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2369 napi_schedule(&eqo->napi);
6b7c5b94
SP
2370 return IRQ_HANDLED;
2371}
2372
2e588f84 2373static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2374{
e38b1706 2375 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2376}
2377
10ef9ab4 2378static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2379 int budget, int polling)
6b7c5b94 2380{
3abcdeda
SP
2381 struct be_adapter *adapter = rxo->adapter;
2382 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2383 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
2384 u32 work_done;
2385
2386 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2387 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2388 if (!rxcp)
2389 break;
2390
12004ae9
SP
2391 /* Is it a flush compl that has no data */
2392 if (unlikely(rxcp->num_rcvd == 0))
2393 goto loop_continue;
2394
2395 /* Discard compl with partial DMA Lancer B0 */
2396 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2397 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2398 goto loop_continue;
2399 }
2400
2401 /* On BE drop pkts that arrive due to imperfect filtering in
2402 * promiscuous mode on some SKUs
2403 */
2404 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2405 !lancer_chip(adapter))) {
10ef9ab4 2406 be_rx_compl_discard(rxo, rxcp);
12004ae9 2407 goto loop_continue;
64642811 2408 }
009dd872 2409
6384a4d0
SP
2410 /* Don't do gro when we're busy_polling */
2411 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2412 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2413 else
6384a4d0
SP
2414 be_rx_compl_process(rxo, napi, rxcp);
2415
12004ae9 2416loop_continue:
2e588f84 2417 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2418 }
2419
10ef9ab4
SP
2420 if (work_done) {
2421 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2422
6384a4d0
SP
2423 /* When an rx-obj gets into post_starved state, just
2424 * let be_worker do the posting.
2425 */
2426 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2427 !rxo->rx_post_starved)
10ef9ab4 2428 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 2429 }
10ef9ab4 2430
6b7c5b94
SP
2431 return work_done;
2432}
2433
10ef9ab4
SP
2434static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2435 int budget, int idx)
6b7c5b94 2436{
6b7c5b94 2437 struct be_eth_tx_compl *txcp;
10ef9ab4 2438 int num_wrbs = 0, work_done;
3c8def97 2439
10ef9ab4
SP
2440 for (work_done = 0; work_done < budget; work_done++) {
2441 txcp = be_tx_compl_get(&txo->cq);
2442 if (!txcp)
2443 break;
2444 num_wrbs += be_tx_compl_process(adapter, txo,
748b539a
SP
2445 AMAP_GET_BITS(struct
2446 amap_eth_tx_compl,
2447 wrb_index, txcp));
10ef9ab4 2448 }
6b7c5b94 2449
10ef9ab4
SP
2450 if (work_done) {
2451 be_cq_notify(adapter, txo->cq.id, true, work_done);
2452 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2453
10ef9ab4
SP
2454 /* As Tx wrbs have been freed up, wake up netdev queue
2455 * if it was stopped due to lack of tx wrbs. */
2456 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
748b539a 2457 atomic_read(&txo->q.used) < txo->q.len / 2) {
10ef9ab4 2458 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2459 }
10ef9ab4
SP
2460
2461 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2462 tx_stats(txo)->tx_compl += work_done;
2463 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2464 }
10ef9ab4
SP
2465 return (work_done < budget); /* Done */
2466}
6b7c5b94 2467
68d7bdcb 2468int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2469{
2470 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2471 struct be_adapter *adapter = eqo->adapter;
0b545a62 2472 int max_work = 0, work, i, num_evts;
6384a4d0 2473 struct be_rx_obj *rxo;
10ef9ab4 2474 bool tx_done;
f31e50a8 2475
0b545a62
SP
2476 num_evts = events_get(eqo);
2477
10ef9ab4
SP
2478 /* Process all TXQs serviced by this EQ */
2479 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2480 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2481 eqo->tx_budget, i);
2482 if (!tx_done)
2483 max_work = budget;
f31e50a8
SP
2484 }
2485
6384a4d0
SP
2486 if (be_lock_napi(eqo)) {
2487 /* This loop will iterate twice for EQ0 in which
2488 * completions of the last RXQ (default one) are also processed.
2489 * For other EQs the loop iterates only once.
2490 */
2491 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2492 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2493 max_work = max(work, max_work);
2494 }
2495 be_unlock_napi(eqo);
2496 } else {
2497 max_work = budget;
10ef9ab4 2498 }
6b7c5b94 2499
10ef9ab4
SP
2500 if (is_mcc_eqo(eqo))
2501 be_process_mcc(adapter);
93c86700 2502
10ef9ab4
SP
2503 if (max_work < budget) {
2504 napi_complete(napi);
0b545a62 2505 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2506 } else {
2507 /* As we'll continue in polling mode, count and clear events */
0b545a62 2508 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2509 }
10ef9ab4 2510 return max_work;
6b7c5b94
SP
2511}
2512
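/* Aside: be_poll() follows the standard NAPI contract: it processes at
 * most "budget" RX completions, and only when the work done stays under
 * budget does it call napi_complete() and re-arm the EQ; otherwise it
 * stays in polling mode. The decision in miniature:
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        const int budget = 64;
        int samples[] = { 12, 64 };
        int i;

        for (i = 0; i < 2; i++) {
                bool done = samples[i] < budget;

                printf("work=%d -> %s\n", samples[i],
                       done ? "napi_complete, re-arm EQ"
                            : "stay in polling mode");
        }
        return 0;
}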
6384a4d0
SP
2513#ifdef CONFIG_NET_RX_BUSY_POLL
2514static int be_busy_poll(struct napi_struct *napi)
2515{
2516 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2517 struct be_adapter *adapter = eqo->adapter;
2518 struct be_rx_obj *rxo;
2519 int i, work = 0;
2520
2521 if (!be_lock_busy_poll(eqo))
2522 return LL_FLUSH_BUSY;
2523
2524 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2525 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2526 if (work)
2527 break;
2528 }
2529
2530 be_unlock_busy_poll(eqo);
2531 return work;
2532}
2533#endif
2534
f67ef7ba 2535void be_detect_error(struct be_adapter *adapter)
7c185276 2536{
e1cfb67a
PR
2537 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2538 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 2539 u32 i;
eb0eecc1
SK
2540 bool error_detected = false;
2541 struct device *dev = &adapter->pdev->dev;
2542 struct net_device *netdev = adapter->netdev;
7c185276 2543
d23e946c 2544 if (be_hw_error(adapter))
72f02485
SP
2545 return;
2546
e1cfb67a
PR
2547 if (lancer_chip(adapter)) {
2548 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2549 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2550 sliport_err1 = ioread32(adapter->db +
748b539a 2551 SLIPORT_ERROR1_OFFSET);
e1cfb67a 2552 sliport_err2 = ioread32(adapter->db +
748b539a 2553 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
2554 adapter->hw_error = true;
2555 /* Do not log error messages if its a FW reset */
2556 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2557 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2558 dev_info(dev, "Firmware update in progress\n");
2559 } else {
2560 error_detected = true;
2561 dev_err(dev, "Error detected in the card\n");
2562 dev_err(dev, "ERR: sliport status 0x%x\n",
2563 sliport_status);
2564 dev_err(dev, "ERR: sliport error1 0x%x\n",
2565 sliport_err1);
2566 dev_err(dev, "ERR: sliport error2 0x%x\n",
2567 sliport_err2);
2568 }
e1cfb67a
PR
2569 }
2570 } else {
2571 pci_read_config_dword(adapter->pdev,
748b539a 2572 PCICFG_UE_STATUS_LOW, &ue_lo);
e1cfb67a 2573 pci_read_config_dword(adapter->pdev,
748b539a 2574 PCICFG_UE_STATUS_HIGH, &ue_hi);
e1cfb67a 2575 pci_read_config_dword(adapter->pdev,
748b539a 2576 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
e1cfb67a 2577 pci_read_config_dword(adapter->pdev,
748b539a 2578 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
e1cfb67a 2579
f67ef7ba
PR
2580 ue_lo = (ue_lo & ~ue_lo_mask);
2581 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 2582
eb0eecc1
SK
2583 /* On certain platforms BE hardware can indicate spurious UEs.
2584 * Allow HW to stop working completely in case of a real UE.
2585 * Hence not setting the hw_error for UE detection.
2586 */
f67ef7ba 2587
eb0eecc1
SK
2588 if (ue_lo || ue_hi) {
2589 error_detected = true;
2590 dev_err(dev,
2591 "Unrecoverable Error detected in the adapter");
2592 dev_err(dev, "Please reboot server to recover");
2593 if (skyhawk_chip(adapter))
2594 adapter->hw_error = true;
2595 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2596 if (ue_lo & 1)
2597 dev_err(dev, "UE: %s bit set\n",
2598 ue_status_low_desc[i]);
2599 }
2600 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2601 if (ue_hi & 1)
2602 dev_err(dev, "UE: %s bit set\n",
2603 ue_status_hi_desc[i]);
2604 }
7c185276
AK
2605 }
2606 }
eb0eecc1
SK
2607 if (error_detected)
2608 netif_carrier_off(netdev);
7c185276
AK
2609}
2610
8d56ff11
SP
2611static void be_msix_disable(struct be_adapter *adapter)
2612{
ac6a0c4a 2613 if (msix_enabled(adapter)) {
8d56ff11 2614 pci_disable_msix(adapter->pdev);
ac6a0c4a 2615 adapter->num_msix_vec = 0;
68d7bdcb 2616 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2617 }
2618}
2619
c2bba3df 2620static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2621{
7dc4c064 2622 int i, num_vec;
d379142b 2623 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2624
92bf14ab
SP
2625 /* If RoCE is supported, program the max number of NIC vectors that
2626 * may be configured via set-channels, along with vectors needed for
2627 * RoCE. Else, just program the number we'll use initially.
2628 */
2629 if (be_roce_supported(adapter))
2630 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2631 2 * num_online_cpus());
2632 else
2633 num_vec = adapter->cfg_num_qs;
3abcdeda 2634
ac6a0c4a 2635 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2636 adapter->msix_entries[i].entry = i;
2637
7dc4c064
AG
2638 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2639 MIN_MSIX_VECTORS, num_vec);
2640 if (num_vec < 0)
2641 goto fail;
92bf14ab 2642
92bf14ab
SP
2643 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2644 adapter->num_msix_roce_vec = num_vec / 2;
2645 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2646 adapter->num_msix_roce_vec);
2647 }
2648
2649 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2650
2651 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2652 adapter->num_msix_vec);
c2bba3df 2653 return 0;
7dc4c064
AG
2654
2655fail:
2656 dev_warn(dev, "MSIx enable failed\n");
2657
2658 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2659 if (!be_physfn(adapter))
2660 return num_vec;
2661 return 0;
6b7c5b94
SP
2662}
2663
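/* Aside: when RoCE is supported the code above requests up to
 * 2 * min(max_eqs, num_online_cpus()) vectors and then gives half of
 * whatever pci_enable_msix_range() actually granted to RoCE. A toy
 * version of that request/split (all numbers below are made up):
 */
#include <stdio.h>

#define MIN_MSIX_VECTORS 1

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
        int max_eqs = 8, ncpus = 4, roce_supported = 1;
        int want = roce_supported ? 2 * min_int(max_eqs, ncpus) : ncpus;
        int got = 6;    /* pretend the PCI core granted only 6 of 8 */
        int roce_vec = (roce_supported && got > MIN_MSIX_VECTORS) ?
                       got / 2 : 0;

        printf("requested=%d granted=%d nic=%d roce=%d\n",
               want, got, got - roce_vec, roce_vec);
        return 0;
}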
fe6d2a38 2664static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 2665 struct be_eq_obj *eqo)
b628bde2 2666{
f2f781a7 2667 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2668}
6b7c5b94 2669
b628bde2
SP
2670static int be_msix_register(struct be_adapter *adapter)
2671{
10ef9ab4
SP
2672 struct net_device *netdev = adapter->netdev;
2673 struct be_eq_obj *eqo;
2674 int status, i, vec;
6b7c5b94 2675
10ef9ab4
SP
2676 for_all_evt_queues(adapter, eqo, i) {
2677 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2678 vec = be_msix_vec_get(adapter, eqo);
2679 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2680 if (status)
2681 goto err_msix;
2682 }
b628bde2 2683
6b7c5b94 2684 return 0;
3abcdeda 2685err_msix:
10ef9ab4
SP
2686 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2687 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2688 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 2689 status);
ac6a0c4a 2690 be_msix_disable(adapter);
6b7c5b94
SP
2691 return status;
2692}
2693
2694static int be_irq_register(struct be_adapter *adapter)
2695{
2696 struct net_device *netdev = adapter->netdev;
2697 int status;
2698
ac6a0c4a 2699 if (msix_enabled(adapter)) {
6b7c5b94
SP
2700 status = be_msix_register(adapter);
2701 if (status == 0)
2702 goto done;
ba343c77
SB
2703 /* INTx is not supported for VF */
2704 if (!be_physfn(adapter))
2705 return status;
6b7c5b94
SP
2706 }
2707
e49cc34f 2708 /* INTx: only the first EQ is used */
6b7c5b94
SP
2709 netdev->irq = adapter->pdev->irq;
2710 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2711 &adapter->eq_obj[0]);
6b7c5b94
SP
2712 if (status) {
2713 dev_err(&adapter->pdev->dev,
2714 "INTx request IRQ failed - err %d\n", status);
2715 return status;
2716 }
2717done:
2718 adapter->isr_registered = true;
2719 return 0;
2720}
2721
2722static void be_irq_unregister(struct be_adapter *adapter)
2723{
2724 struct net_device *netdev = adapter->netdev;
10ef9ab4 2725 struct be_eq_obj *eqo;
3abcdeda 2726 int i;
6b7c5b94
SP
2727
2728 if (!adapter->isr_registered)
2729 return;
2730
2731 /* INTx */
ac6a0c4a 2732 if (!msix_enabled(adapter)) {
e49cc34f 2733 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2734 goto done;
2735 }
2736
2737 /* MSIx */
10ef9ab4
SP
2738 for_all_evt_queues(adapter, eqo, i)
2739 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2740
6b7c5b94
SP
2741done:
2742 adapter->isr_registered = false;
6b7c5b94
SP
2743}
2744
10ef9ab4 2745static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2746{
2747 struct be_queue_info *q;
2748 struct be_rx_obj *rxo;
2749 int i;
2750
2751 for_all_rx_queues(adapter, rxo, i) {
2752 q = &rxo->q;
2753 if (q->created) {
2754 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2755 be_rx_cq_clean(rxo);
482c9e79 2756 }
10ef9ab4 2757 be_queue_free(adapter, q);
482c9e79
SP
2758 }
2759}
2760
889cd4b2
SP
2761static int be_close(struct net_device *netdev)
2762{
2763 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2764 struct be_eq_obj *eqo;
2765 int i;
889cd4b2 2766
e1ad8e33
KA
2767 /* This protection is needed as be_close() may be called even when the
2768 * adapter is in cleared state (after eeh perm failure)
2769 */
2770 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
2771 return 0;
2772
045508a8
PP
2773 be_roce_dev_close(adapter);
2774
dff345c5
IV
2775 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2776 for_all_evt_queues(adapter, eqo, i) {
04d3d624 2777 napi_disable(&eqo->napi);
6384a4d0
SP
2778 be_disable_busy_poll(eqo);
2779 }
71237b6f 2780 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 2781 }
a323d9bf
SP
2782
2783 be_async_mcc_disable(adapter);
2784
2785 /* Wait for all pending tx completions to arrive so that
2786 * all tx skbs are freed.
2787 */
fba87559 2788 netif_tx_disable(netdev);
6e1f9975 2789 be_tx_compl_clean(adapter);
a323d9bf
SP
2790
2791 be_rx_qs_destroy(adapter);
2792
d11a347d
AK
2793 for (i = 1; i < (adapter->uc_macs + 1); i++)
2794 be_cmd_pmac_del(adapter, adapter->if_handle,
2795 adapter->pmac_id[i], 0);
2796 adapter->uc_macs = 0;
2797
a323d9bf 2798 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2799 if (msix_enabled(adapter))
2800 synchronize_irq(be_msix_vec_get(adapter, eqo));
2801 else
2802 synchronize_irq(netdev->irq);
2803 be_eq_clean(eqo);
63fcb27f
PR
2804 }
2805
889cd4b2
SP
2806 be_irq_unregister(adapter);
2807
482c9e79
SP
2808 return 0;
2809}
2810
10ef9ab4 2811static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2812{
2813 struct be_rx_obj *rxo;
e9008ee9 2814 int rc, i, j;
e2557877
VD
2815 u8 rss_hkey[RSS_HASH_KEY_LEN];
2816 struct rss_info *rss = &adapter->rss_info;
482c9e79
SP
2817
2818 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2819 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2820 sizeof(struct be_eth_rx_d));
2821 if (rc)
2822 return rc;
2823 }
2824
2825 /* The FW would like the default RXQ to be created first */
2826 rxo = default_rxo(adapter);
2827 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2828 adapter->if_handle, false, &rxo->rss_id);
2829 if (rc)
2830 return rc;
2831
2832 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2833 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2834 rx_frag_size, adapter->if_handle,
2835 true, &rxo->rss_id);
482c9e79
SP
2836 if (rc)
2837 return rc;
2838 }
2839
2840 if (be_multi_rxq(adapter)) {
e2557877
VD
2841 for (j = 0; j < RSS_INDIR_TABLE_LEN;
2842 j += adapter->num_rx_qs - 1) {
e9008ee9 2843 for_all_rss_queues(adapter, rxo, i) {
e2557877 2844 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 2845 break;
e2557877
VD
2846 rss->rsstable[j + i] = rxo->rss_id;
2847 rss->rss_queue[j + i] = i;
e9008ee9
PR
2848 }
2849 }
e2557877
VD
2850 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2851 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
2852
2853 if (!BEx_chip(adapter))
e2557877
VD
2854 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2855 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
2856 } else {
2857 /* Disable RSS, if only default RX Q is created */
e2557877 2858 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 2859 }
594ad54a 2860
e2557877 2861 get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
748b539a 2862 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
e2557877 2863 128, rss_hkey);
da1388d6 2864 if (rc) {
e2557877 2865 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 2866 return rc;
482c9e79
SP
2867 }
2868
e2557877
VD
2869 memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
2870
482c9e79 2871 /* First time posting */
10ef9ab4 2872 for_all_rx_queues(adapter, rxo, i)
482c9e79 2873 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2874 return 0;
2875}
2876
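/* Aside: the nested loop above spreads the RSS ring ids round-robin over
 * the 128-entry indirection table. A standalone version of that fill,
 * assuming 4 RSS rings (i.e. num_rx_qs - 1 == 4); the driver stores
 * rxo->rss_id where this sketch stores the ring index:
 */
#include <stdio.h>

#define RSS_INDIR_TABLE_LEN 128

int main(void)
{
        int rsstable[RSS_INDIR_TABLE_LEN];
        int num_rss = 4;
        int i, j;

        for (j = 0; j < RSS_INDIR_TABLE_LEN; j += num_rss)
                for (i = 0; i < num_rss; i++) {
                        if (j + i >= RSS_INDIR_TABLE_LEN)
                                break;          /* mirrors driver's guard */
                        rsstable[j + i] = i;
                }

        for (i = 0; i < 8; i++)
                printf("%d ", rsstable[i]);     /* 0 1 2 3 0 1 2 3 */
        printf("\n");
        return 0;
}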
6b7c5b94
SP
2877static int be_open(struct net_device *netdev)
2878{
2879 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2880 struct be_eq_obj *eqo;
3abcdeda 2881 struct be_rx_obj *rxo;
10ef9ab4 2882 struct be_tx_obj *txo;
b236916a 2883 u8 link_status;
3abcdeda 2884 int status, i;
5fb379ee 2885
10ef9ab4 2886 status = be_rx_qs_create(adapter);
482c9e79
SP
2887 if (status)
2888 goto err;
2889
c2bba3df
SK
2890 status = be_irq_register(adapter);
2891 if (status)
2892 goto err;
5fb379ee 2893
10ef9ab4 2894 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2895 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2896
10ef9ab4
SP
2897 for_all_tx_queues(adapter, txo, i)
2898 be_cq_notify(adapter, txo->cq.id, true, 0);
2899
7a1e9b20
SP
2900 be_async_mcc_enable(adapter);
2901
10ef9ab4
SP
2902 for_all_evt_queues(adapter, eqo, i) {
2903 napi_enable(&eqo->napi);
6384a4d0 2904 be_enable_busy_poll(eqo);
10ef9ab4
SP
2905 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2906 }
04d3d624 2907 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 2908
323ff71e 2909 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
2910 if (!status)
2911 be_link_status_update(adapter, link_status);
2912
fba87559 2913 netif_tx_start_all_queues(netdev);
045508a8 2914 be_roce_dev_open(adapter);
c9c47142 2915
c5abe7c0 2916#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
2917 if (skyhawk_chip(adapter))
2918 vxlan_get_rx_port(netdev);
c5abe7c0
SP
2919#endif
2920
889cd4b2
SP
2921 return 0;
2922err:
2923 be_close(adapter->netdev);
2924 return -EIO;
5fb379ee
SP
2925}
2926
71d8d1b5
AK
2927static int be_setup_wol(struct be_adapter *adapter, bool enable)
2928{
2929 struct be_dma_mem cmd;
2930 int status = 0;
2931 u8 mac[ETH_ALEN];
2932
2933 memset(mac, 0, ETH_ALEN);
2934
2935 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
2936 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2937 GFP_KERNEL);
71d8d1b5
AK
2938 if (cmd.va == NULL)
2939 return -1;
71d8d1b5
AK
2940
2941 if (enable) {
2942 status = pci_write_config_dword(adapter->pdev,
748b539a
SP
2943 PCICFG_PM_CONTROL_OFFSET,
2944 PCICFG_PM_CONTROL_MASK);
71d8d1b5
AK
2945 if (status) {
2946 dev_err(&adapter->pdev->dev,
2381a55c 2947 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2948 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2949 cmd.dma);
71d8d1b5
AK
2950 return status;
2951 }
2952 status = be_cmd_enable_magic_wol(adapter,
748b539a
SP
2953 adapter->netdev->dev_addr,
2954 &cmd);
71d8d1b5
AK
2955 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2956 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2957 } else {
2958 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2959 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2960 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2961 }
2962
2b7bcebf 2963 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2964 return status;
2965}
2966
6d87f5c3
AK
2967/*
2968 * Generate a seed MAC address from the PF MAC Address using jhash.
2969 * MAC addresses for VFs are assigned incrementally starting from the seed.
2970 * These addresses are programmed in the ASIC by the PF and the VF driver
2971 * queries for the MAC address during its probe.
2972 */
4c876616 2973static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 2974{
f9449ab7 2975 u32 vf;
3abcdeda 2976 int status = 0;
6d87f5c3 2977 u8 mac[ETH_ALEN];
11ac75ed 2978 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2979
2980 be_vf_eth_addr_generate(adapter, mac);
2981
11ac75ed 2982 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 2983 if (BEx_chip(adapter))
590c391d 2984 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
2985 vf_cfg->if_handle,
2986 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
2987 else
2988 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2989 vf + 1);
590c391d 2990
6d87f5c3
AK
2991 if (status)
2992 dev_err(&adapter->pdev->dev,
748b539a
SP
2993 "Mac address assignment failed for VF %d\n",
2994 vf);
6d87f5c3 2995 else
11ac75ed 2996 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
2997
2998 mac[5] += 1;
2999 }
3000 return status;
3001}
3002
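/* Aside: be_vf_eth_addr_config() above hands out VF MACs by bumping the
 * last octet of a jhash-derived seed. A standalone demo of that scheme;
 * the seed bytes are invented, and note the simple mac[5] += 1 step wraps
 * at 0xff without carrying into mac[4]:
 */
#include <stdio.h>

int main(void)
{
        unsigned char mac[6] = { 0x00, 0x00, 0xc9, 0x12, 0x34, 0x50 };
        int vf;

        for (vf = 0; vf < 3; vf++) {
                printf("VF%d: %02x:%02x:%02x:%02x:%02x:%02x\n", vf,
                       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
                mac[5] += 1;    /* next VF gets seed + 1, as in the driver */
        }
        return 0;
}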
4c876616
SP
3003static int be_vfs_mac_query(struct be_adapter *adapter)
3004{
3005 int status, vf;
3006 u8 mac[ETH_ALEN];
3007 struct be_vf_cfg *vf_cfg;
4c876616
SP
3008
3009 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3010 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3011 mac, vf_cfg->if_handle,
3012 false, vf+1);
4c876616
SP
3013 if (status)
3014 return status;
3015 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3016 }
3017 return 0;
3018}
3019
f9449ab7 3020static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3021{
11ac75ed 3022 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3023 u32 vf;
3024
257a3feb 3025 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
3026 dev_warn(&adapter->pdev->dev,
3027 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
3028 goto done;
3029 }
3030
b4c1df93
SP
3031 pci_disable_sriov(adapter->pdev);
3032
11ac75ed 3033 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3034 if (BEx_chip(adapter))
11ac75ed
SP
3035 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3036 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3037 else
3038 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3039 vf + 1);
f9449ab7 3040
11ac75ed
SP
3041 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3042 }
39f1d94d
SP
3043done:
3044 kfree(adapter->vf_cfg);
3045 adapter->num_vfs = 0;
6d87f5c3
AK
3046}
3047
7707133c
SP
3048static void be_clear_queues(struct be_adapter *adapter)
3049{
3050 be_mcc_queues_destroy(adapter);
3051 be_rx_cqs_destroy(adapter);
3052 be_tx_queues_destroy(adapter);
3053 be_evt_queues_destroy(adapter);
3054}
3055
68d7bdcb 3056static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3057{
191eb756
SP
3058 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3059 cancel_delayed_work_sync(&adapter->work);
3060 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3061 }
68d7bdcb
SP
3062}
3063
b05004ad 3064static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb
SP
3065{
3066 int i;
3067
b05004ad
SK
3068 if (adapter->pmac_id) {
3069 for (i = 0; i < (adapter->uc_macs + 1); i++)
3070 be_cmd_pmac_del(adapter, adapter->if_handle,
3071 adapter->pmac_id[i], 0);
3072 adapter->uc_macs = 0;
3073
3074 kfree(adapter->pmac_id);
3075 adapter->pmac_id = NULL;
3076 }
3077}
3078
c5abe7c0 3079#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3080static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3081{
3082 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3083 be_cmd_manage_iface(adapter, adapter->if_handle,
3084 OP_CONVERT_TUNNEL_TO_NORMAL);
3085
3086 if (adapter->vxlan_port)
3087 be_cmd_set_vxlan_port(adapter, 0);
3088
3089 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3090 adapter->vxlan_port = 0;
3091}
c5abe7c0 3092#endif
c9c47142 3093
b05004ad
SK
3094static int be_clear(struct be_adapter *adapter)
3095{
68d7bdcb 3096 be_cancel_worker(adapter);
191eb756 3097
11ac75ed 3098 if (sriov_enabled(adapter))
f9449ab7
SP
3099 be_vf_clear(adapter);
3100
c5abe7c0 3101#ifdef CONFIG_BE2NET_VXLAN
c9c47142 3102 be_disable_vxlan_offloads(adapter);
c5abe7c0 3103#endif
2d17f403 3104 /* delete the primary mac along with the uc-mac list */
b05004ad 3105 be_mac_clear(adapter);
fbc13f01 3106
f9449ab7 3107 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 3108
7707133c 3109 be_clear_queues(adapter);
a54769f5 3110
10ef9ab4 3111 be_msix_disable(adapter);
e1ad8e33 3112 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
a54769f5
SP
3113 return 0;
3114}
3115
4c876616 3116static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3117{
92bf14ab 3118 struct be_resources res = {0};
4c876616
SP
3119 struct be_vf_cfg *vf_cfg;
3120 u32 cap_flags, en_flags, vf;
922bbe88 3121 int status = 0;
abb93951 3122
4c876616
SP
3123 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3124 BE_IF_FLAGS_MULTICAST;
abb93951 3125
4c876616 3126 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
3127 if (!BE3_chip(adapter)) {
3128 status = be_cmd_get_profile_config(adapter, &res,
3129 vf + 1);
3130 if (!status)
3131 cap_flags = res.if_cap_flags;
3132 }
4c876616
SP
3133
3134 /* If a FW profile exists, then cap_flags are updated */
3135 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
748b539a
SP
3136 BE_IF_FLAGS_BROADCAST |
3137 BE_IF_FLAGS_MULTICAST);
3138 status =
3139 be_cmd_if_create(adapter, cap_flags, en_flags,
3140 &vf_cfg->if_handle, vf + 1);
4c876616
SP
3141 if (status)
3142 goto err;
3143 }
3144err:
3145 return status;
abb93951
PR
3146}
3147
39f1d94d 3148static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3149{
11ac75ed 3150 struct be_vf_cfg *vf_cfg;
30128031
SP
3151 int vf;
3152
39f1d94d
SP
3153 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3154 GFP_KERNEL);
3155 if (!adapter->vf_cfg)
3156 return -ENOMEM;
3157
11ac75ed
SP
3158 for_all_vfs(adapter, vf_cfg, vf) {
3159 vf_cfg->if_handle = -1;
3160 vf_cfg->pmac_id = -1;
30128031 3161 }
39f1d94d 3162 return 0;
30128031
SP
3163}
3164
f9449ab7
SP
3165static int be_vf_setup(struct be_adapter *adapter)
3166{
c502224e 3167 struct device *dev = &adapter->pdev->dev;
11ac75ed 3168 struct be_vf_cfg *vf_cfg;
4c876616 3169 int status, old_vfs, vf;
04a06028 3170 u32 privileges;
39f1d94d 3171
257a3feb 3172 old_vfs = pci_num_vf(adapter->pdev);
4c876616
SP
3173 if (old_vfs) {
3174 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3175 if (old_vfs != num_vfs)
3176 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3177 adapter->num_vfs = old_vfs;
39f1d94d 3178 } else {
92bf14ab 3179 if (num_vfs > be_max_vfs(adapter))
4c876616 3180 dev_info(dev, "Device supports %d VFs and not %d\n",
92bf14ab
SP
3181 be_max_vfs(adapter), num_vfs);
3182 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
b4c1df93 3183 if (!adapter->num_vfs)
4c876616 3184 return 0;
39f1d94d
SP
3185 }
3186
3187 status = be_vf_setup_init(adapter);
3188 if (status)
3189 goto err;
30128031 3190
4c876616
SP
3191 if (old_vfs) {
3192 for_all_vfs(adapter, vf_cfg, vf) {
3193 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3194 if (status)
3195 goto err;
3196 }
3197 } else {
3198 status = be_vfs_if_create(adapter);
f9449ab7
SP
3199 if (status)
3200 goto err;
f9449ab7
SP
3201 }
3202
4c876616
SP
3203 if (old_vfs) {
3204 status = be_vfs_mac_query(adapter);
3205 if (status)
3206 goto err;
3207 } else {
39f1d94d
SP
3208 status = be_vf_eth_addr_config(adapter);
3209 if (status)
3210 goto err;
3211 }
f9449ab7 3212
11ac75ed 3213 for_all_vfs(adapter, vf_cfg, vf) {
04a06028
SP
3214 /* Allow VFs to program MAC/VLAN filters */
3215 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3216 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3217 status = be_cmd_set_fn_privileges(adapter,
3218 privileges |
3219 BE_PRIV_FILTMGMT,
3220 vf + 1);
3221 if (!status)
3222 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3223 vf);
3224 }
3225
0f77ba73
RN
3226 /* Allow full available bandwidth */
3227 if (!old_vfs)
3228 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 3229
bdce2ad7 3230 if (!old_vfs) {
0599863d 3231 be_cmd_enable_vf(adapter, vf + 1);
bdce2ad7
SR
3232 be_cmd_set_logical_link_config(adapter,
3233 IFLA_VF_LINK_STATE_AUTO,
3234 vf+1);
3235 }
f9449ab7 3236 }
b4c1df93
SP
3237
3238 if (!old_vfs) {
3239 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3240 if (status) {
3241 dev_err(dev, "SRIOV enable failed\n");
3242 adapter->num_vfs = 0;
3243 goto err;
3244 }
3245 }
f9449ab7
SP
3246 return 0;
3247err:
4c876616
SP
3248 dev_err(dev, "VF setup failed\n");
3249 be_vf_clear(adapter);
f9449ab7
SP
3250 return status;
3251}
3252
f93f160b
VV
3253/* Converting function_mode bits on BE3 to SH mc_type enums */
3254
3255static u8 be_convert_mc_type(u32 function_mode)
3256{
66064dbc 3257 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 3258 return vNIC1;
66064dbc 3259 else if (function_mode & QNQ_MODE)
f93f160b
VV
3260 return FLEX10;
3261 else if (function_mode & VNIC_MODE)
3262 return vNIC2;
3263 else if (function_mode & UMC_ENABLED)
3264 return UMC;
3265 else
3266 return MC_NONE;
3267}
3268
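/* Aside: a quick table check of the be_convert_mc_type() precedence above
 * (VNIC+QNQ beats QNQ alone, and so on). The bit values here are
 * stand-ins, not the real header definitions:
 */
#include <stdio.h>

#define VNIC_MODE   (1 << 0)
#define QNQ_MODE    (1 << 1)
#define UMC_ENABLED (1 << 2)

enum mc_type { MC_NONE, UMC, FLEX10, vNIC1, vNIC2 };

static enum mc_type convert(unsigned int mode)
{
        if ((mode & VNIC_MODE) && (mode & QNQ_MODE))
                return vNIC1;
        else if (mode & QNQ_MODE)
                return FLEX10;
        else if (mode & VNIC_MODE)
                return vNIC2;
        else if (mode & UMC_ENABLED)
                return UMC;
        return MC_NONE;
}

int main(void)
{
        /* prints "3 2 0": vNIC1, FLEX10, MC_NONE */
        printf("%d %d %d\n", convert(VNIC_MODE | QNQ_MODE),
               convert(QNQ_MODE), convert(0));
        return 0;
}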
92bf14ab
SP
3269/* On BE2/BE3 FW does not suggest the supported limits */
3270static void BEx_get_resources(struct be_adapter *adapter,
3271 struct be_resources *res)
3272{
3273 struct pci_dev *pdev = adapter->pdev;
3274 bool use_sriov = false;
ecf1f6e1
SR
3275 int max_vfs = 0;
3276
3277 if (be_physfn(adapter) && BE3_chip(adapter)) {
3278 be_cmd_get_profile_config(adapter, res, 0);
3279 /* Some old versions of BE3 FW don't report max_vfs value */
3280 if (res->max_vfs == 0) {
3281 max_vfs = pci_sriov_get_totalvfs(pdev);
3282 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3283 }
3284 use_sriov = res->max_vfs && sriov_want(adapter);
92bf14ab
SP
3285 }
3286
3287 if (be_physfn(adapter))
3288 res->max_uc_mac = BE_UC_PMAC_COUNT;
3289 else
3290 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3291
f93f160b
VV
3292 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3293
3294 if (be_is_mc(adapter)) {
 3295 /* Assuming that there are 4 channels per port
 3296 * when multi-channel is enabled
3297 */
3298 if (be_is_qnq_mode(adapter))
3299 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3300 else
3301 /* In a non-qnq multichannel mode, the pvid
3302 * takes up one vlan entry
3303 */
3304 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3305 } else {
92bf14ab 3306 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
f93f160b
VV
3307 }
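/* Worked numbers, assuming BE_NUM_VLANS_SUPPORTED is 64 as defined in
 * be.h: a QnQ multi-channel function gets 64/8 = 8 VLAN filters, a
 * non-QnQ multi-channel function gets 64/4 - 1 = 15 (one entry reserved
 * for the pvid), and a non-multi-channel function keeps all 64.
 */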
3308
92bf14ab
SP
3309 res->max_mcast_mac = BE_MAX_MC;
3310
a5243dab
VV
3311 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3312 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3313 * *only* if it is RSS-capable.
3314 */
3315 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3316 !be_physfn(adapter) || (be_is_mc(adapter) &&
3317 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
92bf14ab
SP
3318 res->max_tx_qs = 1;
3319 else
3320 res->max_tx_qs = BE3_MAX_TX_QS;
3321
3322 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3323 !use_sriov && be_physfn(adapter))
3324 res->max_rss_qs = (adapter->be3_native) ?
3325 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3326 res->max_rx_qs = res->max_rss_qs + 1;
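 /* The extra RX queue on top of max_rss_qs is the default (non-RSS)
  * queue that every function carries.
  */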
3327
e3dc867c 3328 if (be_physfn(adapter))
ecf1f6e1 3329 res->max_evt_qs = (res->max_vfs > 0) ?
e3dc867c
SR
3330 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3331 else
3332 res->max_evt_qs = 1;
92bf14ab
SP
3333
3334 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3335 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3336 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3337}
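/* On BE2/BE3 the limits above are therefore a mix of firmware-reported
 * values (profile config, function caps) and hard-coded chip knowledge,
 * since older firmware does not export a full resource description.
 */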
3338
30128031
SP
3339static void be_setup_init(struct be_adapter *adapter)
3340{
3341 adapter->vlan_prio_bmap = 0xff;
42f11cf2 3342 adapter->phy.link_speed = -1;
30128031
SP
3343 adapter->if_handle = -1;
3344 adapter->be3_native = false;
3345 adapter->promiscuous = false;
f25b119c
PR
3346 if (be_physfn(adapter))
3347 adapter->cmd_privileges = MAX_PRIVILEGES;
3348 else
3349 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
3350}
3351
92bf14ab 3352static int be_get_resources(struct be_adapter *adapter)
abb93951 3353{
92bf14ab
SP
3354 struct device *dev = &adapter->pdev->dev;
3355 struct be_resources res = {0};
3356 int status;
abb93951 3357
92bf14ab
SP
3358 if (BEx_chip(adapter)) {
3359 BEx_get_resources(adapter, &res);
3360 adapter->res = res;
abb93951
PR
3361 }
3362
92bf14ab
SP
 3363 /* For Lancer, SH etc. read per-function resource limits from FW.
 3364 * GET_FUNC_CONFIG returns per-function guaranteed limits.
 3365 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
3366 */
3367 if (!BEx_chip(adapter)) {
3368 status = be_cmd_get_func_config(adapter, &res);
3369 if (status)
3370 return status;
abb93951 3371
92bf14ab
SP
 3372 /* If RoCE may be enabled, stash away half the EQs for RoCE */
3373 if (be_roce_supported(adapter))
3374 res.max_evt_qs /= 2;
3375 adapter->res = res;
abb93951 3376
92bf14ab
SP
3377 if (be_physfn(adapter)) {
3378 status = be_cmd_get_profile_config(adapter, &res, 0);
3379 if (status)
3380 return status;
3381 adapter->res.max_vfs = res.max_vfs;
10cccf60 3382 adapter->res.vf_if_cap_flags = res.vf_if_cap_flags;
92bf14ab 3383 }
abb93951 3384
92bf14ab
SP
3385 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3386 be_max_txqs(adapter), be_max_rxqs(adapter),
3387 be_max_rss(adapter), be_max_eqs(adapter),
3388 be_max_vfs(adapter));
3389 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3390 be_max_uc(adapter), be_max_mc(adapter),
3391 be_max_vlans(adapter));
abb93951 3392 }
4c876616 3393
92bf14ab 3394 return 0;
abb93951
PR
3395}
3396
39f1d94d
SP
 3397 /* Routine to query per-function resource limits */
3398static int be_get_config(struct be_adapter *adapter)
3399{
542963b7 3400 u16 profile_id;
4c876616 3401 int status;
39f1d94d 3402
abb93951
PR
3403 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3404 &adapter->function_mode,
0ad3157e
VV
3405 &adapter->function_caps,
3406 &adapter->asic_rev);
abb93951 3407 if (status)
92bf14ab 3408 return status;
abb93951 3409
542963b7
VV
3410 if (be_physfn(adapter)) {
3411 status = be_cmd_get_active_profile(adapter, &profile_id);
3412 if (!status)
3413 dev_info(&adapter->pdev->dev,
3414 "Using profile 0x%x\n", profile_id);
3415 }
3416
92bf14ab
SP
3417 status = be_get_resources(adapter);
3418 if (status)
3419 return status;
abb93951 3420
46ee9c14
RN
3421 adapter->pmac_id = kcalloc(be_max_uc(adapter),
3422 sizeof(*adapter->pmac_id), GFP_KERNEL);
92bf14ab
SP
3423 if (!adapter->pmac_id)
3424 return -ENOMEM;
abb93951 3425
92bf14ab
SP
3426 /* Sanitize cfg_num_qs based on HW and platform limits */
3427 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3428
3429 return 0;
39f1d94d
SP
3430}
3431
95046b92
SP
3432static int be_mac_setup(struct be_adapter *adapter)
3433{
3434 u8 mac[ETH_ALEN];
3435 int status;
3436
3437 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3438 status = be_cmd_get_perm_mac(adapter, mac);
3439 if (status)
3440 return status;
3441
3442 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3443 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3444 } else {
3445 /* Maybe the HW was reset; dev_addr must be re-programmed */
3446 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3447 }
3448
2c7a9dc1
AK
3449 /* For BE3-R VFs, the PF programs the initial MAC address */
3450 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3451 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3452 &adapter->pmac_id[0], 0);
95046b92
SP
3453 return 0;
3454}
3455
68d7bdcb
SP
3456static void be_schedule_worker(struct be_adapter *adapter)
3457{
3458 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3459 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3460}
3461
7707133c 3462static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 3463{
68d7bdcb 3464 struct net_device *netdev = adapter->netdev;
10ef9ab4 3465 int status;
ba343c77 3466
7707133c 3467 status = be_evt_queues_create(adapter);
abb93951
PR
3468 if (status)
3469 goto err;
73d540f2 3470
7707133c 3471 status = be_tx_qs_create(adapter);
c2bba3df
SK
3472 if (status)
3473 goto err;
10ef9ab4 3474
7707133c 3475 status = be_rx_cqs_create(adapter);
10ef9ab4 3476 if (status)
a54769f5 3477 goto err;
6b7c5b94 3478
7707133c 3479 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
3480 if (status)
3481 goto err;
3482
68d7bdcb
SP
3483 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3484 if (status)
3485 goto err;
3486
3487 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3488 if (status)
3489 goto err;
3490
7707133c
SP
3491 return 0;
3492err:
3493 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3494 return status;
3495}
3496
68d7bdcb
SP
3497int be_update_queues(struct be_adapter *adapter)
3498{
3499 struct net_device *netdev = adapter->netdev;
3500 int status;
3501
3502 if (netif_running(netdev))
3503 be_close(netdev);
3504
3505 be_cancel_worker(adapter);
3506
3507 /* If any vectors have been shared with RoCE we cannot re-program
3508 * the MSIx table.
3509 */
3510 if (!adapter->num_msix_roce_vec)
3511 be_msix_disable(adapter);
3512
3513 be_clear_queues(adapter);
3514
3515 if (!msix_enabled(adapter)) {
3516 status = be_msix_enable(adapter);
3517 if (status)
3518 return status;
3519 }
3520
3521 status = be_setup_queues(adapter);
3522 if (status)
3523 return status;
3524
3525 be_schedule_worker(adapter);
3526
3527 if (netif_running(netdev))
3528 status = be_open(netdev);
3529
3530 return status;
3531}
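/* A minimal sketch of the re-create cycle above, as used when the queue
 * count changes at runtime (e.g. via ethtool -L): close -> cancel worker
 * -> free queues -> re-enable MSI-X -> recreate queues -> reschedule
 * worker -> open. MSI-X vectors shared with RoCE are left untouched.
 */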
3532
7707133c
SP
3533static int be_setup(struct be_adapter *adapter)
3534{
3535 struct device *dev = &adapter->pdev->dev;
3536 u32 tx_fc, rx_fc, en_flags;
3537 int status;
3538
3539 be_setup_init(adapter);
3540
3541 if (!lancer_chip(adapter))
3542 be_cmd_req_native_mode(adapter);
3543
3544 status = be_get_config(adapter);
10ef9ab4 3545 if (status)
a54769f5 3546 goto err;
6b7c5b94 3547
7707133c 3548 status = be_msix_enable(adapter);
10ef9ab4 3549 if (status)
a54769f5 3550 goto err;
6b7c5b94 3551
f9449ab7 3552 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
7707133c 3553 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
abb93951 3554 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
f9449ab7 3555 en_flags |= BE_IF_FLAGS_RSS;
92bf14ab
SP
3556 en_flags = en_flags & be_if_cap_flags(adapter);
3557 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
1578e777 3558 &adapter->if_handle, 0);
7707133c 3559 if (status)
a54769f5 3560 goto err;
6b7c5b94 3561
68d7bdcb
SP
3562 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3563 rtnl_lock();
7707133c 3564 status = be_setup_queues(adapter);
68d7bdcb 3565 rtnl_unlock();
95046b92 3566 if (status)
1578e777
PR
3567 goto err;
3568
7707133c 3569 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
7707133c
SP
3570
3571 status = be_mac_setup(adapter);
10ef9ab4
SP
3572 if (status)
3573 goto err;
3574
eeb65ced 3575 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
5a56eb10 3576
e9e2a904
SK
3577 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3578 dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
3579 adapter->fw_ver);
3580 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3581 }
3582
1d1e9a46 3583 if (adapter->vlans_added)
10329df8 3584 be_vid_config(adapter);
7ab8b0b4 3585
a54769f5 3586 be_set_rx_mode(adapter->netdev);
5fb379ee 3587
76a9e08e
SR
3588 be_cmd_get_acpi_wol_cap(adapter);
3589
ddc3f5cb 3590 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 3591
ddc3f5cb
AK
3592 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3593 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 3594 adapter->rx_fc);
2dc1deb6 3595
bdce2ad7
SR
3596 if (be_physfn(adapter))
3597 be_cmd_set_logical_link_config(adapter,
3598 IFLA_VF_LINK_STATE_AUTO, 0);
3599
b905b5d4 3600 if (sriov_want(adapter)) {
92bf14ab 3601 if (be_max_vfs(adapter))
39f1d94d
SP
3602 be_vf_setup(adapter);
3603 else
3604 dev_warn(dev, "device doesn't support SRIOV\n");
f9449ab7
SP
3605 }
3606
f25b119c
PR
3607 status = be_cmd_get_phy_info(adapter);
3608 if (!status && be_pause_supported(adapter))
42f11cf2
AK
3609 adapter->phy.fc_autoneg = 1;
3610
68d7bdcb 3611 be_schedule_worker(adapter);
e1ad8e33 3612 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 3613 return 0;
a54769f5
SP
3614err:
3615 be_clear(adapter);
3616 return status;
3617}
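/* Setup ordering above matters: resource limits must be known before
 * MSI-X sizing, the interface must exist before queue creation and MAC
 * programming, and SR-IOV (be_vf_setup) runs only after the PF itself
 * is fully configured.
 */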
6b7c5b94 3618
66268739
IV
3619#ifdef CONFIG_NET_POLL_CONTROLLER
3620static void be_netpoll(struct net_device *netdev)
3621{
3622 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3623 struct be_eq_obj *eqo;
66268739
IV
3624 int i;
3625
e49cc34f
SP
3626 for_all_evt_queues(adapter, eqo, i) {
3627 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3628 napi_schedule(&eqo->napi);
3629 }
10ef9ab4
SP
3630
3631 return;
66268739
IV
3632}
3633#endif
3634
96c9b2e4 3635static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
fa9a6fed 3636
306f1348
SP
3637static bool phy_flashing_required(struct be_adapter *adapter)
3638{
42f11cf2
AK
3639 return (adapter->phy.phy_type == TN_8022 &&
3640 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3641}
3642
c165541e
PR
3643static bool is_comp_in_ufi(struct be_adapter *adapter,
3644 struct flash_section_info *fsec, int type)
3645{
3646 int i = 0, img_type = 0;
3647 struct flash_section_info_g2 *fsec_g2 = NULL;
3648
ca34fe38 3649 if (BE2_chip(adapter))
c165541e
PR
3650 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3651
3652 for (i = 0; i < MAX_FLASH_COMP; i++) {
3653 if (fsec_g2)
3654 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3655 else
3656 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3657
3658 if (img_type == type)
3659 return true;
3660 }
3661 return false;
3662
3663}
3664
4188e7df 3665static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
748b539a
SP
3666 int header_size,
3667 const struct firmware *fw)
c165541e
PR
3668{
3669 struct flash_section_info *fsec = NULL;
3670 const u8 *p = fw->data;
3671
3672 p += header_size;
3673 while (p < (fw->data + fw->size)) {
3674 fsec = (struct flash_section_info *)p;
3675 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3676 return fsec;
3677 p += 32;
3678 }
3679 return NULL;
3680}
3681
96c9b2e4
VV
3682static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3683 u32 img_offset, u32 img_size, int hdr_size,
3684 u16 img_optype, bool *crc_match)
3685{
3686 u32 crc_offset;
3687 int status;
3688 u8 crc[4];
3689
3690 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3691 if (status)
3692 return status;
3693
3694 crc_offset = hdr_size + img_offset + img_size - 4;
3695
3696 /* Skip flashing, if crc of flashed region matches */
3697 if (!memcmp(crc, p + crc_offset, 4))
3698 *crc_match = true;
3699 else
3700 *crc_match = false;
3701
3702 return status;
3703}
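/* The last 4 bytes of each image region hold its CRC; comparing the CRC
 * read back from flash with the one embedded in the new image lets
 * unchanged sections be skipped instead of rewritten.
 */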
3704
773a2d7c 3705static int be_flash(struct be_adapter *adapter, const u8 *img,
748b539a 3706 struct be_dma_mem *flash_cmd, int optype, int img_size)
773a2d7c 3707{
773a2d7c 3708 struct be_cmd_write_flashrom *req = flash_cmd->va;
96c9b2e4
VV
3709 u32 total_bytes, flash_op, num_bytes;
3710 int status;
773a2d7c
PR
3711
3712 total_bytes = img_size;
3713 while (total_bytes) {
3714 num_bytes = min_t(u32, 32*1024, total_bytes);
3715
3716 total_bytes -= num_bytes;
3717
3718 if (!total_bytes) {
3719 if (optype == OPTYPE_PHY_FW)
3720 flash_op = FLASHROM_OPER_PHY_FLASH;
3721 else
3722 flash_op = FLASHROM_OPER_FLASH;
3723 } else {
3724 if (optype == OPTYPE_PHY_FW)
3725 flash_op = FLASHROM_OPER_PHY_SAVE;
3726 else
3727 flash_op = FLASHROM_OPER_SAVE;
3728 }
3729
be716446 3730 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
3731 img += num_bytes;
3732 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
748b539a 3733 flash_op, num_bytes);
4c60005f 3734 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
96c9b2e4
VV
3735 optype == OPTYPE_PHY_FW)
3736 break;
3737 else if (status)
773a2d7c 3738 return status;
773a2d7c
PR
3739 }
3740 return 0;
3741}
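/* be_flash() streams the image in 32KB chunks: every chunk but the last
 * is sent with a SAVE op (staged on the card), and the final chunk uses
 * a FLASH op, which commits the staged data to flash.
 */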
3742
0ad3157e 3743/* For BE2, BE3 and BE3-R */
ca34fe38 3744static int be_flash_BEx(struct be_adapter *adapter,
748b539a
SP
3745 const struct firmware *fw,
3746 struct be_dma_mem *flash_cmd, int num_of_images)
84517482 3747{
c165541e 3748 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
96c9b2e4 3749 struct device *dev = &adapter->pdev->dev;
c165541e 3750 struct flash_section_info *fsec = NULL;
96c9b2e4
VV
3751 int status, i, filehdr_size, num_comp;
3752 const struct flash_comp *pflashcomp;
3753 bool crc_match;
3754 const u8 *p;
c165541e
PR
3755
3756 struct flash_comp gen3_flash_types[] = {
3757 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3758 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3759 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3760 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3761 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3762 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3763 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3764 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3765 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3766 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3767 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3768 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3769 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3770 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3771 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3772 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3773 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3774 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3775 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3776 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 3777 };
c165541e
PR
3778
3779 struct flash_comp gen2_flash_types[] = {
3780 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3781 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3782 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3783 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3784 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3785 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3786 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3787 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3788 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3789 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3790 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3791 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3792 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3793 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3794 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3795 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
3796 };
3797
ca34fe38 3798 if (BE3_chip(adapter)) {
3f0d4560
AK
3799 pflashcomp = gen3_flash_types;
3800 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 3801 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
3802 } else {
3803 pflashcomp = gen2_flash_types;
3804 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 3805 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 3806 }
ca34fe38 3807
c165541e
PR
 3808 /* Get flash section info */
3809 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3810 if (!fsec) {
96c9b2e4 3811 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
c165541e
PR
3812 return -1;
3813 }
9fe96934 3814 for (i = 0; i < num_comp; i++) {
c165541e 3815 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 3816 continue;
c165541e
PR
3817
3818 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3819 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3820 continue;
3821
773a2d7c
PR
3822 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3823 !phy_flashing_required(adapter))
306f1348 3824 continue;
c165541e 3825
773a2d7c 3826 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
96c9b2e4
VV
3827 status = be_check_flash_crc(adapter, fw->data,
3828 pflashcomp[i].offset,
3829 pflashcomp[i].size,
3830 filehdr_size +
3831 img_hdrs_size,
3832 OPTYPE_REDBOOT, &crc_match);
3833 if (status) {
3834 dev_err(dev,
3835 "Could not get CRC for 0x%x region\n",
3836 pflashcomp[i].optype);
3837 continue;
3838 }
3839
3840 if (crc_match)
773a2d7c
PR
3841 continue;
3842 }
c165541e 3843
96c9b2e4
VV
3844 p = fw->data + filehdr_size + pflashcomp[i].offset +
3845 img_hdrs_size;
306f1348
SP
3846 if (p + pflashcomp[i].size > fw->data + fw->size)
3847 return -1;
773a2d7c
PR
3848
3849 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
748b539a 3850 pflashcomp[i].size);
773a2d7c 3851 if (status) {
96c9b2e4 3852 dev_err(dev, "Flashing section type 0x%x failed\n",
773a2d7c
PR
3853 pflashcomp[i].img_type);
3854 return status;
84517482 3855 }
84517482 3856 }
84517482
AK
3857 return 0;
3858}
3859
96c9b2e4
VV
3860static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3861{
3862 u32 img_type = le32_to_cpu(fsec_entry.type);
3863 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3864
3865 if (img_optype != 0xFFFF)
3866 return img_optype;
3867
3868 switch (img_type) {
3869 case IMAGE_FIRMWARE_iSCSI:
3870 img_optype = OPTYPE_ISCSI_ACTIVE;
3871 break;
3872 case IMAGE_BOOT_CODE:
3873 img_optype = OPTYPE_REDBOOT;
3874 break;
3875 case IMAGE_OPTION_ROM_ISCSI:
3876 img_optype = OPTYPE_BIOS;
3877 break;
3878 case IMAGE_OPTION_ROM_PXE:
3879 img_optype = OPTYPE_PXE_BIOS;
3880 break;
3881 case IMAGE_OPTION_ROM_FCoE:
3882 img_optype = OPTYPE_FCOE_BIOS;
3883 break;
3884 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3885 img_optype = OPTYPE_ISCSI_BACKUP;
3886 break;
3887 case IMAGE_NCSI:
3888 img_optype = OPTYPE_NCSI_FW;
3889 break;
3890 case IMAGE_FLASHISM_JUMPVECTOR:
3891 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
3892 break;
3893 case IMAGE_FIRMWARE_PHY:
3894 img_optype = OPTYPE_SH_PHY_FW;
3895 break;
3896 case IMAGE_REDBOOT_DIR:
3897 img_optype = OPTYPE_REDBOOT_DIR;
3898 break;
3899 case IMAGE_REDBOOT_CONFIG:
3900 img_optype = OPTYPE_REDBOOT_CONFIG;
3901 break;
3902 case IMAGE_UFI_DIR:
3903 img_optype = OPTYPE_UFI_DIR;
3904 break;
3905 default:
3906 break;
3907 }
3908
3909 return img_optype;
3910}
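/* An optype of 0xFFFF in the section entry marks an older UFI layout
 * that predates per-section optypes; the switch above derives the
 * optype from the image type for backward compatibility.
 */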
3911
773a2d7c 3912static int be_flash_skyhawk(struct be_adapter *adapter,
748b539a
SP
3913 const struct firmware *fw,
3914 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 3915{
773a2d7c 3916 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
96c9b2e4 3917 struct device *dev = &adapter->pdev->dev;
773a2d7c 3918 struct flash_section_info *fsec = NULL;
96c9b2e4
VV
3919 u32 img_offset, img_size, img_type;
3920 int status, i, filehdr_size;
3921 bool crc_match, old_fw_img;
3922 u16 img_optype;
3923 const u8 *p;
773a2d7c
PR
3924
3925 filehdr_size = sizeof(struct flash_file_hdr_g3);
3926 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3927 if (!fsec) {
96c9b2e4 3928 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
773a2d7c
PR
3929 return -1;
3930 }
3931
3932 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3933 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3934 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
96c9b2e4
VV
3935 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3936 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
3937 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
773a2d7c 3938
96c9b2e4 3939 if (img_optype == 0xFFFF)
773a2d7c 3940 continue;
96c9b2e4
VV
3941 /* Don't bother verifying CRC if an old FW image is being
3942 * flashed
3943 */
3944 if (old_fw_img)
3945 goto flash;
3946
3947 status = be_check_flash_crc(adapter, fw->data, img_offset,
3948 img_size, filehdr_size +
3949 img_hdrs_size, img_optype,
3950 &crc_match);
3951 /* The current FW image on the card does not recognize the new
3952 * FLASH op_type. The FW download is partially complete.
3953 * Reboot the server now to enable FW image to recognize the
3954 * new FLASH op_type. To complete the remaining process,
3955 * download the same FW again after the reboot.
3956 */
4c60005f
KA
3957 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
3958 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
96c9b2e4
VV
3959 dev_err(dev, "Flash incomplete. Reset the server\n");
3960 dev_err(dev, "Download FW image again after reset\n");
3961 return -EAGAIN;
3962 } else if (status) {
3963 dev_err(dev, "Could not get CRC for 0x%x region\n",
3964 img_optype);
3965 return -EFAULT;
773a2d7c
PR
3966 }
3967
96c9b2e4
VV
3968 if (crc_match)
3969 continue;
773a2d7c 3970
96c9b2e4
VV
3971flash:
3972 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
773a2d7c
PR
3973 if (p + img_size > fw->data + fw->size)
3974 return -1;
3975
3976 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
96c9b2e4
VV
3977 /* For old FW images ignore ILLEGAL_FIELD error or errors on
3978 * UFI_DIR region
3979 */
4c60005f
KA
3980 if (old_fw_img &&
3981 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
3982 (img_optype == OPTYPE_UFI_DIR &&
3983 base_status(status) == MCC_STATUS_FAILED))) {
96c9b2e4
VV
3984 continue;
3985 } else if (status) {
3986 dev_err(dev, "Flashing section type 0x%x failed\n",
3987 img_type);
3988 return -EFAULT;
773a2d7c
PR
3989 }
3990 }
3991 return 0;
3f0d4560
AK
3992}
3993
485bf569 3994static int lancer_fw_download(struct be_adapter *adapter,
748b539a 3995 const struct firmware *fw)
84517482 3996{
485bf569
SN
3997#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3998#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 3999 struct be_dma_mem flash_cmd;
485bf569
SN
4000 const u8 *data_ptr = NULL;
4001 u8 *dest_image_ptr = NULL;
4002 size_t image_size = 0;
4003 u32 chunk_size = 0;
4004 u32 data_written = 0;
4005 u32 offset = 0;
4006 int status = 0;
4007 u8 add_status = 0;
f67ef7ba 4008 u8 change_status;
84517482 4009
485bf569 4010 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 4011 dev_err(&adapter->pdev->dev,
485bf569
SN
4012 "FW Image not properly aligned. "
4013 "Length must be 4 byte aligned.\n");
4014 status = -EINVAL;
4015 goto lancer_fw_exit;
d9efd2af
SB
4016 }
4017
485bf569
SN
4018 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4019 + LANCER_FW_DOWNLOAD_CHUNK;
4020 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
d0320f75 4021 &flash_cmd.dma, GFP_KERNEL);
485bf569
SN
4022 if (!flash_cmd.va) {
4023 status = -ENOMEM;
485bf569
SN
4024 goto lancer_fw_exit;
4025 }
84517482 4026
485bf569
SN
4027 dest_image_ptr = flash_cmd.va +
4028 sizeof(struct lancer_cmd_req_write_object);
4029 image_size = fw->size;
4030 data_ptr = fw->data;
4031
4032 while (image_size) {
4033 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4034
4035 /* Copy the image chunk content. */
4036 memcpy(dest_image_ptr, data_ptr, chunk_size);
4037
4038 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
4039 chunk_size, offset,
4040 LANCER_FW_DOWNLOAD_LOCATION,
4041 &data_written, &change_status,
4042 &add_status);
485bf569
SN
4043 if (status)
4044 break;
4045
4046 offset += data_written;
4047 data_ptr += data_written;
4048 image_size -= data_written;
4049 }
4050
4051 if (!status) {
4052 /* Commit the FW written */
4053 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
4054 0, offset,
4055 LANCER_FW_DOWNLOAD_LOCATION,
4056 &data_written, &change_status,
4057 &add_status);
485bf569
SN
4058 }
4059
4060 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
748b539a 4061 flash_cmd.dma);
485bf569
SN
4062 if (status) {
4063 dev_err(&adapter->pdev->dev,
4064 "Firmware load error. "
4065 "Status code: 0x%x Additional Status: 0x%x\n",
4066 status, add_status);
4067 goto lancer_fw_exit;
4068 }
4069
f67ef7ba 4070 if (change_status == LANCER_FW_RESET_NEEDED) {
4bebb56a
SK
4071 dev_info(&adapter->pdev->dev,
4072 "Resetting adapter to activate new FW\n");
5c510811
SK
4073 status = lancer_physdev_ctrl(adapter,
4074 PHYSDEV_CONTROL_FW_RESET_MASK);
f67ef7ba
PR
4075 if (status) {
4076 dev_err(&adapter->pdev->dev,
4077 "Adapter busy for FW reset.\n"
4078 "New FW will not be active.\n");
4079 goto lancer_fw_exit;
4080 }
4081 } else if (change_status != LANCER_NO_RESET_NEEDED) {
748b539a
SP
4082 dev_err(&adapter->pdev->dev,
4083 "System reboot required for new FW to be active\n");
f67ef7ba
PR
4084 }
4085
485bf569
SN
4086 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
4087lancer_fw_exit:
4088 return status;
4089}
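/* Lancer download flow, as implemented above: 32KB chunks are written
 * to the "/prg" object, a zero-length write commits the image, and if
 * the firmware reports LANCER_FW_RESET_NEEDED the adapter is reset in
 * place; otherwise a system reboot may be needed to activate the new
 * image.
 */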
4090
ca34fe38
SP
4091#define UFI_TYPE2 2
4092#define UFI_TYPE3 3
0ad3157e 4093#define UFI_TYPE3R 10
ca34fe38
SP
4094#define UFI_TYPE4 4
4095static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 4096 struct flash_file_hdr_g3 *fhdr)
773a2d7c
PR
4097{
4098 if (fhdr == NULL)
4099 goto be_get_ufi_exit;
4100
ca34fe38
SP
4101 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4102 return UFI_TYPE4;
0ad3157e
VV
4103 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4104 if (fhdr->asic_type_rev == 0x10)
4105 return UFI_TYPE3R;
4106 else
4107 return UFI_TYPE3;
4108 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
ca34fe38 4109 return UFI_TYPE2;
773a2d7c
PR
4110
4111be_get_ufi_exit:
4112 dev_err(&adapter->pdev->dev,
4113 "UFI and Interface are not compatible for flashing\n");
4114 return -1;
4115}
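/* UFI compatibility decode: build[0] '4' pairs with Skyhawk (UFI_TYPE4),
 * '3' with BE3 (UFI_TYPE3R when asic_type_rev is 0x10, i.e. BE3-R, else
 * UFI_TYPE3), and '2' with BE2 (UFI_TYPE2); anything else is rejected.
 */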
4116
485bf569
SN
4117static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4118{
485bf569
SN
4119 struct flash_file_hdr_g3 *fhdr3;
4120 struct image_hdr *img_hdr_ptr = NULL;
4121 struct be_dma_mem flash_cmd;
4122 const u8 *p;
773a2d7c 4123 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
84517482 4124
be716446 4125 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
2b7bcebf
IV
4126 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
4127 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
4128 if (!flash_cmd.va) {
4129 status = -ENOMEM;
485bf569 4130 goto be_fw_exit;
84517482
AK
4131 }
4132
773a2d7c 4133 p = fw->data;
0ad3157e 4134 fhdr3 = (struct flash_file_hdr_g3 *)p;
773a2d7c 4135
0ad3157e 4136 ufi_type = be_get_ufi_type(adapter, fhdr3);
773a2d7c 4137
773a2d7c
PR
4138 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4139 for (i = 0; i < num_imgs; i++) {
4140 img_hdr_ptr = (struct image_hdr *)(fw->data +
4141 (sizeof(struct flash_file_hdr_g3) +
4142 i * sizeof(struct image_hdr)));
4143 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
0ad3157e
VV
4144 switch (ufi_type) {
4145 case UFI_TYPE4:
773a2d7c 4146 status = be_flash_skyhawk(adapter, fw,
748b539a 4147 &flash_cmd, num_imgs);
0ad3157e
VV
4148 break;
4149 case UFI_TYPE3R:
ca34fe38
SP
4150 status = be_flash_BEx(adapter, fw, &flash_cmd,
4151 num_imgs);
0ad3157e
VV
4152 break;
4153 case UFI_TYPE3:
4154 /* Do not flash this ufi on BE3-R cards */
4155 if (adapter->asic_rev < 0x10)
4156 status = be_flash_BEx(adapter, fw,
4157 &flash_cmd,
4158 num_imgs);
4159 else {
4160 status = -1;
4161 dev_err(&adapter->pdev->dev,
4162 "Can't load BE3 UFI on BE3R\n");
4163 }
4164 }
3f0d4560 4165 }
773a2d7c
PR
4166 }
4167
ca34fe38
SP
4168 if (ufi_type == UFI_TYPE2)
4169 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
773a2d7c 4170 else if (ufi_type == -1)
3f0d4560 4171 status = -1;
84517482 4172
2b7bcebf
IV
4173 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
4174 flash_cmd.dma);
84517482
AK
4175 if (status) {
4176 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 4177 goto be_fw_exit;
84517482
AK
4178 }
4179
af901ca1 4180 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 4181
485bf569
SN
4182be_fw_exit:
4183 return status;
4184}
4185
4186int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4187{
4188 const struct firmware *fw;
4189 int status;
4190
4191 if (!netif_running(adapter->netdev)) {
4192 dev_err(&adapter->pdev->dev,
4193 "Firmware load not allowed (interface is down)\n");
4194 return -1;
4195 }
4196
4197 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4198 if (status)
4199 goto fw_exit;
4200
4201 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4202
4203 if (lancer_chip(adapter))
4204 status = lancer_fw_download(adapter, fw);
4205 else
4206 status = be_fw_download(adapter, fw);
4207
eeb65ced
SK
4208 if (!status)
4209 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
4210 adapter->fw_on_flash);
4211
84517482
AK
4212fw_exit:
4213 release_firmware(fw);
4214 return status;
4215}
4216
748b539a 4217static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
a77dcb8c
AK
4218{
4219 struct be_adapter *adapter = netdev_priv(dev);
4220 struct nlattr *attr, *br_spec;
4221 int rem;
4222 int status = 0;
4223 u16 mode = 0;
4224
4225 if (!sriov_enabled(adapter))
4226 return -EOPNOTSUPP;
4227
4228 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4229
4230 nla_for_each_nested(attr, br_spec, rem) {
4231 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4232 continue;
4233
4234 mode = nla_get_u16(attr);
4235 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4236 return -EINVAL;
4237
4238 status = be_cmd_set_hsw_config(adapter, 0, 0,
4239 adapter->if_handle,
4240 mode == BRIDGE_MODE_VEPA ?
4241 PORT_FWD_TYPE_VEPA :
4242 PORT_FWD_TYPE_VEB);
4243 if (status)
4244 goto err;
4245
4246 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4247 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4248
4249 return status;
4250 }
4251err:
4252 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4253 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4254
4255 return status;
4256}
4257
4258static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
748b539a 4259 struct net_device *dev, u32 filter_mask)
a77dcb8c
AK
4260{
4261 struct be_adapter *adapter = netdev_priv(dev);
4262 int status = 0;
4263 u8 hsw_mode;
4264
4265 if (!sriov_enabled(adapter))
4266 return 0;
4267
4268 /* BE and Lancer chips support VEB mode only */
4269 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4270 hsw_mode = PORT_FWD_TYPE_VEB;
4271 } else {
4272 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4273 adapter->if_handle, &hsw_mode);
4274 if (status)
4275 return 0;
4276 }
4277
4278 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4279 hsw_mode == PORT_FWD_TYPE_VEPA ?
4280 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4281}
4282
c5abe7c0 4283#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
4284static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4285 __be16 port)
4286{
4287 struct be_adapter *adapter = netdev_priv(netdev);
4288 struct device *dev = &adapter->pdev->dev;
4289 int status;
4290
4291 if (lancer_chip(adapter) || BEx_chip(adapter))
4292 return;
4293
4294 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4295 dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
4296 be16_to_cpu(port));
4297 dev_info(dev,
4298 "Only one UDP port supported for VxLAN offloads\n");
4299 return;
4300 }
4301
4302 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4303 OP_CONVERT_NORMAL_TO_TUNNEL);
4304 if (status) {
4305 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4306 goto err;
4307 }
4308
4309 status = be_cmd_set_vxlan_port(adapter, port);
4310 if (status) {
4311 dev_warn(dev, "Failed to add VxLAN port\n");
4312 goto err;
4313 }
4314 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4315 adapter->vxlan_port = port;
4316
4317 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4318 be16_to_cpu(port));
4319 return;
4320err:
4321 be_disable_vxlan_offloads(adapter);
4322 return;
4323}
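/* Only Skyhawk-class chips reach the offload path above (Lancer and BEx
 * return early), and only a single VxLAN UDP port can be offloaded at a
 * time; a second add is refused until the first port is deleted.
 */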
4324
4325static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4326 __be16 port)
4327{
4328 struct be_adapter *adapter = netdev_priv(netdev);
4329
4330 if (lancer_chip(adapter) || BEx_chip(adapter))
4331 return;
4332
4333 if (adapter->vxlan_port != port)
4334 return;
4335
4336 be_disable_vxlan_offloads(adapter);
4337
4338 dev_info(&adapter->pdev->dev,
4339 "Disabled VxLAN offloads for UDP port %d\n",
4340 be16_to_cpu(port));
4341}
c5abe7c0 4342#endif
c9c47142 4343
e5686ad8 4344static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
4345 .ndo_open = be_open,
4346 .ndo_stop = be_close,
4347 .ndo_start_xmit = be_xmit,
a54769f5 4348 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
4349 .ndo_set_mac_address = be_mac_addr_set,
4350 .ndo_change_mtu = be_change_mtu,
ab1594e9 4351 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 4352 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
4353 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4354 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 4355 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 4356 .ndo_set_vf_vlan = be_set_vf_vlan,
ed616689 4357 .ndo_set_vf_rate = be_set_vf_tx_rate,
66268739 4358 .ndo_get_vf_config = be_get_vf_config,
bdce2ad7 4359 .ndo_set_vf_link_state = be_set_vf_link_state,
66268739
IV
4360#ifdef CONFIG_NET_POLL_CONTROLLER
4361 .ndo_poll_controller = be_netpoll,
4362#endif
a77dcb8c
AK
4363 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4364 .ndo_bridge_getlink = be_ndo_bridge_getlink,
6384a4d0 4365#ifdef CONFIG_NET_RX_BUSY_POLL
c9c47142 4366 .ndo_busy_poll = be_busy_poll,
6384a4d0 4367#endif
c5abe7c0 4368#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
4369 .ndo_add_vxlan_port = be_add_vxlan_port,
4370 .ndo_del_vxlan_port = be_del_vxlan_port,
c5abe7c0 4371#endif
6b7c5b94
SP
4372};
4373
4374static void be_netdev_init(struct net_device *netdev)
4375{
4376 struct be_adapter *adapter = netdev_priv(netdev);
4377
c9c47142
SP
4378 if (skyhawk_chip(adapter)) {
4379 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4380 NETIF_F_TSO | NETIF_F_TSO6 |
4381 NETIF_F_GSO_UDP_TUNNEL;
4382 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
4383 }
6332c8d3 4384 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68 4385 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
f646968f 4386 NETIF_F_HW_VLAN_CTAG_TX;
8b8ddc68
MM
4387 if (be_multi_rxq(adapter))
4388 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
4389
4390 netdev->features |= netdev->hw_features |
f646968f 4391 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4b972914 4392
eb8a50d9 4393 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 4394 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 4395
fbc13f01
AK
4396 netdev->priv_flags |= IFF_UNICAST_FLT;
4397
6b7c5b94
SP
4398 netdev->flags |= IFF_MULTICAST;
4399
b7e5887e 4400 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 4401
10ef9ab4 4402 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94 4403
7ad24ea4 4404 netdev->ethtool_ops = &be_ethtool_ops;
6b7c5b94
SP
4405}
4406
4407static void be_unmap_pci_bars(struct be_adapter *adapter)
4408{
c5b3ad4c
SP
4409 if (adapter->csr)
4410 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 4411 if (adapter->db)
ce66f781 4412 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
4413}
4414
ce66f781
SP
4415static int db_bar(struct be_adapter *adapter)
4416{
4417 if (lancer_chip(adapter) || !be_physfn(adapter))
4418 return 0;
4419 else
4420 return 4;
4421}
4422
4423static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 4424{
dbf0f2a7 4425 if (skyhawk_chip(adapter)) {
ce66f781
SP
4426 adapter->roce_db.size = 4096;
4427 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4428 db_bar(adapter));
4429 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4430 db_bar(adapter));
4431 }
045508a8 4432 return 0;
6b7c5b94
SP
4433}
4434
4435static int be_map_pci_bars(struct be_adapter *adapter)
4436{
4437 u8 __iomem *addr;
fe6d2a38 4438
c5b3ad4c
SP
4439 if (BEx_chip(adapter) && be_physfn(adapter)) {
4440 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4441 if (adapter->csr == NULL)
4442 return -ENOMEM;
4443 }
4444
ce66f781 4445 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
6b7c5b94
SP
4446 if (addr == NULL)
4447 goto pci_map_err;
ba343c77 4448 adapter->db = addr;
ce66f781
SP
4449
4450 be_roce_map_pci_bars(adapter);
6b7c5b94 4451 return 0;
ce66f781 4452
6b7c5b94
SP
4453pci_map_err:
4454 be_unmap_pci_bars(adapter);
4455 return -ENOMEM;
4456}
4457
6b7c5b94
SP
4458static void be_ctrl_cleanup(struct be_adapter *adapter)
4459{
8788fdc2 4460 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
4461
4462 be_unmap_pci_bars(adapter);
4463
4464 if (mem->va)
2b7bcebf
IV
4465 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4466 mem->dma);
e7b909a6 4467
5b8821b7 4468 mem = &adapter->rx_filter;
e7b909a6 4469 if (mem->va)
2b7bcebf
IV
4470 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4471 mem->dma);
6b7c5b94
SP
4472}
4473
6b7c5b94
SP
4474static int be_ctrl_init(struct be_adapter *adapter)
4475{
8788fdc2
SP
4476 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4477 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 4478 struct be_dma_mem *rx_filter = &adapter->rx_filter;
ce66f781 4479 u32 sli_intf;
6b7c5b94 4480 int status;
6b7c5b94 4481
ce66f781
SP
4482 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4483 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4484 SLI_INTF_FAMILY_SHIFT;
4485 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4486
6b7c5b94
SP
4487 status = be_map_pci_bars(adapter);
4488 if (status)
e7b909a6 4489 goto done;
6b7c5b94
SP
4490
4491 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
4492 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4493 mbox_mem_alloc->size,
4494 &mbox_mem_alloc->dma,
4495 GFP_KERNEL);
6b7c5b94 4496 if (!mbox_mem_alloc->va) {
e7b909a6
SP
4497 status = -ENOMEM;
4498 goto unmap_pci_bars;
6b7c5b94
SP
4499 }
4500 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4501 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4502 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4503 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 4504
5b8821b7 4505 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
ede23fa8
JP
4506 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4507 rx_filter->size, &rx_filter->dma,
4508 GFP_KERNEL);
5b8821b7 4509 if (rx_filter->va == NULL) {
e7b909a6
SP
4510 status = -ENOMEM;
4511 goto free_mbox;
4512 }
1f9061d2 4513
2984961c 4514 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
4515 spin_lock_init(&adapter->mcc_lock);
4516 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 4517
5eeff635 4518 init_completion(&adapter->et_cmd_compl);
cf588477 4519 pci_save_state(adapter->pdev);
6b7c5b94 4520 return 0;
e7b909a6
SP
4521
4522free_mbox:
2b7bcebf
IV
4523 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4524 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
4525
4526unmap_pci_bars:
4527 be_unmap_pci_bars(adapter);
4528
4529done:
4530 return status;
6b7c5b94
SP
4531}
4532
4533static void be_stats_cleanup(struct be_adapter *adapter)
4534{
3abcdeda 4535 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
4536
4537 if (cmd->va)
2b7bcebf
IV
4538 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4539 cmd->va, cmd->dma);
6b7c5b94
SP
4540}
4541
4542static int be_stats_init(struct be_adapter *adapter)
4543{
3abcdeda 4544 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 4545
ca34fe38
SP
4546 if (lancer_chip(adapter))
4547 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4548 else if (BE2_chip(adapter))
89a88ab8 4549 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
61000861 4550 else if (BE3_chip(adapter))
ca34fe38 4551 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
61000861
AK
4552 else
4553 /* ALL non-BE ASICs */
4554 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
ca34fe38 4555
ede23fa8
JP
4556 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4557 GFP_KERNEL);
6b7c5b94
SP
4558 if (cmd->va == NULL)
4559 return -1;
4560 return 0;
4561}
4562
3bc6b06c 4563static void be_remove(struct pci_dev *pdev)
6b7c5b94
SP
4564{
4565 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 4566
6b7c5b94
SP
4567 if (!adapter)
4568 return;
4569
045508a8 4570 be_roce_dev_remove(adapter);
8cef7a78 4571 be_intr_set(adapter, false);
045508a8 4572
f67ef7ba
PR
4573 cancel_delayed_work_sync(&adapter->func_recovery_work);
4574
6b7c5b94
SP
4575 unregister_netdev(adapter->netdev);
4576
5fb379ee
SP
4577 be_clear(adapter);
4578
bf99e50d
PR
4579 /* tell fw we're done with firing cmds */
4580 be_cmd_fw_clean(adapter);
4581
6b7c5b94
SP
4582 be_stats_cleanup(adapter);
4583
4584 be_ctrl_cleanup(adapter);
4585
d6b6d987
SP
4586 pci_disable_pcie_error_reporting(pdev);
4587
6b7c5b94
SP
4588 pci_release_regions(pdev);
4589 pci_disable_device(pdev);
4590
4591 free_netdev(adapter->netdev);
4592}
4593
39f1d94d 4594static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 4595{
baaa08d1 4596 int status, level;
6b7c5b94 4597
9e1453c5
AK
4598 status = be_cmd_get_cntl_attributes(adapter);
4599 if (status)
4600 return status;
4601
7aeb2156
PR
4602 /* Must be a power of 2 or else MODULO will BUG_ON */
4603 adapter->be_get_temp_freq = 64;
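 /* With be_worker() rescheduling itself every 1000ms, a frequency of 64
  * polls the die temperature roughly once a minute.
  */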
4604
baaa08d1
VV
4605 if (BEx_chip(adapter)) {
4606 level = be_cmd_get_fw_log_level(adapter);
4607 adapter->msg_enable =
4608 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4609 }
941a77d5 4610
92bf14ab 4611 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
2243e2e9 4612 return 0;
6b7c5b94
SP
4613}
4614
f67ef7ba 4615static int lancer_recover_func(struct be_adapter *adapter)
d8110f62 4616{
01e5b2c4 4617 struct device *dev = &adapter->pdev->dev;
d8110f62 4618 int status;
d8110f62 4619
f67ef7ba
PR
4620 status = lancer_test_and_set_rdy_state(adapter);
4621 if (status)
4622 goto err;
d8110f62 4623
f67ef7ba
PR
4624 if (netif_running(adapter->netdev))
4625 be_close(adapter->netdev);
d8110f62 4626
f67ef7ba
PR
4627 be_clear(adapter);
4628
01e5b2c4 4629 be_clear_all_error(adapter);
f67ef7ba
PR
4630
4631 status = be_setup(adapter);
4632 if (status)
4633 goto err;
d8110f62 4634
f67ef7ba
PR
4635 if (netif_running(adapter->netdev)) {
4636 status = be_open(adapter->netdev);
d8110f62
PR
4637 if (status)
4638 goto err;
f67ef7ba 4639 }
d8110f62 4640
4bebb56a 4641 dev_err(dev, "Adapter recovery successful\n");
f67ef7ba
PR
4642 return 0;
4643err:
01e5b2c4
SK
4644 if (status == -EAGAIN)
4645 dev_err(dev, "Waiting for resource provisioning\n");
4646 else
4bebb56a 4647 dev_err(dev, "Adapter recovery failed\n");
d8110f62 4648
f67ef7ba
PR
4649 return status;
4650}
4651
4652static void be_func_recovery_task(struct work_struct *work)
4653{
4654 struct be_adapter *adapter =
4655 container_of(work, struct be_adapter, func_recovery_work.work);
01e5b2c4 4656 int status = 0;
d8110f62 4657
f67ef7ba 4658 be_detect_error(adapter);
d8110f62 4659
f67ef7ba 4660 if (adapter->hw_error && lancer_chip(adapter)) {
d8110f62 4661
f67ef7ba
PR
4662 rtnl_lock();
4663 netif_device_detach(adapter->netdev);
4664 rtnl_unlock();
d8110f62 4665
f67ef7ba 4666 status = lancer_recover_func(adapter);
f67ef7ba
PR
4667 if (!status)
4668 netif_device_attach(adapter->netdev);
d8110f62 4669 }
f67ef7ba 4670
01e5b2c4
SK
4671 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4672 * no need to attempt further recovery.
4673 */
4674 if (!status || status == -EAGAIN)
4675 schedule_delayed_work(&adapter->func_recovery_work,
4676 msecs_to_jiffies(1000));
d8110f62
PR
4677}
4678
4679static void be_worker(struct work_struct *work)
4680{
4681 struct be_adapter *adapter =
4682 container_of(work, struct be_adapter, work.work);
4683 struct be_rx_obj *rxo;
4684 int i;
4685
d8110f62
PR
4686 /* when interrupts are not yet enabled, just reap any pending
4687 * mcc completions */
4688 if (!netif_running(adapter->netdev)) {
072a9c48 4689 local_bh_disable();
10ef9ab4 4690 be_process_mcc(adapter);
072a9c48 4691 local_bh_enable();
d8110f62
PR
4692 goto reschedule;
4693 }
4694
4695 if (!adapter->stats_cmd_sent) {
4696 if (lancer_chip(adapter))
4697 lancer_cmd_get_pport_stats(adapter,
4698 &adapter->stats_cmd);
4699 else
4700 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4701 }
4702
d696b5e2
VV
4703 if (be_physfn(adapter) &&
4704 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
7aeb2156
PR
4705 be_cmd_get_die_temperature(adapter);
4706
d8110f62 4707 for_all_rx_queues(adapter, rxo, i) {
6384a4d0
SP
4708 /* Replenish RX-queues starved due to memory
4709 * allocation failures.
4710 */
4711 if (rxo->rx_post_starved)
d8110f62 4712 be_post_rx_frags(rxo, GFP_KERNEL);
d8110f62
PR
4713 }
4714
2632bafd 4715 be_eqd_update(adapter);
10ef9ab4 4716
d8110f62
PR
4717reschedule:
4718 adapter->work_counter++;
4719 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4720}
4721
257a3feb 4722/* If any VFs are already enabled don't FLR the PF */
39f1d94d
SP
4723static bool be_reset_required(struct be_adapter *adapter)
4724{
257a3feb 4725 return pci_num_vf(adapter->pdev) ? false : true;
39f1d94d
SP
4726}
4727
d379142b
SP
4728static char *mc_name(struct be_adapter *adapter)
4729{
f93f160b
VV
4730 char *str = ""; /* default */
4731
4732 switch (adapter->mc_type) {
4733 case UMC:
4734 str = "UMC";
4735 break;
4736 case FLEX10:
4737 str = "FLEX10";
4738 break;
4739 case vNIC1:
4740 str = "vNIC-1";
4741 break;
4742 case nPAR:
4743 str = "nPAR";
4744 break;
4745 case UFP:
4746 str = "UFP";
4747 break;
4748 case vNIC2:
4749 str = "vNIC-2";
4750 break;
4751 default:
4752 str = "";
4753 }
4754
4755 return str;
d379142b
SP
4756}
4757
4758static inline char *func_name(struct be_adapter *adapter)
4759{
4760 return be_physfn(adapter) ? "PF" : "VF";
4761}
4762
1dd06ae8 4763static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
6b7c5b94
SP
4764{
4765 int status = 0;
4766 struct be_adapter *adapter;
4767 struct net_device *netdev;
b4e32a71 4768 char port_name;
6b7c5b94
SP
4769
4770 status = pci_enable_device(pdev);
4771 if (status)
4772 goto do_none;
4773
4774 status = pci_request_regions(pdev, DRV_NAME);
4775 if (status)
4776 goto disable_dev;
4777 pci_set_master(pdev);
4778
7f640062 4779 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
6b7c5b94
SP
4780 if (netdev == NULL) {
4781 status = -ENOMEM;
4782 goto rel_reg;
4783 }
4784 adapter = netdev_priv(netdev);
4785 adapter->pdev = pdev;
4786 pci_set_drvdata(pdev, adapter);
4787 adapter->netdev = netdev;
2243e2e9 4788 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 4789
4c15c243 4790 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
4791 if (!status) {
4792 netdev->features |= NETIF_F_HIGHDMA;
4793 } else {
4c15c243 4794 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
4795 if (status) {
4796 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4797 goto free_netdev;
4798 }
4799 }
4800
ea58c180
AK
4801 if (be_physfn(adapter)) {
4802 status = pci_enable_pcie_error_reporting(pdev);
4803 if (!status)
4804 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4805 }
d6b6d987 4806
6b7c5b94
SP
4807 status = be_ctrl_init(adapter);
4808 if (status)
39f1d94d 4809 goto free_netdev;
6b7c5b94 4810
2243e2e9 4811 /* sync up with fw's ready state */
ba343c77 4812 if (be_physfn(adapter)) {
bf99e50d 4813 status = be_fw_wait_ready(adapter);
ba343c77
SB
4814 if (status)
4815 goto ctrl_clean;
ba343c77 4816 }
6b7c5b94 4817
39f1d94d
SP
4818 if (be_reset_required(adapter)) {
4819 status = be_cmd_reset_function(adapter);
4820 if (status)
4821 goto ctrl_clean;
556ae191 4822
2d177be8
KA
4823 /* Wait for interrupts to quiesce after an FLR */
4824 msleep(100);
4825 }
8cef7a78
SK
4826
4827 /* Allow interrupts for other ULPs running on NIC function */
4828 be_intr_set(adapter, true);
10ef9ab4 4829
2d177be8
KA
4830 /* tell fw we're ready to fire cmds */
4831 status = be_cmd_fw_init(adapter);
4832 if (status)
4833 goto ctrl_clean;
4834
2243e2e9
SP
4835 status = be_stats_init(adapter);
4836 if (status)
4837 goto ctrl_clean;
4838
39f1d94d 4839 status = be_get_initial_config(adapter);
6b7c5b94
SP
4840 if (status)
4841 goto stats_clean;
6b7c5b94
SP
4842
4843 INIT_DELAYED_WORK(&adapter->work, be_worker);
f67ef7ba 4844 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
a54769f5 4845 adapter->rx_fc = adapter->tx_fc = true;
6b7c5b94 4846
5fb379ee
SP
4847 status = be_setup(adapter);
4848 if (status)
55f5c3c5 4849 goto stats_clean;
2243e2e9 4850
3abcdeda 4851 be_netdev_init(netdev);
6b7c5b94
SP
4852 status = register_netdev(netdev);
4853 if (status != 0)
5fb379ee 4854 goto unsetup;
6b7c5b94 4855
045508a8
PP
4856 be_roce_dev_add(adapter);
4857
f67ef7ba
PR
4858 schedule_delayed_work(&adapter->func_recovery_work,
4859 msecs_to_jiffies(1000));
b4e32a71
PR
4860
4861 be_cmd_query_port_name(adapter, &port_name);
4862
d379142b
SP
4863 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4864 func_name(adapter), mc_name(adapter), port_name);
34b1ef04 4865
6b7c5b94
SP
4866 return 0;
4867
5fb379ee
SP
4868unsetup:
4869 be_clear(adapter);
6b7c5b94
SP
4870stats_clean:
4871 be_stats_cleanup(adapter);
4872ctrl_clean:
4873 be_ctrl_cleanup(adapter);
f9449ab7 4874free_netdev:
fe6d2a38 4875 free_netdev(netdev);
6b7c5b94
SP
4876rel_reg:
4877 pci_release_regions(pdev);
4878disable_dev:
4879 pci_disable_device(pdev);
4880do_none:
c4ca2374 4881 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
4882 return status;
4883}
4884
4885static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4886{
4887 struct be_adapter *adapter = pci_get_drvdata(pdev);
4888 struct net_device *netdev = adapter->netdev;
4889
76a9e08e 4890 if (adapter->wol_en)
71d8d1b5
AK
4891 be_setup_wol(adapter, true);
4892
d4360d6f 4893 be_intr_set(adapter, false);
f67ef7ba
PR
4894 cancel_delayed_work_sync(&adapter->func_recovery_work);
4895
6b7c5b94
SP
4896 netif_device_detach(netdev);
4897 if (netif_running(netdev)) {
4898 rtnl_lock();
4899 be_close(netdev);
4900 rtnl_unlock();
4901 }
9b0365f1 4902 be_clear(adapter);
6b7c5b94
SP
4903
4904 pci_save_state(pdev);
4905 pci_disable_device(pdev);
4906 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4907 return 0;
4908}
4909
4910static int be_resume(struct pci_dev *pdev)
4911{
4912 int status = 0;
4913 struct be_adapter *adapter = pci_get_drvdata(pdev);
4914 struct net_device *netdev = adapter->netdev;
4915
4916 netif_device_detach(netdev);
4917
4918 status = pci_enable_device(pdev);
4919 if (status)
4920 return status;
4921
1ca01512 4922 pci_set_power_state(pdev, PCI_D0);
6b7c5b94
SP
4923 pci_restore_state(pdev);
4924
dd5746bf
SB
4925 status = be_fw_wait_ready(adapter);
4926 if (status)
4927 return status;
4928
d4360d6f 4929 be_intr_set(adapter, true);
2243e2e9
SP
4930 /* tell fw we're ready to fire cmds */
4931 status = be_cmd_fw_init(adapter);
4932 if (status)
4933 return status;
4934
9b0365f1 4935 be_setup(adapter);
6b7c5b94
SP
4936 if (netif_running(netdev)) {
4937 rtnl_lock();
4938 be_open(netdev);
4939 rtnl_unlock();
4940 }
f67ef7ba
PR
4941
4942 schedule_delayed_work(&adapter->func_recovery_work,
4943 msecs_to_jiffies(1000));
6b7c5b94 4944 netif_device_attach(netdev);
71d8d1b5 4945
76a9e08e 4946 if (adapter->wol_en)
71d8d1b5 4947 be_setup_wol(adapter, false);
a4ca055f 4948
6b7c5b94
SP
4949 return 0;
4950}
4951
82456b03
SP
4952/*
4953 * An FLR will stop BE from DMAing any data.
4954 */
4955static void be_shutdown(struct pci_dev *pdev)
4956{
4957 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 4958
2d5d4154
AK
4959 if (!adapter)
4960 return;
82456b03 4961
0f4a6828 4962 cancel_delayed_work_sync(&adapter->work);
f67ef7ba 4963 cancel_delayed_work_sync(&adapter->func_recovery_work);
a4ca055f 4964
2d5d4154 4965 netif_device_detach(adapter->netdev);
82456b03 4966
57841869
AK
4967 be_cmd_reset_function(adapter);
4968
82456b03 4969 pci_disable_device(pdev);
82456b03
SP
4970}
4971
cf588477 4972static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
748b539a 4973 pci_channel_state_t state)
cf588477
SP
4974{
4975 struct be_adapter *adapter = pci_get_drvdata(pdev);
4976 struct net_device *netdev = adapter->netdev;
4977
4978 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4979
01e5b2c4
SK
4980 if (!adapter->eeh_error) {
4981 adapter->eeh_error = true;
cf588477 4982
01e5b2c4 4983 cancel_delayed_work_sync(&adapter->func_recovery_work);
cf588477 4984
cf588477 4985 rtnl_lock();
01e5b2c4
SK
4986 netif_device_detach(netdev);
4987 if (netif_running(netdev))
4988 be_close(netdev);
cf588477 4989 rtnl_unlock();
01e5b2c4
SK
4990
4991 be_clear(adapter);
cf588477 4992 }
cf588477
SP
4993
4994 if (state == pci_channel_io_perm_failure)
4995 return PCI_ERS_RESULT_DISCONNECT;
4996
4997 pci_disable_device(pdev);
4998
eeb7fc7b
SK
4999 /* The error could cause the FW to trigger a flash debug dump.
5000 * Resetting the card while flash dump is in progress
c8a54163
PR
5001 * can cause it not to recover; wait for it to finish.
5002 * Wait only for first function as it is needed only once per
5003 * adapter.
eeb7fc7b 5004 */
c8a54163
PR
5005 if (pdev->devfn == 0)
5006 ssleep(30);
5007
cf588477
SP
5008 return PCI_ERS_RESULT_NEED_RESET;
5009}
5010
5011static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
5012{
5013 struct be_adapter *adapter = pci_get_drvdata(pdev);
5014 int status;
5015
5016 dev_info(&adapter->pdev->dev, "EEH reset\n");
cf588477
SP
5017
5018 status = pci_enable_device(pdev);
5019 if (status)
5020 return PCI_ERS_RESULT_DISCONNECT;
5021
5022 pci_set_master(pdev);
1ca01512 5023 pci_set_power_state(pdev, PCI_D0);
cf588477
SP
5024 pci_restore_state(pdev);
5025
5026 /* Check if card is ok and fw is ready */
c5b3ad4c
SP
5027 dev_info(&adapter->pdev->dev,
5028 "Waiting for FW to be ready after EEH reset\n");
bf99e50d 5029 status = be_fw_wait_ready(adapter);
cf588477
SP
5030 if (status)
5031 return PCI_ERS_RESULT_DISCONNECT;
5032
d6b6d987 5033 pci_cleanup_aer_uncorrect_error_status(pdev);
01e5b2c4 5034 be_clear_all_error(adapter);
cf588477
SP
5035 return PCI_ERS_RESULT_RECOVERED;
5036}
5037
5038static void be_eeh_resume(struct pci_dev *pdev)
5039{
5040 int status = 0;
5041 struct be_adapter *adapter = pci_get_drvdata(pdev);
5042 struct net_device *netdev = adapter->netdev;
5043
5044 dev_info(&adapter->pdev->dev, "EEH resume\n");
5045
5046 pci_save_state(pdev);
5047
2d177be8 5048 status = be_cmd_reset_function(adapter);
cf588477
SP
5049 if (status)
5050 goto err;
5051
03a58baa
KA
5052 /* On some BE3 FW versions, after a HW reset,
5053 * interrupts will remain disabled for each function.
5054 * So, explicitly enable interrupts
5055 */
5056 be_intr_set(adapter, true);
5057
2d177be8
KA
5058 /* tell fw we're ready to fire cmds */
5059 status = be_cmd_fw_init(adapter);
bf99e50d
PR
5060 if (status)
5061 goto err;
5062
cf588477
SP
5063 status = be_setup(adapter);
5064 if (status)
5065 goto err;
5066
5067 if (netif_running(netdev)) {
5068 status = be_open(netdev);
5069 if (status)
5070 goto err;
5071 }
f67ef7ba
PR
5072
5073 schedule_delayed_work(&adapter->func_recovery_work,
5074 msecs_to_jiffies(1000));
cf588477
SP
5075 netif_device_attach(netdev);
5076 return;
5077err:
5078 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
5079}
5080
3646f0e5 5081static const struct pci_error_handlers be_eeh_handlers = {
cf588477
SP
5082 .error_detected = be_eeh_err_detected,
5083 .slot_reset = be_eeh_reset,
5084 .resume = be_eeh_resume,
5085};
5086
6b7c5b94
SP
5087static struct pci_driver be_driver = {
5088 .name = DRV_NAME,
5089 .id_table = be_dev_ids,
5090 .probe = be_probe,
5091 .remove = be_remove,
5092 .suspend = be_suspend,
cf588477 5093 .resume = be_resume,
82456b03 5094 .shutdown = be_shutdown,
cf588477 5095 .err_handler = &be_eeh_handlers
6b7c5b94
SP
5096};
5097
5098static int __init be_init_module(void)
5099{
8e95a202
JP
5100 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5101 rx_frag_size != 2048) {
6b7c5b94
SP
5102 printk(KERN_WARNING DRV_NAME
5103 " : Module param rx_frag_size must be 2048/4096/8192."
5104 " Using 2048\n");
5105 rx_frag_size = 2048;
5106 }
6b7c5b94
SP
5107
5108 return pci_register_driver(&be_driver);
5109}
5110module_init(be_init_module);
5111
5112static void __exit be_exit_module(void)
5113{
5114 pci_unregister_driver(&be_driver);
5115}
5116module_exit(be_exit_module);