be2net: receive pkts with L3, L4 errors on VFs (drivers/net/ethernet/emulex/benet/be_main.c)
/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

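/* Allocate zeroed DMA-coherent memory for a queue of 'len' entries of
 * 'entry_size' bytes each and record it in the queue's dma_mem.
 */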
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

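/* Enable/disable adapter interrupts by toggling the HOSTINTR bit of the
 * MEMBAR interrupt-control register in PCI config space.
 */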
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

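/* Doorbell helpers: each notify routine below composes a doorbell word
 * (ring id, count, arm/clear flags) and writes it to the queue's
 * doorbell register at adapter->db plus the queue-specific offset.
 */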
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

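/* Change the primary MAC: program the new MAC, delete the old one, and
 * confirm via a FW query that the new MAC is actually active before
 * updating netdev->dev_addr.
 */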
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

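/* Fold a 16-bit HW counter that wraps at 65535 into a 32-bit SW
 * accumulator: the low 16 bits mirror the last HW reading and each
 * detected wrap bumps the high 16 bits.
 */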
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x) (x & 0xFFFF)
#define hi(x) (x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

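/* Fold per-queue SW counters (sampled consistently via the u64_stats
 * sync primitives) and FW-reported error counters into the
 * rtnl_link_stats64 structure.
 */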
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

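/* Return the vlan tag to use on Tx; if the priority supplied by the OS
 * is not in the adapter's available-priority bitmap, substitute the
 * recommended priority.
 */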
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}

static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u16 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u16 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}

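/* Detect IPv6 pkts carrying an extension header; on BE3 such pkts can
 * stall Tx when HW VLAN tagging is requested (see be_ipv6_tx_stall_chk()
 * and the workarounds below).
 */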
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	/* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	return skb;
}

static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}

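/* Main transmit entry point: apply the HW workarounds, enqueue the
 * skb's WRBs, and ring the Tx doorbell unless more pkts are expected
 * (skb->xmit_more) while the queue is still awake.
 */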
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_mc_list(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
	else
		be_set_mc_promisc(adapter);
}

static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
	int i;

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;
}

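/* Apply the netdev's RX filter state (promisc flags, UC/MC address
 * lists) to the FW, falling back to the promiscuous modes when the
 * configured entries exceed what the interface supports.
 */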
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}

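/* SR-IOV handlers: the routines below let the PF driver configure
 * per-VF MAC address, transparent VLAN tagging, Tx rate, link state
 * and MAC spoof-checking.
 */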
ba343c77
SB
1414static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1415{
1416 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1417 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
ba343c77
SB
1418 int status;
1419
11ac75ed 1420 if (!sriov_enabled(adapter))
ba343c77
SB
1421 return -EPERM;
1422
11ac75ed 1423 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
ba343c77
SB
1424 return -EINVAL;
1425
3c31aaf3
VV
1426 /* Proceed further only if user provided MAC is different
1427 * from active MAC
1428 */
1429 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1430 return 0;
1431
3175d8c2
SP
1432 if (BEx_chip(adapter)) {
1433 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1434 vf + 1);
ba343c77 1435
11ac75ed
SP
1436 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1437 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
1438 } else {
1439 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1440 vf + 1);
590c391d
PR
1441 }
1442
abccf23e
KA
1443 if (status) {
1444 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1445 mac, vf, status);
1446 return be_cmd_status(status);
1447 }
64600ea5 1448
abccf23e
KA
1449 ether_addr_copy(vf_cfg->mac_addr, mac);
1450
1451 return 0;
ba343c77
SB
1452}
1453
64600ea5 1454static int be_get_vf_config(struct net_device *netdev, int vf,
748b539a 1455 struct ifla_vf_info *vi)
64600ea5
AK
1456{
1457 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1458 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1459
11ac75ed 1460 if (!sriov_enabled(adapter))
64600ea5
AK
1461 return -EPERM;
1462
11ac75ed 1463 if (vf >= adapter->num_vfs)
64600ea5
AK
1464 return -EINVAL;
1465
1466 vi->vf = vf;
ed616689
SC
1467 vi->max_tx_rate = vf_cfg->tx_rate;
1468 vi->min_tx_rate = 0;
a60b3a13
AK
1469 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1470 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
11ac75ed 1471 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
bdce2ad7 1472 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
e7bcbd7b 1473 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
64600ea5
AK
1474
1475 return 0;
1476}
1477
435452aa
VV
1478static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1479{
1480 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1481 u16 vids[BE_NUM_VLANS_SUPPORTED];
1482 int vf_if_id = vf_cfg->if_handle;
1483 int status;
1484
1485 /* Enable Transparent VLAN Tagging */
e7bcbd7b 1486 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
435452aa
VV
1487 if (status)
1488 return status;
1489
1490 /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
1491 vids[0] = 0;
1492 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1493 if (!status)
1494 dev_info(&adapter->pdev->dev,
1495 "Cleared guest VLANs on VF%d", vf);
1496
1497 /* After TVT is enabled, disallow VFs to program VLAN filters */
1498 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1499 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1500 ~BE_PRIV_FILTMGMT, vf + 1);
1501 if (!status)
1502 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1503 }
1504 return 0;
1505}
1506
1507static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1508{
1509 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1510 struct device *dev = &adapter->pdev->dev;
1511 int status;
1512
1513 /* Reset Transparent VLAN Tagging. */
1514 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
e7bcbd7b 1515 vf_cfg->if_handle, 0, 0);
435452aa
VV
1516 if (status)
1517 return status;
1518
1519 /* Allow VFs to program VLAN filtering */
1520 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1521 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1522 BE_PRIV_FILTMGMT, vf + 1);
1523 if (!status) {
1524 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1525 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1526 }
1527 }
1528
1529 dev_info(dev,
1530 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1531 return 0;
1532}
1533
748b539a 1534static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1da87b7f
AK
1535{
1536 struct be_adapter *adapter = netdev_priv(netdev);
b9fc0e53 1537 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
435452aa 1538 int status;
1da87b7f 1539
11ac75ed 1540 if (!sriov_enabled(adapter))
1da87b7f
AK
1541 return -EPERM;
1542
b9fc0e53 1543 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1da87b7f
AK
1544 return -EINVAL;
1545
b9fc0e53
AK
1546 if (vlan || qos) {
1547 vlan |= qos << VLAN_PRIO_SHIFT;
435452aa 1548 status = be_set_vf_tvt(adapter, vf, vlan);
1da87b7f 1549 } else {
435452aa 1550 status = be_clear_vf_tvt(adapter, vf);
1da87b7f
AK
1551 }
1552
abccf23e
KA
1553 if (status) {
1554 dev_err(&adapter->pdev->dev,
435452aa
VV
1555 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1556 status);
abccf23e
KA
1557 return be_cmd_status(status);
1558 }
1559
1560 vf_cfg->vlan_tag = vlan;
abccf23e 1561 return 0;
1da87b7f
AK
1562}
1563
ed616689
SC
1564static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1565 int min_tx_rate, int max_tx_rate)
e1d18735
AK
1566{
1567 struct be_adapter *adapter = netdev_priv(netdev);
0f77ba73
RN
1568 struct device *dev = &adapter->pdev->dev;
1569 int percent_rate, status = 0;
1570 u16 link_speed = 0;
1571 u8 link_status;
e1d18735 1572
11ac75ed 1573 if (!sriov_enabled(adapter))
e1d18735
AK
1574 return -EPERM;
1575
94f434c2 1576 if (vf >= adapter->num_vfs)
e1d18735
AK
1577 return -EINVAL;
1578
ed616689
SC
1579 if (min_tx_rate)
1580 return -EINVAL;
1581
0f77ba73
RN
1582 if (!max_tx_rate)
1583 goto config_qos;
1584
1585 status = be_cmd_link_status_query(adapter, &link_speed,
1586 &link_status, 0);
1587 if (status)
1588 goto err;
1589
1590 if (!link_status) {
1591 dev_err(dev, "TX-rate setting not allowed when link is down\n");
940a3fcd 1592 status = -ENETDOWN;
0f77ba73
RN
1593 goto err;
1594 }
1595
1596 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1597 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1598 link_speed);
1599 status = -EINVAL;
1600 goto err;
1601 }
1602
1603 /* On Skyhawk the QOS setting must be done only as a % value */
1604 percent_rate = link_speed / 100;
1605 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1606 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1607 percent_rate);
1608 status = -EINVAL;
1609 goto err;
94f434c2 1610 }
e1d18735 1611
1612config_qos:
1613 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
e1d18735 1614 if (status)
1615 goto err;
1616
1617 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1618 return 0;
1619
1620err:
1621 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
1622 max_tx_rate, vf);
abccf23e 1623 return be_cmd_status(status);
e1d18735 1624}
e2fb1afa 1625
1626static int be_set_vf_link_state(struct net_device *netdev, int vf,
1627 int link_state)
1628{
1629 struct be_adapter *adapter = netdev_priv(netdev);
1630 int status;
1631
1632 if (!sriov_enabled(adapter))
1633 return -EPERM;
1634
1635 if (vf >= adapter->num_vfs)
1636 return -EINVAL;
1637
1638 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
1639 if (status) {
1640 dev_err(&adapter->pdev->dev,
1641 "Link state change on VF %d failed: %#x\n", vf, status);
1642 return be_cmd_status(status);
1643 }
bdce2ad7 1644
1645 adapter->vf_cfg[vf].plink_tracking = link_state;
1646
1647 return 0;
bdce2ad7 1648}
e1d18735 1649
1650static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
1651{
1652 struct be_adapter *adapter = netdev_priv(netdev);
1653 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1654 u8 spoofchk;
1655 int status;
1656
1657 if (!sriov_enabled(adapter))
1658 return -EPERM;
1659
1660 if (vf >= adapter->num_vfs)
1661 return -EINVAL;
1662
1663 if (BEx_chip(adapter))
1664 return -EOPNOTSUPP;
1665
1666 if (enable == vf_cfg->spoofchk)
1667 return 0;
1668
1669 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
1670
1671 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
1672 0, spoofchk);
1673 if (status) {
1674 dev_err(&adapter->pdev->dev,
1675 "Spoofchk change on VF %d failed: %#x\n", vf, status);
1676 return be_cmd_status(status);
1677 }
1678
1679 vf_cfg->spoofchk = enable;
1680 return 0;
1681}
1682
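/* Record the packet counts and timestamp that serve as the baseline for
 * the next adaptive interrupt-coalescing (AIC) calculation.
 */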
1683static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1684 ulong now)
6b7c5b94 1685{
1686 aic->rx_pkts_prev = rx_pkts;
1687 aic->tx_reqs_prev = tx_pkts;
1688 aic->jiffies = now;
1689}
ac124ff9 1690
20947770 1691static int be_get_new_eqd(struct be_eq_obj *eqo)
2632bafd 1692{
1693 struct be_adapter *adapter = eqo->adapter;
1694 int eqd, start;
2632bafd 1695 struct be_aic_obj *aic;
1696 struct be_rx_obj *rxo;
1697 struct be_tx_obj *txo;
20947770 1698 u64 rx_pkts = 0, tx_pkts = 0;
1699 ulong now;
1700 u32 pps, delta;
20947770 1701 int i;
10ef9ab4 1702
1703 aic = &adapter->aic_obj[eqo->idx];
1704 if (!aic->enable) {
1705 if (aic->jiffies)
1706 aic->jiffies = 0;
1707 eqd = aic->et_eqd;
1708 return eqd;
1709 }
6b7c5b94 1710
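	/* Snapshot the per-queue counters; the u64_stats_fetch_*_irq() pair
	 * retries the read if a writer updated the stats concurrently.
	 */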
20947770 1711 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2632bafd 1712 do {
57a7744e 1713 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
20947770 1714 rx_pkts += rxo->stats.rx_pkts;
57a7744e 1715 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
20947770 1716 }
10ef9ab4 1717
20947770 1718 for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
2632bafd 1719 do {
57a7744e 1720 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
20947770 1721 tx_pkts += txo->stats.tx_reqs;
57a7744e 1722 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
20947770 1723 }
6b7c5b94 1724
 1725	/* Skip if the counters wrapped around, or on the first calculation */
1726 now = jiffies;
1727 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1728 rx_pkts < aic->rx_pkts_prev ||
1729 tx_pkts < aic->tx_reqs_prev) {
1730 be_aic_update(aic, rx_pkts, tx_pkts, now);
1731 return aic->prev_eqd;
1732 }
2632bafd 1733
1734 delta = jiffies_to_msecs(now - aic->jiffies);
1735 if (delta == 0)
1736 return aic->prev_eqd;
10ef9ab4 1737
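	/* Heuristic: 4 units of EQ delay per 15K pkts/sec of aggregate
	 * rx+tx rate; e.g. 300K pkts/sec gives (300000 / 15000) << 2 = 80,
	 * which is then clamped to the [min_eqd, max_eqd] range below.
	 */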
1738 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1739 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1740 eqd = (pps / 15000) << 2;
2632bafd 1741
1742 if (eqd < 8)
1743 eqd = 0;
1744 eqd = min_t(u32, eqd, aic->max_eqd);
1745 eqd = max_t(u32, eqd, aic->min_eqd);
1746
1747 be_aic_update(aic, rx_pkts, tx_pkts, now);
1748
1749 return eqd;
1750}
1751
1752/* For Skyhawk-R only */
1753static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
1754{
1755 struct be_adapter *adapter = eqo->adapter;
1756 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
1757 ulong now = jiffies;
1758 int eqd;
1759 u32 mult_enc;
1760
1761 if (!aic->enable)
1762 return 0;
1763
1764 if (time_before_eq(now, aic->jiffies) ||
1765 jiffies_to_msecs(now - aic->jiffies) < 1)
1766 eqd = aic->prev_eqd;
1767 else
1768 eqd = be_get_new_eqd(eqo);
1769
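	/* Bucket the computed delay into one of the coarse R2I_DLY_ENC_*
	 * encodings accepted by the Skyhawk EQ doorbell.
	 */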
1770 if (eqd > 100)
1771 mult_enc = R2I_DLY_ENC_1;
1772 else if (eqd > 60)
1773 mult_enc = R2I_DLY_ENC_2;
1774 else if (eqd > 20)
1775 mult_enc = R2I_DLY_ENC_3;
1776 else
1777 mult_enc = R2I_DLY_ENC_0;
1778
1779 aic->prev_eqd = eqd;
1780
1781 return mult_enc;
1782}
1783
1784void be_eqd_update(struct be_adapter *adapter, bool force_update)
1785{
1786 struct be_set_eqd set_eqd[MAX_EVT_QS];
1787 struct be_aic_obj *aic;
1788 struct be_eq_obj *eqo;
1789 int i, num = 0, eqd;
1790
1791 for_all_evt_queues(adapter, eqo, i) {
1792 aic = &adapter->aic_obj[eqo->idx];
1793 eqd = be_get_new_eqd(eqo);
1794 if (force_update || eqd != aic->prev_eqd) {
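			/* Convert the EQ-delay in usecs into the multiplier
			 * units that the MODIFY_EQ_DELAY FW command expects.
			 */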
1795 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1796 set_eqd[num].eq_id = eqo->q.id;
1797 aic->prev_eqd = eqd;
1798 num++;
1799 }
ac124ff9 1800 }
1801
1802 if (num)
1803 be_cmd_modify_eqd(adapter, set_eqd, num);
1804}
1805
3abcdeda 1806static void be_rx_stats_update(struct be_rx_obj *rxo,
748b539a 1807 struct be_rx_compl_info *rxcp)
4097f663 1808{
ac124ff9 1809 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1810
ab1594e9 1811 u64_stats_update_begin(&stats->sync);
3abcdeda 1812 stats->rx_compl++;
2e588f84 1813 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1814 stats->rx_pkts++;
2e588f84 1815 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1816 stats->rx_mcast_pkts++;
2e588f84 1817 if (rxcp->err)
ac124ff9 1818 stats->rx_compl_err++;
ab1594e9 1819 u64_stats_update_end(&stats->sync);
1820}
1821
2e588f84 1822static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1823{
19fad86f 1824 /* L4 checksum is not reliable for non TCP/UDP packets.
1825 * Also ignore ipcksm for ipv6 pkts
1826 */
2e588f84 1827 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 1828 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
1829}
1830
0b0ef1d0 1831static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
6b7c5b94 1832{
10ef9ab4 1833 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1834 struct be_rx_page_info *rx_page_info;
3abcdeda 1835 struct be_queue_info *rxq = &rxo->q;
0b0ef1d0 1836 u16 frag_idx = rxq->tail;
6b7c5b94 1837
3abcdeda 1838 rx_page_info = &rxo->page_info_tbl[frag_idx];
1839 BUG_ON(!rx_page_info->page);
1840
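	/* The big page is DMA-unmapped only when its last fragment is
	 * consumed; for earlier fragments the data is merely synced to
	 * the CPU.
	 */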
e50287be 1841 if (rx_page_info->last_frag) {
1842 dma_unmap_page(&adapter->pdev->dev,
1843 dma_unmap_addr(rx_page_info, bus),
1844 adapter->big_page_size, DMA_FROM_DEVICE);
1845 rx_page_info->last_frag = false;
1846 } else {
1847 dma_sync_single_for_cpu(&adapter->pdev->dev,
1848 dma_unmap_addr(rx_page_info, bus),
1849 rx_frag_size, DMA_FROM_DEVICE);
205859a2 1850 }
6b7c5b94 1851
0b0ef1d0 1852 queue_tail_inc(rxq);
1853 atomic_dec(&rxq->used);
1854 return rx_page_info;
1855}
1856
 1857/* Throw away the data in the Rx completion */
1858static void be_rx_compl_discard(struct be_rx_obj *rxo,
1859 struct be_rx_compl_info *rxcp)
6b7c5b94 1860{
6b7c5b94 1861 struct be_rx_page_info *page_info;
2e588f84 1862 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1863
e80d9da6 1864 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 1865 page_info = get_rx_page_info(rxo);
1866 put_page(page_info->page);
1867 memset(page_info, 0, sizeof(*page_info));
1868 }
1869}
1870
1871/*
1872 * skb_fill_rx_data forms a complete skb for an ether frame
1873 * indicated by rxcp.
1874 */
1875static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1876 struct be_rx_compl_info *rxcp)
6b7c5b94 1877{
6b7c5b94 1878 struct be_rx_page_info *page_info;
1879 u16 i, j;
1880 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1881 u8 *start;
6b7c5b94 1882
0b0ef1d0 1883 page_info = get_rx_page_info(rxo);
1884 start = page_address(page_info->page) + page_info->page_offset;
1885 prefetch(start);
1886
1887 /* Copy data in the first descriptor of this completion */
2e588f84 1888 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 1889
1890 skb->len = curr_frag_len;
1891 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 1892 memcpy(skb->data, start, curr_frag_len);
1893 /* Complete packet has now been moved to data */
1894 put_page(page_info->page);
1895 skb->data_len = 0;
1896 skb->tail += curr_frag_len;
1897 } else {
1898 hdr_len = ETH_HLEN;
1899 memcpy(skb->data, start, hdr_len);
6b7c5b94 1900 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1901 skb_frag_set_page(skb, 0, page_info->page);
1902 skb_shinfo(skb)->frags[0].page_offset =
1903 page_info->page_offset + hdr_len;
1904 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1905 curr_frag_len - hdr_len);
6b7c5b94 1906 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1907 skb->truesize += rx_frag_size;
1908 skb->tail += hdr_len;
1909 }
205859a2 1910 page_info->page = NULL;
6b7c5b94 1911
1912 if (rxcp->pkt_size <= rx_frag_size) {
1913 BUG_ON(rxcp->num_rcvd != 1);
1914 return;
1915 }
1916
1917 /* More frags present for this completion */
1918 remaining = rxcp->pkt_size - curr_frag_len;
1919 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1920 page_info = get_rx_page_info(rxo);
2e588f84 1921 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1922
1923 /* Coalesce all frags from the same physical page in one slot */
1924 if (page_info->page_offset == 0) {
1925 /* Fresh page */
1926 j++;
b061b39e 1927 skb_frag_set_page(skb, j, page_info->page);
1928 skb_shinfo(skb)->frags[j].page_offset =
1929 page_info->page_offset;
9e903e08 1930 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1931 skb_shinfo(skb)->nr_frags++;
1932 } else {
1933 put_page(page_info->page);
1934 }
1935
9e903e08 1936 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1937 skb->len += curr_frag_len;
1938 skb->data_len += curr_frag_len;
bdb28a97 1939 skb->truesize += rx_frag_size;
2e588f84 1940 remaining -= curr_frag_len;
205859a2 1941 page_info->page = NULL;
6b7c5b94 1942 }
bd46cb6c 1943 BUG_ON(j > MAX_SKB_FRAGS);
1944}
1945
5be93b9a 1946/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 1947static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 1948 struct be_rx_compl_info *rxcp)
6b7c5b94 1949{
10ef9ab4 1950 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1951 struct net_device *netdev = adapter->netdev;
6b7c5b94 1952 struct sk_buff *skb;
89420424 1953
bb349bb4 1954 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1955 if (unlikely(!skb)) {
ac124ff9 1956 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1957 be_rx_compl_discard(rxo, rxcp);
1958 return;
1959 }
1960
10ef9ab4 1961 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1962
6332c8d3 1963 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1964 skb->ip_summed = CHECKSUM_UNNECESSARY;
1965 else
1966 skb_checksum_none_assert(skb);
6b7c5b94 1967
6332c8d3 1968 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1969 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1970 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 1971 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1972
b6c0e89d 1973 skb->csum_level = rxcp->tunneled;
6384a4d0 1974 skb_mark_napi_id(skb, napi);
6b7c5b94 1975
343e43c0 1976 if (rxcp->vlanf)
86a9bad3 1977 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1978
1979 netif_receive_skb(skb);
1980}
1981
5be93b9a 1982/* Process the RX completion indicated by rxcp when GRO is enabled */
1983static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1984 struct napi_struct *napi,
1985 struct be_rx_compl_info *rxcp)
6b7c5b94 1986{
10ef9ab4 1987 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1988 struct be_rx_page_info *page_info;
5be93b9a 1989 struct sk_buff *skb = NULL;
1990 u16 remaining, curr_frag_len;
1991 u16 i, j;
3968fa1e 1992
10ef9ab4 1993 skb = napi_get_frags(napi);
5be93b9a 1994 if (!skb) {
10ef9ab4 1995 be_rx_compl_discard(rxo, rxcp);
1996 return;
1997 }
1998
1999 remaining = rxcp->pkt_size;
2000 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 2001 page_info = get_rx_page_info(rxo);
2002
2003 curr_frag_len = min(remaining, rx_frag_size);
2004
2005 /* Coalesce all frags from the same physical page in one slot */
2006 if (i == 0 || page_info->page_offset == 0) {
2007 /* First frag or Fresh page */
2008 j++;
b061b39e 2009 skb_frag_set_page(skb, j, page_info->page);
2010 skb_shinfo(skb)->frags[j].page_offset =
2011 page_info->page_offset;
9e903e08 2012 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
2013 } else {
2014 put_page(page_info->page);
2015 }
9e903e08 2016 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 2017 skb->truesize += rx_frag_size;
bd46cb6c 2018 remaining -= curr_frag_len;
2019 memset(page_info, 0, sizeof(*page_info));
2020 }
bd46cb6c 2021 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 2022
5be93b9a 2023 skb_shinfo(skb)->nr_frags = j + 1;
2024 skb->len = rxcp->pkt_size;
2025 skb->data_len = rxcp->pkt_size;
5be93b9a 2026 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 2027 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 2028 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 2029 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 2030
b6c0e89d 2031 skb->csum_level = rxcp->tunneled;
6384a4d0 2032 skb_mark_napi_id(skb, napi);
5be93b9a 2033
343e43c0 2034 if (rxcp->vlanf)
86a9bad3 2035 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 2036
10ef9ab4 2037 napi_gro_frags(napi);
2038}
2039
2040static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2041 struct be_rx_compl_info *rxcp)
2e588f84 2042{
2043 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2044 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2045 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2046 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2047 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2048 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2049 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2050 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2051 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2052 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2053 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 2054 if (rxcp->vlanf) {
2055 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2056 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 2057 }
c3c18bc1 2058 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 2059 rxcp->tunneled =
c3c18bc1 2060 GET_RX_COMPL_V1_BITS(tunneled, compl);
2061}
2062
2063static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2064 struct be_rx_compl_info *rxcp)
2e588f84 2065{
2066 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2067 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2068 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2069 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2070 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2071 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2072 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2073 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2074 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2075 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2076 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 2077 if (rxcp->vlanf) {
2078 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2079 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 2080 }
2081 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2082 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2083}
2084
2085static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
2086{
2087 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2088 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2089 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2090
 2091	/* For checking the valid bit, it is OK to use either definition as the
 2092	 * valid bit is at the same position in both v0 and v1 Rx compls */
2093 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
2094 return NULL;
6b7c5b94 2095
2096 rmb();
2097 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2098
2e588f84 2099 if (adapter->be3_native)
10ef9ab4 2100 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 2101 else
10ef9ab4 2102 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 2103
2104 if (rxcp->ip_frag)
2105 rxcp->l4_csum = 0;
2106
15d72184 2107 if (rxcp->vlanf) {
2108 /* In QNQ modes, if qnq bit is not set, then the packet was
2109 * tagged only with the transparent outer vlan-tag and must
2110 * not be treated as a vlan packet by host
2111 */
2112 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 2113 rxcp->vlanf = 0;
6b7c5b94 2114
15d72184 2115 if (!lancer_chip(adapter))
3c709f8f 2116 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 2117
939cf306 2118 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 2119 !test_bit(rxcp->vlan_tag, adapter->vids))
2120 rxcp->vlanf = 0;
2121 }
2122
 2123	/* As the compl has been parsed, reset it; we won't touch it again */
2124 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 2125
3abcdeda 2126 queue_tail_inc(&rxo->cq);
2127 return rxcp;
2128}
2129
1829b086 2130static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 2131{
6b7c5b94 2132 u32 order = get_order(size);
1829b086 2133
6b7c5b94 2134 if (order > 0)
2135 gfp |= __GFP_COMP;
2136 return alloc_pages(gfp, order);
2137}
2138
2139/*
2140 * Allocate a page, split it to fragments of size rx_frag_size and post as
2141 * receive buffers to BE
2142 */
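/* With the default rx_frag_size of 2048 on systems with 4K pages,
 * big_page_size works out to a single page that is split into two
 * 2048-byte receive fragments.
 */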
c30d7266 2143static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
6b7c5b94 2144{
3abcdeda 2145 struct be_adapter *adapter = rxo->adapter;
26d92f92 2146 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 2147 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 2148 struct page *pagep = NULL;
ba42fad0 2149 struct device *dev = &adapter->pdev->dev;
2150 struct be_eth_rx_d *rxd;
2151 u64 page_dmaaddr = 0, frag_dmaaddr;
c30d7266 2152 u32 posted, page_offset = 0, notify = 0;
6b7c5b94 2153
3abcdeda 2154 page_info = &rxo->page_info_tbl[rxq->head];
c30d7266 2155 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
6b7c5b94 2156 if (!pagep) {
1829b086 2157 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 2158 if (unlikely(!pagep)) {
ac124ff9 2159 rx_stats(rxo)->rx_post_fail++;
2160 break;
2161 }
2162 page_dmaaddr = dma_map_page(dev, pagep, 0,
2163 adapter->big_page_size,
2b7bcebf 2164 DMA_FROM_DEVICE);
2165 if (dma_mapping_error(dev, page_dmaaddr)) {
2166 put_page(pagep);
2167 pagep = NULL;
d3de1540 2168 adapter->drv_stats.dma_map_errors++;
2169 break;
2170 }
e50287be 2171 page_offset = 0;
2172 } else {
2173 get_page(pagep);
e50287be 2174 page_offset += rx_frag_size;
6b7c5b94 2175 }
e50287be 2176 page_info->page_offset = page_offset;
6b7c5b94 2177 page_info->page = pagep;
2178
2179 rxd = queue_head_node(rxq);
e50287be 2180 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
2181 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2182 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
2183
2184 /* Any space left in the current big page for another frag? */
2185 if ((page_offset + rx_frag_size + rx_frag_size) >
2186 adapter->big_page_size) {
2187 pagep = NULL;
2188 page_info->last_frag = true;
2189 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2190 } else {
2191 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 2192 }
2193
2194 prev_page_info = page_info;
2195 queue_head_inc(rxq);
10ef9ab4 2196 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 2197 }
2198
2199 /* Mark the last frag of a page when we break out of the above loop
2200 * with no more slots available in the RXQ
2201 */
2202 if (pagep) {
2203 prev_page_info->last_frag = true;
2204 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2205 }
2206
2207 if (posted) {
6b7c5b94 2208 atomic_add(posted, &rxq->used);
2209 if (rxo->rx_post_starved)
2210 rxo->rx_post_starved = false;
c30d7266 2211 do {
69304cc9 2212 notify = min(MAX_NUM_POST_ERX_DB, posted);
2213 be_rxq_notify(adapter, rxq->id, notify);
2214 posted -= notify;
2215 } while (posted);
2216 } else if (atomic_read(&rxq->used) == 0) {
2217 /* Let be_worker replenish when memory is available */
3abcdeda 2218 rxo->rx_post_starved = true;
6b7c5b94 2219 }
2220}
2221
152ffe5b 2222static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
6b7c5b94 2223{
2224 struct be_queue_info *tx_cq = &txo->cq;
2225 struct be_tx_compl_info *txcp = &txo->txcp;
2226 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
6b7c5b94 2227
152ffe5b 2228 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
2229 return NULL;
2230
152ffe5b 2231 /* Ensure load ordering of valid bit dword and other dwords below */
f3eb62d2 2232 rmb();
152ffe5b 2233 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2234
2235 txcp->status = GET_TX_COMPL_BITS(status, compl);
2236 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
6b7c5b94 2237
152ffe5b 2238 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
2239 queue_tail_inc(tx_cq);
2240 return txcp;
2241}
2242
3c8def97 2243static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 2244 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 2245{
5f07b3c5 2246 struct sk_buff **sent_skbs = txo->sent_skb_list;
3c8def97 2247 struct be_queue_info *txq = &txo->q;
2248 u16 frag_index, num_wrbs = 0;
2249 struct sk_buff *skb = NULL;
2250 bool unmap_skb_hdr = false;
a73b796e 2251 struct be_eth_wrb *wrb;
6b7c5b94 2252
ec43b1a6 2253 do {
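		/* A non-NULL slot in sent_skb_list marks the hdr wrb of a
		 * TX request; the frag wrbs that follow carry no skb pointer.
		 */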
2254 if (sent_skbs[txq->tail]) {
2255 /* Free skb from prev req */
2256 if (skb)
2257 dev_consume_skb_any(skb);
2258 skb = sent_skbs[txq->tail];
2259 sent_skbs[txq->tail] = NULL;
2260 queue_tail_inc(txq); /* skip hdr wrb */
2261 num_wrbs++;
2262 unmap_skb_hdr = true;
2263 }
a73b796e 2264 wrb = queue_tail_node(txq);
5f07b3c5 2265 frag_index = txq->tail;
2b7bcebf 2266 unmap_tx_frag(&adapter->pdev->dev, wrb,
5f07b3c5 2267 (unmap_skb_hdr && skb_headlen(skb)));
ec43b1a6 2268 unmap_skb_hdr = false;
6b7c5b94 2269 queue_tail_inc(txq);
2270 num_wrbs++;
2271 } while (frag_index != last_index);
2272 dev_consume_skb_any(skb);
6b7c5b94 2273
4d586b82 2274 return num_wrbs;
2275}
2276
2277/* Return the number of events in the event queue */
2278static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 2279{
2280 struct be_eq_entry *eqe;
2281 int num = 0;
859b1e4e 2282
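	/* Consume EQ entries until the first zero (invalid) one; each
	 * consumed entry is zeroed so that it is not counted again.
	 */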
2283 do {
2284 eqe = queue_tail_node(&eqo->q);
2285 if (eqe->evt == 0)
2286 break;
859b1e4e 2287
2288 rmb();
2289 eqe->evt = 0;
2290 num++;
2291 queue_tail_inc(&eqo->q);
2292 } while (true);
2293
2294 return num;
2295}
2296
 2297/* Leaves the EQ in disarmed state */
2298static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2299{
10ef9ab4 2300 int num = events_get(eqo);
859b1e4e 2301
20947770 2302 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
2303}
2304
10ef9ab4 2305static void be_rx_cq_clean(struct be_rx_obj *rxo)
2306{
2307 struct be_rx_page_info *page_info;
2308 struct be_queue_info *rxq = &rxo->q;
2309 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2310 struct be_rx_compl_info *rxcp;
2311 struct be_adapter *adapter = rxo->adapter;
2312 int flush_wait = 0;
6b7c5b94 2313
2314 /* Consume pending rx completions.
2315 * Wait for the flush completion (identified by zero num_rcvd)
2316 * to arrive. Notify CQ even when there are no more CQ entries
2317 * for HW to flush partially coalesced CQ entries.
2318 * In Lancer, there is no need to wait for flush compl.
2319 */
2320 for (;;) {
2321 rxcp = be_rx_compl_get(rxo);
ddf1169f 2322 if (!rxcp) {
2323 if (lancer_chip(adapter))
2324 break;
2325
2326 if (flush_wait++ > 10 || be_hw_error(adapter)) {
2327 dev_warn(&adapter->pdev->dev,
2328 "did not receive flush compl\n");
2329 break;
2330 }
2331 be_cq_notify(adapter, rx_cq->id, true, 0);
2332 mdelay(1);
2333 } else {
2334 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2335 be_cq_notify(adapter, rx_cq->id, false, 1);
2336 if (rxcp->num_rcvd == 0)
2337 break;
2338 }
2339 }
2340
2341 /* After cleanup, leave the CQ in unarmed state */
2342 be_cq_notify(adapter, rx_cq->id, false, 0);
2343
2344 /* Then free posted rx buffers that were not used */
2345 while (atomic_read(&rxq->used) > 0) {
2346 page_info = get_rx_page_info(rxo);
2347 put_page(page_info->page);
2348 memset(page_info, 0, sizeof(*page_info));
2349 }
2350 BUG_ON(atomic_read(&rxq->used));
2351 rxq->tail = 0;
2352 rxq->head = 0;
2353}
2354
0ae57bb3 2355static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2356{
2357 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2358 struct device *dev = &adapter->pdev->dev;
152ffe5b 2359 struct be_tx_compl_info *txcp;
0ae57bb3 2360 struct be_queue_info *txq;
152ffe5b 2361 struct be_tx_obj *txo;
0ae57bb3 2362 int i, pending_txqs;
a8e9179a 2363
1a3d0717 2364 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2365 do {
2366 pending_txqs = adapter->num_tx_qs;
2367
2368 for_all_tx_queues(adapter, txo, i) {
2369 cmpl = 0;
2370 num_wrbs = 0;
0ae57bb3 2371 txq = &txo->q;
2372 while ((txcp = be_tx_compl_get(txo))) {
2373 num_wrbs +=
2374 be_tx_compl_process(adapter, txo,
2375 txcp->end_index);
2376 cmpl++;
2377 }
2378 if (cmpl) {
2379 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2380 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2381 timeo = 0;
0ae57bb3 2382 }
cf5671e6 2383 if (!be_is_tx_compl_pending(txo))
0ae57bb3 2384 pending_txqs--;
2385 }
2386
1a3d0717 2387 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
2388 break;
2389
2390 mdelay(1);
2391 } while (true);
2392
5f07b3c5 2393 /* Free enqueued TX that was never notified to HW */
2394 for_all_tx_queues(adapter, txo, i) {
2395 txq = &txo->q;
0ae57bb3 2396
2397 if (atomic_read(&txq->used)) {
2398 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2399 i, atomic_read(&txq->used));
2400 notified_idx = txq->tail;
0ae57bb3 2401 end_idx = txq->tail;
2402 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2403 txq->len);
2404 /* Use the tx-compl process logic to handle requests
2405 * that were not sent to the HW.
2406 */
2407 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2408 atomic_sub(num_wrbs, &txq->used);
2409 BUG_ON(atomic_read(&txq->used));
2410 txo->pend_wrb_cnt = 0;
2411 /* Since hw was never notified of these requests,
2412 * reset TXQ indices
2413 */
2414 txq->head = notified_idx;
2415 txq->tail = notified_idx;
0ae57bb3 2416 }
b03388d6 2417 }
2418}
2419
2420static void be_evt_queues_destroy(struct be_adapter *adapter)
2421{
2422 struct be_eq_obj *eqo;
2423 int i;
2424
2425 for_all_evt_queues(adapter, eqo, i) {
2426 if (eqo->q.created) {
2427 be_eq_clean(eqo);
10ef9ab4 2428 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2429 napi_hash_del(&eqo->napi);
68d7bdcb 2430 netif_napi_del(&eqo->napi);
19d59aa7 2431 }
d658d98a 2432 free_cpumask_var(eqo->affinity_mask);
2433 be_queue_free(adapter, &eqo->q);
2434 }
2435}
2436
2437static int be_evt_queues_create(struct be_adapter *adapter)
2438{
2439 struct be_queue_info *eq;
2440 struct be_eq_obj *eqo;
2632bafd 2441 struct be_aic_obj *aic;
2442 int i, rc;
2443
2444 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2445 adapter->cfg_num_qs);
2446
2447 for_all_evt_queues(adapter, eqo, i) {
2448 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2449 return -ENOMEM;
2450 cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev),
2451 eqo->affinity_mask);
2452
2453 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2454 BE_NAPI_WEIGHT);
6384a4d0 2455 napi_hash_add(&eqo->napi);
2632bafd 2456 aic = &adapter->aic_obj[i];
10ef9ab4 2457 eqo->adapter = adapter;
10ef9ab4 2458 eqo->idx = i;
2459 aic->max_eqd = BE_MAX_EQD;
2460 aic->enable = true;
2461
2462 eq = &eqo->q;
2463 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2464 sizeof(struct be_eq_entry));
2465 if (rc)
2466 return rc;
2467
f2f781a7 2468 rc = be_cmd_eq_create(adapter, eqo);
2469 if (rc)
2470 return rc;
2471 }
1cfafab9 2472 return 0;
2473}
2474
2475static void be_mcc_queues_destroy(struct be_adapter *adapter)
2476{
2477 struct be_queue_info *q;
5fb379ee 2478
8788fdc2 2479 q = &adapter->mcc_obj.q;
5fb379ee 2480 if (q->created)
8788fdc2 2481 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2482 be_queue_free(adapter, q);
2483
8788fdc2 2484 q = &adapter->mcc_obj.cq;
5fb379ee 2485 if (q->created)
8788fdc2 2486 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2487 be_queue_free(adapter, q);
2488}
2489
2490/* Must be called only after TX qs are created as MCC shares TX EQ */
2491static int be_mcc_queues_create(struct be_adapter *adapter)
2492{
2493 struct be_queue_info *q, *cq;
5fb379ee 2494
8788fdc2 2495 cq = &adapter->mcc_obj.cq;
5fb379ee 2496 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2497 sizeof(struct be_mcc_compl)))
2498 goto err;
2499
2500 /* Use the default EQ for MCC completions */
2501 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2502 goto mcc_cq_free;
2503
8788fdc2 2504 q = &adapter->mcc_obj.q;
2505 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2506 goto mcc_cq_destroy;
2507
8788fdc2 2508 if (be_cmd_mccq_create(adapter, q, cq))
2509 goto mcc_q_free;
2510
2511 return 0;
2512
2513mcc_q_free:
2514 be_queue_free(adapter, q);
2515mcc_cq_destroy:
8788fdc2 2516 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2517mcc_cq_free:
2518 be_queue_free(adapter, cq);
2519err:
2520 return -1;
2521}
2522
2523static void be_tx_queues_destroy(struct be_adapter *adapter)
2524{
2525 struct be_queue_info *q;
2526 struct be_tx_obj *txo;
2527 u8 i;
6b7c5b94 2528
2529 for_all_tx_queues(adapter, txo, i) {
2530 q = &txo->q;
2531 if (q->created)
2532 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2533 be_queue_free(adapter, q);
6b7c5b94 2534
2535 q = &txo->cq;
2536 if (q->created)
2537 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2538 be_queue_free(adapter, q);
2539 }
2540}
2541
7707133c 2542static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2543{
73f394e6 2544 struct be_queue_info *cq;
3c8def97 2545 struct be_tx_obj *txo;
73f394e6 2546 struct be_eq_obj *eqo;
92bf14ab 2547 int status, i;
6b7c5b94 2548
92bf14ab 2549 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2550
2551 for_all_tx_queues(adapter, txo, i) {
2552 cq = &txo->cq;
2553 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2554 sizeof(struct be_eth_tx_compl));
2555 if (status)
2556 return status;
3c8def97 2557
2558 u64_stats_init(&txo->stats.sync);
2559 u64_stats_init(&txo->stats.sync_compl);
2560
2561 /* If num_evt_qs is less than num_tx_qs, then more than
 2562	 * one txq shares an eq
2563 */
2564 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2565 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
2566 if (status)
2567 return status;
6b7c5b94 2568
2569 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2570 sizeof(struct be_eth_wrb));
2571 if (status)
2572 return status;
6b7c5b94 2573
94d73aaa 2574 status = be_cmd_txq_create(adapter, txo);
2575 if (status)
2576 return status;
2577
2578 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
2579 eqo->idx);
3c8def97 2580 }
6b7c5b94 2581
2582 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2583 adapter->num_tx_qs);
10ef9ab4 2584 return 0;
2585}
2586
10ef9ab4 2587static void be_rx_cqs_destroy(struct be_adapter *adapter)
2588{
2589 struct be_queue_info *q;
2590 struct be_rx_obj *rxo;
2591 int i;
2592
2593 for_all_rx_queues(adapter, rxo, i) {
2594 q = &rxo->cq;
2595 if (q->created)
2596 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2597 be_queue_free(adapter, q);
2598 }
2599}
2600
10ef9ab4 2601static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2602{
10ef9ab4 2603 struct be_queue_info *eq, *cq;
2604 struct be_rx_obj *rxo;
2605 int rc, i;
6b7c5b94 2606
92bf14ab 2607 /* We can create as many RSS rings as there are EQs. */
71bb8bd0 2608 adapter->num_rss_qs = adapter->num_evt_qs;
92bf14ab 2609
 2610	/* We'll use RSS only if at least 2 RSS rings are supported. */
2611 if (adapter->num_rss_qs <= 1)
2612 adapter->num_rss_qs = 0;
2613
2614 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
2615
2616 /* When the interface is not capable of RSS rings (and there is no
2617 * need to create a default RXQ) we'll still need one RXQ
10ef9ab4 2618 */
2619 if (adapter->num_rx_qs == 0)
2620 adapter->num_rx_qs = 1;
92bf14ab 2621
6b7c5b94 2622 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2623 for_all_rx_queues(adapter, rxo, i) {
2624 rxo->adapter = adapter;
2625 cq = &rxo->cq;
2626 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2627 sizeof(struct be_eth_rx_compl));
3abcdeda 2628 if (rc)
10ef9ab4 2629 return rc;
3abcdeda 2630
827da44c 2631 u64_stats_init(&rxo->stats.sync);
2632 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2633 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2634 if (rc)
10ef9ab4 2635 return rc;
3abcdeda 2636 }
6b7c5b94 2637
d379142b 2638 dev_info(&adapter->pdev->dev,
71bb8bd0 2639 "created %d RX queue(s)\n", adapter->num_rx_qs);
10ef9ab4 2640 return 0;
2641}
2642
2643static irqreturn_t be_intx(int irq, void *dev)
2644{
2645 struct be_eq_obj *eqo = dev;
2646 struct be_adapter *adapter = eqo->adapter;
2647 int num_evts = 0;
6b7c5b94 2648
2649 /* IRQ is not expected when NAPI is scheduled as the EQ
2650 * will not be armed.
2651 * But, this can happen on Lancer INTx where it takes
 2652	 * a while to de-assert INTx or in BE2 where occasionally
2653 * an interrupt may be raised even when EQ is unarmed.
2654 * If NAPI is already scheduled, then counting & notifying
2655 * events will orphan them.
e49cc34f 2656 */
d0b9cec3 2657 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2658 num_evts = events_get(eqo);
2659 __napi_schedule(&eqo->napi);
2660 if (num_evts)
2661 eqo->spurious_intr = 0;
2662 }
20947770 2663 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
e49cc34f 2664
 2665	/* Return IRQ_HANDLED only for the first spurious intr
2666 * after a valid intr to stop the kernel from branding
2667 * this irq as a bad one!
e49cc34f 2668 */
2669 if (num_evts || eqo->spurious_intr++ == 0)
2670 return IRQ_HANDLED;
2671 else
2672 return IRQ_NONE;
2673}
2674
10ef9ab4 2675static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2676{
10ef9ab4 2677 struct be_eq_obj *eqo = dev;
6b7c5b94 2678
20947770 2679 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
0b545a62 2680 napi_schedule(&eqo->napi);
2681 return IRQ_HANDLED;
2682}
2683
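/* GRO is attempted only for error-free TCP completions that passed the
 * L4 checksum check.
 */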
2e588f84 2684static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2685{
e38b1706 2686 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
2687}
2688
10ef9ab4 2689static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2690 int budget, int polling)
6b7c5b94 2691{
2692 struct be_adapter *adapter = rxo->adapter;
2693 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2694 struct be_rx_compl_info *rxcp;
6b7c5b94 2695 u32 work_done;
c30d7266 2696 u32 frags_consumed = 0;
2697
2698 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2699 rxcp = be_rx_compl_get(rxo);
2700 if (!rxcp)
2701 break;
2702
 2703	/* Is it a flush compl that has no data? */
2704 if (unlikely(rxcp->num_rcvd == 0))
2705 goto loop_continue;
2706
2707 /* Discard compl with partial DMA Lancer B0 */
2708 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2709 be_rx_compl_discard(rxo, rxcp);
2710 goto loop_continue;
2711 }
2712
2713 /* On BE drop pkts that arrive due to imperfect filtering in
 2714	 * promiscuous mode on some SKUs
2715 */
2716 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2717 !lancer_chip(adapter))) {
10ef9ab4 2718 be_rx_compl_discard(rxo, rxcp);
12004ae9 2719 goto loop_continue;
64642811 2720 }
009dd872 2721
2722 /* Don't do gro when we're busy_polling */
2723 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2724 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2725 else
2726 be_rx_compl_process(rxo, napi, rxcp);
2727
12004ae9 2728loop_continue:
c30d7266 2729 frags_consumed += rxcp->num_rcvd;
2e588f84 2730 be_rx_stats_update(rxo, rxcp);
2731 }
2732
2733 if (work_done) {
2734 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2735
2736 /* When an rx-obj gets into post_starved state, just
2737 * let be_worker do the posting.
2738 */
2739 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2740 !rxo->rx_post_starved)
2741 be_post_rx_frags(rxo, GFP_ATOMIC,
2742 max_t(u32, MAX_RX_POST,
2743 frags_consumed));
6b7c5b94 2744 }
10ef9ab4 2745
2746 return work_done;
2747}
2748
152ffe5b 2749static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
2750{
2751 switch (status) {
2752 case BE_TX_COMP_HDR_PARSE_ERR:
2753 tx_stats(txo)->tx_hdr_parse_err++;
2754 break;
2755 case BE_TX_COMP_NDMA_ERR:
2756 tx_stats(txo)->tx_dma_err++;
2757 break;
2758 case BE_TX_COMP_ACL_ERR:
2759 tx_stats(txo)->tx_spoof_check_err++;
2760 break;
2761 }
2762}
2763
152ffe5b 2764static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
512bb8a2
KA
2765{
2766 switch (status) {
2767 case LANCER_TX_COMP_LSO_ERR:
2768 tx_stats(txo)->tx_tso_err++;
2769 break;
2770 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2771 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2772 tx_stats(txo)->tx_spoof_check_err++;
2773 break;
2774 case LANCER_TX_COMP_QINQ_ERR:
2775 tx_stats(txo)->tx_qinq_err++;
2776 break;
2777 case LANCER_TX_COMP_PARITY_ERR:
2778 tx_stats(txo)->tx_internal_parity_err++;
2779 break;
2780 case LANCER_TX_COMP_DMA_ERR:
2781 tx_stats(txo)->tx_dma_err++;
2782 break;
2783 }
2784}
2785
2786static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2787 int idx)
6b7c5b94 2788{
c8f64615 2789 int num_wrbs = 0, work_done = 0;
152ffe5b 2790 struct be_tx_compl_info *txcp;
c8f64615 2791
2792 while ((txcp = be_tx_compl_get(txo))) {
2793 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
c8f64615 2794 work_done++;
3c8def97 2795
152ffe5b 2796 if (txcp->status) {
512bb8a2 2797 if (lancer_chip(adapter))
152ffe5b 2798 lancer_update_tx_err(txo, txcp->status);
512bb8a2 2799 else
152ffe5b 2800 be_update_tx_err(txo, txcp->status);
512bb8a2 2801 }
10ef9ab4 2802 }
6b7c5b94 2803
2804 if (work_done) {
2805 be_cq_notify(adapter, txo->cq.id, true, work_done);
2806 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2807
2808 /* As Tx wrbs have been freed up, wake up netdev queue
2809 * if it was stopped due to lack of tx wrbs. */
2810 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
cf5671e6 2811 be_can_txq_wake(txo)) {
10ef9ab4 2812 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2813 }
2814
2815 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2816 tx_stats(txo)->tx_compl += work_done;
2817 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2818 }
10ef9ab4 2819}
6b7c5b94 2820
2821#ifdef CONFIG_NET_RX_BUSY_POLL
2822static inline bool be_lock_napi(struct be_eq_obj *eqo)
2823{
2824 bool status = true;
2825
2826 spin_lock(&eqo->lock); /* BH is already disabled */
2827 if (eqo->state & BE_EQ_LOCKED) {
2828 WARN_ON(eqo->state & BE_EQ_NAPI);
2829 eqo->state |= BE_EQ_NAPI_YIELD;
2830 status = false;
2831 } else {
2832 eqo->state = BE_EQ_NAPI;
2833 }
2834 spin_unlock(&eqo->lock);
2835 return status;
2836}
2837
2838static inline void be_unlock_napi(struct be_eq_obj *eqo)
2839{
2840 spin_lock(&eqo->lock); /* BH is already disabled */
2841
2842 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
2843 eqo->state = BE_EQ_IDLE;
2844
2845 spin_unlock(&eqo->lock);
2846}
2847
2848static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2849{
2850 bool status = true;
2851
2852 spin_lock_bh(&eqo->lock);
2853 if (eqo->state & BE_EQ_LOCKED) {
2854 eqo->state |= BE_EQ_POLL_YIELD;
2855 status = false;
2856 } else {
2857 eqo->state |= BE_EQ_POLL;
2858 }
2859 spin_unlock_bh(&eqo->lock);
2860 return status;
2861}
2862
2863static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2864{
2865 spin_lock_bh(&eqo->lock);
2866
2867 WARN_ON(eqo->state & (BE_EQ_NAPI));
2868 eqo->state = BE_EQ_IDLE;
2869
2870 spin_unlock_bh(&eqo->lock);
2871}
2872
2873static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2874{
2875 spin_lock_init(&eqo->lock);
2876 eqo->state = BE_EQ_IDLE;
2877}
2878
2879static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2880{
2881 local_bh_disable();
2882
2883 /* It's enough to just acquire napi lock on the eqo to stop
 2884	 * be_busy_poll() from processing any queues.
2885 */
2886 while (!be_lock_napi(eqo))
2887 mdelay(1);
2888
2889 local_bh_enable();
2890}
2891
2892#else /* CONFIG_NET_RX_BUSY_POLL */
2893
2894static inline bool be_lock_napi(struct be_eq_obj *eqo)
2895{
2896 return true;
2897}
2898
2899static inline void be_unlock_napi(struct be_eq_obj *eqo)
2900{
2901}
2902
2903static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2904{
2905 return false;
2906}
2907
2908static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2909{
2910}
2911
2912static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2913{
2914}
2915
2916static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2917{
2918}
2919#endif /* CONFIG_NET_RX_BUSY_POLL */
2920
68d7bdcb 2921int be_poll(struct napi_struct *napi, int budget)
2922{
2923 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2924 struct be_adapter *adapter = eqo->adapter;
0b545a62 2925 int max_work = 0, work, i, num_evts;
6384a4d0 2926 struct be_rx_obj *rxo;
a4906ea0 2927 struct be_tx_obj *txo;
20947770 2928 u32 mult_enc = 0;
f31e50a8 2929
2930 num_evts = events_get(eqo);
2931
2932 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
2933 be_process_tx(adapter, txo, i);
f31e50a8 2934
2935 if (be_lock_napi(eqo)) {
2936 /* This loop will iterate twice for EQ0 in which
2937 * completions of the last RXQ (default one) are also processed
2938 * For other EQs the loop iterates only once
2939 */
2940 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2941 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2942 max_work = max(work, max_work);
2943 }
2944 be_unlock_napi(eqo);
2945 } else {
2946 max_work = budget;
10ef9ab4 2947 }
6b7c5b94 2948
2949 if (is_mcc_eqo(eqo))
2950 be_process_mcc(adapter);
93c86700 2951
2952 if (max_work < budget) {
2953 napi_complete(napi);
2954
 2955		/* Skyhawk EQ_DB has a provision to set the rearm-to-interrupt
 2956		 * delay via a delay multiplier encoding value
2957 */
2958 if (skyhawk_chip(adapter))
2959 mult_enc = be_get_eq_delay_mult_enc(eqo);
2960
2961 be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
2962 mult_enc);
2963 } else {
2964 /* As we'll continue in polling mode, count and clear events */
20947770 2965 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
93c86700 2966 }
10ef9ab4 2967 return max_work;
2968}
2969
2970#ifdef CONFIG_NET_RX_BUSY_POLL
2971static int be_busy_poll(struct napi_struct *napi)
2972{
2973 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2974 struct be_adapter *adapter = eqo->adapter;
2975 struct be_rx_obj *rxo;
2976 int i, work = 0;
2977
2978 if (!be_lock_busy_poll(eqo))
2979 return LL_FLUSH_BUSY;
2980
2981 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2982 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2983 if (work)
2984 break;
2985 }
2986
2987 be_unlock_busy_poll(eqo);
2988 return work;
2989}
2990#endif
2991
f67ef7ba 2992void be_detect_error(struct be_adapter *adapter)
7c185276 2993{
2994 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2995 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 2996 u32 i;
2997 bool error_detected = false;
2998 struct device *dev = &adapter->pdev->dev;
2999 struct net_device *netdev = adapter->netdev;
7c185276 3000
d23e946c 3001 if (be_hw_error(adapter))
3002 return;
3003
3004 if (lancer_chip(adapter)) {
3005 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3006 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3007 sliport_err1 = ioread32(adapter->db +
748b539a 3008 SLIPORT_ERROR1_OFFSET);
e1cfb67a 3009 sliport_err2 = ioread32(adapter->db +
748b539a 3010 SLIPORT_ERROR2_OFFSET);
eb0eecc1 3011 adapter->hw_error = true;
d0e1b319 3012 error_detected = true;
 3013			/* Do not log error messages if it's a FW reset */
3014 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3015 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3016 dev_info(dev, "Firmware update in progress\n");
3017 } else {
3018 dev_err(dev, "Error detected in the card\n");
3019 dev_err(dev, "ERR: sliport status 0x%x\n",
3020 sliport_status);
3021 dev_err(dev, "ERR: sliport error1 0x%x\n",
3022 sliport_err1);
3023 dev_err(dev, "ERR: sliport error2 0x%x\n",
3024 sliport_err2);
3025 }
3026 }
3027 } else {
3028 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3029 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3030 ue_lo_mask = ioread32(adapter->pcicfg +
3031 PCICFG_UE_STATUS_LOW_MASK);
3032 ue_hi_mask = ioread32(adapter->pcicfg +
3033 PCICFG_UE_STATUS_HI_MASK);
e1cfb67a 3034
3035 ue_lo = (ue_lo & ~ue_lo_mask);
3036 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 3037
3038 /* On certain platforms BE hardware can indicate spurious UEs.
3039 * Allow HW to stop working completely in case of a real UE.
3040 * Hence not setting the hw_error for UE detection.
3041 */
f67ef7ba 3042
3043 if (ue_lo || ue_hi) {
3044 error_detected = true;
3045 dev_err(dev,
3046 "Unrecoverable Error detected in the adapter");
3047 dev_err(dev, "Please reboot server to recover");
3048 if (skyhawk_chip(adapter))
3049 adapter->hw_error = true;
3050 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3051 if (ue_lo & 1)
3052 dev_err(dev, "UE: %s bit set\n",
3053 ue_status_low_desc[i]);
3054 }
3055 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3056 if (ue_hi & 1)
3057 dev_err(dev, "UE: %s bit set\n",
3058 ue_status_hi_desc[i]);
3059 }
3060 }
3061 }
3062 if (error_detected)
3063 netif_carrier_off(netdev);
3064}
3065
3066static void be_msix_disable(struct be_adapter *adapter)
3067{
ac6a0c4a 3068 if (msix_enabled(adapter)) {
8d56ff11 3069 pci_disable_msix(adapter->pdev);
ac6a0c4a 3070 adapter->num_msix_vec = 0;
68d7bdcb 3071 adapter->num_msix_roce_vec = 0;
3072 }
3073}
3074
c2bba3df 3075static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 3076{
7dc4c064 3077 int i, num_vec;
d379142b 3078 struct device *dev = &adapter->pdev->dev;
6b7c5b94 3079
3080 /* If RoCE is supported, program the max number of NIC vectors that
3081 * may be configured via set-channels, along with vectors needed for
3082 * RoCe. Else, just program the number we'll use initially.
3083 */
3084 if (be_roce_supported(adapter))
3085 num_vec = min_t(int, 2 * be_max_eqs(adapter),
3086 2 * num_online_cpus());
3087 else
3088 num_vec = adapter->cfg_num_qs;
3abcdeda 3089
ac6a0c4a 3090 for (i = 0; i < num_vec; i++)
3091 adapter->msix_entries[i].entry = i;
3092
3093 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3094 MIN_MSIX_VECTORS, num_vec);
3095 if (num_vec < 0)
3096 goto fail;
92bf14ab 3097
3098 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3099 adapter->num_msix_roce_vec = num_vec / 2;
3100 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3101 adapter->num_msix_roce_vec);
3102 }
3103
3104 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3105
3106 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3107 adapter->num_msix_vec);
c2bba3df 3108 return 0;
3109
3110fail:
3111 dev_warn(dev, "MSIx enable failed\n");
3112
3113 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
3114 if (!be_physfn(adapter))
3115 return num_vec;
3116 return 0;
3117}
3118
fe6d2a38 3119static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 3120 struct be_eq_obj *eqo)
b628bde2 3121{
f2f781a7 3122 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 3123}
6b7c5b94 3124
3125static int be_msix_register(struct be_adapter *adapter)
3126{
3127 struct net_device *netdev = adapter->netdev;
3128 struct be_eq_obj *eqo;
3129 int status, i, vec;
6b7c5b94 3130
3131 for_all_evt_queues(adapter, eqo, i) {
3132 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3133 vec = be_msix_vec_get(adapter, eqo);
3134 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3135 if (status)
3136 goto err_msix;
3137
3138 irq_set_affinity_hint(vec, eqo->affinity_mask);
3abcdeda 3139 }
b628bde2 3140
6b7c5b94 3141 return 0;
3abcdeda 3142err_msix:
3143 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
3144 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3145 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 3146 status);
ac6a0c4a 3147 be_msix_disable(adapter);
3148 return status;
3149}
3150
3151static int be_irq_register(struct be_adapter *adapter)
3152{
3153 struct net_device *netdev = adapter->netdev;
3154 int status;
3155
ac6a0c4a 3156 if (msix_enabled(adapter)) {
3157 status = be_msix_register(adapter);
3158 if (status == 0)
3159 goto done;
3160 /* INTx is not supported for VF */
3161 if (!be_physfn(adapter))
3162 return status;
3163 }
3164
e49cc34f 3165 /* INTx: only the first EQ is used */
3166 netdev->irq = adapter->pdev->irq;
3167 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 3168 &adapter->eq_obj[0]);
3169 if (status) {
3170 dev_err(&adapter->pdev->dev,
3171 "INTx request IRQ failed - err %d\n", status);
3172 return status;
3173 }
3174done:
3175 adapter->isr_registered = true;
3176 return 0;
3177}
3178
3179static void be_irq_unregister(struct be_adapter *adapter)
3180{
3181 struct net_device *netdev = adapter->netdev;
10ef9ab4 3182 struct be_eq_obj *eqo;
d658d98a 3183 int i, vec;
3184
3185 if (!adapter->isr_registered)
3186 return;
3187
3188 /* INTx */
ac6a0c4a 3189 if (!msix_enabled(adapter)) {
e49cc34f 3190 free_irq(netdev->irq, &adapter->eq_obj[0]);
3191 goto done;
3192 }
3193
3194 /* MSIx */
3195 for_all_evt_queues(adapter, eqo, i) {
3196 vec = be_msix_vec_get(adapter, eqo);
3197 irq_set_affinity_hint(vec, NULL);
3198 free_irq(vec, eqo);
3199 }
3abcdeda 3200
3201done:
3202 adapter->isr_registered = false;
3203}
3204
10ef9ab4 3205static void be_rx_qs_destroy(struct be_adapter *adapter)
3206{
3207 struct be_queue_info *q;
3208 struct be_rx_obj *rxo;
3209 int i;
3210
3211 for_all_rx_queues(adapter, rxo, i) {
3212 q = &rxo->q;
3213 if (q->created) {
3214 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 3215 be_rx_cq_clean(rxo);
482c9e79 3216 }
10ef9ab4 3217 be_queue_free(adapter, q);
3218 }
3219}
3220
3221static int be_close(struct net_device *netdev)
3222{
3223 struct be_adapter *adapter = netdev_priv(netdev);
3224 struct be_eq_obj *eqo;
3225 int i;
889cd4b2 3226
3227 /* This protection is needed as be_close() may be called even when the
3228 * adapter is in cleared state (after eeh perm failure)
3229 */
3230 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3231 return 0;
3232
3233 be_roce_dev_close(adapter);
3234
3235 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3236 for_all_evt_queues(adapter, eqo, i) {
04d3d624 3237 napi_disable(&eqo->napi);
3238 be_disable_busy_poll(eqo);
3239 }
71237b6f 3240 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 3241 }
3242
3243 be_async_mcc_disable(adapter);
3244
3245 /* Wait for all pending tx completions to arrive so that
3246 * all tx skbs are freed.
3247 */
fba87559 3248 netif_tx_disable(netdev);
6e1f9975 3249 be_tx_compl_clean(adapter);
3250
3251 be_rx_qs_destroy(adapter);
f66b7cfd 3252 be_clear_uc_list(adapter);
d11a347d 3253
a323d9bf 3254 for_all_evt_queues(adapter, eqo, i) {
3255 if (msix_enabled(adapter))
3256 synchronize_irq(be_msix_vec_get(adapter, eqo));
3257 else
3258 synchronize_irq(netdev->irq);
3259 be_eq_clean(eqo);
3260 }
3261
3262 be_irq_unregister(adapter);
3263
3264 return 0;
3265}
3266
10ef9ab4 3267static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 3268{
3269 struct rss_info *rss = &adapter->rss_info;
3270 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 3271 struct be_rx_obj *rxo;
e9008ee9 3272 int rc, i, j;
3273
3274 for_all_rx_queues(adapter, rxo, i) {
3275 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3276 sizeof(struct be_eth_rx_d));
3277 if (rc)
3278 return rc;
3279 }
3280
3281 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3282 rxo = default_rxo(adapter);
3283 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3284 rx_frag_size, adapter->if_handle,
3285 false, &rxo->rss_id);
3286 if (rc)
3287 return rc;
3288 }
3289
3290 for_all_rss_queues(adapter, rxo, i) {
482c9e79 3291 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3292 rx_frag_size, adapter->if_handle,
3293 true, &rxo->rss_id);
3294 if (rc)
3295 return rc;
3296 }
3297
3298 if (be_multi_rxq(adapter)) {
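		/* Fill the 128-entry RSS indirection table by repeating the
		 * ring ids round-robin, e.g. with 4 rings: 0 1 2 3 0 1 2 3...
		 */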
71bb8bd0 3299 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
e9008ee9 3300 for_all_rss_queues(adapter, rxo, i) {
e2557877 3301 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 3302 break;
e2557877
VD
3303 rss->rsstable[j + i] = rxo->rss_id;
3304 rss->rss_queue[j + i] = i;
e9008ee9
PR
3305 }
3306 }
e2557877
VD
3307 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3308 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
3309
3310 if (!BEx_chip(adapter))
e2557877
VD
3311 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3312 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
3313 } else {
 3314 /* Disable RSS if only the default RX queue is created */
e2557877 3315 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3316 }
594ad54a 3317
1dcf7b1c 3318 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
748b539a 3319 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
1dcf7b1c 3320 128, rss_key);
da1388d6 3321 if (rc) {
e2557877 3322 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3323 return rc;
482c9e79
SP
3324 }
3325
1dcf7b1c 3326 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
e2557877 3327
482c9e79 3328 /* First time posting */
10ef9ab4 3329 for_all_rx_queues(adapter, rxo, i)
c30d7266 3330 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
889cd4b2
SP
3331 return 0;
3332}
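
/* Illustration only (not driver code): a minimal sketch of how the nested
 * loops above populate the RSS indirection table. Queue ids are laid out
 * round-robin until all RSS_INDIR_TABLE_LEN (128) slots are filled.
 * example_fill_indir_table() and its arguments are hypothetical names
 * used only for this sketch.
 */
static void example_fill_indir_table(u8 *indir, int table_len,
				     const u8 *rss_ids, int num_rss_qs)
{
	int i;

	/* With 4 RSS queues this yields 0 1 2 3 0 1 2 3 ... (128 entries) */
	for (i = 0; i < table_len; i++)
		indir[i] = rss_ids[i % num_rss_qs];
}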
3333
6b7c5b94
SP
3334static int be_open(struct net_device *netdev)
3335{
3336 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3337 struct be_eq_obj *eqo;
3abcdeda 3338 struct be_rx_obj *rxo;
10ef9ab4 3339 struct be_tx_obj *txo;
b236916a 3340 u8 link_status;
3abcdeda 3341 int status, i;
5fb379ee 3342
10ef9ab4 3343 status = be_rx_qs_create(adapter);
482c9e79
SP
3344 if (status)
3345 goto err;
3346
c2bba3df
SK
3347 status = be_irq_register(adapter);
3348 if (status)
3349 goto err;
5fb379ee 3350
10ef9ab4 3351 for_all_rx_queues(adapter, rxo, i)
3abcdeda 3352 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 3353
10ef9ab4
SP
3354 for_all_tx_queues(adapter, txo, i)
3355 be_cq_notify(adapter, txo->cq.id, true, 0);
3356
7a1e9b20
SP
3357 be_async_mcc_enable(adapter);
3358
10ef9ab4
SP
3359 for_all_evt_queues(adapter, eqo, i) {
3360 napi_enable(&eqo->napi);
6384a4d0 3361 be_enable_busy_poll(eqo);
20947770 3362 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
10ef9ab4 3363 }
04d3d624 3364 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 3365
323ff71e 3366 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
3367 if (!status)
3368 be_link_status_update(adapter, link_status);
3369
fba87559 3370 netif_tx_start_all_queues(netdev);
045508a8 3371 be_roce_dev_open(adapter);
c9c47142 3372
c5abe7c0 3373#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3374 if (skyhawk_chip(adapter))
3375 vxlan_get_rx_port(netdev);
c5abe7c0
SP
3376#endif
3377
889cd4b2
SP
3378 return 0;
3379err:
3380 be_close(adapter->netdev);
3381 return -EIO;
5fb379ee
SP
3382}
3383
71d8d1b5
AK
3384static int be_setup_wol(struct be_adapter *adapter, bool enable)
3385{
3386 struct be_dma_mem cmd;
3387 int status = 0;
3388 u8 mac[ETH_ALEN];
3389
c7bf7169 3390 eth_zero_addr(mac);
71d8d1b5
AK
3391
3392 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
3393 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3394 GFP_KERNEL);
ddf1169f 3395 if (!cmd.va)
6b568689 3396 return -ENOMEM;
71d8d1b5
AK
3397
3398 if (enable) {
3399 status = pci_write_config_dword(adapter->pdev,
748b539a
SP
3400 PCICFG_PM_CONTROL_OFFSET,
3401 PCICFG_PM_CONTROL_MASK);
71d8d1b5
AK
3402 if (status) {
3403 dev_err(&adapter->pdev->dev,
2381a55c 3404 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
3405 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3406 cmd.dma);
71d8d1b5
AK
3407 return status;
3408 }
3409 status = be_cmd_enable_magic_wol(adapter,
748b539a
SP
3410 adapter->netdev->dev_addr,
3411 &cmd);
71d8d1b5
AK
3412 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3413 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3414 } else {
3415 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3416 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3417 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3418 }
3419
2b7bcebf 3420 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
3421 return status;
3422}
3423
f7062ee5
SP
3424static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3425{
3426 u32 addr;
3427
3428 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3429
3430 mac[5] = (u8)(addr & 0xFF);
3431 mac[4] = (u8)((addr >> 8) & 0xFF);
3432 mac[3] = (u8)((addr >> 16) & 0xFF);
3433 /* Use the OUI from the current MAC address */
3434 memcpy(mac, adapter->netdev->dev_addr, 3);
3435}
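
/* Illustration only (hypothetical helper): how a per-VF address is derived
 * from the seed generated above. With a PF MAC of 00:00:c9:12:34:56 and
 * jhash() returning 0x00abcdef, the seed is 00:00:c9:ab:cd:ef (OUI kept,
 * low 3 bytes from the hash); be_vf_eth_addr_config() below then
 * increments mac[5] once per VF.
 */
static void example_vf_mac(const u8 *seed, int vf_num, u8 *out)
{
	memcpy(out, seed, ETH_ALEN);
	out[5] += vf_num;	/* VF 2 gets ...:cd:f1, assuming < 256 VFs */
}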
3436
6d87f5c3
AK
3437/*
3438 * Generate a seed MAC address from the PF MAC Address using jhash.
 3439 * MAC addresses for VFs are assigned incrementally starting from the seed.
3440 * These addresses are programmed in the ASIC by the PF and the VF driver
3441 * queries for the MAC address during its probe.
3442 */
4c876616 3443static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3444{
f9449ab7 3445 u32 vf;
3abcdeda 3446 int status = 0;
6d87f5c3 3447 u8 mac[ETH_ALEN];
11ac75ed 3448 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3449
3450 be_vf_eth_addr_generate(adapter, mac);
3451
11ac75ed 3452 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3453 if (BEx_chip(adapter))
590c391d 3454 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3455 vf_cfg->if_handle,
3456 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3457 else
3458 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3459 vf + 1);
590c391d 3460
6d87f5c3
AK
3461 if (status)
3462 dev_err(&adapter->pdev->dev,
748b539a
SP
3463 "Mac address assignment failed for VF %d\n",
3464 vf);
6d87f5c3 3465 else
11ac75ed 3466 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3467
3468 mac[5] += 1;
3469 }
3470 return status;
3471}
3472
4c876616
SP
3473static int be_vfs_mac_query(struct be_adapter *adapter)
3474{
3475 int status, vf;
3476 u8 mac[ETH_ALEN];
3477 struct be_vf_cfg *vf_cfg;
4c876616
SP
3478
3479 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3480 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3481 mac, vf_cfg->if_handle,
3482 false, vf+1);
4c876616
SP
3483 if (status)
3484 return status;
3485 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3486 }
3487 return 0;
3488}
3489
f9449ab7 3490static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3491{
11ac75ed 3492 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3493 u32 vf;
3494
257a3feb 3495 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
3496 dev_warn(&adapter->pdev->dev,
3497 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
3498 goto done;
3499 }
3500
b4c1df93
SP
3501 pci_disable_sriov(adapter->pdev);
3502
11ac75ed 3503 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3504 if (BEx_chip(adapter))
11ac75ed
SP
3505 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3506 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3507 else
3508 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3509 vf + 1);
f9449ab7 3510
11ac75ed
SP
3511 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3512 }
39f1d94d
SP
3513done:
3514 kfree(adapter->vf_cfg);
3515 adapter->num_vfs = 0;
f174c7ec 3516 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
6d87f5c3
AK
3517}
3518
7707133c
SP
3519static void be_clear_queues(struct be_adapter *adapter)
3520{
3521 be_mcc_queues_destroy(adapter);
3522 be_rx_cqs_destroy(adapter);
3523 be_tx_queues_destroy(adapter);
3524 be_evt_queues_destroy(adapter);
3525}
3526
68d7bdcb 3527static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3528{
191eb756
SP
3529 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3530 cancel_delayed_work_sync(&adapter->work);
3531 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3532 }
68d7bdcb
SP
3533}
3534
eb7dd46c
SP
3535static void be_cancel_err_detection(struct be_adapter *adapter)
3536{
3537 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3538 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3539 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3540 }
3541}
3542
b05004ad 3543static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb 3544{
b05004ad 3545 if (adapter->pmac_id) {
f66b7cfd
SP
3546 be_cmd_pmac_del(adapter, adapter->if_handle,
3547 adapter->pmac_id[0], 0);
b05004ad
SK
3548 kfree(adapter->pmac_id);
3549 adapter->pmac_id = NULL;
3550 }
3551}
3552
c5abe7c0 3553#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3554static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3555{
630f4b70
SB
3556 struct net_device *netdev = adapter->netdev;
3557
c9c47142
SP
3558 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3559 be_cmd_manage_iface(adapter, adapter->if_handle,
3560 OP_CONVERT_TUNNEL_TO_NORMAL);
3561
3562 if (adapter->vxlan_port)
3563 be_cmd_set_vxlan_port(adapter, 0);
3564
3565 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3566 adapter->vxlan_port = 0;
630f4b70
SB
3567
3568 netdev->hw_enc_features = 0;
3569 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
ac9a3d84 3570 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
c9c47142 3571}
c5abe7c0 3572#endif
c9c47142 3573
f2858738
VV
3574static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3575{
3576 struct be_resources res = adapter->pool_res;
3577 u16 num_vf_qs = 1;
3578
 3579 /* Distribute the queue resources equally among the PF and its VFs.
 3580 * Do not distribute queue resources in a multi-channel configuration.
 3581 */
3582 if (num_vfs && !be_is_mc(adapter)) {
 3583 /* If the number of VFs requested is at least 8 less than the
 3584 * max supported, assign 8 queue pairs to the PF and divide the
 3585 * remaining resources evenly among the VFs.
 3586 */
3587 if (num_vfs < (be_max_vfs(adapter) - 8))
3588 num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
3589 else
3590 num_vf_qs = res.max_rss_qs / num_vfs;
3591
3592 /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
3593 * interfaces per port. Provide RSS on VFs, only if number
3594 * of VFs requested is less than MAX_RSS_IFACES limit.
3595 */
3596 if (num_vfs >= MAX_RSS_IFACES)
3597 num_vf_qs = 1;
3598 }
3599 return num_vf_qs;
3600}
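
/* Worked example (hypothetical numbers, non-multi-channel config): with
 * res.max_rss_qs = 40 and be_max_vfs() = 32, a request for 16 VFs
 * (16 < 32 - 8) leaves the PF 8 queue pairs and gives each VF
 * (40 - 8) / 16 = 2; a request for 30 VFs falls into the else branch,
 * 40 / 30 = 1. Requests of MAX_RSS_IFACES or more VFs are always capped
 * at 1 queue per VF.
 */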
3601
b05004ad
SK
3602static int be_clear(struct be_adapter *adapter)
3603{
f2858738
VV
3604 struct pci_dev *pdev = adapter->pdev;
3605 u16 num_vf_qs;
3606
68d7bdcb 3607 be_cancel_worker(adapter);
191eb756 3608
11ac75ed 3609 if (sriov_enabled(adapter))
f9449ab7
SP
3610 be_vf_clear(adapter);
3611
bec84e6b
VV
3612 /* Re-configure FW to distribute resources evenly across max-supported
3613 * number of VFs, only when VFs are not already enabled.
3614 */
ace40aff
VV
3615 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
3616 !pci_vfs_assigned(pdev)) {
f2858738
VV
3617 num_vf_qs = be_calculate_vf_qs(adapter,
3618 pci_sriov_get_totalvfs(pdev));
bec84e6b 3619 be_cmd_set_sriov_config(adapter, adapter->pool_res,
f2858738
VV
3620 pci_sriov_get_totalvfs(pdev),
3621 num_vf_qs);
3622 }
bec84e6b 3623
c5abe7c0 3624#ifdef CONFIG_BE2NET_VXLAN
c9c47142 3625 be_disable_vxlan_offloads(adapter);
c5abe7c0 3626#endif
2d17f403 3627 /* Delete the primary MAC along with the uc-mac list */
b05004ad 3628 be_mac_clear(adapter);
fbc13f01 3629
f9449ab7 3630 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 3631
7707133c 3632 be_clear_queues(adapter);
a54769f5 3633
10ef9ab4 3634 be_msix_disable(adapter);
e1ad8e33 3635 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
a54769f5
SP
3636 return 0;
3637}
3638
0700d816
KA
3639static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3640 u32 cap_flags, u32 vf)
3641{
3642 u32 en_flags;
0700d816
KA
3643
3644 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3645 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
71bb8bd0 3646 BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
0700d816
KA
3647
3648 en_flags &= cap_flags;
3649
435452aa 3650 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
0700d816
KA
3651}
3652
4c876616 3653static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3654{
92bf14ab 3655 struct be_resources res = {0};
4c876616 3656 struct be_vf_cfg *vf_cfg;
0700d816
KA
3657 u32 cap_flags, vf;
3658 int status;
abb93951 3659
0700d816 3660 /* If a FW profile exists, then cap_flags are updated */
4c876616 3661 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
0ed7d749 3662 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
abb93951 3663
4c876616 3664 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
3665 if (!BE3_chip(adapter)) {
3666 status = be_cmd_get_profile_config(adapter, &res,
f2858738 3667 RESOURCE_LIMITS,
92bf14ab 3668 vf + 1);
435452aa 3669 if (!status) {
92bf14ab 3670 cap_flags = res.if_cap_flags;
435452aa
VV
3671 /* Prevent VFs from enabling VLAN promiscuous
3672 * mode
3673 */
3674 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3675 }
92bf14ab 3676 }
4c876616 3677
0700d816
KA
3678 status = be_if_create(adapter, &vf_cfg->if_handle,
3679 cap_flags, vf + 1);
4c876616 3680 if (status)
0700d816 3681 return status;
4c876616 3682 }
0700d816
KA
3683
3684 return 0;
abb93951
PR
3685}
3686
39f1d94d 3687static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3688{
11ac75ed 3689 struct be_vf_cfg *vf_cfg;
30128031
SP
3690 int vf;
3691
39f1d94d
SP
3692 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3693 GFP_KERNEL);
3694 if (!adapter->vf_cfg)
3695 return -ENOMEM;
3696
11ac75ed
SP
3697 for_all_vfs(adapter, vf_cfg, vf) {
3698 vf_cfg->if_handle = -1;
3699 vf_cfg->pmac_id = -1;
30128031 3700 }
39f1d94d 3701 return 0;
30128031
SP
3702}
3703
f9449ab7
SP
3704static int be_vf_setup(struct be_adapter *adapter)
3705{
c502224e 3706 struct device *dev = &adapter->pdev->dev;
11ac75ed 3707 struct be_vf_cfg *vf_cfg;
4c876616 3708 int status, old_vfs, vf;
e7bcbd7b 3709 bool spoofchk;
39f1d94d 3710
257a3feb 3711 old_vfs = pci_num_vf(adapter->pdev);
39f1d94d
SP
3712
3713 status = be_vf_setup_init(adapter);
3714 if (status)
3715 goto err;
30128031 3716
4c876616
SP
3717 if (old_vfs) {
3718 for_all_vfs(adapter, vf_cfg, vf) {
3719 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3720 if (status)
3721 goto err;
3722 }
f9449ab7 3723
4c876616
SP
3724 status = be_vfs_mac_query(adapter);
3725 if (status)
3726 goto err;
3727 } else {
bec84e6b
VV
3728 status = be_vfs_if_create(adapter);
3729 if (status)
3730 goto err;
3731
39f1d94d
SP
3732 status = be_vf_eth_addr_config(adapter);
3733 if (status)
3734 goto err;
3735 }
f9449ab7 3736
11ac75ed 3737 for_all_vfs(adapter, vf_cfg, vf) {
04a06028 3738 /* Allow VFs to program MAC/VLAN filters */
435452aa
VV
3739 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
3740 vf + 1);
3741 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
04a06028 3742 status = be_cmd_set_fn_privileges(adapter,
435452aa 3743 vf_cfg->privileges |
04a06028
SP
3744 BE_PRIV_FILTMGMT,
3745 vf + 1);
435452aa
VV
3746 if (!status) {
3747 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
04a06028
SP
3748 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3749 vf);
435452aa 3750 }
04a06028
SP
3751 }
3752
0f77ba73
RN
3753 /* Allow full available bandwidth */
3754 if (!old_vfs)
3755 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 3756
e7bcbd7b
KA
3757 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
3758 vf_cfg->if_handle, NULL,
3759 &spoofchk);
3760 if (!status)
3761 vf_cfg->spoofchk = spoofchk;
3762
bdce2ad7 3763 if (!old_vfs) {
0599863d 3764 be_cmd_enable_vf(adapter, vf + 1);
bdce2ad7
SR
3765 be_cmd_set_logical_link_config(adapter,
3766 IFLA_VF_LINK_STATE_AUTO,
3767 vf+1);
3768 }
f9449ab7 3769 }
b4c1df93
SP
3770
3771 if (!old_vfs) {
3772 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3773 if (status) {
3774 dev_err(dev, "SRIOV enable failed\n");
3775 adapter->num_vfs = 0;
3776 goto err;
3777 }
3778 }
f174c7ec
VV
3779
3780 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
f9449ab7
SP
3781 return 0;
3782err:
4c876616
SP
3783 dev_err(dev, "VF setup failed\n");
3784 be_vf_clear(adapter);
f9449ab7
SP
3785 return status;
3786}
3787
f93f160b
VV
3788/* Converting function_mode bits on BE3 to SH mc_type enums */
3789
3790static u8 be_convert_mc_type(u32 function_mode)
3791{
66064dbc 3792 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 3793 return vNIC1;
66064dbc 3794 else if (function_mode & QNQ_MODE)
f93f160b
VV
3795 return FLEX10;
3796 else if (function_mode & VNIC_MODE)
3797 return vNIC2;
3798 else if (function_mode & UMC_ENABLED)
3799 return UMC;
3800 else
3801 return MC_NONE;
3802}
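
/* Decode table for the checks above (function_mode bits -> mc_type):
 *   VNIC_MODE | QNQ_MODE -> vNIC1
 *   QNQ_MODE only        -> FLEX10
 *   VNIC_MODE only       -> vNIC2
 *   UMC_ENABLED          -> UMC
 *   none set             -> MC_NONE
 */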
3803
92bf14ab
SP
3804/* On BE2/BE3 FW does not suggest the supported limits */
3805static void BEx_get_resources(struct be_adapter *adapter,
3806 struct be_resources *res)
3807{
bec84e6b 3808 bool use_sriov = adapter->num_vfs ? 1 : 0;
92bf14ab
SP
3809
3810 if (be_physfn(adapter))
3811 res->max_uc_mac = BE_UC_PMAC_COUNT;
3812 else
3813 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3814
f93f160b
VV
3815 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3816
3817 if (be_is_mc(adapter)) {
 3818 /* Assuming that there are 4 channels per port
 3819 * when multi-channel is enabled
 3820 */
3821 if (be_is_qnq_mode(adapter))
3822 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3823 else
3824 /* In a non-qnq multichannel mode, the pvid
3825 * takes up one vlan entry
3826 */
3827 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3828 } else {
92bf14ab 3829 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
f93f160b
VV
3830 }
3831
92bf14ab
SP
3832 res->max_mcast_mac = BE_MAX_MC;
3833
a5243dab
VV
3834 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3835 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3836 * *only* if it is RSS-capable.
3837 */
3838 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3839 !be_physfn(adapter) || (be_is_mc(adapter) &&
a28277dc 3840 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
92bf14ab 3841 res->max_tx_qs = 1;
a28277dc
SR
3842 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
3843 struct be_resources super_nic_res = {0};
3844
3845 /* On a SuperNIC profile, the driver needs to use the
3846 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
3847 */
f2858738
VV
3848 be_cmd_get_profile_config(adapter, &super_nic_res,
3849 RESOURCE_LIMITS, 0);
a28277dc
SR
3850 /* Some old versions of BE3 FW don't report max_tx_qs value */
3851 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
3852 } else {
92bf14ab 3853 res->max_tx_qs = BE3_MAX_TX_QS;
a28277dc 3854 }
92bf14ab
SP
3855
3856 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3857 !use_sriov && be_physfn(adapter))
3858 res->max_rss_qs = (adapter->be3_native) ?
3859 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3860 res->max_rx_qs = res->max_rss_qs + 1;
3861
e3dc867c 3862 if (be_physfn(adapter))
d3518e21 3863 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
e3dc867c
SR
3864 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3865 else
3866 res->max_evt_qs = 1;
92bf14ab
SP
3867
3868 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
71bb8bd0 3869 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
92bf14ab
SP
3870 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3871 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3872}
3873
30128031
SP
3874static void be_setup_init(struct be_adapter *adapter)
3875{
3876 adapter->vlan_prio_bmap = 0xff;
42f11cf2 3877 adapter->phy.link_speed = -1;
30128031
SP
3878 adapter->if_handle = -1;
3879 adapter->be3_native = false;
f66b7cfd 3880 adapter->if_flags = 0;
f25b119c
PR
3881 if (be_physfn(adapter))
3882 adapter->cmd_privileges = MAX_PRIVILEGES;
3883 else
3884 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
3885}
3886
bec84e6b
VV
3887static int be_get_sriov_config(struct be_adapter *adapter)
3888{
bec84e6b 3889 struct be_resources res = {0};
d3d18312 3890 int max_vfs, old_vfs;
bec84e6b 3891
f2858738 3892 be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
d3d18312 3893
ace40aff 3894 /* Some old versions of BE3 FW don't report max_vfs value */
bec84e6b
VV
3895 if (BE3_chip(adapter) && !res.max_vfs) {
3896 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
3897 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3898 }
3899
d3d18312 3900 adapter->pool_res = res;
bec84e6b 3901
ace40aff
VV
 3902 /* If, during the previous unload of the driver, the VFs were not disabled,
3903 * then we cannot rely on the PF POOL limits for the TotalVFs value.
3904 * Instead use the TotalVFs value stored in the pci-dev struct.
3905 */
bec84e6b
VV
3906 old_vfs = pci_num_vf(adapter->pdev);
3907 if (old_vfs) {
ace40aff
VV
3908 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
3909 old_vfs);
3910
3911 adapter->pool_res.max_vfs =
3912 pci_sriov_get_totalvfs(adapter->pdev);
bec84e6b 3913 adapter->num_vfs = old_vfs;
bec84e6b
VV
3914 }
3915
3916 return 0;
3917}
3918
ace40aff
VV
3919static void be_alloc_sriov_res(struct be_adapter *adapter)
3920{
3921 int old_vfs = pci_num_vf(adapter->pdev);
3922 u16 num_vf_qs;
3923 int status;
3924
3925 be_get_sriov_config(adapter);
3926
3927 if (!old_vfs)
3928 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
3929
3930 /* When the HW is in SRIOV capable configuration, the PF-pool
3931 * resources are given to PF during driver load, if there are no
3932 * old VFs. This facility is not available in BE3 FW.
3933 * Also, this is done by FW in Lancer chip.
3934 */
3935 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
3936 num_vf_qs = be_calculate_vf_qs(adapter, 0);
3937 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
3938 num_vf_qs);
3939 if (status)
3940 dev_err(&adapter->pdev->dev,
3941 "Failed to optimize SRIOV resources\n");
3942 }
3943}
3944
92bf14ab 3945static int be_get_resources(struct be_adapter *adapter)
abb93951 3946{
92bf14ab
SP
3947 struct device *dev = &adapter->pdev->dev;
3948 struct be_resources res = {0};
3949 int status;
abb93951 3950
92bf14ab
SP
3951 if (BEx_chip(adapter)) {
3952 BEx_get_resources(adapter, &res);
3953 adapter->res = res;
abb93951
PR
3954 }
3955
92bf14ab
SP
3956 /* For Lancer, SH etc read per-function resource limits from FW.
3957 * GET_FUNC_CONFIG returns per function guaranteed limits.
3958 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
3959 */
3960 if (!BEx_chip(adapter)) {
3961 status = be_cmd_get_func_config(adapter, &res);
3962 if (status)
3963 return status;
abb93951 3964
71bb8bd0
VV
 3965 /* If a default RXQ must be created, we'll use up one RSS queue */
3966 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
3967 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
3968 res.max_rss_qs -= 1;
3969
92bf14ab
SP
3970 /* If RoCE may be enabled stash away half the EQs for RoCE */
3971 if (be_roce_supported(adapter))
3972 res.max_evt_qs /= 2;
3973 adapter->res = res;
abb93951 3974 }
4c876616 3975
71bb8bd0
VV
3976 /* If FW supports RSS default queue, then skip creating non-RSS
3977 * queue for non-IP traffic.
3978 */
3979 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
3980 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
3981
acbafeb1
SP
3982 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3983 be_max_txqs(adapter), be_max_rxqs(adapter),
3984 be_max_rss(adapter), be_max_eqs(adapter),
3985 be_max_vfs(adapter));
3986 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3987 be_max_uc(adapter), be_max_mc(adapter),
3988 be_max_vlans(adapter));
3989
ace40aff
VV
3990 /* Sanitize cfg_num_qs based on HW and platform limits */
3991 adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
3992 be_max_qs(adapter));
92bf14ab 3993 return 0;
abb93951
PR
3994}
3995
39f1d94d
SP
3996static int be_get_config(struct be_adapter *adapter)
3997{
6b085ba9 3998 int status, level;
542963b7 3999 u16 profile_id;
6b085ba9
SP
4000
4001 status = be_cmd_get_cntl_attributes(adapter);
4002 if (status)
4003 return status;
39f1d94d 4004
e97e3cda 4005 status = be_cmd_query_fw_cfg(adapter);
abb93951 4006 if (status)
92bf14ab 4007 return status;
abb93951 4008
6b085ba9
SP
4009 if (BEx_chip(adapter)) {
4010 level = be_cmd_get_fw_log_level(adapter);
4011 adapter->msg_enable =
4012 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4013 }
4014
4015 be_cmd_get_acpi_wol_cap(adapter);
4016
21252377
VV
4017 be_cmd_query_port_name(adapter);
4018
4019 if (be_physfn(adapter)) {
542963b7
VV
4020 status = be_cmd_get_active_profile(adapter, &profile_id);
4021 if (!status)
4022 dev_info(&adapter->pdev->dev,
4023 "Using profile 0x%x\n", profile_id);
962bcb75 4024 }
bec84e6b 4025
92bf14ab
SP
4026 status = be_get_resources(adapter);
4027 if (status)
4028 return status;
abb93951 4029
46ee9c14
RN
4030 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4031 sizeof(*adapter->pmac_id), GFP_KERNEL);
92bf14ab
SP
4032 if (!adapter->pmac_id)
4033 return -ENOMEM;
abb93951 4034
92bf14ab 4035 return 0;
39f1d94d
SP
4036}
4037
95046b92
SP
4038static int be_mac_setup(struct be_adapter *adapter)
4039{
4040 u8 mac[ETH_ALEN];
4041 int status;
4042
4043 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4044 status = be_cmd_get_perm_mac(adapter, mac);
4045 if (status)
4046 return status;
4047
4048 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4049 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4050 } else {
4051 /* Maybe the HW was reset; dev_addr must be re-programmed */
4052 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
4053 }
4054
2c7a9dc1
AK
4055 /* For BE3-R VFs, the PF programs the initial MAC address */
4056 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
4057 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
4058 &adapter->pmac_id[0], 0);
95046b92
SP
4059 return 0;
4060}
4061
68d7bdcb
SP
4062static void be_schedule_worker(struct be_adapter *adapter)
4063{
4064 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4065 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4066}
4067
eb7dd46c
SP
4068static void be_schedule_err_detection(struct be_adapter *adapter)
4069{
4070 schedule_delayed_work(&adapter->be_err_detection_work,
4071 msecs_to_jiffies(1000));
4072 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4073}
4074
7707133c 4075static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 4076{
68d7bdcb 4077 struct net_device *netdev = adapter->netdev;
10ef9ab4 4078 int status;
ba343c77 4079
7707133c 4080 status = be_evt_queues_create(adapter);
abb93951
PR
4081 if (status)
4082 goto err;
73d540f2 4083
7707133c 4084 status = be_tx_qs_create(adapter);
c2bba3df
SK
4085 if (status)
4086 goto err;
10ef9ab4 4087
7707133c 4088 status = be_rx_cqs_create(adapter);
10ef9ab4 4089 if (status)
a54769f5 4090 goto err;
6b7c5b94 4091
7707133c 4092 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
4093 if (status)
4094 goto err;
4095
68d7bdcb
SP
4096 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4097 if (status)
4098 goto err;
4099
4100 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4101 if (status)
4102 goto err;
4103
7707133c
SP
4104 return 0;
4105err:
4106 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4107 return status;
4108}
4109
68d7bdcb
SP
4110int be_update_queues(struct be_adapter *adapter)
4111{
4112 struct net_device *netdev = adapter->netdev;
4113 int status;
4114
4115 if (netif_running(netdev))
4116 be_close(netdev);
4117
4118 be_cancel_worker(adapter);
4119
4120 /* If any vectors have been shared with RoCE we cannot re-program
4121 * the MSIx table.
4122 */
4123 if (!adapter->num_msix_roce_vec)
4124 be_msix_disable(adapter);
4125
4126 be_clear_queues(adapter);
4127
4128 if (!msix_enabled(adapter)) {
4129 status = be_msix_enable(adapter);
4130 if (status)
4131 return status;
4132 }
4133
4134 status = be_setup_queues(adapter);
4135 if (status)
4136 return status;
4137
4138 be_schedule_worker(adapter);
4139
4140 if (netif_running(netdev))
4141 status = be_open(netdev);
4142
4143 return status;
4144}
4145
f7062ee5
SP
4146static inline int fw_major_num(const char *fw_ver)
4147{
4148 int fw_major = 0, i;
4149
4150 i = sscanf(fw_ver, "%d.", &fw_major);
4151 if (i != 1)
4152 return 0;
4153
4154 return fw_major;
4155}
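
/* e.g. fw_major_num("4.9.134.0") returns 4; a string with no leading
 * integer fails the sscanf() match and yields 0.
 */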
4156
f962f840
SP
 4157 /* If any VFs are already enabled, don't FLR the PF */
4158static bool be_reset_required(struct be_adapter *adapter)
4159{
4160 return pci_num_vf(adapter->pdev) ? false : true;
4161}
4162
4163/* Wait for the FW to be ready and perform the required initialization */
4164static int be_func_init(struct be_adapter *adapter)
4165{
4166 int status;
4167
4168 status = be_fw_wait_ready(adapter);
4169 if (status)
4170 return status;
4171
4172 if (be_reset_required(adapter)) {
4173 status = be_cmd_reset_function(adapter);
4174 if (status)
4175 return status;
4176
4177 /* Wait for interrupts to quiesce after an FLR */
4178 msleep(100);
4179
4180 /* We can clear all errors when function reset succeeds */
4181 be_clear_all_error(adapter);
4182 }
4183
4184 /* Tell FW we're ready to fire cmds */
4185 status = be_cmd_fw_init(adapter);
4186 if (status)
4187 return status;
4188
4189 /* Allow interrupts for other ULPs running on NIC function */
4190 be_intr_set(adapter, true);
4191
4192 return 0;
4193}
4194
7707133c
SP
4195static int be_setup(struct be_adapter *adapter)
4196{
4197 struct device *dev = &adapter->pdev->dev;
7707133c
SP
4198 int status;
4199
f962f840
SP
4200 status = be_func_init(adapter);
4201 if (status)
4202 return status;
4203
7707133c
SP
4204 be_setup_init(adapter);
4205
4206 if (!lancer_chip(adapter))
4207 be_cmd_req_native_mode(adapter);
4208
ace40aff
VV
4209 if (!BE2_chip(adapter) && be_physfn(adapter))
4210 be_alloc_sriov_res(adapter);
4211
7707133c 4212 status = be_get_config(adapter);
10ef9ab4 4213 if (status)
a54769f5 4214 goto err;
6b7c5b94 4215
7707133c 4216 status = be_msix_enable(adapter);
10ef9ab4 4217 if (status)
a54769f5 4218 goto err;
6b7c5b94 4219
0700d816
KA
4220 status = be_if_create(adapter, &adapter->if_handle,
4221 be_if_cap_flags(adapter), 0);
7707133c 4222 if (status)
a54769f5 4223 goto err;
6b7c5b94 4224
68d7bdcb
SP
4225 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4226 rtnl_lock();
7707133c 4227 status = be_setup_queues(adapter);
68d7bdcb 4228 rtnl_unlock();
95046b92 4229 if (status)
1578e777
PR
4230 goto err;
4231
7707133c 4232 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
7707133c
SP
4233
4234 status = be_mac_setup(adapter);
10ef9ab4
SP
4235 if (status)
4236 goto err;
4237
e97e3cda 4238 be_cmd_get_fw_ver(adapter);
acbafeb1 4239 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
5a56eb10 4240
e9e2a904 4241 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
50762667 4242 dev_err(dev, "Firmware on card is old (%s), IRQs may not work",
e9e2a904
SK
4243 adapter->fw_ver);
4244 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4245 }
4246
1d1e9a46 4247 if (adapter->vlans_added)
10329df8 4248 be_vid_config(adapter);
7ab8b0b4 4249
a54769f5 4250 be_set_rx_mode(adapter->netdev);
5fb379ee 4251
00d594c3
KA
4252 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4253 adapter->rx_fc);
4254 if (status)
4255 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4256 &adapter->rx_fc);
590c391d 4257
00d594c3
KA
4258 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4259 adapter->tx_fc, adapter->rx_fc);
2dc1deb6 4260
bdce2ad7
SR
4261 if (be_physfn(adapter))
4262 be_cmd_set_logical_link_config(adapter,
4263 IFLA_VF_LINK_STATE_AUTO, 0);
4264
bec84e6b
VV
4265 if (adapter->num_vfs)
4266 be_vf_setup(adapter);
f9449ab7 4267
f25b119c
PR
4268 status = be_cmd_get_phy_info(adapter);
4269 if (!status && be_pause_supported(adapter))
42f11cf2
AK
4270 adapter->phy.fc_autoneg = 1;
4271
68d7bdcb 4272 be_schedule_worker(adapter);
e1ad8e33 4273 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 4274 return 0;
a54769f5
SP
4275err:
4276 be_clear(adapter);
4277 return status;
4278}
6b7c5b94 4279
66268739
IV
4280#ifdef CONFIG_NET_POLL_CONTROLLER
4281static void be_netpoll(struct net_device *netdev)
4282{
4283 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 4284 struct be_eq_obj *eqo;
66268739
IV
4285 int i;
4286
e49cc34f 4287 for_all_evt_queues(adapter, eqo, i) {
20947770 4288 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
e49cc34f
SP
4289 napi_schedule(&eqo->napi);
4290 }
66268739
IV
4291}
4292#endif
4293
96c9b2e4 4294static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
fa9a6fed 4295
306f1348
SP
4296static bool phy_flashing_required(struct be_adapter *adapter)
4297{
e02cfd96 4298 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
42f11cf2 4299 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
4300}
4301
c165541e
PR
4302static bool is_comp_in_ufi(struct be_adapter *adapter,
4303 struct flash_section_info *fsec, int type)
4304{
4305 int i = 0, img_type = 0;
4306 struct flash_section_info_g2 *fsec_g2 = NULL;
4307
ca34fe38 4308 if (BE2_chip(adapter))
c165541e
PR
4309 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4310
4311 for (i = 0; i < MAX_FLASH_COMP; i++) {
4312 if (fsec_g2)
4313 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4314 else
4315 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4316
4317 if (img_type == type)
4318 return true;
4319 }
4320 return false;
4321
4322}
4323
4188e7df 4324static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
748b539a
SP
4325 int header_size,
4326 const struct firmware *fw)
c165541e
PR
4327{
4328 struct flash_section_info *fsec = NULL;
4329 const u8 *p = fw->data;
4330
4331 p += header_size;
4332 while (p < (fw->data + fw->size)) {
4333 fsec = (struct flash_section_info *)p;
4334 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4335 return fsec;
4336 p += 32;
4337 }
4338 return NULL;
4339}
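
/* The scan above steps through the image 32 bytes at a time looking for
 * the 32-byte flash_cookie pattern ("*** SE FLAS" + "H DIRECTORY *** ",
 * including the NUL padding of the first half); the flash section info
 * header starts at the matching offset.
 */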
4340
96c9b2e4
VV
4341static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4342 u32 img_offset, u32 img_size, int hdr_size,
4343 u16 img_optype, bool *crc_match)
4344{
4345 u32 crc_offset;
4346 int status;
4347 u8 crc[4];
4348
70a7b525
VV
4349 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4350 img_size - 4);
96c9b2e4
VV
4351 if (status)
4352 return status;
4353
4354 crc_offset = hdr_size + img_offset + img_size - 4;
4355
 4356 /* Skip flashing if the CRC of the flashed region matches */
4357 if (!memcmp(crc, p + crc_offset, 4))
4358 *crc_match = true;
4359 else
4360 *crc_match = false;
4361
4362 return status;
4363}
4364
773a2d7c 4365static int be_flash(struct be_adapter *adapter, const u8 *img,
70a7b525
VV
4366 struct be_dma_mem *flash_cmd, int optype, int img_size,
4367 u32 img_offset)
773a2d7c 4368{
70a7b525 4369 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
773a2d7c 4370 struct be_cmd_write_flashrom *req = flash_cmd->va;
96c9b2e4 4371 int status;
773a2d7c 4372
773a2d7c
PR
4373 while (total_bytes) {
4374 num_bytes = min_t(u32, 32*1024, total_bytes);
4375
4376 total_bytes -= num_bytes;
4377
4378 if (!total_bytes) {
4379 if (optype == OPTYPE_PHY_FW)
4380 flash_op = FLASHROM_OPER_PHY_FLASH;
4381 else
4382 flash_op = FLASHROM_OPER_FLASH;
4383 } else {
4384 if (optype == OPTYPE_PHY_FW)
4385 flash_op = FLASHROM_OPER_PHY_SAVE;
4386 else
4387 flash_op = FLASHROM_OPER_SAVE;
4388 }
4389
be716446 4390 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
4391 img += num_bytes;
4392 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
70a7b525
VV
4393 flash_op, img_offset +
4394 bytes_sent, num_bytes);
4c60005f 4395 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
96c9b2e4
VV
4396 optype == OPTYPE_PHY_FW)
4397 break;
4398 else if (status)
773a2d7c 4399 return status;
70a7b525
VV
4400
4401 bytes_sent += num_bytes;
773a2d7c
PR
4402 }
4403 return 0;
4404}
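
/* Example flow (illustrative sizes): flashing a 100KB image issues three
 * 32KB chunks with FLASHROM_OPER_SAVE (or _PHY_SAVE) and a final 4KB
 * chunk with FLASHROM_OPER_FLASH (or _PHY_FLASH), which is what commits
 * the accumulated data to the flash region.
 */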
4405
0ad3157e 4406/* For BE2, BE3 and BE3-R */
ca34fe38 4407static int be_flash_BEx(struct be_adapter *adapter,
748b539a
SP
4408 const struct firmware *fw,
4409 struct be_dma_mem *flash_cmd, int num_of_images)
84517482 4410{
c165541e 4411 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
96c9b2e4 4412 struct device *dev = &adapter->pdev->dev;
c165541e 4413 struct flash_section_info *fsec = NULL;
96c9b2e4
VV
4414 int status, i, filehdr_size, num_comp;
4415 const struct flash_comp *pflashcomp;
4416 bool crc_match;
4417 const u8 *p;
c165541e
PR
4418
4419 struct flash_comp gen3_flash_types[] = {
4420 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
4421 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
4422 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
4423 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
4424 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
4425 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
4426 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
4427 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
4428 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
4429 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
4430 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
4431 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
4432 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
4433 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
4434 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
4435 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
4436 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
4437 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
4438 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
4439 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 4440 };
c165541e
PR
4441
4442 struct flash_comp gen2_flash_types[] = {
4443 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
4444 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
4445 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
4446 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
4447 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
4448 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
4449 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
4450 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
4451 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
4452 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
4453 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
4454 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
4455 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
4456 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
4457 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
4458 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
4459 };
4460
ca34fe38 4461 if (BE3_chip(adapter)) {
3f0d4560
AK
4462 pflashcomp = gen3_flash_types;
4463 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 4464 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
4465 } else {
4466 pflashcomp = gen2_flash_types;
4467 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 4468 num_comp = ARRAY_SIZE(gen2_flash_types);
5d3acd0d 4469 img_hdrs_size = 0;
84517482 4470 }
ca34fe38 4471
c165541e
PR
 4472 /* Get flash section info */
4473 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4474 if (!fsec) {
96c9b2e4 4475 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
c165541e
PR
4476 return -1;
4477 }
9fe96934 4478 for (i = 0; i < num_comp; i++) {
c165541e 4479 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 4480 continue;
c165541e
PR
4481
4482 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
4483 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
4484 continue;
4485
773a2d7c
PR
4486 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
4487 !phy_flashing_required(adapter))
306f1348 4488 continue;
c165541e 4489
773a2d7c 4490 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
96c9b2e4
VV
4491 status = be_check_flash_crc(adapter, fw->data,
4492 pflashcomp[i].offset,
4493 pflashcomp[i].size,
4494 filehdr_size +
4495 img_hdrs_size,
4496 OPTYPE_REDBOOT, &crc_match);
4497 if (status) {
4498 dev_err(dev,
4499 "Could not get CRC for 0x%x region\n",
4500 pflashcomp[i].optype);
4501 continue;
4502 }
4503
4504 if (crc_match)
773a2d7c
PR
4505 continue;
4506 }
c165541e 4507
96c9b2e4
VV
4508 p = fw->data + filehdr_size + pflashcomp[i].offset +
4509 img_hdrs_size;
306f1348
SP
4510 if (p + pflashcomp[i].size > fw->data + fw->size)
4511 return -1;
773a2d7c
PR
4512
4513 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
70a7b525 4514 pflashcomp[i].size, 0);
773a2d7c 4515 if (status) {
96c9b2e4 4516 dev_err(dev, "Flashing section type 0x%x failed\n",
773a2d7c
PR
4517 pflashcomp[i].img_type);
4518 return status;
84517482 4519 }
84517482 4520 }
84517482
AK
4521 return 0;
4522}
4523
96c9b2e4
VV
4524static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4525{
4526 u32 img_type = le32_to_cpu(fsec_entry.type);
4527 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4528
4529 if (img_optype != 0xFFFF)
4530 return img_optype;
4531
4532 switch (img_type) {
4533 case IMAGE_FIRMWARE_iSCSI:
4534 img_optype = OPTYPE_ISCSI_ACTIVE;
4535 break;
4536 case IMAGE_BOOT_CODE:
4537 img_optype = OPTYPE_REDBOOT;
4538 break;
4539 case IMAGE_OPTION_ROM_ISCSI:
4540 img_optype = OPTYPE_BIOS;
4541 break;
4542 case IMAGE_OPTION_ROM_PXE:
4543 img_optype = OPTYPE_PXE_BIOS;
4544 break;
4545 case IMAGE_OPTION_ROM_FCoE:
4546 img_optype = OPTYPE_FCOE_BIOS;
4547 break;
4548 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4549 img_optype = OPTYPE_ISCSI_BACKUP;
4550 break;
4551 case IMAGE_NCSI:
4552 img_optype = OPTYPE_NCSI_FW;
4553 break;
4554 case IMAGE_FLASHISM_JUMPVECTOR:
4555 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4556 break;
4557 case IMAGE_FIRMWARE_PHY:
4558 img_optype = OPTYPE_SH_PHY_FW;
4559 break;
4560 case IMAGE_REDBOOT_DIR:
4561 img_optype = OPTYPE_REDBOOT_DIR;
4562 break;
4563 case IMAGE_REDBOOT_CONFIG:
4564 img_optype = OPTYPE_REDBOOT_CONFIG;
4565 break;
4566 case IMAGE_UFI_DIR:
4567 img_optype = OPTYPE_UFI_DIR;
4568 break;
4569 default:
4570 break;
4571 }
4572
4573 return img_optype;
4574}
4575
773a2d7c 4576static int be_flash_skyhawk(struct be_adapter *adapter,
748b539a
SP
4577 const struct firmware *fw,
4578 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 4579{
773a2d7c 4580 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
70a7b525 4581 bool crc_match, old_fw_img, flash_offset_support = true;
96c9b2e4 4582 struct device *dev = &adapter->pdev->dev;
773a2d7c 4583 struct flash_section_info *fsec = NULL;
96c9b2e4 4584 u32 img_offset, img_size, img_type;
70a7b525 4585 u16 img_optype, flash_optype;
96c9b2e4 4586 int status, i, filehdr_size;
96c9b2e4 4587 const u8 *p;
773a2d7c
PR
4588
4589 filehdr_size = sizeof(struct flash_file_hdr_g3);
4590 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4591 if (!fsec) {
96c9b2e4 4592 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
56ace3a0 4593 return -EINVAL;
773a2d7c
PR
4594 }
4595
70a7b525 4596retry_flash:
773a2d7c
PR
4597 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4598 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4599 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
96c9b2e4
VV
4600 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4601 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4602 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
773a2d7c 4603
96c9b2e4 4604 if (img_optype == 0xFFFF)
773a2d7c 4605 continue;
70a7b525
VV
4606
4607 if (flash_offset_support)
4608 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4609 else
4610 flash_optype = img_optype;
4611
96c9b2e4
VV
4612 /* Don't bother verifying CRC if an old FW image is being
4613 * flashed
4614 */
4615 if (old_fw_img)
4616 goto flash;
4617
4618 status = be_check_flash_crc(adapter, fw->data, img_offset,
4619 img_size, filehdr_size +
70a7b525 4620 img_hdrs_size, flash_optype,
96c9b2e4 4621 &crc_match);
4c60005f
KA
4622 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4623 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
70a7b525
VV
4624 /* The current FW image on the card does not support
4625 * OFFSET based flashing. Retry using older mechanism
4626 * of OPTYPE based flashing
4627 */
4628 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4629 flash_offset_support = false;
4630 goto retry_flash;
4631 }
4632
4633 /* The current FW image on the card does not recognize
4634 * the new FLASH op_type. The FW download is partially
4635 * complete. Reboot the server now to enable FW image
4636 * to recognize the new FLASH op_type. To complete the
4637 * remaining process, download the same FW again after
4638 * the reboot.
4639 */
96c9b2e4
VV
4640 dev_err(dev, "Flash incomplete. Reset the server\n");
4641 dev_err(dev, "Download FW image again after reset\n");
4642 return -EAGAIN;
4643 } else if (status) {
4644 dev_err(dev, "Could not get CRC for 0x%x region\n",
4645 img_optype);
4646 return -EFAULT;
773a2d7c
PR
4647 }
4648
96c9b2e4
VV
4649 if (crc_match)
4650 continue;
773a2d7c 4651
96c9b2e4
VV
4652flash:
4653 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
773a2d7c
PR
4654 if (p + img_size > fw->data + fw->size)
4655 return -1;
4656
70a7b525
VV
4657 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4658 img_offset);
4659
4660 /* The current FW image on the card does not support OFFSET
4661 * based flashing. Retry using older mechanism of OPTYPE based
4662 * flashing
4663 */
4664 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4665 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4666 flash_offset_support = false;
4667 goto retry_flash;
4668 }
4669
96c9b2e4
VV
4670 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4671 * UFI_DIR region
4672 */
4c60005f
KA
4673 if (old_fw_img &&
4674 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4675 (img_optype == OPTYPE_UFI_DIR &&
4676 base_status(status) == MCC_STATUS_FAILED))) {
96c9b2e4
VV
4677 continue;
4678 } else if (status) {
4679 dev_err(dev, "Flashing section type 0x%x failed\n",
4680 img_type);
4681 return -EFAULT;
773a2d7c
PR
4682 }
4683 }
4684 return 0;
3f0d4560
AK
4685}
4686
485bf569 4687static int lancer_fw_download(struct be_adapter *adapter,
748b539a 4688 const struct firmware *fw)
84517482 4689{
485bf569
SN
4690#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4691#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
bb864e07 4692 struct device *dev = &adapter->pdev->dev;
84517482 4693 struct be_dma_mem flash_cmd;
485bf569
SN
4694 const u8 *data_ptr = NULL;
4695 u8 *dest_image_ptr = NULL;
4696 size_t image_size = 0;
4697 u32 chunk_size = 0;
4698 u32 data_written = 0;
4699 u32 offset = 0;
4700 int status = 0;
4701 u8 add_status = 0;
f67ef7ba 4702 u8 change_status;
84517482 4703
485bf569 4704 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
bb864e07 4705 dev_err(dev, "FW image size should be multiple of 4\n");
3fb8cb80 4706 return -EINVAL;
d9efd2af
SB
4707 }
4708
485bf569
SN
4709 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4710 + LANCER_FW_DOWNLOAD_CHUNK;
bb864e07 4711 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
d0320f75 4712 &flash_cmd.dma, GFP_KERNEL);
3fb8cb80
KA
4713 if (!flash_cmd.va)
4714 return -ENOMEM;
84517482 4715
485bf569
SN
4716 dest_image_ptr = flash_cmd.va +
4717 sizeof(struct lancer_cmd_req_write_object);
4718 image_size = fw->size;
4719 data_ptr = fw->data;
4720
4721 while (image_size) {
4722 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4723
4724 /* Copy the image chunk content. */
4725 memcpy(dest_image_ptr, data_ptr, chunk_size);
4726
4727 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
4728 chunk_size, offset,
4729 LANCER_FW_DOWNLOAD_LOCATION,
4730 &data_written, &change_status,
4731 &add_status);
485bf569
SN
4732 if (status)
4733 break;
4734
4735 offset += data_written;
4736 data_ptr += data_written;
4737 image_size -= data_written;
4738 }
4739
4740 if (!status) {
4741 /* Commit the FW written */
4742 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
4743 0, offset,
4744 LANCER_FW_DOWNLOAD_LOCATION,
4745 &data_written, &change_status,
4746 &add_status);
485bf569
SN
4747 }
4748
bb864e07 4749 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
485bf569 4750 if (status) {
bb864e07 4751 dev_err(dev, "Firmware load error\n");
3fb8cb80 4752 return be_cmd_status(status);
485bf569
SN
4753 }
4754
bb864e07
KA
4755 dev_info(dev, "Firmware flashed successfully\n");
4756
f67ef7ba 4757 if (change_status == LANCER_FW_RESET_NEEDED) {
bb864e07 4758 dev_info(dev, "Resetting adapter to activate new FW\n");
5c510811
SK
4759 status = lancer_physdev_ctrl(adapter,
4760 PHYSDEV_CONTROL_FW_RESET_MASK);
f67ef7ba 4761 if (status) {
bb864e07
KA
4762 dev_err(dev, "Adapter busy, could not reset FW\n");
4763 dev_err(dev, "Reboot server to activate new FW\n");
f67ef7ba
PR
4764 }
4765 } else if (change_status != LANCER_NO_RESET_NEEDED) {
bb864e07 4766 dev_info(dev, "Reboot server to activate new FW\n");
f67ef7ba 4767 }
3fb8cb80
KA
4768
4769 return 0;
485bf569
SN
4770}
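
/* Lancer download flow, as implemented above (sketch): the image is
 * streamed to LANCER_FW_DOWNLOAD_LOCATION in 32KB writes; a final
 * zero-length write at the end offset commits it. change_status then
 * indicates whether a FW reset (attempted here) or a server reboot is
 * needed to activate the new image.
 */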
4771
5d3acd0d
VV
4772#define BE2_UFI 2
4773#define BE3_UFI 3
4774#define BE3R_UFI 10
4775#define SH_UFI 4
81a9e226 4776#define SH_P2_UFI 11
5d3acd0d 4777
ca34fe38 4778static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 4779 struct flash_file_hdr_g3 *fhdr)
773a2d7c 4780{
5d3acd0d
VV
4781 if (!fhdr) {
4782 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4783 return -1;
4784 }
773a2d7c 4785
5d3acd0d
VV
4786 /* First letter of the build version is used to identify
4787 * which chip this image file is meant for.
4788 */
4789 switch (fhdr->build[0]) {
4790 case BLD_STR_UFI_TYPE_SH:
81a9e226
VV
4791 return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
4792 SH_UFI;
5d3acd0d
VV
4793 case BLD_STR_UFI_TYPE_BE3:
4794 return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
4795 BE3_UFI;
4796 case BLD_STR_UFI_TYPE_BE2:
4797 return BE2_UFI;
4798 default:
4799 return -1;
4800 }
4801}
773a2d7c 4802
5d3acd0d
VV
4803/* Check if the flash image file is compatible with the adapter that
4804 * is being flashed.
4805 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
81a9e226 4806 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
5d3acd0d
VV
4807 */
4808static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4809 struct flash_file_hdr_g3 *fhdr)
4810{
4811 int ufi_type = be_get_ufi_type(adapter, fhdr);
4812
4813 switch (ufi_type) {
81a9e226 4814 case SH_P2_UFI:
5d3acd0d 4815 return skyhawk_chip(adapter);
81a9e226
VV
4816 case SH_UFI:
4817 return (skyhawk_chip(adapter) &&
4818 adapter->asic_rev < ASIC_REV_P2);
5d3acd0d
VV
4819 case BE3R_UFI:
4820 return BE3_chip(adapter);
4821 case BE3_UFI:
4822 return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
4823 case BE2_UFI:
4824 return BE2_chip(adapter);
4825 default:
4826 return false;
4827 }
773a2d7c
PR
4828}
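
/* Compatibility summary of the switch above:
 *   SH_P2_UFI -> any Skyhawk
 *   SH_UFI    -> Skyhawk with asic_rev < ASIC_REV_P2
 *   BE3R_UFI  -> any BE3
 *   BE3_UFI   -> BE3 with asic_rev < ASIC_REV_B0
 *   BE2_UFI   -> BE2
 */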
4829
485bf569
SN
4830static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4831{
5d3acd0d 4832 struct device *dev = &adapter->pdev->dev;
485bf569 4833 struct flash_file_hdr_g3 *fhdr3;
5d3acd0d
VV
4834 struct image_hdr *img_hdr_ptr;
4835 int status = 0, i, num_imgs;
485bf569 4836 struct be_dma_mem flash_cmd;
84517482 4837
5d3acd0d
VV
4838 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
4839 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
4840 dev_err(dev, "Flash image is not compatible with adapter\n");
4841 return -EINVAL;
84517482
AK
4842 }
4843
5d3acd0d
VV
4844 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4845 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4846 GFP_KERNEL);
4847 if (!flash_cmd.va)
4848 return -ENOMEM;
773a2d7c 4849
773a2d7c
PR
4850 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4851 for (i = 0; i < num_imgs; i++) {
4852 img_hdr_ptr = (struct image_hdr *)(fw->data +
4853 (sizeof(struct flash_file_hdr_g3) +
4854 i * sizeof(struct image_hdr)));
5d3acd0d
VV
4855 if (!BE2_chip(adapter) &&
4856 le32_to_cpu(img_hdr_ptr->imageid) != 1)
4857 continue;
84517482 4858
5d3acd0d
VV
4859 if (skyhawk_chip(adapter))
4860 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
4861 num_imgs);
4862 else
4863 status = be_flash_BEx(adapter, fw, &flash_cmd,
4864 num_imgs);
84517482
AK
4865 }
4866
5d3acd0d
VV
4867 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4868 if (!status)
4869 dev_info(dev, "Firmware flashed successfully\n");
84517482 4870
485bf569
SN
4871 return status;
4872}
4873
4874int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4875{
4876 const struct firmware *fw;
4877 int status;
4878
4879 if (!netif_running(adapter->netdev)) {
4880 dev_err(&adapter->pdev->dev,
4881 "Firmware load not allowed (interface is down)\n");
940a3fcd 4882 return -ENETDOWN;
485bf569
SN
4883 }
4884
4885 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4886 if (status)
4887 goto fw_exit;
4888
4889 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4890
4891 if (lancer_chip(adapter))
4892 status = lancer_fw_download(adapter, fw);
4893 else
4894 status = be_fw_download(adapter, fw);
4895
eeb65ced 4896 if (!status)
e97e3cda 4897 be_cmd_get_fw_ver(adapter);
eeb65ced 4898
84517482
AK
4899fw_exit:
4900 release_firmware(fw);
4901 return status;
4902}
4903
add511b3
RP
4904static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4905 u16 flags)
a77dcb8c
AK
4906{
4907 struct be_adapter *adapter = netdev_priv(dev);
4908 struct nlattr *attr, *br_spec;
4909 int rem;
4910 int status = 0;
4911 u16 mode = 0;
4912
4913 if (!sriov_enabled(adapter))
4914 return -EOPNOTSUPP;
4915
4916 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4ea85e83
TG
4917 if (!br_spec)
4918 return -EINVAL;
a77dcb8c
AK
4919
4920 nla_for_each_nested(attr, br_spec, rem) {
4921 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4922 continue;
4923
b7c1a314
TG
4924 if (nla_len(attr) < sizeof(mode))
4925 return -EINVAL;
4926
a77dcb8c
AK
4927 mode = nla_get_u16(attr);
4928 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4929 return -EINVAL;
4930
4931 status = be_cmd_set_hsw_config(adapter, 0, 0,
4932 adapter->if_handle,
4933 mode == BRIDGE_MODE_VEPA ?
4934 PORT_FWD_TYPE_VEPA :
e7bcbd7b 4935 PORT_FWD_TYPE_VEB, 0);
a77dcb8c
AK
4936 if (status)
4937 goto err;
4938
4939 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4940 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4941
4942 return status;
4943 }
4944err:
4945 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4946 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4947
4948 return status;
4949}
4950
4951static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
46c264da
ND
4952 struct net_device *dev, u32 filter_mask,
4953 int nlflags)
a77dcb8c
AK
4954{
4955 struct be_adapter *adapter = netdev_priv(dev);
4956 int status = 0;
4957 u8 hsw_mode;
4958
4959 if (!sriov_enabled(adapter))
4960 return 0;
4961
4962 /* BE and Lancer chips support VEB mode only */
4963 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4964 hsw_mode = PORT_FWD_TYPE_VEB;
4965 } else {
4966 status = be_cmd_get_hsw_config(adapter, NULL, 0,
e7bcbd7b
KA
4967 adapter->if_handle, &hsw_mode,
4968 NULL);
a77dcb8c
AK
4969 if (status)
4970 return 0;
4971 }
4972
4973 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4974 hsw_mode == PORT_FWD_TYPE_VEPA ?
2c3c031c 4975 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
46c264da 4976 0, 0, nlflags);
a77dcb8c
AK
4977}
4978
c5abe7c0 4979#ifdef CONFIG_BE2NET_VXLAN
630f4b70
SB
4980/* VxLAN offload Notes:
4981 *
4982 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4983 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4984 * is expected to work across all types of IP tunnels once exported. Skyhawk
4985 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
16dde0d6
SB
4986 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4987 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4988 * those other tunnels are unexported on the fly through ndo_features_check().
630f4b70
SB
4989 *
4990 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4991 * adds more than one port, disable offloads and don't re-enable them again
4992 * until after all the tunnels are removed.
4993 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
                              __be16 port)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;
        int status;

        if (lancer_chip(adapter) || BEx_chip(adapter))
                return;

        if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
                dev_info(dev,
                         "Only one UDP port supported for VxLAN offloads\n");
                dev_info(dev, "Disabling VxLAN offloads\n");
                adapter->vxlan_port_count++;
                goto err;
        }

        if (adapter->vxlan_port_count++ >= 1)
                return;

        status = be_cmd_manage_iface(adapter, adapter->if_handle,
                                     OP_CONVERT_NORMAL_TO_TUNNEL);
        if (status) {
                dev_warn(dev, "Failed to convert normal interface to tunnel\n");
                goto err;
        }

        status = be_cmd_set_vxlan_port(adapter, port);
        if (status) {
                dev_warn(dev, "Failed to add VxLAN port\n");
                goto err;
        }
        adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
        adapter->vxlan_port = port;

        netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                                   NETIF_F_TSO | NETIF_F_TSO6 |
                                   NETIF_F_GSO_UDP_TUNNEL;
        netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
        netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

        dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
                 be16_to_cpu(port));
        return;
err:
        be_disable_vxlan_offloads(adapter);
}

static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
                              __be16 port)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (lancer_chip(adapter) || BEx_chip(adapter))
                return;

        if (adapter->vxlan_port != port)
                goto done;

        be_disable_vxlan_offloads(adapter);

        dev_info(&adapter->pdev->dev,
                 "Disabled VxLAN offloads for UDP port %d\n",
                 be16_to_cpu(port));
done:
        adapter->vxlan_port_count--;
}

static netdev_features_t be_features_check(struct sk_buff *skb,
                                           struct net_device *dev,
                                           netdev_features_t features)
{
        struct be_adapter *adapter = netdev_priv(dev);
        u8 l4_hdr = 0;

        /* The code below restricts offload features for some tunneled packets.
         * Offload features for normal (non tunnel) packets are unchanged.
         */
        if (!skb->encapsulation ||
            !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
                return features;

        /* It's an encapsulated packet and VxLAN offloads are enabled. We
         * should disable tunnel offload features if it's not a VxLAN packet,
         * as tunnel offloads have been enabled only for VxLAN. This is done to
         * allow other tunneled traffic like GRE to work fine while VxLAN
         * offloads are configured in Skyhawk-R.
         */
        switch (vlan_get_protocol(skb)) {
        case htons(ETH_P_IP):
                l4_hdr = ip_hdr(skb)->protocol;
                break;
        case htons(ETH_P_IPV6):
                l4_hdr = ipv6_hdr(skb)->nexthdr;
                break;
        default:
                return features;
        }

        if (l4_hdr != IPPROTO_UDP ||
            skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
            skb->inner_protocol != htons(ETH_P_TEB) ||
            skb_inner_mac_header(skb) - skb_transport_header(skb) !=
            sizeof(struct udphdr) + sizeof(struct vxlanhdr))
                return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);

        return features;
}
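/* Layout note: the length check above encodes the canonical VxLAN framing.
 * A UDP header is 8 bytes and a VXLAN header is 8 bytes, so the inner
 * Ethernet header must begin exactly 16 bytes past the outer L4 header:
 *
 *   [outer IP][outer UDP (8B)][VXLAN (8B)][inner Ethernet][inner IP]...
 *
 * Any other spacing means the packet is not plain VxLAN, and the checksum
 * and GSO offloads are masked off for that skb.
 */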
#endif

static const struct net_device_ops be_netdev_ops = {
        .ndo_open               = be_open,
        .ndo_stop               = be_close,
        .ndo_start_xmit         = be_xmit,
        .ndo_set_rx_mode        = be_set_rx_mode,
        .ndo_set_mac_address    = be_mac_addr_set,
        .ndo_change_mtu         = be_change_mtu,
        .ndo_get_stats64        = be_get_stats64,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
        .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
        .ndo_set_vf_mac         = be_set_vf_mac,
        .ndo_set_vf_vlan        = be_set_vf_vlan,
        .ndo_set_vf_rate        = be_set_vf_tx_rate,
        .ndo_get_vf_config      = be_get_vf_config,
        .ndo_set_vf_link_state  = be_set_vf_link_state,
        .ndo_set_vf_spoofchk    = be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = be_netpoll,
#endif
        .ndo_bridge_setlink     = be_ndo_bridge_setlink,
        .ndo_bridge_getlink     = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
        .ndo_busy_poll          = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
        .ndo_add_vxlan_port     = be_add_vxlan_port,
        .ndo_del_vxlan_port     = be_del_vxlan_port,
        .ndo_features_check     = be_features_check,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
                NETIF_F_HW_VLAN_CTAG_TX;
        if (be_multi_rxq(adapter))
                netdev->hw_features |= NETIF_F_RXHASH;

        netdev->features |= netdev->hw_features |
                NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

        netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

        netdev->priv_flags |= IFF_UNICAST_FLT;

        netdev->flags |= IFF_MULTICAST;

        netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

        netdev->netdev_ops = &be_netdev_ops;

        netdev->ethtool_ops = &be_ethtool_ops;
}
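
/* Note (generic kernel feature semantics, stated as background): hw_features
 * is the set a user may toggle at runtime, while features is the currently
 * active set. Toggling is done via ethtool, e.g.
 *
 *   ethtool -K eth0 tso off
 *
 * VLAN CTAG RX/FILTER are added only to features above, not hw_features, so
 * they stay enabled and are not user-controllable on this driver.
 */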

static void be_cleanup(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        rtnl_lock();
        netif_device_detach(netdev);
        if (netif_running(netdev))
                be_close(netdev);
        rtnl_unlock();

        be_clear(adapter);
}

static int be_resume(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int status;

        status = be_setup(adapter);
        if (status)
                return status;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        return status;
        }

        netif_device_attach(netdev);

        return 0;
}

static int be_err_recover(struct be_adapter *adapter)
{
        struct device *dev = &adapter->pdev->dev;
        int status;

        status = be_resume(adapter);
        if (status)
                goto err;

        dev_info(dev, "Adapter recovery successful\n");
        return 0;
err:
        if (be_physfn(adapter))
                dev_err(dev, "Adapter recovery failed\n");
        else
                dev_err(dev, "Re-trying adapter recovery\n");

        return status;
}

static void be_err_detection_task(struct work_struct *work)
{
        struct be_adapter *adapter =
                container_of(work, struct be_adapter,
                             be_err_detection_work.work);
        int status = 0;

        be_detect_error(adapter);

        if (adapter->hw_error) {
                be_cleanup(adapter);

                /* As of now error recovery support is in Lancer only */
                if (lancer_chip(adapter))
                        status = be_err_recover(adapter);
        }

        /* Always attempt recovery on VFs */
        if (!status || be_virtfn(adapter))
                be_schedule_err_detection(adapter);
}

static void be_log_sfp_info(struct be_adapter *adapter)
{
        int status;

        status = be_cmd_query_sfp_info(adapter);
        if (!status) {
                dev_err(&adapter->pdev->dev,
                        "Unqualified SFP+ detected on %c from %s part no: %s\n",
                        adapter->port_name, adapter->phy.vendor_name,
                        adapter->phy.vendor_pn);
        }
        adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
}

static void be_worker(struct work_struct *work)
{
        struct be_adapter *adapter =
                container_of(work, struct be_adapter, work.work);
        struct be_rx_obj *rxo;
        int i;

        /* when interrupts are not yet enabled, just reap any pending
         * mcc completions
         */
        if (!netif_running(adapter->netdev)) {
                local_bh_disable();
                be_process_mcc(adapter);
                local_bh_enable();
                goto reschedule;
        }

        if (!adapter->stats_cmd_sent) {
                if (lancer_chip(adapter))
                        lancer_cmd_get_pport_stats(adapter,
                                                   &adapter->stats_cmd);
                else
                        be_cmd_get_stats(adapter, &adapter->stats_cmd);
        }

        if (be_physfn(adapter) &&
            MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
                be_cmd_get_die_temperature(adapter);

        for_all_rx_queues(adapter, rxo, i) {
                /* Replenish RX-queues starved due to memory
                 * allocation failures.
                 */
                if (rxo->rx_post_starved)
                        be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
        }

        /* EQ-delay update for Skyhawk is done while notifying EQ */
        if (!skyhawk_chip(adapter))
                be_eqd_update(adapter, false);

        if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
                be_log_sfp_info(adapter);

reschedule:
        adapter->work_counter++;
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
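
/* Timing note derived from the code above: the worker re-arms itself every
 * 1000 ms, so with be_get_temp_freq == 64 (set in be_drv_init() below) the
 * die-temperature query runs roughly once every 64 seconds, and only on the
 * PF. The frequency must stay a power of 2 for MODULO() to be safe.
 */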

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
        if (adapter->csr)
                pci_iounmap(adapter->pdev, adapter->csr);
        if (adapter->db)
                pci_iounmap(adapter->pdev, adapter->db);
}

static int db_bar(struct be_adapter *adapter)
{
        if (lancer_chip(adapter) || !be_physfn(adapter))
                return 0;
        else
                return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
        if (skyhawk_chip(adapter)) {
                adapter->roce_db.size = 4096;
                adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
                                                              db_bar(adapter));
                adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
                                                               db_bar(adapter));
        }
        return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        u8 __iomem *addr;
        u32 sli_intf;

        pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
        adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
                                SLI_INTF_FAMILY_SHIFT;
        adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

        if (BEx_chip(adapter) && be_physfn(adapter)) {
                adapter->csr = pci_iomap(pdev, 2, 0);
                if (!adapter->csr)
                        return -ENOMEM;
        }

        addr = pci_iomap(pdev, db_bar(adapter), 0);
        if (!addr)
                goto pci_map_err;
        adapter->db = addr;

        if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
                if (be_physfn(adapter)) {
                        /* PCICFG is the 2nd BAR in BE2 */
                        addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
                        if (!addr)
                                goto pci_map_err;
                        adapter->pcicfg = addr;
                } else {
                        adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
                }
        }

        be_roce_map_pci_bars(adapter);
        return 0;

pci_map_err:
        dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
        be_unmap_pci_bars(adapter);
        return -ENOMEM;
}
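
/* BAR map summary, as implied by db_bar() and be_map_pci_bars() above:
 *
 *   chip / function      CSR     doorbell   PCICFG
 *   BE2 PF               BAR 2   BAR 4      BAR 1
 *   BE3 PF               BAR 2   BAR 4      BAR 0
 *   Skyhawk PF           -       BAR 4      BAR 0
 *   BEx/Skyhawk VF       -       BAR 0      doorbell + SRIOV_VF_PCICFG_OFFSET
 *   Lancer (PF or VF)    -       BAR 0      not mapped
 */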

static void be_drv_cleanup(struct be_adapter *adapter)
{
        struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
        struct device *dev = &adapter->pdev->dev;

        if (mem->va)
                dma_free_coherent(dev, mem->size, mem->va, mem->dma);

        mem = &adapter->rx_filter;
        if (mem->va)
                dma_free_coherent(dev, mem->size, mem->va, mem->dma);

        mem = &adapter->stats_cmd;
        if (mem->va)
                dma_free_coherent(dev, mem->size, mem->va, mem->dma);
}

/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
        struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
        struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
        struct be_dma_mem *rx_filter = &adapter->rx_filter;
        struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
        struct device *dev = &adapter->pdev->dev;
        int status = 0;

        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
        mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
                                                &mbox_mem_alloc->dma,
                                                GFP_KERNEL);
        if (!mbox_mem_alloc->va)
                return -ENOMEM;

        mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
        mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
        mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
        memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

        rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
        rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
                                            &rx_filter->dma, GFP_KERNEL);
        if (!rx_filter->va) {
                status = -ENOMEM;
                goto free_mbox;
        }

        if (lancer_chip(adapter))
                stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
        else if (BE2_chip(adapter))
                stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
        else if (BE3_chip(adapter))
                stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
        else
                stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
        stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
                                            &stats_cmd->dma, GFP_KERNEL);
        if (!stats_cmd->va) {
                status = -ENOMEM;
                goto free_rx_filter;
        }

        mutex_init(&adapter->mbox_lock);
        spin_lock_init(&adapter->mcc_lock);
        spin_lock_init(&adapter->mcc_cq_lock);
        init_completion(&adapter->et_cmd_compl);

        pci_save_state(adapter->pdev);

        INIT_DELAYED_WORK(&adapter->work, be_worker);
        INIT_DELAYED_WORK(&adapter->be_err_detection_work,
                          be_err_detection_task);

        adapter->rx_fc = true;
        adapter->tx_fc = true;

        /* Must be a power of 2 or else MODULO will BUG_ON */
        adapter->be_get_temp_freq = 64;

        return 0;

free_rx_filter:
        dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
        dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
                          mbox_mem_alloc->dma);
        return status;
}
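
/* Note on the mailbox allocation above: the +16 over-allocation followed by
 * PTR_ALIGN() on both the CPU and DMA addresses implies the MCC mailbox must
 * be 16-byte aligned; the driver guarantees that itself rather than relying
 * on allocator alignment. Illustration (hypothetical addresses):
 *
 *   mbox_mem_alloc->va  = 0x...1008  ->  mbox_mem_align->va  = 0x...1010
 *   mbox_mem_alloc->dma = 0x...1008  ->  mbox_mem_align->dma = 0x...1010
 */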

static void be_remove(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        be_roce_dev_remove(adapter);
        be_intr_set(adapter, false);

        be_cancel_err_detection(adapter);

        unregister_netdev(adapter->netdev);

        be_clear(adapter);

        /* tell fw we're done with firing cmds */
        be_cmd_fw_clean(adapter);

        be_unmap_pci_bars(adapter);
        be_drv_cleanup(adapter);

        pci_disable_pcie_error_reporting(pdev);

        pci_release_regions(pdev);
        pci_disable_device(pdev);

        free_netdev(adapter->netdev);
}

static char *mc_name(struct be_adapter *adapter)
{
        char *str = "";	/* default */

        switch (adapter->mc_type) {
        case UMC:
                str = "UMC";
                break;
        case FLEX10:
                str = "FLEX10";
                break;
        case vNIC1:
                str = "vNIC-1";
                break;
        case nPAR:
                str = "nPAR";
                break;
        case UFP:
                str = "UFP";
                break;
        case vNIC2:
                str = "vNIC-2";
                break;
        default:
                str = "";
        }

        return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
        return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case OC_DEVICE_ID1:
                return OC_NAME;
        case OC_DEVICE_ID2:
                return OC_NAME_BE;
        case OC_DEVICE_ID3:
        case OC_DEVICE_ID4:
                return OC_NAME_LANCER;
        case BE_DEVICE_ID2:
                return BE3_NAME;
        case OC_DEVICE_ID5:
        case OC_DEVICE_ID6:
                return OC_NAME_SH;
        default:
                return BE_NAME;
        }
}

static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
        struct be_adapter *adapter;
        struct net_device *netdev;
        int status = 0;

        dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

        status = pci_enable_device(pdev);
        if (status)
                goto do_none;

        status = pci_request_regions(pdev, DRV_NAME);
        if (status)
                goto disable_dev;
        pci_set_master(pdev);

        netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
        if (!netdev) {
                status = -ENOMEM;
                goto rel_reg;
        }
        adapter = netdev_priv(netdev);
        adapter->pdev = pdev;
        pci_set_drvdata(pdev, adapter);
        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);

        status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }

        status = pci_enable_pcie_error_reporting(pdev);
        if (!status)
                dev_info(&pdev->dev, "PCIe error reporting enabled\n");

        status = be_map_pci_bars(adapter);
        if (status)
                goto free_netdev;

        status = be_drv_init(adapter);
        if (status)
                goto unmap_bars;

        status = be_setup(adapter);
        if (status)
                goto drv_cleanup;

        be_netdev_init(netdev);
        status = register_netdev(netdev);
        if (status != 0)
                goto unsetup;

        be_roce_dev_add(adapter);

        be_schedule_err_detection(adapter);

        dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
                 func_name(adapter), mc_name(adapter), adapter->port_name);

        return 0;

unsetup:
        be_clear(adapter);
drv_cleanup:
        be_drv_cleanup(adapter);
unmap_bars:
        be_unmap_pci_bars(adapter);
free_netdev:
        free_netdev(netdev);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}
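
/* The label ladder above is the usual kernel unwind idiom: each failure
 * jumps to the label that releases everything acquired so far, in reverse
 * order of acquisition, so no resource is leaked or double-freed.
 */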

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (adapter->wol_en)
                be_setup_wol(adapter, true);

        be_intr_set(adapter, false);
        be_cancel_err_detection(adapter);

        be_cleanup(adapter);

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

static int be_pci_resume(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status = 0;

        status = pci_enable_device(pdev);
        if (status)
                return status;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        status = be_resume(adapter);
        if (status)
                return status;

        be_schedule_err_detection(adapter);

        if (adapter->wol_en)
                be_setup_wol(adapter, false);

        return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        be_roce_dev_shutdown(adapter);
        cancel_delayed_work_sync(&adapter->work);
        be_cancel_err_detection(adapter);

        netif_device_detach(adapter->netdev);

        be_cmd_reset_function(adapter);

        pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                            pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        if (!adapter->eeh_error) {
                adapter->eeh_error = true;

                be_cancel_err_detection(adapter);

                be_cleanup(adapter);
        }

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        /* The error could cause the FW to trigger a flash debug dump.
         * Resetting the card while flash dump is in progress
         * can cause it not to recover; wait for it to finish.
         * Wait only for first function as it is needed only once per
         * adapter.
         */
        if (pdev->devfn == 0)
                ssleep(30);

        return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        dev_info(&adapter->pdev->dev,
                 "Waiting for FW to be ready after EEH reset\n");
        status = be_fw_wait_ready(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_cleanup_aer_uncorrect_error_status(pdev);
        be_clear_all_error(adapter);
        return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        status = be_resume(adapter);
        if (status)
                goto err;

        be_schedule_err_detection(adapter);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        u16 num_vf_qs;
        int status;

        if (!num_vfs)
                be_vf_clear(adapter);

        adapter->num_vfs = num_vfs;

        if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
                dev_warn(&pdev->dev,
                         "Cannot disable VFs while they are assigned\n");
                return -EBUSY;
        }

        /* When the HW is in SRIOV capable configuration, the PF-pool
         * resources are equally distributed across the max number of VFs.
         * The user may request only a subset of the max VFs to be enabled.
         * Based on num_vfs, redistribute the resources across num_vfs so
         * that each VF gets a larger share of the resources.
         * This facility is not available in BE3 FW.
         * Also, this is done by FW in Lancer chip.
         */
        if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
                num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
                status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
                                                 adapter->num_vfs, num_vf_qs);
                if (status)
                        dev_err(&pdev->dev,
                                "Failed to optimize SR-IOV resources\n");
        }

        status = be_get_resources(adapter);
        if (status)
                return be_cmd_status(status);

        /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
        rtnl_lock();
        status = be_update_queues(adapter);
        rtnl_unlock();
        if (status)
                return be_cmd_status(status);

        if (adapter->num_vfs)
                status = be_vf_setup(adapter);

        if (!status)
                return adapter->num_vfs;

        return 0;
}
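
/* Usage note (standard PCI sysfs interface): this callback is invoked when
 * the admin writes the device's sriov_numvfs attribute, e.g.
 *
 *   echo 4 > /sys/bus/pci/devices/0000:04:00.0/sriov_numvfs   # enable 4 VFs
 *   echo 0 > /sys/bus/pci/devices/0000:04:00.0/sriov_numvfs   # disable VFs
 *
 * (the PCI address above is illustrative)
 */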

static const struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};
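
/* EEH/AER recovery sequence (core PCI error-handling contract): the PCI core
 * calls .error_detected first; if it returns PCI_ERS_RESULT_NEED_RESET, the
 * slot is reset and .slot_reset runs; once that reports RECOVERED, .resume
 * restarts normal operation.
 */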

static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_pci_resume,
        .shutdown = be_shutdown,
        .sriov_configure = be_pci_sriov_configure,
        .err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                printk(KERN_WARNING DRV_NAME
                       " : Module param rx_frag_size must be 2048/4096/8192."
                       " Using 2048\n");
                rx_frag_size = 2048;
        }

        if (num_vfs > 0) {
                pr_info(DRV_NAME " : Module param num_vfs is obsolete.\n");
                pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
        }

        return pci_register_driver(&be_driver);
}
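
/* Usage note (standard modprobe behaviour): the only values the validation
 * above accepts for rx_frag_size are 2048, 4096 and 8192, e.g.
 *
 *   modprobe be2net rx_frag_size=4096
 *
 * Any other value falls back to the 2048-byte default with a warning.
 */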
module_init(be_init_module);

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);