be2net: process port misconfig async event
[deliverable/linux.git] / drivers / net / ethernet / emulex / benet / be_main.c
CommitLineData
6b7c5b94 1/*
40263820 2 * Copyright (C) 2005 - 2014 Emulex
6b7c5b94
SP
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
d2145cde 11 * linux-drivers@emulex.com
6b7c5b94 12 *
d2145cde
AK
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
6b7c5b94
SP
16 */
17
70c71606 18#include <linux/prefetch.h>
9d9779e7 19#include <linux/module.h>
6b7c5b94 20#include "be.h"
8788fdc2 21#include "be_cmds.h"
65f71b8b 22#include <asm/div64.h>
d6b6d987 23#include <linux/aer.h>
a77dcb8c 24#include <linux/if_bridge.h>
6384a4d0 25#include <net/busy_poll.h>
c9c47142 26#include <net/vxlan.h>
6b7c5b94
SP
27
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
00d3d51e 31MODULE_AUTHOR("Emulex Corporation");
6b7c5b94
SP
32MODULE_LICENSE("GPL");
33
ba343c77 34static unsigned int num_vfs;
ba343c77 35module_param(num_vfs, uint, S_IRUGO);
ba343c77 36MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
6b7c5b94 37
11ac75ed
SP
38static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
9baa3c34 42static const struct pci_device_id be_dev_ids[] = {
c4ca2374 43 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
59fd5d87 44 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
c4ca2374
AK
45 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
fe6d2a38 47 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
12f4d0a8 48 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
ecedb6ae 49 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
76b73530 50 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
6b7c5b94
SP
51 { 0 }
52};
53MODULE_DEVICE_TABLE(pci, be_dev_ids);
7c185276 54/* UE Status Low CSR */
42c8b11e 55static const char * const ue_status_low_desc[] = {
7c185276
AK
56 "CEV",
57 "CTX",
58 "DBUF",
59 "ERX",
60 "Host",
61 "MPU",
62 "NDMA",
63 "PTC ",
64 "RDMA ",
65 "RXF ",
66 "RXIPS ",
67 "RXULP0 ",
68 "RXULP1 ",
69 "RXULP2 ",
70 "TIM ",
71 "TPOST ",
72 "TPRE ",
73 "TXIPS ",
74 "TXULP0 ",
75 "TXULP1 ",
76 "UC ",
77 "WDMA ",
78 "TXULP2 ",
79 "HOST1 ",
80 "P0_OB_LINK ",
81 "P1_OB_LINK ",
82 "HOST_GPIO ",
83 "MBOX ",
6bdf8f55
VV
84 "ERX2 ",
85 "SPARE ",
86 "JTAG ",
87 "MPU_INTPEND "
7c185276 88};
e2fb1afa 89
7c185276 90/* UE Status High CSR */
42c8b11e 91static const char * const ue_status_hi_desc[] = {
7c185276
AK
92 "LPCMEMHOST",
93 "MGMT_MAC",
94 "PCS0ONLINE",
95 "MPU_IRAM",
96 "PCS1ONLINE",
97 "PCTL0",
98 "PCTL1",
99 "PMEM",
100 "RR",
101 "TXPB",
102 "RXPP",
103 "XAUI",
104 "TXP",
105 "ARM",
106 "IPC",
107 "HOST2",
108 "HOST3",
109 "HOST4",
110 "HOST5",
111 "HOST6",
112 "HOST7",
6bdf8f55
VV
113 "ECRC",
114 "Poison TLP",
42c8b11e 115 "NETC",
6bdf8f55
VV
116 "PERIPH",
117 "LLTXULP",
118 "D2P",
119 "RCON",
120 "LDMA",
121 "LLTXP",
122 "LLTXPB",
7c185276
AK
123 "Unknown"
124};
6b7c5b94
SP
125
126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
03d28ffe 129
1cfafab9 130 if (mem->va) {
2b7bcebf
IV
131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
1cfafab9
SP
133 mem->va = NULL;
134 }
6b7c5b94
SP
135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
748b539a 138 u16 len, u16 entry_size)
6b7c5b94
SP
139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
ede23fa8
JP
146 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
6b7c5b94 148 if (!mem->va)
10ef9ab4 149 return -ENOMEM;
6b7c5b94
SP
150 return 0;
151}
152
68c45a2d 153static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
6b7c5b94 154{
db3ea781 155 u32 reg, enabled;
5f0b849e 156
db3ea781 157 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
748b539a 158 &reg);
db3ea781
SP
159 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160
5f0b849e 161 if (!enabled && enable)
6b7c5b94 162 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 163 else if (enabled && !enable)
6b7c5b94 164 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 165 else
6b7c5b94 166 return;
5f0b849e 167
db3ea781 168 pci_write_config_dword(adapter->pdev,
748b539a 169 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
6b7c5b94
SP
170}
171
68c45a2d
SK
172static void be_intr_set(struct be_adapter *adapter, bool enable)
173{
174 int status = 0;
175
176 /* On lancer interrupts can't be controlled via this register */
177 if (lancer_chip(adapter))
178 return;
179
180 if (adapter->eeh_error)
181 return;
182
183 status = be_cmd_intr_set(adapter, enable);
184 if (status)
185 be_reg_intr_set(adapter, enable);
186}
187
8788fdc2 188static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
189{
190 u32 val = 0;
03d28ffe 191
6b7c5b94
SP
192 val |= qid & DB_RQ_RING_ID_MASK;
193 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
f3eb62d2
SP
194
195 wmb();
8788fdc2 196 iowrite32(val, adapter->db + DB_RQ_OFFSET);
6b7c5b94
SP
197}
198
94d73aaa
VV
199static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
200 u16 posted)
6b7c5b94
SP
201{
202 u32 val = 0;
03d28ffe 203
94d73aaa 204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
6b7c5b94 205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
f3eb62d2
SP
206
207 wmb();
94d73aaa 208 iowrite32(val, adapter->db + txo->db_offset);
6b7c5b94
SP
209}
210
8788fdc2 211static void be_eq_notify(struct be_adapter *adapter, u16 qid,
748b539a 212 bool arm, bool clear_int, u16 num_popped)
6b7c5b94
SP
213{
214 u32 val = 0;
03d28ffe 215
6b7c5b94 216 val |= qid & DB_EQ_RING_ID_MASK;
748b539a 217 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
cf588477 218
f67ef7ba 219 if (adapter->eeh_error)
cf588477
SP
220 return;
221
6b7c5b94
SP
222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
8788fdc2 228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
6b7c5b94
SP
229}
230
8788fdc2 231void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
6b7c5b94
SP
232{
233 u32 val = 0;
03d28ffe 234
6b7c5b94 235 val |= qid & DB_CQ_RING_ID_MASK;
fe6d2a38
SP
236 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
237 DB_CQ_RING_ID_EXT_MASK_SHIFT);
cf588477 238
f67ef7ba 239 if (adapter->eeh_error)
cf588477
SP
240 return;
241
6b7c5b94
SP
242 if (arm)
243 val |= 1 << DB_CQ_REARM_SHIFT;
244 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
8788fdc2 245 iowrite32(val, adapter->db + DB_CQ_OFFSET);
6b7c5b94
SP
246}
247
6b7c5b94
SP
248static int be_mac_addr_set(struct net_device *netdev, void *p)
249{
250 struct be_adapter *adapter = netdev_priv(netdev);
5a712c13 251 struct device *dev = &adapter->pdev->dev;
6b7c5b94 252 struct sockaddr *addr = p;
5a712c13
SP
253 int status;
254 u8 mac[ETH_ALEN];
255 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
6b7c5b94 256
ca9e4988
AK
257 if (!is_valid_ether_addr(addr->sa_data))
258 return -EADDRNOTAVAIL;
259
ff32f8ab
VV
260 /* Proceed further only if, User provided MAC is different
261 * from active MAC
262 */
263 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
264 return 0;
265
5a712c13
SP
266 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
267 * privilege or if PF did not provision the new MAC address.
268 * On BE3, this cmd will always fail if the VF doesn't have the
269 * FILTMGMT privilege. This failure is OK, only if the PF programmed
270 * the MAC for the VF.
704e4c88 271 */
5a712c13
SP
272 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
273 adapter->if_handle, &adapter->pmac_id[0], 0);
274 if (!status) {
275 curr_pmac_id = adapter->pmac_id[0];
276
277 /* Delete the old programmed MAC. This call may fail if the
278 * old MAC was already deleted by the PF driver.
279 */
280 if (adapter->pmac_id[0] != old_pmac_id)
281 be_cmd_pmac_del(adapter, adapter->if_handle,
282 old_pmac_id, 0);
704e4c88
PR
283 }
284
5a712c13
SP
285 /* Decide if the new MAC is successfully activated only after
286 * querying the FW
704e4c88 287 */
b188f090
SR
288 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
289 adapter->if_handle, true, 0);
a65027e4 290 if (status)
e3a7ae2c 291 goto err;
6b7c5b94 292
5a712c13
SP
293 /* The MAC change did not happen, either due to lack of privilege
294 * or PF didn't pre-provision.
295 */
61d23e9f 296 if (!ether_addr_equal(addr->sa_data, mac)) {
5a712c13
SP
297 status = -EPERM;
298 goto err;
299 }
300
e3a7ae2c 301 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
5a712c13 302 dev_info(dev, "MAC address changed to %pM\n", mac);
e3a7ae2c
SK
303 return 0;
304err:
5a712c13 305 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
6b7c5b94
SP
306 return status;
307}
308
ca34fe38
SP
309/* BE2 supports only v0 cmd */
310static void *hw_stats_from_cmd(struct be_adapter *adapter)
311{
312 if (BE2_chip(adapter)) {
313 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
314
315 return &cmd->hw_stats;
61000861 316 } else if (BE3_chip(adapter)) {
ca34fe38
SP
317 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
318
61000861
AK
319 return &cmd->hw_stats;
320 } else {
321 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
322
ca34fe38
SP
323 return &cmd->hw_stats;
324 }
325}
326
327/* BE2 supports only v0 cmd */
328static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
329{
330 if (BE2_chip(adapter)) {
331 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332
333 return &hw_stats->erx;
61000861 334 } else if (BE3_chip(adapter)) {
ca34fe38
SP
335 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
336
61000861
AK
337 return &hw_stats->erx;
338 } else {
339 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
340
ca34fe38
SP
341 return &hw_stats->erx;
342 }
343}
344
345static void populate_be_v0_stats(struct be_adapter *adapter)
89a88ab8 346{
ac124ff9
SP
347 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
348 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
349 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
89a88ab8 350 struct be_port_rxf_stats_v0 *port_stats =
ac124ff9
SP
351 &rxf_stats->port[adapter->port_num];
352 struct be_drv_stats *drvs = &adapter->drv_stats;
89a88ab8 353
ac124ff9 354 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
89a88ab8
AK
355 drvs->rx_pause_frames = port_stats->rx_pause_frames;
356 drvs->rx_crc_errors = port_stats->rx_crc_errors;
357 drvs->rx_control_frames = port_stats->rx_control_frames;
358 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
359 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
360 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
361 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
362 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
363 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
364 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
365 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
366 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
367 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
368 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
ac124ff9 369 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
89a88ab8
AK
370 drvs->rx_dropped_header_too_small =
371 port_stats->rx_dropped_header_too_small;
18fb06a1
SR
372 drvs->rx_address_filtered =
373 port_stats->rx_address_filtered +
374 port_stats->rx_vlan_filtered;
89a88ab8
AK
375 drvs->rx_alignment_symbol_errors =
376 port_stats->rx_alignment_symbol_errors;
377
378 drvs->tx_pauseframes = port_stats->tx_pauseframes;
379 drvs->tx_controlframes = port_stats->tx_controlframes;
380
381 if (adapter->port_num)
ac124ff9 382 drvs->jabber_events = rxf_stats->port1_jabber_events;
89a88ab8 383 else
ac124ff9 384 drvs->jabber_events = rxf_stats->port0_jabber_events;
89a88ab8 385 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
89a88ab8 386 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
89a88ab8
AK
387 drvs->forwarded_packets = rxf_stats->forwarded_packets;
388 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
ac124ff9
SP
389 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
390 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
89a88ab8
AK
391 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
392}
393
ca34fe38 394static void populate_be_v1_stats(struct be_adapter *adapter)
89a88ab8 395{
ac124ff9
SP
396 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
397 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
398 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
89a88ab8 399 struct be_port_rxf_stats_v1 *port_stats =
ac124ff9
SP
400 &rxf_stats->port[adapter->port_num];
401 struct be_drv_stats *drvs = &adapter->drv_stats;
89a88ab8 402
ac124ff9 403 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
02fe7027
AK
404 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
405 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
89a88ab8
AK
406 drvs->rx_pause_frames = port_stats->rx_pause_frames;
407 drvs->rx_crc_errors = port_stats->rx_crc_errors;
408 drvs->rx_control_frames = port_stats->rx_control_frames;
409 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
410 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
411 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
412 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
413 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
414 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
415 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
416 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
417 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
418 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
419 drvs->rx_dropped_header_too_small =
420 port_stats->rx_dropped_header_too_small;
421 drvs->rx_input_fifo_overflow_drop =
422 port_stats->rx_input_fifo_overflow_drop;
18fb06a1 423 drvs->rx_address_filtered = port_stats->rx_address_filtered;
89a88ab8
AK
424 drvs->rx_alignment_symbol_errors =
425 port_stats->rx_alignment_symbol_errors;
ac124ff9 426 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
89a88ab8
AK
427 drvs->tx_pauseframes = port_stats->tx_pauseframes;
428 drvs->tx_controlframes = port_stats->tx_controlframes;
b5adffc4 429 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
89a88ab8
AK
430 drvs->jabber_events = port_stats->jabber_events;
431 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
89a88ab8 432 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
89a88ab8
AK
433 drvs->forwarded_packets = rxf_stats->forwarded_packets;
434 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
ac124ff9
SP
435 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
436 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
89a88ab8
AK
437 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
438}
439
61000861
AK
440static void populate_be_v2_stats(struct be_adapter *adapter)
441{
442 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
443 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
444 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
445 struct be_port_rxf_stats_v2 *port_stats =
446 &rxf_stats->port[adapter->port_num];
447 struct be_drv_stats *drvs = &adapter->drv_stats;
448
449 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
450 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
451 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
452 drvs->rx_pause_frames = port_stats->rx_pause_frames;
453 drvs->rx_crc_errors = port_stats->rx_crc_errors;
454 drvs->rx_control_frames = port_stats->rx_control_frames;
455 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
456 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
457 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
458 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
459 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
460 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
461 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
462 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
463 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
464 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
465 drvs->rx_dropped_header_too_small =
466 port_stats->rx_dropped_header_too_small;
467 drvs->rx_input_fifo_overflow_drop =
468 port_stats->rx_input_fifo_overflow_drop;
469 drvs->rx_address_filtered = port_stats->rx_address_filtered;
470 drvs->rx_alignment_symbol_errors =
471 port_stats->rx_alignment_symbol_errors;
472 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
473 drvs->tx_pauseframes = port_stats->tx_pauseframes;
474 drvs->tx_controlframes = port_stats->tx_controlframes;
475 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
476 drvs->jabber_events = port_stats->jabber_events;
477 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
478 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
479 drvs->forwarded_packets = rxf_stats->forwarded_packets;
480 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
481 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
482 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
483 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
748b539a 484 if (be_roce_supported(adapter)) {
461ae379
AK
485 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
486 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
487 drvs->rx_roce_frames = port_stats->roce_frames_received;
488 drvs->roce_drops_crc = port_stats->roce_drops_crc;
489 drvs->roce_drops_payload_len =
490 port_stats->roce_drops_payload_len;
491 }
61000861
AK
492}
493
005d5696
SX
494static void populate_lancer_stats(struct be_adapter *adapter)
495{
005d5696 496 struct be_drv_stats *drvs = &adapter->drv_stats;
748b539a 497 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
ac124ff9
SP
498
499 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
500 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
501 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
502 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
005d5696 503 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
ac124ff9 504 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
005d5696
SX
505 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
506 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
507 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
508 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
509 drvs->rx_dropped_tcp_length =
510 pport_stats->rx_dropped_invalid_tcp_length;
511 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
512 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
513 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
514 drvs->rx_dropped_header_too_small =
515 pport_stats->rx_dropped_header_too_small;
516 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
18fb06a1
SR
517 drvs->rx_address_filtered =
518 pport_stats->rx_address_filtered +
519 pport_stats->rx_vlan_filtered;
ac124ff9 520 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
005d5696 521 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
ac124ff9
SP
522 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
523 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
005d5696 524 drvs->jabber_events = pport_stats->rx_jabbers;
ac124ff9
SP
525 drvs->forwarded_packets = pport_stats->num_forwards_lo;
526 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
005d5696 527 drvs->rx_drops_too_many_frags =
ac124ff9 528 pport_stats->rx_drops_too_many_frags_lo;
005d5696 529}
89a88ab8 530
09c1c68f
SP
531static void accumulate_16bit_val(u32 *acc, u16 val)
532{
533#define lo(x) (x & 0xFFFF)
534#define hi(x) (x & 0xFFFF0000)
535 bool wrapped = val < lo(*acc);
536 u32 newacc = hi(*acc) + val;
537
538 if (wrapped)
539 newacc += 65536;
540 ACCESS_ONCE(*acc) = newacc;
541}
542
4188e7df 543static void populate_erx_stats(struct be_adapter *adapter,
748b539a 544 struct be_rx_obj *rxo, u32 erx_stat)
a6c578ef
AK
545{
546 if (!BEx_chip(adapter))
547 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
548 else
549 /* below erx HW counter can actually wrap around after
550 * 65535. Driver accumulates a 32-bit value
551 */
552 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
553 (u16)erx_stat);
554}
555
89a88ab8
AK
556void be_parse_stats(struct be_adapter *adapter)
557{
61000861 558 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
ac124ff9
SP
559 struct be_rx_obj *rxo;
560 int i;
a6c578ef 561 u32 erx_stat;
ac124ff9 562
ca34fe38
SP
563 if (lancer_chip(adapter)) {
564 populate_lancer_stats(adapter);
005d5696 565 } else {
ca34fe38
SP
566 if (BE2_chip(adapter))
567 populate_be_v0_stats(adapter);
61000861
AK
568 else if (BE3_chip(adapter))
569 /* for BE3 */
ca34fe38 570 populate_be_v1_stats(adapter);
61000861
AK
571 else
572 populate_be_v2_stats(adapter);
d51ebd33 573
61000861 574 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
ca34fe38 575 for_all_rx_queues(adapter, rxo, i) {
a6c578ef
AK
576 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
577 populate_erx_stats(adapter, rxo, erx_stat);
ca34fe38 578 }
09c1c68f 579 }
89a88ab8
AK
580}
581
ab1594e9 582static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
748b539a 583 struct rtnl_link_stats64 *stats)
6b7c5b94 584{
ab1594e9 585 struct be_adapter *adapter = netdev_priv(netdev);
89a88ab8 586 struct be_drv_stats *drvs = &adapter->drv_stats;
3abcdeda 587 struct be_rx_obj *rxo;
3c8def97 588 struct be_tx_obj *txo;
ab1594e9
SP
589 u64 pkts, bytes;
590 unsigned int start;
3abcdeda 591 int i;
6b7c5b94 592
3abcdeda 593 for_all_rx_queues(adapter, rxo, i) {
ab1594e9 594 const struct be_rx_stats *rx_stats = rx_stats(rxo);
03d28ffe 595
ab1594e9 596 do {
57a7744e 597 start = u64_stats_fetch_begin_irq(&rx_stats->sync);
ab1594e9
SP
598 pkts = rx_stats(rxo)->rx_pkts;
599 bytes = rx_stats(rxo)->rx_bytes;
57a7744e 600 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
ab1594e9
SP
601 stats->rx_packets += pkts;
602 stats->rx_bytes += bytes;
603 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
604 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
605 rx_stats(rxo)->rx_drops_no_frags;
3abcdeda
SP
606 }
607
3c8def97 608 for_all_tx_queues(adapter, txo, i) {
ab1594e9 609 const struct be_tx_stats *tx_stats = tx_stats(txo);
03d28ffe 610
ab1594e9 611 do {
57a7744e 612 start = u64_stats_fetch_begin_irq(&tx_stats->sync);
ab1594e9
SP
613 pkts = tx_stats(txo)->tx_pkts;
614 bytes = tx_stats(txo)->tx_bytes;
57a7744e 615 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
ab1594e9
SP
616 stats->tx_packets += pkts;
617 stats->tx_bytes += bytes;
3c8def97 618 }
6b7c5b94
SP
619
620 /* bad pkts received */
ab1594e9 621 stats->rx_errors = drvs->rx_crc_errors +
89a88ab8
AK
622 drvs->rx_alignment_symbol_errors +
623 drvs->rx_in_range_errors +
624 drvs->rx_out_range_errors +
625 drvs->rx_frame_too_long +
626 drvs->rx_dropped_too_small +
627 drvs->rx_dropped_too_short +
628 drvs->rx_dropped_header_too_small +
629 drvs->rx_dropped_tcp_length +
ab1594e9 630 drvs->rx_dropped_runt;
68110868 631
6b7c5b94 632 /* detailed rx errors */
ab1594e9 633 stats->rx_length_errors = drvs->rx_in_range_errors +
89a88ab8
AK
634 drvs->rx_out_range_errors +
635 drvs->rx_frame_too_long;
68110868 636
ab1594e9 637 stats->rx_crc_errors = drvs->rx_crc_errors;
6b7c5b94
SP
638
639 /* frame alignment errors */
ab1594e9 640 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
68110868 641
6b7c5b94
SP
642 /* receiver fifo overrun */
643 /* drops_no_pbuf is no per i/f, it's per BE card */
ab1594e9 644 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
89a88ab8
AK
645 drvs->rx_input_fifo_overflow_drop +
646 drvs->rx_drops_no_pbuf;
ab1594e9 647 return stats;
6b7c5b94
SP
648}
649
b236916a 650void be_link_status_update(struct be_adapter *adapter, u8 link_status)
6b7c5b94 651{
6b7c5b94
SP
652 struct net_device *netdev = adapter->netdev;
653
b236916a 654 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
ea172a01 655 netif_carrier_off(netdev);
b236916a 656 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
6b7c5b94 657 }
b236916a 658
bdce2ad7 659 if (link_status)
b236916a
AK
660 netif_carrier_on(netdev);
661 else
662 netif_carrier_off(netdev);
6b7c5b94
SP
663}
664
5f07b3c5 665static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
6b7c5b94 666{
3c8def97
SP
667 struct be_tx_stats *stats = tx_stats(txo);
668
ab1594e9 669 u64_stats_update_begin(&stats->sync);
ac124ff9 670 stats->tx_reqs++;
5f07b3c5
SP
671 stats->tx_bytes += skb->len;
672 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
ab1594e9 673 u64_stats_update_end(&stats->sync);
6b7c5b94
SP
674}
675
5f07b3c5
SP
676/* Returns number of WRBs needed for the skb */
677static u32 skb_wrb_cnt(struct sk_buff *skb)
6b7c5b94 678{
5f07b3c5
SP
679 /* +1 for the header wrb */
680 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
6b7c5b94
SP
681}
682
683static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
684{
685 wrb->frag_pa_hi = upper_32_bits(addr);
686 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
687 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
89b1f496 688 wrb->rsvd0 = 0;
6b7c5b94
SP
689}
690
1ded132d 691static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
748b539a 692 struct sk_buff *skb)
1ded132d
AK
693{
694 u8 vlan_prio;
695 u16 vlan_tag;
696
df8a39de 697 vlan_tag = skb_vlan_tag_get(skb);
1ded132d
AK
698 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
699 /* If vlan priority provided by OS is NOT in available bmap */
700 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
701 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
702 adapter->recommended_prio;
703
704 return vlan_tag;
705}
706
c9c47142
SP
707/* Used only for IP tunnel packets */
708static u16 skb_inner_ip_proto(struct sk_buff *skb)
709{
710 return (inner_ip_hdr(skb)->version == 4) ?
711 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
712}
713
714static u16 skb_ip_proto(struct sk_buff *skb)
715{
716 return (ip_hdr(skb)->version == 4) ?
717 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
718}
719
cc4ce020 720static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
748b539a
SP
721 struct sk_buff *skb, u32 wrb_cnt, u32 len,
722 bool skip_hw_vlan)
6b7c5b94 723{
c9c47142 724 u16 vlan_tag, proto;
cc4ce020 725
6b7c5b94
SP
726 memset(hdr, 0, sizeof(*hdr));
727
c3c18bc1 728 SET_TX_WRB_HDR_BITS(crc, hdr, 1);
6b7c5b94 729
49e4b847 730 if (skb_is_gso(skb)) {
c3c18bc1
SP
731 SET_TX_WRB_HDR_BITS(lso, hdr, 1);
732 SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
fe6d2a38 733 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
c3c18bc1 734 SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
6b7c5b94 735 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
c9c47142 736 if (skb->encapsulation) {
c3c18bc1 737 SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
c9c47142
SP
738 proto = skb_inner_ip_proto(skb);
739 } else {
740 proto = skb_ip_proto(skb);
741 }
742 if (proto == IPPROTO_TCP)
c3c18bc1 743 SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
c9c47142 744 else if (proto == IPPROTO_UDP)
c3c18bc1 745 SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
6b7c5b94
SP
746 }
747
df8a39de 748 if (skb_vlan_tag_present(skb)) {
c3c18bc1 749 SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
1ded132d 750 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
c3c18bc1 751 SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
6b7c5b94
SP
752 }
753
c3c18bc1
SP
754 SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
755 SET_TX_WRB_HDR_BITS(len, hdr, len);
5f07b3c5
SP
756
757 /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0
758 * When this hack is not needed, the evt bit is set while ringing DB
759 */
760 if (skip_hw_vlan)
761 SET_TX_WRB_HDR_BITS(event, hdr, 1);
6b7c5b94
SP
762}
763
2b7bcebf 764static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
748b539a 765 bool unmap_single)
7101e111
SP
766{
767 dma_addr_t dma;
768
769 be_dws_le_to_cpu(wrb, sizeof(*wrb));
770
771 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
b681ee77 772 if (wrb->frag_len) {
7101e111 773 if (unmap_single)
2b7bcebf
IV
774 dma_unmap_single(dev, dma, wrb->frag_len,
775 DMA_TO_DEVICE);
7101e111 776 else
2b7bcebf 777 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
7101e111
SP
778 }
779}
6b7c5b94 780
5f07b3c5
SP
781/* Returns the number of WRBs used up by the skb */
782static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
783 struct sk_buff *skb, bool skip_hw_vlan)
6b7c5b94 784{
5f07b3c5 785 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
2b7bcebf 786 struct device *dev = &adapter->pdev->dev;
5f07b3c5 787 struct be_queue_info *txq = &txo->q;
6b7c5b94 788 struct be_eth_hdr_wrb *hdr;
7101e111 789 bool map_single = false;
5f07b3c5
SP
790 struct be_eth_wrb *wrb;
791 dma_addr_t busaddr;
792 u16 head = txq->head;
6b7c5b94 793
6b7c5b94 794 hdr = queue_head_node(txq);
5f07b3c5
SP
795 wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan);
796 be_dws_cpu_to_le(hdr, sizeof(*hdr));
797
6b7c5b94
SP
798 queue_head_inc(txq);
799
ebc8d2ab 800 if (skb->len > skb->data_len) {
e743d313 801 int len = skb_headlen(skb);
03d28ffe 802
2b7bcebf
IV
803 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
804 if (dma_mapping_error(dev, busaddr))
7101e111
SP
805 goto dma_err;
806 map_single = true;
ebc8d2ab
DM
807 wrb = queue_head_node(txq);
808 wrb_fill(wrb, busaddr, len);
809 be_dws_cpu_to_le(wrb, sizeof(*wrb));
810 queue_head_inc(txq);
811 copied += len;
812 }
6b7c5b94 813
ebc8d2ab 814 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
748b539a 815 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
03d28ffe 816
b061b39e 817 busaddr = skb_frag_dma_map(dev, frag, 0,
9e903e08 818 skb_frag_size(frag), DMA_TO_DEVICE);
2b7bcebf 819 if (dma_mapping_error(dev, busaddr))
7101e111 820 goto dma_err;
ebc8d2ab 821 wrb = queue_head_node(txq);
9e903e08 822 wrb_fill(wrb, busaddr, skb_frag_size(frag));
ebc8d2ab
DM
823 be_dws_cpu_to_le(wrb, sizeof(*wrb));
824 queue_head_inc(txq);
9e903e08 825 copied += skb_frag_size(frag);
6b7c5b94
SP
826 }
827
5f07b3c5
SP
828 BUG_ON(txo->sent_skb_list[head]);
829 txo->sent_skb_list[head] = skb;
830 txo->last_req_hdr = head;
831 atomic_add(wrb_cnt, &txq->used);
832 txo->last_req_wrb_cnt = wrb_cnt;
833 txo->pend_wrb_cnt += wrb_cnt;
6b7c5b94 834
5f07b3c5
SP
835 be_tx_stats_update(txo, skb);
836 return wrb_cnt;
6b7c5b94 837
7101e111 838dma_err:
5f07b3c5
SP
839 /* Bring the queue back to the state it was in before this
840 * routine was invoked.
841 */
842 txq->head = head;
843 /* skip the first wrb (hdr); it's not mapped */
844 queue_head_inc(txq);
7101e111
SP
845 while (copied) {
846 wrb = queue_head_node(txq);
2b7bcebf 847 unmap_tx_frag(dev, wrb, map_single);
7101e111
SP
848 map_single = false;
849 copied -= wrb->frag_len;
d3de1540 850 adapter->drv_stats.dma_map_errors++;
7101e111
SP
851 queue_head_inc(txq);
852 }
5f07b3c5 853 txq->head = head;
7101e111 854 return 0;
6b7c5b94
SP
855}
856
f7062ee5
SP
857static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
858{
859 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
860}
861
93040ae5 862static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
bc0c3405
AK
863 struct sk_buff *skb,
864 bool *skip_hw_vlan)
93040ae5
SK
865{
866 u16 vlan_tag = 0;
867
868 skb = skb_share_check(skb, GFP_ATOMIC);
869 if (unlikely(!skb))
870 return skb;
871
df8a39de 872 if (skb_vlan_tag_present(skb))
93040ae5 873 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
52fe29e4
SB
874
875 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
876 if (!vlan_tag)
877 vlan_tag = adapter->pvid;
878 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
879 * skip VLAN insertion
880 */
881 if (skip_hw_vlan)
882 *skip_hw_vlan = true;
883 }
bc0c3405
AK
884
885 if (vlan_tag) {
62749e2c
JP
886 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
887 vlan_tag);
bc0c3405
AK
888 if (unlikely(!skb))
889 return skb;
bc0c3405
AK
890 skb->vlan_tci = 0;
891 }
892
893 /* Insert the outer VLAN, if any */
894 if (adapter->qnq_vid) {
895 vlan_tag = adapter->qnq_vid;
62749e2c
JP
896 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
897 vlan_tag);
bc0c3405
AK
898 if (unlikely(!skb))
899 return skb;
900 if (skip_hw_vlan)
901 *skip_hw_vlan = true;
902 }
903
93040ae5
SK
904 return skb;
905}
906
bc0c3405
AK
907static bool be_ipv6_exthdr_check(struct sk_buff *skb)
908{
909 struct ethhdr *eh = (struct ethhdr *)skb->data;
910 u16 offset = ETH_HLEN;
911
912 if (eh->h_proto == htons(ETH_P_IPV6)) {
913 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
914
915 offset += sizeof(struct ipv6hdr);
916 if (ip6h->nexthdr != NEXTHDR_TCP &&
917 ip6h->nexthdr != NEXTHDR_UDP) {
918 struct ipv6_opt_hdr *ehdr =
504fbf1e 919 (struct ipv6_opt_hdr *)(skb->data + offset);
bc0c3405
AK
920
921 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
922 if (ehdr->hdrlen == 0xff)
923 return true;
924 }
925 }
926 return false;
927}
928
929static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
930{
df8a39de 931 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
bc0c3405
AK
932}
933
748b539a 934static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
bc0c3405 935{
ee9c799c 936 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
bc0c3405
AK
937}
938
ec495fac
VV
939static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
940 struct sk_buff *skb,
941 bool *skip_hw_vlan)
6b7c5b94 942{
d2cb6ce7 943 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
ee9c799c
SP
944 unsigned int eth_hdr_len;
945 struct iphdr *ip;
93040ae5 946
1297f9db
AK
947 /* For padded packets, BE HW modifies tot_len field in IP header
948 * incorrecly when VLAN tag is inserted by HW.
3904dcc4 949 * For padded packets, Lancer computes incorrect checksum.
1ded132d 950 */
ee9c799c
SP
951 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
952 VLAN_ETH_HLEN : ETH_HLEN;
3904dcc4 953 if (skb->len <= 60 &&
df8a39de 954 (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
ee9c799c 955 is_ipv4_pkt(skb)) {
93040ae5
SK
956 ip = (struct iphdr *)ip_hdr(skb);
957 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
958 }
1ded132d 959
d2cb6ce7 960 /* If vlan tag is already inlined in the packet, skip HW VLAN
f93f160b 961 * tagging in pvid-tagging mode
d2cb6ce7 962 */
f93f160b 963 if (be_pvid_tagging_enabled(adapter) &&
d2cb6ce7 964 veh->h_vlan_proto == htons(ETH_P_8021Q))
748b539a 965 *skip_hw_vlan = true;
d2cb6ce7 966
93040ae5
SK
967 /* HW has a bug wherein it will calculate CSUM for VLAN
968 * pkts even though it is disabled.
969 * Manually insert VLAN in pkt.
970 */
971 if (skb->ip_summed != CHECKSUM_PARTIAL &&
df8a39de 972 skb_vlan_tag_present(skb)) {
ee9c799c 973 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
bc0c3405 974 if (unlikely(!skb))
c9128951 975 goto err;
bc0c3405
AK
976 }
977
978 /* HW may lockup when VLAN HW tagging is requested on
979 * certain ipv6 packets. Drop such pkts if the HW workaround to
980 * skip HW tagging is not enabled by FW.
981 */
982 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
cd3307aa
KA
983 (adapter->pvid || adapter->qnq_vid) &&
984 !qnq_async_evt_rcvd(adapter)))
bc0c3405
AK
985 goto tx_drop;
986
987 /* Manual VLAN tag insertion to prevent:
988 * ASIC lockup when the ASIC inserts VLAN tag into
989 * certain ipv6 packets. Insert VLAN tags in driver,
990 * and set event, completion, vlan bits accordingly
991 * in the Tx WRB.
992 */
993 if (be_ipv6_tx_stall_chk(adapter, skb) &&
994 be_vlan_tag_tx_chk(adapter, skb)) {
ee9c799c 995 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
1ded132d 996 if (unlikely(!skb))
c9128951 997 goto err;
1ded132d
AK
998 }
999
ee9c799c
SP
1000 return skb;
1001tx_drop:
1002 dev_kfree_skb_any(skb);
c9128951 1003err:
ee9c799c
SP
1004 return NULL;
1005}
1006
ec495fac
VV
1007static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1008 struct sk_buff *skb,
1009 bool *skip_hw_vlan)
1010{
1011 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1012 * less may cause a transmit stall on that port. So the work-around is
1013 * to pad short packets (<= 32 bytes) to a 36-byte length.
1014 */
1015 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
74b6939d 1016 if (skb_put_padto(skb, 36))
ec495fac 1017 return NULL;
ec495fac
VV
1018 }
1019
1020 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1021 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1022 if (!skb)
1023 return NULL;
1024 }
1025
1026 return skb;
1027}
1028
5f07b3c5
SP
1029static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
1030{
1031 struct be_queue_info *txq = &txo->q;
1032 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);
1033
1034 /* Mark the last request eventable if it hasn't been marked already */
1035 if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
1036 hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);
1037
1038 /* compose a dummy wrb if there are odd set of wrbs to notify */
1039 if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
1040 wrb_fill(queue_head_node(txq), 0, 0);
1041 queue_head_inc(txq);
1042 atomic_inc(&txq->used);
1043 txo->pend_wrb_cnt++;
1044 hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
1045 TX_HDR_WRB_NUM_SHIFT);
1046 hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
1047 TX_HDR_WRB_NUM_SHIFT);
1048 }
1049 be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
1050 txo->pend_wrb_cnt = 0;
1051}
1052
ee9c799c
SP
1053static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1054{
5f07b3c5 1055 bool skip_hw_vlan = false, flush = !skb->xmit_more;
ee9c799c 1056 struct be_adapter *adapter = netdev_priv(netdev);
5f07b3c5
SP
1057 u16 q_idx = skb_get_queue_mapping(skb);
1058 struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
ee9c799c 1059 struct be_queue_info *txq = &txo->q;
5f07b3c5 1060 u16 wrb_cnt;
ee9c799c
SP
1061
1062 skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
5f07b3c5
SP
1063 if (unlikely(!skb))
1064 goto drop;
6b7c5b94 1065
5f07b3c5
SP
1066 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan);
1067 if (unlikely(!wrb_cnt)) {
1068 dev_kfree_skb_any(skb);
1069 goto drop;
1070 }
cd8f76c0 1071
5f07b3c5
SP
1072 if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
1073 netif_stop_subqueue(netdev, q_idx);
1074 tx_stats(txo)->tx_stops++;
1075 }
c190e3c8 1076
5f07b3c5
SP
1077 if (flush || __netif_subqueue_stopped(netdev, q_idx))
1078 be_xmit_flush(adapter, txo);
6b7c5b94 1079
5f07b3c5
SP
1080 return NETDEV_TX_OK;
1081drop:
1082 tx_stats(txo)->tx_drv_drops++;
1083 /* Flush the already enqueued tx requests */
1084 if (flush && txo->pend_wrb_cnt)
1085 be_xmit_flush(adapter, txo);
6b7c5b94 1086
6b7c5b94
SP
1087 return NETDEV_TX_OK;
1088}
1089
1090static int be_change_mtu(struct net_device *netdev, int new_mtu)
1091{
1092 struct be_adapter *adapter = netdev_priv(netdev);
0d3f5cce
KA
1093 struct device *dev = &adapter->pdev->dev;
1094
1095 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1096 dev_info(dev, "MTU must be between %d and %d bytes\n",
1097 BE_MIN_MTU, BE_MAX_MTU);
6b7c5b94
SP
1098 return -EINVAL;
1099 }
0d3f5cce
KA
1100
1101 dev_info(dev, "MTU changed from %d to %d bytes\n",
748b539a 1102 netdev->mtu, new_mtu);
6b7c5b94
SP
1103 netdev->mtu = new_mtu;
1104 return 0;
1105}
1106
f66b7cfd
SP
1107static inline bool be_in_all_promisc(struct be_adapter *adapter)
1108{
1109 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1110 BE_IF_FLAGS_ALL_PROMISCUOUS;
1111}
1112
1113static int be_set_vlan_promisc(struct be_adapter *adapter)
1114{
1115 struct device *dev = &adapter->pdev->dev;
1116 int status;
1117
1118 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1119 return 0;
1120
1121 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1122 if (!status) {
1123 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1124 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1125 } else {
1126 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1127 }
1128 return status;
1129}
1130
1131static int be_clear_vlan_promisc(struct be_adapter *adapter)
1132{
1133 struct device *dev = &adapter->pdev->dev;
1134 int status;
1135
1136 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1137 if (!status) {
1138 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1139 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1140 }
1141 return status;
1142}
1143
6b7c5b94 1144/*
82903e4b
AK
1145 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1146 * If the user configures more, place BE in vlan promiscuous mode.
6b7c5b94 1147 */
10329df8 1148static int be_vid_config(struct be_adapter *adapter)
6b7c5b94 1149{
50762667 1150 struct device *dev = &adapter->pdev->dev;
10329df8 1151 u16 vids[BE_NUM_VLANS_SUPPORTED];
f6cbd364 1152 u16 num = 0, i = 0;
82903e4b 1153 int status = 0;
1da87b7f 1154
c0e64ef4 1155 /* No need to further configure vids if in promiscuous mode */
f66b7cfd 1156 if (be_in_all_promisc(adapter))
c0e64ef4
SP
1157 return 0;
1158
92bf14ab 1159 if (adapter->vlans_added > be_max_vlans(adapter))
f66b7cfd 1160 return be_set_vlan_promisc(adapter);
0fc16ebf
PR
1161
1162 /* Construct VLAN Table to give to HW */
f6cbd364
RN
1163 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1164 vids[num++] = cpu_to_le16(i);
0fc16ebf 1165
4d567d97 1166 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
0fc16ebf 1167 if (status) {
f66b7cfd 1168 dev_err(dev, "Setting HW VLAN filtering failed\n");
d9d604f8 1169 /* Set to VLAN promisc mode as setting VLAN filter failed */
4c60005f
KA
1170 if (addl_status(status) ==
1171 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
f66b7cfd
SP
1172 return be_set_vlan_promisc(adapter);
1173 } else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
1174 status = be_clear_vlan_promisc(adapter);
6b7c5b94 1175 }
0fc16ebf 1176 return status;
6b7c5b94
SP
1177}
1178
80d5c368 1179static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
6b7c5b94
SP
1180{
1181 struct be_adapter *adapter = netdev_priv(netdev);
80817cbf 1182 int status = 0;
6b7c5b94 1183
a85e9986
PR
1184 /* Packets with VID 0 are always received by Lancer by default */
1185 if (lancer_chip(adapter) && vid == 0)
48291c22
VV
1186 return status;
1187
f6cbd364 1188 if (test_bit(vid, adapter->vids))
48291c22 1189 return status;
a85e9986 1190
f6cbd364 1191 set_bit(vid, adapter->vids);
a6b74e01 1192 adapter->vlans_added++;
8e586137 1193
a6b74e01
SK
1194 status = be_vid_config(adapter);
1195 if (status) {
1196 adapter->vlans_added--;
f6cbd364 1197 clear_bit(vid, adapter->vids);
a6b74e01 1198 }
48291c22 1199
80817cbf 1200 return status;
6b7c5b94
SP
1201}
1202
80d5c368 1203static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
6b7c5b94
SP
1204{
1205 struct be_adapter *adapter = netdev_priv(netdev);
1206
a85e9986
PR
1207 /* Packets with VID 0 are always received by Lancer by default */
1208 if (lancer_chip(adapter) && vid == 0)
9d4dfe4a 1209 return 0;
a85e9986 1210
f6cbd364 1211 clear_bit(vid, adapter->vids);
9d4dfe4a
KA
1212 adapter->vlans_added--;
1213
1214 return be_vid_config(adapter);
6b7c5b94
SP
1215}
1216
f66b7cfd 1217static void be_clear_all_promisc(struct be_adapter *adapter)
7ad09458 1218{
ac34b743 1219 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
f66b7cfd 1220 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
7ad09458
S
1221}
1222
f66b7cfd
SP
1223static void be_set_all_promisc(struct be_adapter *adapter)
1224{
1225 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1226 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1227}
1228
1229static void be_set_mc_promisc(struct be_adapter *adapter)
6b7c5b94 1230{
0fc16ebf 1231 int status;
6b7c5b94 1232
f66b7cfd
SP
1233 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1234 return;
6b7c5b94 1235
f66b7cfd
SP
1236 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1237 if (!status)
1238 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1239}
1240
1241static void be_set_mc_list(struct be_adapter *adapter)
1242{
1243 int status;
1244
1245 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1246 if (!status)
1247 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1248 else
1249 be_set_mc_promisc(adapter);
1250}
1251
1252static void be_set_uc_list(struct be_adapter *adapter)
1253{
1254 struct netdev_hw_addr *ha;
1255 int i = 1; /* First slot is claimed by the Primary MAC */
1256
1257 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
1258 be_cmd_pmac_del(adapter, adapter->if_handle,
1259 adapter->pmac_id[i], 0);
1260
1261 if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
1262 be_set_all_promisc(adapter);
1263 return;
6b7c5b94
SP
1264 }
1265
f66b7cfd
SP
1266 netdev_for_each_uc_addr(ha, adapter->netdev) {
1267 adapter->uc_macs++; /* First slot is for Primary MAC */
1268 be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
1269 &adapter->pmac_id[adapter->uc_macs], 0);
1270 }
1271}
6b7c5b94 1272
f66b7cfd
SP
1273static void be_clear_uc_list(struct be_adapter *adapter)
1274{
1275 int i;
fbc13f01 1276
f66b7cfd
SP
1277 for (i = 1; i < (adapter->uc_macs + 1); i++)
1278 be_cmd_pmac_del(adapter, adapter->if_handle,
1279 adapter->pmac_id[i], 0);
1280 adapter->uc_macs = 0;
1281}
fbc13f01 1282
f66b7cfd
SP
1283static void be_set_rx_mode(struct net_device *netdev)
1284{
1285 struct be_adapter *adapter = netdev_priv(netdev);
fbc13f01 1286
f66b7cfd
SP
1287 if (netdev->flags & IFF_PROMISC) {
1288 be_set_all_promisc(adapter);
1289 return;
fbc13f01
AK
1290 }
1291
f66b7cfd
SP
1292 /* Interface was previously in promiscuous mode; disable it */
1293 if (be_in_all_promisc(adapter)) {
1294 be_clear_all_promisc(adapter);
1295 if (adapter->vlans_added)
1296 be_vid_config(adapter);
0fc16ebf 1297 }
a0794885 1298
f66b7cfd
SP
1299 /* Enable multicast promisc if num configured exceeds what we support */
1300 if (netdev->flags & IFF_ALLMULTI ||
1301 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1302 be_set_mc_promisc(adapter);
a0794885 1303 return;
f66b7cfd 1304 }
a0794885 1305
f66b7cfd
SP
1306 if (netdev_uc_count(netdev) != adapter->uc_macs)
1307 be_set_uc_list(adapter);
1308
1309 be_set_mc_list(adapter);
6b7c5b94
SP
1310}
1311
ba343c77
SB
1312static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1313{
1314 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1315 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
ba343c77
SB
1316 int status;
1317
11ac75ed 1318 if (!sriov_enabled(adapter))
ba343c77
SB
1319 return -EPERM;
1320
11ac75ed 1321 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
ba343c77
SB
1322 return -EINVAL;
1323
3c31aaf3
VV
1324 /* Proceed further only if user provided MAC is different
1325 * from active MAC
1326 */
1327 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1328 return 0;
1329
3175d8c2
SP
1330 if (BEx_chip(adapter)) {
1331 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1332 vf + 1);
ba343c77 1333
11ac75ed
SP
1334 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1335 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
1336 } else {
1337 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1338 vf + 1);
590c391d
PR
1339 }
1340
abccf23e
KA
1341 if (status) {
1342 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1343 mac, vf, status);
1344 return be_cmd_status(status);
1345 }
64600ea5 1346
abccf23e
KA
1347 ether_addr_copy(vf_cfg->mac_addr, mac);
1348
1349 return 0;
ba343c77
SB
1350}
1351
64600ea5 1352static int be_get_vf_config(struct net_device *netdev, int vf,
748b539a 1353 struct ifla_vf_info *vi)
64600ea5
AK
1354{
1355 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1356 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1357
11ac75ed 1358 if (!sriov_enabled(adapter))
64600ea5
AK
1359 return -EPERM;
1360
11ac75ed 1361 if (vf >= adapter->num_vfs)
64600ea5
AK
1362 return -EINVAL;
1363
1364 vi->vf = vf;
ed616689
SC
1365 vi->max_tx_rate = vf_cfg->tx_rate;
1366 vi->min_tx_rate = 0;
a60b3a13
AK
1367 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1368 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
11ac75ed 1369 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
bdce2ad7 1370 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
64600ea5
AK
1371
1372 return 0;
1373}
1374
748b539a 1375static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1da87b7f
AK
1376{
1377 struct be_adapter *adapter = netdev_priv(netdev);
b9fc0e53 1378 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1da87b7f
AK
1379 int status = 0;
1380
11ac75ed 1381 if (!sriov_enabled(adapter))
1da87b7f
AK
1382 return -EPERM;
1383
b9fc0e53 1384 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1da87b7f
AK
1385 return -EINVAL;
1386
b9fc0e53
AK
1387 if (vlan || qos) {
1388 vlan |= qos << VLAN_PRIO_SHIFT;
c502224e 1389 if (vf_cfg->vlan_tag != vlan)
b9fc0e53
AK
1390 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1391 vf_cfg->if_handle, 0);
1da87b7f 1392 } else {
f1f3ee1b 1393 /* Reset Transparent Vlan Tagging. */
c502224e
SK
1394 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1395 vf + 1, vf_cfg->if_handle, 0);
1da87b7f
AK
1396 }
1397
abccf23e
KA
1398 if (status) {
1399 dev_err(&adapter->pdev->dev,
1400 "VLAN %d config on VF %d failed : %#x\n", vlan,
1401 vf, status);
1402 return be_cmd_status(status);
1403 }
1404
1405 vf_cfg->vlan_tag = vlan;
1406
1407 return 0;
1da87b7f
AK
1408}
1409
ed616689
SC
1410static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1411 int min_tx_rate, int max_tx_rate)
e1d18735
AK
1412{
1413 struct be_adapter *adapter = netdev_priv(netdev);
0f77ba73
RN
1414 struct device *dev = &adapter->pdev->dev;
1415 int percent_rate, status = 0;
1416 u16 link_speed = 0;
1417 u8 link_status;
e1d18735 1418
11ac75ed 1419 if (!sriov_enabled(adapter))
e1d18735
AK
1420 return -EPERM;
1421
94f434c2 1422 if (vf >= adapter->num_vfs)
e1d18735
AK
1423 return -EINVAL;
1424
ed616689
SC
1425 if (min_tx_rate)
1426 return -EINVAL;
1427
0f77ba73
RN
1428 if (!max_tx_rate)
1429 goto config_qos;
1430
1431 status = be_cmd_link_status_query(adapter, &link_speed,
1432 &link_status, 0);
1433 if (status)
1434 goto err;
1435
1436 if (!link_status) {
1437 dev_err(dev, "TX-rate setting not allowed when link is down\n");
940a3fcd 1438 status = -ENETDOWN;
0f77ba73
RN
1439 goto err;
1440 }
1441
1442 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1443 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1444 link_speed);
1445 status = -EINVAL;
1446 goto err;
1447 }
1448
1449 /* On Skyhawk the QOS setting must be done only as a % value */
1450 percent_rate = link_speed / 100;
1451 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1452 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1453 percent_rate);
1454 status = -EINVAL;
1455 goto err;
94f434c2 1456 }
e1d18735 1457
0f77ba73
RN
1458config_qos:
1459 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
e1d18735 1460 if (status)
0f77ba73
RN
1461 goto err;
1462
1463 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1464 return 0;
1465
1466err:
1467 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
1468 max_tx_rate, vf);
abccf23e 1469 return be_cmd_status(status);
e1d18735 1470}
e2fb1afa 1471
bdce2ad7
SR
1472static int be_set_vf_link_state(struct net_device *netdev, int vf,
1473 int link_state)
1474{
1475 struct be_adapter *adapter = netdev_priv(netdev);
1476 int status;
1477
1478 if (!sriov_enabled(adapter))
1479 return -EPERM;
1480
1481 if (vf >= adapter->num_vfs)
1482 return -EINVAL;
1483
1484 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
abccf23e
KA
1485 if (status) {
1486 dev_err(&adapter->pdev->dev,
1487 "Link state change on VF %d failed: %#x\n", vf, status);
1488 return be_cmd_status(status);
1489 }
bdce2ad7 1490
abccf23e
KA
1491 adapter->vf_cfg[vf].plink_tracking = link_state;
1492
1493 return 0;
bdce2ad7 1494}
e1d18735 1495
2632bafd
SP
1496static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1497 ulong now)
6b7c5b94 1498{
2632bafd
SP
1499 aic->rx_pkts_prev = rx_pkts;
1500 aic->tx_reqs_prev = tx_pkts;
1501 aic->jiffies = now;
1502}
ac124ff9 1503
2632bafd
SP
1504static void be_eqd_update(struct be_adapter *adapter)
1505{
1506 struct be_set_eqd set_eqd[MAX_EVT_QS];
1507 int eqd, i, num = 0, start;
1508 struct be_aic_obj *aic;
1509 struct be_eq_obj *eqo;
1510 struct be_rx_obj *rxo;
1511 struct be_tx_obj *txo;
1512 u64 rx_pkts, tx_pkts;
1513 ulong now;
1514 u32 pps, delta;
10ef9ab4 1515
2632bafd
SP
1516 for_all_evt_queues(adapter, eqo, i) {
1517 aic = &adapter->aic_obj[eqo->idx];
1518 if (!aic->enable) {
1519 if (aic->jiffies)
1520 aic->jiffies = 0;
1521 eqd = aic->et_eqd;
1522 goto modify_eqd;
1523 }
6b7c5b94 1524
2632bafd
SP
1525 rxo = &adapter->rx_obj[eqo->idx];
1526 do {
57a7744e 1527 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
2632bafd 1528 rx_pkts = rxo->stats.rx_pkts;
57a7744e 1529 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
10ef9ab4 1530
2632bafd
SP
1531 txo = &adapter->tx_obj[eqo->idx];
1532 do {
57a7744e 1533 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
2632bafd 1534 tx_pkts = txo->stats.tx_reqs;
57a7744e 1535 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
6b7c5b94 1536
2632bafd
SP
1537 /* Skip, if wrapped around or first calculation */
1538 now = jiffies;
1539 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1540 rx_pkts < aic->rx_pkts_prev ||
1541 tx_pkts < aic->tx_reqs_prev) {
1542 be_aic_update(aic, rx_pkts, tx_pkts, now);
1543 continue;
1544 }
1545
1546 delta = jiffies_to_msecs(now - aic->jiffies);
1547 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1548 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1549 eqd = (pps / 15000) << 2;
10ef9ab4 1550
2632bafd
SP
1551 if (eqd < 8)
1552 eqd = 0;
1553 eqd = min_t(u32, eqd, aic->max_eqd);
1554 eqd = max_t(u32, eqd, aic->min_eqd);
1555
1556 be_aic_update(aic, rx_pkts, tx_pkts, now);
10ef9ab4 1557modify_eqd:
2632bafd
SP
1558 if (eqd != aic->prev_eqd) {
1559 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1560 set_eqd[num].eq_id = eqo->q.id;
1561 aic->prev_eqd = eqd;
1562 num++;
1563 }
ac124ff9 1564 }
2632bafd
SP
1565
1566 if (num)
1567 be_cmd_modify_eqd(adapter, set_eqd, num);
6b7c5b94
SP
1568}
1569
3abcdeda 1570static void be_rx_stats_update(struct be_rx_obj *rxo,
748b539a 1571 struct be_rx_compl_info *rxcp)
4097f663 1572{
ac124ff9 1573 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1574
ab1594e9 1575 u64_stats_update_begin(&stats->sync);
3abcdeda 1576 stats->rx_compl++;
2e588f84 1577 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1578 stats->rx_pkts++;
2e588f84 1579 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1580 stats->rx_mcast_pkts++;
2e588f84 1581 if (rxcp->err)
ac124ff9 1582 stats->rx_compl_err++;
ab1594e9 1583 u64_stats_update_end(&stats->sync);
4097f663
SP
1584}
1585
2e588f84 1586static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1587{
19fad86f 1588 /* L4 checksum is not reliable for non TCP/UDP packets.
c9c47142
SP
1589 * Also ignore ipcksm for ipv6 pkts
1590 */
2e588f84 1591 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 1592 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
728a9972
AK
1593}
1594
0b0ef1d0 1595static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
6b7c5b94 1596{
10ef9ab4 1597 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1598 struct be_rx_page_info *rx_page_info;
3abcdeda 1599 struct be_queue_info *rxq = &rxo->q;
0b0ef1d0 1600 u16 frag_idx = rxq->tail;
6b7c5b94 1601
3abcdeda 1602 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
1603 BUG_ON(!rx_page_info->page);
1604
e50287be 1605 if (rx_page_info->last_frag) {
2b7bcebf
IV
1606 dma_unmap_page(&adapter->pdev->dev,
1607 dma_unmap_addr(rx_page_info, bus),
1608 adapter->big_page_size, DMA_FROM_DEVICE);
e50287be
SP
1609 rx_page_info->last_frag = false;
1610 } else {
1611 dma_sync_single_for_cpu(&adapter->pdev->dev,
1612 dma_unmap_addr(rx_page_info, bus),
1613 rx_frag_size, DMA_FROM_DEVICE);
205859a2 1614 }
6b7c5b94 1615
0b0ef1d0 1616 queue_tail_inc(rxq);
6b7c5b94
SP
1617 atomic_dec(&rxq->used);
1618 return rx_page_info;
1619}
1620
1621/* Throwaway the data in the Rx completion */
10ef9ab4
SP
1622static void be_rx_compl_discard(struct be_rx_obj *rxo,
1623 struct be_rx_compl_info *rxcp)
6b7c5b94 1624{
6b7c5b94 1625 struct be_rx_page_info *page_info;
2e588f84 1626 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1627
e80d9da6 1628 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 1629 page_info = get_rx_page_info(rxo);
e80d9da6
PR
1630 put_page(page_info->page);
1631 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
1632 }
1633}
1634
1635/*
1636 * skb_fill_rx_data forms a complete skb for an ether frame
1637 * indicated by rxcp.
1638 */
10ef9ab4
SP
1639static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1640 struct be_rx_compl_info *rxcp)
6b7c5b94 1641{
6b7c5b94 1642 struct be_rx_page_info *page_info;
2e588f84
SP
1643 u16 i, j;
1644 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1645 u8 *start;
6b7c5b94 1646
0b0ef1d0 1647 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1648 start = page_address(page_info->page) + page_info->page_offset;
1649 prefetch(start);
1650
1651 /* Copy data in the first descriptor of this completion */
2e588f84 1652 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 1653
6b7c5b94
SP
1654 skb->len = curr_frag_len;
1655 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 1656 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
1657 /* Complete packet has now been moved to data */
1658 put_page(page_info->page);
1659 skb->data_len = 0;
1660 skb->tail += curr_frag_len;
1661 } else {
ac1ae5f3
ED
1662 hdr_len = ETH_HLEN;
1663 memcpy(skb->data, start, hdr_len);
6b7c5b94 1664 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1665 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1666 skb_shinfo(skb)->frags[0].page_offset =
1667 page_info->page_offset + hdr_len;
748b539a
SP
1668 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1669 curr_frag_len - hdr_len);
6b7c5b94 1670 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1671 skb->truesize += rx_frag_size;
6b7c5b94
SP
1672 skb->tail += hdr_len;
1673 }
205859a2 1674 page_info->page = NULL;
6b7c5b94 1675
2e588f84
SP
1676 if (rxcp->pkt_size <= rx_frag_size) {
1677 BUG_ON(rxcp->num_rcvd != 1);
1678 return;
6b7c5b94
SP
1679 }
1680
1681 /* More frags present for this completion */
2e588f84
SP
1682 remaining = rxcp->pkt_size - curr_frag_len;
1683 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1684 page_info = get_rx_page_info(rxo);
2e588f84 1685 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1686
bd46cb6c
AK
1687 /* Coalesce all frags from the same physical page in one slot */
1688 if (page_info->page_offset == 0) {
1689 /* Fresh page */
1690 j++;
b061b39e 1691 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1692 skb_shinfo(skb)->frags[j].page_offset =
1693 page_info->page_offset;
9e903e08 1694 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1695 skb_shinfo(skb)->nr_frags++;
1696 } else {
1697 put_page(page_info->page);
1698 }
1699
9e903e08 1700 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1701 skb->len += curr_frag_len;
1702 skb->data_len += curr_frag_len;
bdb28a97 1703 skb->truesize += rx_frag_size;
2e588f84 1704 remaining -= curr_frag_len;
205859a2 1705 page_info->page = NULL;
6b7c5b94 1706 }
bd46cb6c 1707 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1708}
1709
5be93b9a 1710/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 1711static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 1712 struct be_rx_compl_info *rxcp)
6b7c5b94 1713{
10ef9ab4 1714 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1715 struct net_device *netdev = adapter->netdev;
6b7c5b94 1716 struct sk_buff *skb;
89420424 1717
bb349bb4 1718 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1719 if (unlikely(!skb)) {
ac124ff9 1720 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1721 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1722 return;
1723 }
1724
10ef9ab4 1725 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1726
6332c8d3 1727 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1728 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1729 else
1730 skb_checksum_none_assert(skb);
6b7c5b94 1731
6332c8d3 1732 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1733 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1734 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 1735 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1736
b6c0e89d 1737 skb->csum_level = rxcp->tunneled;
6384a4d0 1738 skb_mark_napi_id(skb, napi);
6b7c5b94 1739
343e43c0 1740 if (rxcp->vlanf)
86a9bad3 1741 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
1742
1743 netif_receive_skb(skb);
6b7c5b94
SP
1744}
1745
5be93b9a 1746/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1747static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1748 struct napi_struct *napi,
1749 struct be_rx_compl_info *rxcp)
6b7c5b94 1750{
10ef9ab4 1751 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1752 struct be_rx_page_info *page_info;
5be93b9a 1753 struct sk_buff *skb = NULL;
2e588f84
SP
1754 u16 remaining, curr_frag_len;
1755 u16 i, j;
3968fa1e 1756
10ef9ab4 1757 skb = napi_get_frags(napi);
5be93b9a 1758 if (!skb) {
10ef9ab4 1759 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1760 return;
1761 }
1762
2e588f84
SP
1763 remaining = rxcp->pkt_size;
1764 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1765 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1766
1767 curr_frag_len = min(remaining, rx_frag_size);
1768
bd46cb6c
AK
1769 /* Coalesce all frags from the same physical page in one slot */
1770 if (i == 0 || page_info->page_offset == 0) {
1771 /* First frag or Fresh page */
1772 j++;
b061b39e 1773 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1774 skb_shinfo(skb)->frags[j].page_offset =
1775 page_info->page_offset;
9e903e08 1776 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1777 } else {
1778 put_page(page_info->page);
1779 }
9e903e08 1780 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1781 skb->truesize += rx_frag_size;
bd46cb6c 1782 remaining -= curr_frag_len;
6b7c5b94
SP
1783 memset(page_info, 0, sizeof(*page_info));
1784 }
bd46cb6c 1785 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1786
5be93b9a 1787 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1788 skb->len = rxcp->pkt_size;
1789 skb->data_len = rxcp->pkt_size;
5be93b9a 1790 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1791 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 1792 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 1793 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1794
b6c0e89d 1795 skb->csum_level = rxcp->tunneled;
6384a4d0 1796 skb_mark_napi_id(skb, napi);
5be93b9a 1797
343e43c0 1798 if (rxcp->vlanf)
86a9bad3 1799 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1800
10ef9ab4 1801 napi_gro_frags(napi);
2e588f84
SP
1802}
1803
10ef9ab4
SP
1804static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1805 struct be_rx_compl_info *rxcp)
2e588f84 1806{
c3c18bc1
SP
1807 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
1808 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
1809 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
1810 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
1811 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
1812 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
1813 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
1814 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
1815 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
1816 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
1817 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 1818 if (rxcp->vlanf) {
c3c18bc1
SP
1819 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
1820 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 1821 }
c3c18bc1 1822 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 1823 rxcp->tunneled =
c3c18bc1 1824 GET_RX_COMPL_V1_BITS(tunneled, compl);
2e588f84
SP
1825}
1826
10ef9ab4
SP
1827static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1828 struct be_rx_compl_info *rxcp)
2e588f84 1829{
c3c18bc1
SP
1830 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
1831 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
1832 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
1833 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
1834 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
1835 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
1836 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
1837 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
1838 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
1839 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
1840 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 1841 if (rxcp->vlanf) {
c3c18bc1
SP
1842 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
1843 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 1844 }
c3c18bc1
SP
1845 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
1846 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2e588f84
SP
1847}
1848
1849static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1850{
1851 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1852 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1853 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1854
2e588f84
SP
1855 /* For checking the valid bit it is Ok to use either definition as the
1856 * valid bit is at the same position in both v0 and v1 Rx compl */
1857 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1858 return NULL;
6b7c5b94 1859
2e588f84
SP
1860 rmb();
1861 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1862
2e588f84 1863 if (adapter->be3_native)
10ef9ab4 1864 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1865 else
10ef9ab4 1866 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1867
e38b1706
SK
1868 if (rxcp->ip_frag)
1869 rxcp->l4_csum = 0;
1870
15d72184 1871 if (rxcp->vlanf) {
f93f160b
VV
1872 /* In QNQ modes, if qnq bit is not set, then the packet was
1873 * tagged only with the transparent outer vlan-tag and must
1874 * not be treated as a vlan packet by host
1875 */
1876 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 1877 rxcp->vlanf = 0;
6b7c5b94 1878
15d72184 1879 if (!lancer_chip(adapter))
3c709f8f 1880 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1881
939cf306 1882 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 1883 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
1884 rxcp->vlanf = 0;
1885 }
2e588f84
SP
1886
1887 /* As the compl has been parsed, reset it; we wont touch it again */
1888 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1889
3abcdeda 1890 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1891 return rxcp;
1892}
1893
1829b086 1894static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1895{
6b7c5b94 1896 u32 order = get_order(size);
1829b086 1897
6b7c5b94 1898 if (order > 0)
1829b086
ED
1899 gfp |= __GFP_COMP;
1900 return alloc_pages(gfp, order);
6b7c5b94
SP
1901}
1902
1903/*
1904 * Allocate a page, split it to fragments of size rx_frag_size and post as
1905 * receive buffers to BE
1906 */
c30d7266 1907static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
6b7c5b94 1908{
3abcdeda 1909 struct be_adapter *adapter = rxo->adapter;
26d92f92 1910 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1911 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1912 struct page *pagep = NULL;
ba42fad0 1913 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
1914 struct be_eth_rx_d *rxd;
1915 u64 page_dmaaddr = 0, frag_dmaaddr;
c30d7266 1916 u32 posted, page_offset = 0, notify = 0;
6b7c5b94 1917
3abcdeda 1918 page_info = &rxo->page_info_tbl[rxq->head];
c30d7266 1919 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
6b7c5b94 1920 if (!pagep) {
1829b086 1921 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1922 if (unlikely(!pagep)) {
ac124ff9 1923 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1924 break;
1925 }
ba42fad0
IV
1926 page_dmaaddr = dma_map_page(dev, pagep, 0,
1927 adapter->big_page_size,
2b7bcebf 1928 DMA_FROM_DEVICE);
ba42fad0
IV
1929 if (dma_mapping_error(dev, page_dmaaddr)) {
1930 put_page(pagep);
1931 pagep = NULL;
d3de1540 1932 adapter->drv_stats.dma_map_errors++;
ba42fad0
IV
1933 break;
1934 }
e50287be 1935 page_offset = 0;
6b7c5b94
SP
1936 } else {
1937 get_page(pagep);
e50287be 1938 page_offset += rx_frag_size;
6b7c5b94 1939 }
e50287be 1940 page_info->page_offset = page_offset;
6b7c5b94 1941 page_info->page = pagep;
6b7c5b94
SP
1942
1943 rxd = queue_head_node(rxq);
e50287be 1944 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
1945 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1946 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1947
1948 /* Any space left in the current big page for another frag? */
1949 if ((page_offset + rx_frag_size + rx_frag_size) >
1950 adapter->big_page_size) {
1951 pagep = NULL;
e50287be
SP
1952 page_info->last_frag = true;
1953 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1954 } else {
1955 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 1956 }
26d92f92
SP
1957
1958 prev_page_info = page_info;
1959 queue_head_inc(rxq);
10ef9ab4 1960 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 1961 }
e50287be
SP
1962
1963 /* Mark the last frag of a page when we break out of the above loop
1964 * with no more slots available in the RXQ
1965 */
1966 if (pagep) {
1967 prev_page_info->last_frag = true;
1968 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
1969 }
6b7c5b94
SP
1970
1971 if (posted) {
6b7c5b94 1972 atomic_add(posted, &rxq->used);
6384a4d0
SP
1973 if (rxo->rx_post_starved)
1974 rxo->rx_post_starved = false;
c30d7266
AK
1975 do {
1976 notify = min(256u, posted);
1977 be_rxq_notify(adapter, rxq->id, notify);
1978 posted -= notify;
1979 } while (posted);
ea1dae11
SP
1980 } else if (atomic_read(&rxq->used) == 0) {
1981 /* Let be_worker replenish when memory is available */
3abcdeda 1982 rxo->rx_post_starved = true;
6b7c5b94 1983 }
6b7c5b94
SP
1984}
1985
5fb379ee 1986static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1987{
6b7c5b94
SP
1988 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1989
1990 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1991 return NULL;
1992
f3eb62d2 1993 rmb();
6b7c5b94
SP
1994 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1995
1996 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1997
1998 queue_tail_inc(tx_cq);
1999 return txcp;
2000}
2001
3c8def97 2002static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 2003 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 2004{
5f07b3c5 2005 struct sk_buff **sent_skbs = txo->sent_skb_list;
3c8def97 2006 struct be_queue_info *txq = &txo->q;
5f07b3c5
SP
2007 u16 frag_index, num_wrbs = 0;
2008 struct sk_buff *skb = NULL;
2009 bool unmap_skb_hdr = false;
a73b796e 2010 struct be_eth_wrb *wrb;
6b7c5b94 2011
ec43b1a6 2012 do {
5f07b3c5
SP
2013 if (sent_skbs[txq->tail]) {
2014 /* Free skb from prev req */
2015 if (skb)
2016 dev_consume_skb_any(skb);
2017 skb = sent_skbs[txq->tail];
2018 sent_skbs[txq->tail] = NULL;
2019 queue_tail_inc(txq); /* skip hdr wrb */
2020 num_wrbs++;
2021 unmap_skb_hdr = true;
2022 }
a73b796e 2023 wrb = queue_tail_node(txq);
5f07b3c5 2024 frag_index = txq->tail;
2b7bcebf 2025 unmap_tx_frag(&adapter->pdev->dev, wrb,
5f07b3c5 2026 (unmap_skb_hdr && skb_headlen(skb)));
ec43b1a6 2027 unmap_skb_hdr = false;
6b7c5b94 2028 queue_tail_inc(txq);
5f07b3c5
SP
2029 num_wrbs++;
2030 } while (frag_index != last_index);
2031 dev_consume_skb_any(skb);
6b7c5b94 2032
4d586b82 2033 return num_wrbs;
6b7c5b94
SP
2034}
2035
10ef9ab4
SP
2036/* Return the number of events in the event queue */
2037static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 2038{
10ef9ab4
SP
2039 struct be_eq_entry *eqe;
2040 int num = 0;
859b1e4e 2041
10ef9ab4
SP
2042 do {
2043 eqe = queue_tail_node(&eqo->q);
2044 if (eqe->evt == 0)
2045 break;
859b1e4e 2046
10ef9ab4
SP
2047 rmb();
2048 eqe->evt = 0;
2049 num++;
2050 queue_tail_inc(&eqo->q);
2051 } while (true);
2052
2053 return num;
859b1e4e
SP
2054}
2055
10ef9ab4
SP
2056/* Leaves the EQ is disarmed state */
2057static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2058{
10ef9ab4 2059 int num = events_get(eqo);
859b1e4e 2060
10ef9ab4 2061 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
2062}
2063
10ef9ab4 2064static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
2065{
2066 struct be_rx_page_info *page_info;
3abcdeda
SP
2067 struct be_queue_info *rxq = &rxo->q;
2068 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2069 struct be_rx_compl_info *rxcp;
d23e946c
SP
2070 struct be_adapter *adapter = rxo->adapter;
2071 int flush_wait = 0;
6b7c5b94 2072
d23e946c
SP
2073 /* Consume pending rx completions.
2074 * Wait for the flush completion (identified by zero num_rcvd)
2075 * to arrive. Notify CQ even when there are no more CQ entries
2076 * for HW to flush partially coalesced CQ entries.
2077 * In Lancer, there is no need to wait for flush compl.
2078 */
2079 for (;;) {
2080 rxcp = be_rx_compl_get(rxo);
ddf1169f 2081 if (!rxcp) {
d23e946c
SP
2082 if (lancer_chip(adapter))
2083 break;
2084
2085 if (flush_wait++ > 10 || be_hw_error(adapter)) {
2086 dev_warn(&adapter->pdev->dev,
2087 "did not receive flush compl\n");
2088 break;
2089 }
2090 be_cq_notify(adapter, rx_cq->id, true, 0);
2091 mdelay(1);
2092 } else {
2093 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2094 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2095 if (rxcp->num_rcvd == 0)
2096 break;
2097 }
6b7c5b94
SP
2098 }
2099
d23e946c
SP
2100 /* After cleanup, leave the CQ in unarmed state */
2101 be_cq_notify(adapter, rx_cq->id, false, 0);
2102
2103 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
2104 while (atomic_read(&rxq->used) > 0) {
2105 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2106 put_page(page_info->page);
2107 memset(page_info, 0, sizeof(*page_info));
2108 }
2109 BUG_ON(atomic_read(&rxq->used));
5f820b6c
KA
2110 rxq->tail = 0;
2111 rxq->head = 0;
6b7c5b94
SP
2112}
2113
0ae57bb3 2114static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2115{
5f07b3c5
SP
2116 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2117 struct device *dev = &adapter->pdev->dev;
0ae57bb3
SP
2118 struct be_tx_obj *txo;
2119 struct be_queue_info *txq;
a8e9179a 2120 struct be_eth_tx_compl *txcp;
0ae57bb3 2121 int i, pending_txqs;
a8e9179a 2122
1a3d0717 2123 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2124 do {
0ae57bb3
SP
2125 pending_txqs = adapter->num_tx_qs;
2126
2127 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2128 cmpl = 0;
2129 num_wrbs = 0;
0ae57bb3
SP
2130 txq = &txo->q;
2131 while ((txcp = be_tx_compl_get(&txo->cq))) {
c3c18bc1 2132 end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
0ae57bb3
SP
2133 num_wrbs += be_tx_compl_process(adapter, txo,
2134 end_idx);
2135 cmpl++;
2136 }
2137 if (cmpl) {
2138 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2139 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2140 timeo = 0;
0ae57bb3 2141 }
5f07b3c5 2142 if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
0ae57bb3 2143 pending_txqs--;
a8e9179a
SP
2144 }
2145
1a3d0717 2146 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
a8e9179a
SP
2147 break;
2148
2149 mdelay(1);
2150 } while (true);
2151
5f07b3c5 2152 /* Free enqueued TX that was never notified to HW */
0ae57bb3
SP
2153 for_all_tx_queues(adapter, txo, i) {
2154 txq = &txo->q;
0ae57bb3 2155
5f07b3c5
SP
2156 if (atomic_read(&txq->used)) {
2157 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2158 i, atomic_read(&txq->used));
2159 notified_idx = txq->tail;
0ae57bb3 2160 end_idx = txq->tail;
5f07b3c5
SP
2161 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2162 txq->len);
2163 /* Use the tx-compl process logic to handle requests
2164 * that were not sent to the HW.
2165 */
0ae57bb3
SP
2166 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2167 atomic_sub(num_wrbs, &txq->used);
5f07b3c5
SP
2168 BUG_ON(atomic_read(&txq->used));
2169 txo->pend_wrb_cnt = 0;
2170 /* Since hw was never notified of these requests,
2171 * reset TXQ indices
2172 */
2173 txq->head = notified_idx;
2174 txq->tail = notified_idx;
0ae57bb3 2175 }
b03388d6 2176 }
6b7c5b94
SP
2177}
2178
10ef9ab4
SP
2179static void be_evt_queues_destroy(struct be_adapter *adapter)
2180{
2181 struct be_eq_obj *eqo;
2182 int i;
2183
2184 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2185 if (eqo->q.created) {
2186 be_eq_clean(eqo);
10ef9ab4 2187 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2188 napi_hash_del(&eqo->napi);
68d7bdcb 2189 netif_napi_del(&eqo->napi);
19d59aa7 2190 }
10ef9ab4
SP
2191 be_queue_free(adapter, &eqo->q);
2192 }
2193}
2194
2195static int be_evt_queues_create(struct be_adapter *adapter)
2196{
2197 struct be_queue_info *eq;
2198 struct be_eq_obj *eqo;
2632bafd 2199 struct be_aic_obj *aic;
10ef9ab4
SP
2200 int i, rc;
2201
92bf14ab
SP
2202 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2203 adapter->cfg_num_qs);
10ef9ab4
SP
2204
2205 for_all_evt_queues(adapter, eqo, i) {
68d7bdcb
SP
2206 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2207 BE_NAPI_WEIGHT);
6384a4d0 2208 napi_hash_add(&eqo->napi);
2632bafd 2209 aic = &adapter->aic_obj[i];
10ef9ab4 2210 eqo->adapter = adapter;
10ef9ab4 2211 eqo->idx = i;
2632bafd
SP
2212 aic->max_eqd = BE_MAX_EQD;
2213 aic->enable = true;
10ef9ab4
SP
2214
2215 eq = &eqo->q;
2216 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2217 sizeof(struct be_eq_entry));
10ef9ab4
SP
2218 if (rc)
2219 return rc;
2220
f2f781a7 2221 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2222 if (rc)
2223 return rc;
2224 }
1cfafab9 2225 return 0;
10ef9ab4
SP
2226}
2227
5fb379ee
SP
2228static void be_mcc_queues_destroy(struct be_adapter *adapter)
2229{
2230 struct be_queue_info *q;
5fb379ee 2231
8788fdc2 2232 q = &adapter->mcc_obj.q;
5fb379ee 2233 if (q->created)
8788fdc2 2234 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2235 be_queue_free(adapter, q);
2236
8788fdc2 2237 q = &adapter->mcc_obj.cq;
5fb379ee 2238 if (q->created)
8788fdc2 2239 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2240 be_queue_free(adapter, q);
2241}
2242
2243/* Must be called only after TX qs are created as MCC shares TX EQ */
2244static int be_mcc_queues_create(struct be_adapter *adapter)
2245{
2246 struct be_queue_info *q, *cq;
5fb379ee 2247
8788fdc2 2248 cq = &adapter->mcc_obj.cq;
5fb379ee 2249 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2250 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2251 goto err;
2252
10ef9ab4
SP
2253 /* Use the default EQ for MCC completions */
2254 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2255 goto mcc_cq_free;
2256
8788fdc2 2257 q = &adapter->mcc_obj.q;
5fb379ee
SP
2258 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2259 goto mcc_cq_destroy;
2260
8788fdc2 2261 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2262 goto mcc_q_free;
2263
2264 return 0;
2265
2266mcc_q_free:
2267 be_queue_free(adapter, q);
2268mcc_cq_destroy:
8788fdc2 2269 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2270mcc_cq_free:
2271 be_queue_free(adapter, cq);
2272err:
2273 return -1;
2274}
2275
6b7c5b94
SP
2276static void be_tx_queues_destroy(struct be_adapter *adapter)
2277{
2278 struct be_queue_info *q;
3c8def97
SP
2279 struct be_tx_obj *txo;
2280 u8 i;
6b7c5b94 2281
3c8def97
SP
2282 for_all_tx_queues(adapter, txo, i) {
2283 q = &txo->q;
2284 if (q->created)
2285 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2286 be_queue_free(adapter, q);
6b7c5b94 2287
3c8def97
SP
2288 q = &txo->cq;
2289 if (q->created)
2290 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2291 be_queue_free(adapter, q);
2292 }
6b7c5b94
SP
2293}
2294
7707133c 2295static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2296{
10ef9ab4 2297 struct be_queue_info *cq, *eq;
3c8def97 2298 struct be_tx_obj *txo;
92bf14ab 2299 int status, i;
6b7c5b94 2300
92bf14ab 2301 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2302
10ef9ab4
SP
2303 for_all_tx_queues(adapter, txo, i) {
2304 cq = &txo->cq;
2305 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2306 sizeof(struct be_eth_tx_compl));
2307 if (status)
2308 return status;
3c8def97 2309
827da44c
JS
2310 u64_stats_init(&txo->stats.sync);
2311 u64_stats_init(&txo->stats.sync_compl);
2312
10ef9ab4
SP
2313 /* If num_evt_qs is less than num_tx_qs, then more than
2314 * one txq share an eq
2315 */
2316 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2317 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2318 if (status)
2319 return status;
6b7c5b94 2320
10ef9ab4
SP
2321 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2322 sizeof(struct be_eth_wrb));
2323 if (status)
2324 return status;
6b7c5b94 2325
94d73aaa 2326 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2327 if (status)
2328 return status;
3c8def97 2329 }
6b7c5b94 2330
d379142b
SP
2331 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2332 adapter->num_tx_qs);
10ef9ab4 2333 return 0;
6b7c5b94
SP
2334}
2335
10ef9ab4 2336static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2337{
2338 struct be_queue_info *q;
3abcdeda
SP
2339 struct be_rx_obj *rxo;
2340 int i;
2341
2342 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2343 q = &rxo->cq;
2344 if (q->created)
2345 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2346 be_queue_free(adapter, q);
ac6a0c4a
SP
2347 }
2348}
2349
10ef9ab4 2350static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2351{
10ef9ab4 2352 struct be_queue_info *eq, *cq;
3abcdeda
SP
2353 struct be_rx_obj *rxo;
2354 int rc, i;
6b7c5b94 2355
92bf14ab
SP
2356 /* We can create as many RSS rings as there are EQs. */
2357 adapter->num_rx_qs = adapter->num_evt_qs;
2358
2359 /* We'll use RSS only if atleast 2 RSS rings are supported.
2360 * When RSS is used, we'll need a default RXQ for non-IP traffic.
10ef9ab4 2361 */
92bf14ab
SP
2362 if (adapter->num_rx_qs > 1)
2363 adapter->num_rx_qs++;
2364
6b7c5b94 2365 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2366 for_all_rx_queues(adapter, rxo, i) {
2367 rxo->adapter = adapter;
3abcdeda
SP
2368 cq = &rxo->cq;
2369 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2370 sizeof(struct be_eth_rx_compl));
3abcdeda 2371 if (rc)
10ef9ab4 2372 return rc;
3abcdeda 2373
827da44c 2374 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2375 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2376 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2377 if (rc)
10ef9ab4 2378 return rc;
3abcdeda 2379 }
6b7c5b94 2380
d379142b
SP
2381 dev_info(&adapter->pdev->dev,
2382 "created %d RSS queue(s) and 1 default RX queue\n",
2383 adapter->num_rx_qs - 1);
10ef9ab4 2384 return 0;
b628bde2
SP
2385}
2386
6b7c5b94
SP
2387static irqreturn_t be_intx(int irq, void *dev)
2388{
e49cc34f
SP
2389 struct be_eq_obj *eqo = dev;
2390 struct be_adapter *adapter = eqo->adapter;
2391 int num_evts = 0;
6b7c5b94 2392
d0b9cec3
SP
2393 /* IRQ is not expected when NAPI is scheduled as the EQ
2394 * will not be armed.
2395 * But, this can happen on Lancer INTx where it takes
2396 * a while to de-assert INTx or in BE2 where occasionaly
2397 * an interrupt may be raised even when EQ is unarmed.
2398 * If NAPI is already scheduled, then counting & notifying
2399 * events will orphan them.
e49cc34f 2400 */
d0b9cec3 2401 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2402 num_evts = events_get(eqo);
d0b9cec3
SP
2403 __napi_schedule(&eqo->napi);
2404 if (num_evts)
2405 eqo->spurious_intr = 0;
2406 }
2407 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2408
d0b9cec3
SP
2409 /* Return IRQ_HANDLED only for the the first spurious intr
2410 * after a valid intr to stop the kernel from branding
2411 * this irq as a bad one!
e49cc34f 2412 */
d0b9cec3
SP
2413 if (num_evts || eqo->spurious_intr++ == 0)
2414 return IRQ_HANDLED;
2415 else
2416 return IRQ_NONE;
6b7c5b94
SP
2417}
2418
10ef9ab4 2419static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2420{
10ef9ab4 2421 struct be_eq_obj *eqo = dev;
6b7c5b94 2422
0b545a62
SP
2423 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2424 napi_schedule(&eqo->napi);
6b7c5b94
SP
2425 return IRQ_HANDLED;
2426}
2427
2e588f84 2428static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2429{
e38b1706 2430 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2431}
2432
10ef9ab4 2433static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2434 int budget, int polling)
6b7c5b94 2435{
3abcdeda
SP
2436 struct be_adapter *adapter = rxo->adapter;
2437 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2438 struct be_rx_compl_info *rxcp;
6b7c5b94 2439 u32 work_done;
c30d7266 2440 u32 frags_consumed = 0;
6b7c5b94
SP
2441
2442 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2443 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2444 if (!rxcp)
2445 break;
2446
12004ae9
SP
2447 /* Is it a flush compl that has no data */
2448 if (unlikely(rxcp->num_rcvd == 0))
2449 goto loop_continue;
2450
2451 /* Discard compl with partial DMA Lancer B0 */
2452 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2453 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2454 goto loop_continue;
2455 }
2456
2457 /* On BE drop pkts that arrive due to imperfect filtering in
2458 * promiscuous mode on some skews
2459 */
2460 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2461 !lancer_chip(adapter))) {
10ef9ab4 2462 be_rx_compl_discard(rxo, rxcp);
12004ae9 2463 goto loop_continue;
64642811 2464 }
009dd872 2465
6384a4d0
SP
2466 /* Don't do gro when we're busy_polling */
2467 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2468 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2469 else
6384a4d0
SP
2470 be_rx_compl_process(rxo, napi, rxcp);
2471
12004ae9 2472loop_continue:
c30d7266 2473 frags_consumed += rxcp->num_rcvd;
2e588f84 2474 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2475 }
2476
10ef9ab4
SP
2477 if (work_done) {
2478 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2479
6384a4d0
SP
2480 /* When an rx-obj gets into post_starved state, just
2481 * let be_worker do the posting.
2482 */
2483 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2484 !rxo->rx_post_starved)
c30d7266
AK
2485 be_post_rx_frags(rxo, GFP_ATOMIC,
2486 max_t(u32, MAX_RX_POST,
2487 frags_consumed));
6b7c5b94 2488 }
10ef9ab4 2489
6b7c5b94
SP
2490 return work_done;
2491}
2492
512bb8a2
KA
2493static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2494{
2495 switch (status) {
2496 case BE_TX_COMP_HDR_PARSE_ERR:
2497 tx_stats(txo)->tx_hdr_parse_err++;
2498 break;
2499 case BE_TX_COMP_NDMA_ERR:
2500 tx_stats(txo)->tx_dma_err++;
2501 break;
2502 case BE_TX_COMP_ACL_ERR:
2503 tx_stats(txo)->tx_spoof_check_err++;
2504 break;
2505 }
2506}
2507
2508static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2509{
2510 switch (status) {
2511 case LANCER_TX_COMP_LSO_ERR:
2512 tx_stats(txo)->tx_tso_err++;
2513 break;
2514 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2515 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2516 tx_stats(txo)->tx_spoof_check_err++;
2517 break;
2518 case LANCER_TX_COMP_QINQ_ERR:
2519 tx_stats(txo)->tx_qinq_err++;
2520 break;
2521 case LANCER_TX_COMP_PARITY_ERR:
2522 tx_stats(txo)->tx_internal_parity_err++;
2523 break;
2524 case LANCER_TX_COMP_DMA_ERR:
2525 tx_stats(txo)->tx_dma_err++;
2526 break;
2527 }
2528}
2529
c8f64615
SP
2530static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2531 int idx)
6b7c5b94 2532{
6b7c5b94 2533 struct be_eth_tx_compl *txcp;
c8f64615 2534 int num_wrbs = 0, work_done = 0;
512bb8a2 2535 u32 compl_status;
c8f64615
SP
2536 u16 last_idx;
2537
2538 while ((txcp = be_tx_compl_get(&txo->cq))) {
2539 last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
2540 num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
2541 work_done++;
3c8def97 2542
512bb8a2
KA
2543 compl_status = GET_TX_COMPL_BITS(status, txcp);
2544 if (compl_status) {
2545 if (lancer_chip(adapter))
2546 lancer_update_tx_err(txo, compl_status);
2547 else
2548 be_update_tx_err(txo, compl_status);
2549 }
10ef9ab4 2550 }
6b7c5b94 2551
10ef9ab4
SP
2552 if (work_done) {
2553 be_cq_notify(adapter, txo->cq.id, true, work_done);
2554 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2555
10ef9ab4
SP
2556 /* As Tx wrbs have been freed up, wake up netdev queue
2557 * if it was stopped due to lack of tx wrbs. */
2558 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
748b539a 2559 atomic_read(&txo->q.used) < txo->q.len / 2) {
10ef9ab4 2560 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2561 }
10ef9ab4
SP
2562
2563 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2564 tx_stats(txo)->tx_compl += work_done;
2565 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2566 }
10ef9ab4 2567}
6b7c5b94 2568
f7062ee5
SP
2569#ifdef CONFIG_NET_RX_BUSY_POLL
2570static inline bool be_lock_napi(struct be_eq_obj *eqo)
2571{
2572 bool status = true;
2573
2574 spin_lock(&eqo->lock); /* BH is already disabled */
2575 if (eqo->state & BE_EQ_LOCKED) {
2576 WARN_ON(eqo->state & BE_EQ_NAPI);
2577 eqo->state |= BE_EQ_NAPI_YIELD;
2578 status = false;
2579 } else {
2580 eqo->state = BE_EQ_NAPI;
2581 }
2582 spin_unlock(&eqo->lock);
2583 return status;
2584}
2585
2586static inline void be_unlock_napi(struct be_eq_obj *eqo)
2587{
2588 spin_lock(&eqo->lock); /* BH is already disabled */
2589
2590 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
2591 eqo->state = BE_EQ_IDLE;
2592
2593 spin_unlock(&eqo->lock);
2594}
2595
2596static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2597{
2598 bool status = true;
2599
2600 spin_lock_bh(&eqo->lock);
2601 if (eqo->state & BE_EQ_LOCKED) {
2602 eqo->state |= BE_EQ_POLL_YIELD;
2603 status = false;
2604 } else {
2605 eqo->state |= BE_EQ_POLL;
2606 }
2607 spin_unlock_bh(&eqo->lock);
2608 return status;
2609}
2610
2611static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2612{
2613 spin_lock_bh(&eqo->lock);
2614
2615 WARN_ON(eqo->state & (BE_EQ_NAPI));
2616 eqo->state = BE_EQ_IDLE;
2617
2618 spin_unlock_bh(&eqo->lock);
2619}
2620
2621static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2622{
2623 spin_lock_init(&eqo->lock);
2624 eqo->state = BE_EQ_IDLE;
2625}
2626
2627static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2628{
2629 local_bh_disable();
2630
2631 /* It's enough to just acquire napi lock on the eqo to stop
2632 * be_busy_poll() from processing any queueus.
2633 */
2634 while (!be_lock_napi(eqo))
2635 mdelay(1);
2636
2637 local_bh_enable();
2638}
2639
2640#else /* CONFIG_NET_RX_BUSY_POLL */
2641
2642static inline bool be_lock_napi(struct be_eq_obj *eqo)
2643{
2644 return true;
2645}
2646
2647static inline void be_unlock_napi(struct be_eq_obj *eqo)
2648{
2649}
2650
2651static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2652{
2653 return false;
2654}
2655
2656static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2657{
2658}
2659
2660static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2661{
2662}
2663
2664static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2665{
2666}
2667#endif /* CONFIG_NET_RX_BUSY_POLL */
2668
68d7bdcb 2669int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2670{
2671 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2672 struct be_adapter *adapter = eqo->adapter;
0b545a62 2673 int max_work = 0, work, i, num_evts;
6384a4d0 2674 struct be_rx_obj *rxo;
a4906ea0 2675 struct be_tx_obj *txo;
f31e50a8 2676
0b545a62
SP
2677 num_evts = events_get(eqo);
2678
a4906ea0
SP
2679 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
2680 be_process_tx(adapter, txo, i);
f31e50a8 2681
6384a4d0
SP
2682 if (be_lock_napi(eqo)) {
2683 /* This loop will iterate twice for EQ0 in which
2684 * completions of the last RXQ (default one) are also processed
2685 * For other EQs the loop iterates only once
2686 */
2687 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2688 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2689 max_work = max(work, max_work);
2690 }
2691 be_unlock_napi(eqo);
2692 } else {
2693 max_work = budget;
10ef9ab4 2694 }
6b7c5b94 2695
10ef9ab4
SP
2696 if (is_mcc_eqo(eqo))
2697 be_process_mcc(adapter);
93c86700 2698
10ef9ab4
SP
2699 if (max_work < budget) {
2700 napi_complete(napi);
0b545a62 2701 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2702 } else {
2703 /* As we'll continue in polling mode, count and clear events */
0b545a62 2704 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2705 }
10ef9ab4 2706 return max_work;
6b7c5b94
SP
2707}
2708
6384a4d0
SP
2709#ifdef CONFIG_NET_RX_BUSY_POLL
2710static int be_busy_poll(struct napi_struct *napi)
2711{
2712 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2713 struct be_adapter *adapter = eqo->adapter;
2714 struct be_rx_obj *rxo;
2715 int i, work = 0;
2716
2717 if (!be_lock_busy_poll(eqo))
2718 return LL_FLUSH_BUSY;
2719
2720 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2721 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2722 if (work)
2723 break;
2724 }
2725
2726 be_unlock_busy_poll(eqo);
2727 return work;
2728}
2729#endif
2730
f67ef7ba 2731void be_detect_error(struct be_adapter *adapter)
7c185276 2732{
e1cfb67a
PR
2733 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2734 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 2735 u32 i;
eb0eecc1
SK
2736 bool error_detected = false;
2737 struct device *dev = &adapter->pdev->dev;
2738 struct net_device *netdev = adapter->netdev;
7c185276 2739
d23e946c 2740 if (be_hw_error(adapter))
72f02485
SP
2741 return;
2742
e1cfb67a
PR
2743 if (lancer_chip(adapter)) {
2744 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2745 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2746 sliport_err1 = ioread32(adapter->db +
748b539a 2747 SLIPORT_ERROR1_OFFSET);
e1cfb67a 2748 sliport_err2 = ioread32(adapter->db +
748b539a 2749 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
2750 adapter->hw_error = true;
2751 /* Do not log error messages if its a FW reset */
2752 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2753 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2754 dev_info(dev, "Firmware update in progress\n");
2755 } else {
2756 error_detected = true;
2757 dev_err(dev, "Error detected in the card\n");
2758 dev_err(dev, "ERR: sliport status 0x%x\n",
2759 sliport_status);
2760 dev_err(dev, "ERR: sliport error1 0x%x\n",
2761 sliport_err1);
2762 dev_err(dev, "ERR: sliport error2 0x%x\n",
2763 sliport_err2);
2764 }
e1cfb67a
PR
2765 }
2766 } else {
2767 pci_read_config_dword(adapter->pdev,
748b539a 2768 PCICFG_UE_STATUS_LOW, &ue_lo);
e1cfb67a 2769 pci_read_config_dword(adapter->pdev,
748b539a 2770 PCICFG_UE_STATUS_HIGH, &ue_hi);
e1cfb67a 2771 pci_read_config_dword(adapter->pdev,
748b539a 2772 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
e1cfb67a 2773 pci_read_config_dword(adapter->pdev,
748b539a 2774 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
e1cfb67a 2775
f67ef7ba
PR
2776 ue_lo = (ue_lo & ~ue_lo_mask);
2777 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 2778
eb0eecc1
SK
2779 /* On certain platforms BE hardware can indicate spurious UEs.
2780 * Allow HW to stop working completely in case of a real UE.
2781 * Hence not setting the hw_error for UE detection.
2782 */
f67ef7ba 2783
eb0eecc1
SK
2784 if (ue_lo || ue_hi) {
2785 error_detected = true;
2786 dev_err(dev,
2787 "Unrecoverable Error detected in the adapter");
2788 dev_err(dev, "Please reboot server to recover");
2789 if (skyhawk_chip(adapter))
2790 adapter->hw_error = true;
2791 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2792 if (ue_lo & 1)
2793 dev_err(dev, "UE: %s bit set\n",
2794 ue_status_low_desc[i]);
2795 }
2796 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2797 if (ue_hi & 1)
2798 dev_err(dev, "UE: %s bit set\n",
2799 ue_status_hi_desc[i]);
2800 }
7c185276
AK
2801 }
2802 }
eb0eecc1
SK
2803 if (error_detected)
2804 netif_carrier_off(netdev);
7c185276
AK
2805}
2806
8d56ff11
SP
2807static void be_msix_disable(struct be_adapter *adapter)
2808{
ac6a0c4a 2809 if (msix_enabled(adapter)) {
8d56ff11 2810 pci_disable_msix(adapter->pdev);
ac6a0c4a 2811 adapter->num_msix_vec = 0;
68d7bdcb 2812 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2813 }
2814}
2815
c2bba3df 2816static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2817{
7dc4c064 2818 int i, num_vec;
d379142b 2819 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2820
92bf14ab
SP
2821 /* If RoCE is supported, program the max number of NIC vectors that
2822 * may be configured via set-channels, along with vectors needed for
2823 * RoCe. Else, just program the number we'll use initially.
2824 */
2825 if (be_roce_supported(adapter))
2826 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2827 2 * num_online_cpus());
2828 else
2829 num_vec = adapter->cfg_num_qs;
3abcdeda 2830
ac6a0c4a 2831 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2832 adapter->msix_entries[i].entry = i;
2833
7dc4c064
AG
2834 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2835 MIN_MSIX_VECTORS, num_vec);
2836 if (num_vec < 0)
2837 goto fail;
92bf14ab 2838
92bf14ab
SP
2839 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2840 adapter->num_msix_roce_vec = num_vec / 2;
2841 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2842 adapter->num_msix_roce_vec);
2843 }
2844
2845 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2846
2847 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2848 adapter->num_msix_vec);
c2bba3df 2849 return 0;
7dc4c064
AG
2850
2851fail:
2852 dev_warn(dev, "MSIx enable failed\n");
2853
2854 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2855 if (!be_physfn(adapter))
2856 return num_vec;
2857 return 0;
6b7c5b94
SP
2858}
2859
fe6d2a38 2860static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 2861 struct be_eq_obj *eqo)
b628bde2 2862{
f2f781a7 2863 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2864}
6b7c5b94 2865
b628bde2
SP
2866static int be_msix_register(struct be_adapter *adapter)
2867{
10ef9ab4
SP
2868 struct net_device *netdev = adapter->netdev;
2869 struct be_eq_obj *eqo;
2870 int status, i, vec;
6b7c5b94 2871
10ef9ab4
SP
2872 for_all_evt_queues(adapter, eqo, i) {
2873 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2874 vec = be_msix_vec_get(adapter, eqo);
2875 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2876 if (status)
2877 goto err_msix;
2878 }
b628bde2 2879
6b7c5b94 2880 return 0;
3abcdeda 2881err_msix:
10ef9ab4
SP
2882 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2883 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2884 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 2885 status);
ac6a0c4a 2886 be_msix_disable(adapter);
6b7c5b94
SP
2887 return status;
2888}
2889
2890static int be_irq_register(struct be_adapter *adapter)
2891{
2892 struct net_device *netdev = adapter->netdev;
2893 int status;
2894
ac6a0c4a 2895 if (msix_enabled(adapter)) {
6b7c5b94
SP
2896 status = be_msix_register(adapter);
2897 if (status == 0)
2898 goto done;
ba343c77
SB
2899 /* INTx is not supported for VF */
2900 if (!be_physfn(adapter))
2901 return status;
6b7c5b94
SP
2902 }
2903
e49cc34f 2904 /* INTx: only the first EQ is used */
6b7c5b94
SP
2905 netdev->irq = adapter->pdev->irq;
2906 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2907 &adapter->eq_obj[0]);
6b7c5b94
SP
2908 if (status) {
2909 dev_err(&adapter->pdev->dev,
2910 "INTx request IRQ failed - err %d\n", status);
2911 return status;
2912 }
2913done:
2914 adapter->isr_registered = true;
2915 return 0;
2916}
2917
2918static void be_irq_unregister(struct be_adapter *adapter)
2919{
2920 struct net_device *netdev = adapter->netdev;
10ef9ab4 2921 struct be_eq_obj *eqo;
3abcdeda 2922 int i;
6b7c5b94
SP
2923
2924 if (!adapter->isr_registered)
2925 return;
2926
2927 /* INTx */
ac6a0c4a 2928 if (!msix_enabled(adapter)) {
e49cc34f 2929 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2930 goto done;
2931 }
2932
2933 /* MSIx */
10ef9ab4
SP
2934 for_all_evt_queues(adapter, eqo, i)
2935 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2936
6b7c5b94
SP
2937done:
2938 adapter->isr_registered = false;
6b7c5b94
SP
2939}
2940
10ef9ab4 2941static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2942{
2943 struct be_queue_info *q;
2944 struct be_rx_obj *rxo;
2945 int i;
2946
2947 for_all_rx_queues(adapter, rxo, i) {
2948 q = &rxo->q;
2949 if (q->created) {
2950 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2951 be_rx_cq_clean(rxo);
482c9e79 2952 }
10ef9ab4 2953 be_queue_free(adapter, q);
482c9e79
SP
2954 }
2955}
2956
889cd4b2
SP
2957static int be_close(struct net_device *netdev)
2958{
2959 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2960 struct be_eq_obj *eqo;
2961 int i;
889cd4b2 2962
e1ad8e33
KA
2963 /* This protection is needed as be_close() may be called even when the
2964 * adapter is in cleared state (after eeh perm failure)
2965 */
2966 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
2967 return 0;
2968
045508a8
PP
2969 be_roce_dev_close(adapter);
2970
dff345c5
IV
2971 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2972 for_all_evt_queues(adapter, eqo, i) {
04d3d624 2973 napi_disable(&eqo->napi);
6384a4d0
SP
2974 be_disable_busy_poll(eqo);
2975 }
71237b6f 2976 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 2977 }
a323d9bf
SP
2978
2979 be_async_mcc_disable(adapter);
2980
2981 /* Wait for all pending tx completions to arrive so that
2982 * all tx skbs are freed.
2983 */
fba87559 2984 netif_tx_disable(netdev);
6e1f9975 2985 be_tx_compl_clean(adapter);
a323d9bf
SP
2986
2987 be_rx_qs_destroy(adapter);
f66b7cfd 2988 be_clear_uc_list(adapter);
d11a347d 2989
a323d9bf 2990 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2991 if (msix_enabled(adapter))
2992 synchronize_irq(be_msix_vec_get(adapter, eqo));
2993 else
2994 synchronize_irq(netdev->irq);
2995 be_eq_clean(eqo);
63fcb27f
PR
2996 }
2997
889cd4b2
SP
2998 be_irq_unregister(adapter);
2999
482c9e79
SP
3000 return 0;
3001}
3002
10ef9ab4 3003static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 3004{
1dcf7b1c
ED
3005 struct rss_info *rss = &adapter->rss_info;
3006 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 3007 struct be_rx_obj *rxo;
e9008ee9 3008 int rc, i, j;
482c9e79
SP
3009
3010 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
3011 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3012 sizeof(struct be_eth_rx_d));
3013 if (rc)
3014 return rc;
3015 }
3016
3017 /* The FW would like the default RXQ to be created first */
3018 rxo = default_rxo(adapter);
3019 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
3020 adapter->if_handle, false, &rxo->rss_id);
3021 if (rc)
3022 return rc;
3023
3024 for_all_rss_queues(adapter, rxo, i) {
482c9e79 3025 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
3026 rx_frag_size, adapter->if_handle,
3027 true, &rxo->rss_id);
482c9e79
SP
3028 if (rc)
3029 return rc;
3030 }
3031
3032 if (be_multi_rxq(adapter)) {
e2557877
VD
3033 for (j = 0; j < RSS_INDIR_TABLE_LEN;
3034 j += adapter->num_rx_qs - 1) {
e9008ee9 3035 for_all_rss_queues(adapter, rxo, i) {
e2557877 3036 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 3037 break;
e2557877
VD
3038 rss->rsstable[j + i] = rxo->rss_id;
3039 rss->rss_queue[j + i] = i;
e9008ee9
PR
3040 }
3041 }
e2557877
VD
3042 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3043 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
3044
3045 if (!BEx_chip(adapter))
e2557877
VD
3046 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3047 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
3048 } else {
3049 /* Disable RSS, if only default RX Q is created */
e2557877 3050 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3051 }
594ad54a 3052
1dcf7b1c 3053 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
748b539a 3054 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
1dcf7b1c 3055 128, rss_key);
da1388d6 3056 if (rc) {
e2557877 3057 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3058 return rc;
482c9e79
SP
3059 }
3060
1dcf7b1c 3061 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
e2557877 3062
482c9e79 3063 /* First time posting */
10ef9ab4 3064 for_all_rx_queues(adapter, rxo, i)
c30d7266 3065 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
889cd4b2
SP
3066 return 0;
3067}
3068
6b7c5b94
SP
3069static int be_open(struct net_device *netdev)
3070{
3071 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3072 struct be_eq_obj *eqo;
3abcdeda 3073 struct be_rx_obj *rxo;
10ef9ab4 3074 struct be_tx_obj *txo;
b236916a 3075 u8 link_status;
3abcdeda 3076 int status, i;
5fb379ee 3077
10ef9ab4 3078 status = be_rx_qs_create(adapter);
482c9e79
SP
3079 if (status)
3080 goto err;
3081
c2bba3df
SK
3082 status = be_irq_register(adapter);
3083 if (status)
3084 goto err;
5fb379ee 3085
10ef9ab4 3086 for_all_rx_queues(adapter, rxo, i)
3abcdeda 3087 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 3088
10ef9ab4
SP
3089 for_all_tx_queues(adapter, txo, i)
3090 be_cq_notify(adapter, txo->cq.id, true, 0);
3091
7a1e9b20
SP
3092 be_async_mcc_enable(adapter);
3093
10ef9ab4
SP
3094 for_all_evt_queues(adapter, eqo, i) {
3095 napi_enable(&eqo->napi);
6384a4d0 3096 be_enable_busy_poll(eqo);
4cad9f3b 3097 be_eq_notify(adapter, eqo->q.id, true, true, 0);
10ef9ab4 3098 }
04d3d624 3099 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 3100
323ff71e 3101 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
3102 if (!status)
3103 be_link_status_update(adapter, link_status);
3104
fba87559 3105 netif_tx_start_all_queues(netdev);
045508a8 3106 be_roce_dev_open(adapter);
c9c47142 3107
c5abe7c0 3108#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3109 if (skyhawk_chip(adapter))
3110 vxlan_get_rx_port(netdev);
c5abe7c0
SP
3111#endif
3112
889cd4b2
SP
3113 return 0;
3114err:
3115 be_close(adapter->netdev);
3116 return -EIO;
5fb379ee
SP
3117}
3118
71d8d1b5
AK
3119static int be_setup_wol(struct be_adapter *adapter, bool enable)
3120{
3121 struct be_dma_mem cmd;
3122 int status = 0;
3123 u8 mac[ETH_ALEN];
3124
3125 memset(mac, 0, ETH_ALEN);
3126
3127 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
3128 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3129 GFP_KERNEL);
ddf1169f 3130 if (!cmd.va)
6b568689 3131 return -ENOMEM;
71d8d1b5
AK
3132
3133 if (enable) {
3134 status = pci_write_config_dword(adapter->pdev,
748b539a
SP
3135 PCICFG_PM_CONTROL_OFFSET,
3136 PCICFG_PM_CONTROL_MASK);
71d8d1b5
AK
3137 if (status) {
3138 dev_err(&adapter->pdev->dev,
2381a55c 3139 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
3140 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3141 cmd.dma);
71d8d1b5
AK
3142 return status;
3143 }
3144 status = be_cmd_enable_magic_wol(adapter,
748b539a
SP
3145 adapter->netdev->dev_addr,
3146 &cmd);
71d8d1b5
AK
3147 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3148 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3149 } else {
3150 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3151 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3152 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3153 }
3154
2b7bcebf 3155 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
3156 return status;
3157}
3158
f7062ee5
SP
3159static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3160{
3161 u32 addr;
3162
3163 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3164
3165 mac[5] = (u8)(addr & 0xFF);
3166 mac[4] = (u8)((addr >> 8) & 0xFF);
3167 mac[3] = (u8)((addr >> 16) & 0xFF);
3168 /* Use the OUI from the current MAC address */
3169 memcpy(mac, adapter->netdev->dev_addr, 3);
3170}
3171
6d87f5c3
AK
3172/*
3173 * Generate a seed MAC address from the PF MAC Address using jhash.
3174 * MAC Address for VFs are assigned incrementally starting from the seed.
3175 * These addresses are programmed in the ASIC by the PF and the VF driver
3176 * queries for the MAC address during its probe.
3177 */
4c876616 3178static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3179{
f9449ab7 3180 u32 vf;
3abcdeda 3181 int status = 0;
6d87f5c3 3182 u8 mac[ETH_ALEN];
11ac75ed 3183 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3184
3185 be_vf_eth_addr_generate(adapter, mac);
3186
11ac75ed 3187 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3188 if (BEx_chip(adapter))
590c391d 3189 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3190 vf_cfg->if_handle,
3191 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3192 else
3193 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3194 vf + 1);
590c391d 3195
6d87f5c3
AK
3196 if (status)
3197 dev_err(&adapter->pdev->dev,
748b539a
SP
3198 "Mac address assignment failed for VF %d\n",
3199 vf);
6d87f5c3 3200 else
11ac75ed 3201 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3202
3203 mac[5] += 1;
3204 }
3205 return status;
3206}
3207
4c876616
SP
3208static int be_vfs_mac_query(struct be_adapter *adapter)
3209{
3210 int status, vf;
3211 u8 mac[ETH_ALEN];
3212 struct be_vf_cfg *vf_cfg;
4c876616
SP
3213
3214 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3215 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3216 mac, vf_cfg->if_handle,
3217 false, vf+1);
4c876616
SP
3218 if (status)
3219 return status;
3220 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3221 }
3222 return 0;
3223}
3224
f9449ab7 3225static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3226{
11ac75ed 3227 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3228 u32 vf;
3229
257a3feb 3230 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
3231 dev_warn(&adapter->pdev->dev,
3232 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
3233 goto done;
3234 }
3235
b4c1df93
SP
3236 pci_disable_sriov(adapter->pdev);
3237
11ac75ed 3238 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3239 if (BEx_chip(adapter))
11ac75ed
SP
3240 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3241 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3242 else
3243 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3244 vf + 1);
f9449ab7 3245
11ac75ed
SP
3246 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3247 }
39f1d94d
SP
3248done:
3249 kfree(adapter->vf_cfg);
3250 adapter->num_vfs = 0;
f174c7ec 3251 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
6d87f5c3
AK
3252}
3253
7707133c
SP
3254static void be_clear_queues(struct be_adapter *adapter)
3255{
3256 be_mcc_queues_destroy(adapter);
3257 be_rx_cqs_destroy(adapter);
3258 be_tx_queues_destroy(adapter);
3259 be_evt_queues_destroy(adapter);
3260}
3261
68d7bdcb 3262static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3263{
191eb756
SP
3264 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3265 cancel_delayed_work_sync(&adapter->work);
3266 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3267 }
68d7bdcb
SP
3268}
3269
b05004ad 3270static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb 3271{
b05004ad 3272 if (adapter->pmac_id) {
f66b7cfd
SP
3273 be_cmd_pmac_del(adapter, adapter->if_handle,
3274 adapter->pmac_id[0], 0);
b05004ad
SK
3275 kfree(adapter->pmac_id);
3276 adapter->pmac_id = NULL;
3277 }
3278}
3279
c5abe7c0 3280#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3281static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3282{
630f4b70
SB
3283 struct net_device *netdev = adapter->netdev;
3284
c9c47142
SP
3285 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3286 be_cmd_manage_iface(adapter, adapter->if_handle,
3287 OP_CONVERT_TUNNEL_TO_NORMAL);
3288
3289 if (adapter->vxlan_port)
3290 be_cmd_set_vxlan_port(adapter, 0);
3291
3292 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3293 adapter->vxlan_port = 0;
630f4b70
SB
3294
3295 netdev->hw_enc_features = 0;
3296 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
ac9a3d84 3297 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
c9c47142 3298}
c5abe7c0 3299#endif
c9c47142 3300
b05004ad
SK
3301static int be_clear(struct be_adapter *adapter)
3302{
68d7bdcb 3303 be_cancel_worker(adapter);
191eb756 3304
11ac75ed 3305 if (sriov_enabled(adapter))
f9449ab7
SP
3306 be_vf_clear(adapter);
3307
bec84e6b
VV
3308 /* Re-configure FW to distribute resources evenly across max-supported
3309 * number of VFs, only when VFs are not already enabled.
3310 */
3311 if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
3312 be_cmd_set_sriov_config(adapter, adapter->pool_res,
3313 pci_sriov_get_totalvfs(adapter->pdev));
3314
c5abe7c0 3315#ifdef CONFIG_BE2NET_VXLAN
c9c47142 3316 be_disable_vxlan_offloads(adapter);
c5abe7c0 3317#endif
2d17f403 3318 /* delete the primary MAC along with the uc-mac list */
b05004ad 3319 be_mac_clear(adapter);
fbc13f01 3320
f9449ab7 3321 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 3322
7707133c 3323 be_clear_queues(adapter);
a54769f5 3324
10ef9ab4 3325 be_msix_disable(adapter);
e1ad8e33 3326 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
a54769f5
SP
3327 return 0;
3328}
3329
0700d816
KA
3330static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3331 u32 cap_flags, u32 vf)
3332{
3333 u32 en_flags;
3334 int status;
3335
3336 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3337 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
3338 BE_IF_FLAGS_RSS;
3339
3340 en_flags &= cap_flags;
3341
3342 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3343 if_handle, vf);
3344
3345 return status;
3346}
3347
4c876616 3348static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3349{
92bf14ab 3350 struct be_resources res = {0};
4c876616 3351 struct be_vf_cfg *vf_cfg;
0700d816
KA
3352 u32 cap_flags, vf;
3353 int status;
abb93951 3354
0700d816 3355 /* If a FW profile exists, then cap_flags are updated */
4c876616
SP
3356 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3357 BE_IF_FLAGS_MULTICAST;
abb93951 3358
4c876616 3359 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
3360 if (!BE3_chip(adapter)) {
3361 status = be_cmd_get_profile_config(adapter, &res,
3362 vf + 1);
3363 if (!status)
3364 cap_flags = res.if_cap_flags;
3365 }
4c876616 3366
0700d816
KA
3367 status = be_if_create(adapter, &vf_cfg->if_handle,
3368 cap_flags, vf + 1);
4c876616 3369 if (status)
0700d816 3370 return status;
4c876616 3371 }
0700d816
KA
3372
3373 return 0;
abb93951
PR
3374}
3375
39f1d94d 3376static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3377{
11ac75ed 3378 struct be_vf_cfg *vf_cfg;
30128031
SP
3379 int vf;
3380
39f1d94d
SP
3381 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3382 GFP_KERNEL);
3383 if (!adapter->vf_cfg)
3384 return -ENOMEM;
3385
11ac75ed
SP
3386 for_all_vfs(adapter, vf_cfg, vf) {
3387 vf_cfg->if_handle = -1;
3388 vf_cfg->pmac_id = -1;
30128031 3389 }
39f1d94d 3390 return 0;
30128031
SP
3391}
3392
f9449ab7
SP
3393static int be_vf_setup(struct be_adapter *adapter)
3394{
c502224e 3395 struct device *dev = &adapter->pdev->dev;
11ac75ed 3396 struct be_vf_cfg *vf_cfg;
4c876616 3397 int status, old_vfs, vf;
04a06028 3398 u32 privileges;
39f1d94d 3399
257a3feb 3400 old_vfs = pci_num_vf(adapter->pdev);
39f1d94d
SP
3401
3402 status = be_vf_setup_init(adapter);
3403 if (status)
3404 goto err;
30128031 3405
4c876616
SP
3406 if (old_vfs) {
3407 for_all_vfs(adapter, vf_cfg, vf) {
3408 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3409 if (status)
3410 goto err;
3411 }
f9449ab7 3412
4c876616
SP
3413 status = be_vfs_mac_query(adapter);
3414 if (status)
3415 goto err;
3416 } else {
bec84e6b
VV
3417 status = be_vfs_if_create(adapter);
3418 if (status)
3419 goto err;
3420
39f1d94d
SP
3421 status = be_vf_eth_addr_config(adapter);
3422 if (status)
3423 goto err;
3424 }
f9449ab7 3425
11ac75ed 3426 for_all_vfs(adapter, vf_cfg, vf) {
04a06028
SP
3427 /* Allow VFs to program MAC/VLAN filters */
3428 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3429 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3430 status = be_cmd_set_fn_privileges(adapter,
3431 privileges |
3432 BE_PRIV_FILTMGMT,
3433 vf + 1);
3434 if (!status)
3435 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3436 vf);
3437 }
3438
0f77ba73
RN
3439 /* Allow full available bandwidth */
3440 if (!old_vfs)
3441 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 3442
bdce2ad7 3443 if (!old_vfs) {
0599863d 3444 be_cmd_enable_vf(adapter, vf + 1);
bdce2ad7
SR
3445 be_cmd_set_logical_link_config(adapter,
3446 IFLA_VF_LINK_STATE_AUTO,
3447 vf + 1);
3448 }
f9449ab7 3449 }
b4c1df93
SP
3450
3451 if (!old_vfs) {
3452 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3453 if (status) {
3454 dev_err(dev, "SRIOV enable failed\n");
3455 adapter->num_vfs = 0;
3456 goto err;
3457 }
3458 }
f174c7ec
VV
3459
3460 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
f9449ab7
SP
3461 return 0;
3462err:
4c876616
SP
3463 dev_err(dev, "VF setup failed\n");
3464 be_vf_clear(adapter);
f9449ab7
SP
3465 return status;
3466}
3467
f93f160b
VV
3468/* Converting function_mode bits on BE3 to SH mc_type enums */
3469
3470static u8 be_convert_mc_type(u32 function_mode)
3471{
66064dbc 3472 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 3473 return vNIC1;
66064dbc 3474 else if (function_mode & QNQ_MODE)
f93f160b
VV
3475 return FLEX10;
3476 else if (function_mode & VNIC_MODE)
3477 return vNIC2;
3478 else if (function_mode & UMC_ENABLED)
3479 return UMC;
3480 else
3481 return MC_NONE;
3482}
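/* Decode summary, restating the checks above:
 *   VNIC_MODE + QNQ_MODE -> vNIC1
 *   QNQ_MODE alone       -> FLEX10
 *   VNIC_MODE alone      -> vNIC2
 *   UMC_ENABLED          -> UMC
 *   none of the above    -> MC_NONE
 */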
3483
92bf14ab
SP
3484 /* On BE2/BE3, the FW does not report the supported resource limits */
3485static void BEx_get_resources(struct be_adapter *adapter,
3486 struct be_resources *res)
3487{
bec84e6b 3488 bool use_sriov = adapter->num_vfs ? 1 : 0;
92bf14ab
SP
3489
3490 if (be_physfn(adapter))
3491 res->max_uc_mac = BE_UC_PMAC_COUNT;
3492 else
3493 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3494
f93f160b
VV
3495 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3496
3497 if (be_is_mc(adapter)) {
3498 /* Assume that there are 4 channels per port
3499 * when multi-channel is enabled
3500 */
3501 if (be_is_qnq_mode(adapter))
3502 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3503 else
3504 /* In a non-qnq multichannel mode, the pvid
3505 * takes up one vlan entry
3506 */
3507 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3508 } else {
92bf14ab 3509 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
f93f160b
VV
3510 }
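/* A worked example, assuming BE_NUM_VLANS_SUPPORTED is 64 (its value in
 * be.h at this point): QnQ multi-channel leaves 64 / 8 = 8 VLAN filter
 * entries, while non-QnQ multi-channel leaves (64 / 4) - 1 = 15, the pvid
 * consuming one entry.
 */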
3511
92bf14ab
SP
3512 res->max_mcast_mac = BE_MAX_MC;
3513
a5243dab
VV
3514 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3515 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3516 * *only* if it is RSS-capable.
3517 */
3518 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3519 !be_physfn(adapter) || (be_is_mc(adapter) &&
a28277dc 3520 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
92bf14ab 3521 res->max_tx_qs = 1;
a28277dc
SR
3522 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
3523 struct be_resources super_nic_res = {0};
3524
3525 /* On a SuperNIC profile, the driver needs to use the
3526 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
3527 */
3528 be_cmd_get_profile_config(adapter, &super_nic_res, 0);
3529 /* Some old versions of BE3 FW don't report max_tx_qs value */
3530 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
3531 } else {
92bf14ab 3532 res->max_tx_qs = BE3_MAX_TX_QS;
a28277dc 3533 }
92bf14ab
SP
3534
3535 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3536 !use_sriov && be_physfn(adapter))
3537 res->max_rss_qs = (adapter->be3_native) ?
3538 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3539 res->max_rx_qs = res->max_rss_qs + 1;
3540
e3dc867c 3541 if (be_physfn(adapter))
d3518e21 3542 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
e3dc867c
SR
3543 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3544 else
3545 res->max_evt_qs = 1;
92bf14ab
SP
3546
3547 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3548 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3549 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3550}
3551
30128031
SP
3552static void be_setup_init(struct be_adapter *adapter)
3553{
3554 adapter->vlan_prio_bmap = 0xff;
42f11cf2 3555 adapter->phy.link_speed = -1;
30128031
SP
3556 adapter->if_handle = -1;
3557 adapter->be3_native = false;
f66b7cfd 3558 adapter->if_flags = 0;
f25b119c
PR
3559 if (be_physfn(adapter))
3560 adapter->cmd_privileges = MAX_PRIVILEGES;
3561 else
3562 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
3563}
3564
bec84e6b
VV
3565static int be_get_sriov_config(struct be_adapter *adapter)
3566{
3567 struct device *dev = &adapter->pdev->dev;
3568 struct be_resources res = {0};
d3d18312 3569 int max_vfs, old_vfs;
bec84e6b
VV
3570
3571 /* Some old versions of BE3 FW don't report max_vfs value */
d3d18312
SP
3572 be_cmd_get_profile_config(adapter, &res, 0);
3573
bec84e6b
VV
3574 if (BE3_chip(adapter) && !res.max_vfs) {
3575 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
3576 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3577 }
3578
d3d18312 3579 adapter->pool_res = res;
bec84e6b
VV
3580
3581 if (!be_max_vfs(adapter)) {
3582 if (num_vfs)
50762667 3583 dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
bec84e6b
VV
3584 adapter->num_vfs = 0;
3585 return 0;
3586 }
3587
d3d18312
SP
3588 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
3589
bec84e6b
VV
3590 /* validate num_vfs module param */
3591 old_vfs = pci_num_vf(adapter->pdev);
3592 if (old_vfs) {
3593 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3594 if (old_vfs != num_vfs)
3595 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3596 adapter->num_vfs = old_vfs;
3597 } else {
3598 if (num_vfs > be_max_vfs(adapter)) {
3599 dev_info(dev, "Resources unavailable to init %d VFs\n",
3600 num_vfs);
3601 dev_info(dev, "Limiting to %d VFs\n",
3602 be_max_vfs(adapter));
3603 }
3604 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3605 }
3606
3607 return 0;
3608}
3609
92bf14ab 3610static int be_get_resources(struct be_adapter *adapter)
abb93951 3611{
92bf14ab
SP
3612 struct device *dev = &adapter->pdev->dev;
3613 struct be_resources res = {0};
3614 int status;
abb93951 3615
92bf14ab
SP
3616 if (BEx_chip(adapter)) {
3617 BEx_get_resources(adapter, &res);
3618 adapter->res = res;
abb93951
PR
3619 }
3620
92bf14ab
SP
3621 /* For Lancer, SH etc., read per-function resource limits from FW.
3622 * GET_FUNC_CONFIG returns per-function guaranteed limits.
3623 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits
3624 */
3625 if (!BEx_chip(adapter)) {
3626 status = be_cmd_get_func_config(adapter, &res);
3627 if (status)
3628 return status;
abb93951 3629
92bf14ab
SP
3630 /* If RoCE may be enabled, stash away half the EQs for RoCE */
3631 if (be_roce_supported(adapter))
3632 res.max_evt_qs /= 2;
3633 adapter->res = res;
abb93951 3634 }
4c876616 3635
acbafeb1
SP
3636 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3637 be_max_txqs(adapter), be_max_rxqs(adapter),
3638 be_max_rss(adapter), be_max_eqs(adapter),
3639 be_max_vfs(adapter));
3640 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3641 be_max_uc(adapter), be_max_mc(adapter),
3642 be_max_vlans(adapter));
3643
92bf14ab 3644 return 0;
abb93951
PR
3645}
3646
d3d18312
SP
3647static void be_sriov_config(struct be_adapter *adapter)
3648{
3649 struct device *dev = &adapter->pdev->dev;
3650 int status;
3651
3652 status = be_get_sriov_config(adapter);
3653 if (status) {
3654 dev_err(dev, "Failed to query SR-IOV configuration\n");
3655 dev_err(dev, "SR-IOV cannot be enabled\n");
3656 return;
3657 }
3658
3659 /* When the HW is in an SR-IOV capable configuration, the PF-pool
3660 * resources are distributed equally across the max number of
3661 * VFs. The user may request only a subset of the max VFs to be
3662 * enabled. Based on num_vfs, redistribute the resources across
3663 * num_vfs so that each VF has access to more resources.
3664 * This facility is not available in BE3 FW.
3665 * Also, this is done by the FW in the Lancer chip.
3666 */
3667 if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
3668 status = be_cmd_set_sriov_config(adapter,
3669 adapter->pool_res,
3670 adapter->num_vfs);
3671 if (status)
3672 dev_err(dev, "Failed to optimize SR-IOV resources\n");
3673 }
3674}
3675
39f1d94d
SP
3676static int be_get_config(struct be_adapter *adapter)
3677{
542963b7 3678 u16 profile_id;
4c876616 3679 int status;
39f1d94d 3680
e97e3cda 3681 status = be_cmd_query_fw_cfg(adapter);
abb93951 3682 if (status)
92bf14ab 3683 return status;
abb93951 3684
21252377
VV
3685 be_cmd_query_port_name(adapter);
3686
3687 if (be_physfn(adapter)) {
542963b7
VV
3688 status = be_cmd_get_active_profile(adapter, &profile_id);
3689 if (!status)
3690 dev_info(&adapter->pdev->dev,
3691 "Using profile 0x%x\n", profile_id);
962bcb75 3692 }
bec84e6b 3693
d3d18312
SP
3694 if (!BE2_chip(adapter) && be_physfn(adapter))
3695 be_sriov_config(adapter);
542963b7 3696
92bf14ab
SP
3697 status = be_get_resources(adapter);
3698 if (status)
3699 return status;
abb93951 3700
46ee9c14
RN
3701 adapter->pmac_id = kcalloc(be_max_uc(adapter),
3702 sizeof(*adapter->pmac_id), GFP_KERNEL);
92bf14ab
SP
3703 if (!adapter->pmac_id)
3704 return -ENOMEM;
abb93951 3705
92bf14ab
SP
3706 /* Sanitize cfg_num_qs based on HW and platform limits */
3707 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3708
3709 return 0;
39f1d94d
SP
3710}
3711
95046b92
SP
3712static int be_mac_setup(struct be_adapter *adapter)
3713{
3714 u8 mac[ETH_ALEN];
3715 int status;
3716
3717 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3718 status = be_cmd_get_perm_mac(adapter, mac);
3719 if (status)
3720 return status;
3721
3722 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3723 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3724 } else {
3725 /* Maybe the HW was reset; dev_addr must be re-programmed */
3726 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3727 }
3728
2c7a9dc1
AK
3729 /* For BE3-R VFs, the PF programs the initial MAC address */
3730 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3731 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3732 &adapter->pmac_id[0], 0);
95046b92
SP
3733 return 0;
3734}
3735
68d7bdcb
SP
3736static void be_schedule_worker(struct be_adapter *adapter)
3737{
3738 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3739 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3740}
3741
7707133c 3742static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 3743{
68d7bdcb 3744 struct net_device *netdev = adapter->netdev;
10ef9ab4 3745 int status;
ba343c77 3746
7707133c 3747 status = be_evt_queues_create(adapter);
abb93951
PR
3748 if (status)
3749 goto err;
73d540f2 3750
7707133c 3751 status = be_tx_qs_create(adapter);
c2bba3df
SK
3752 if (status)
3753 goto err;
10ef9ab4 3754
7707133c 3755 status = be_rx_cqs_create(adapter);
10ef9ab4 3756 if (status)
a54769f5 3757 goto err;
6b7c5b94 3758
7707133c 3759 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
3760 if (status)
3761 goto err;
3762
68d7bdcb
SP
3763 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3764 if (status)
3765 goto err;
3766
3767 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3768 if (status)
3769 goto err;
3770
7707133c
SP
3771 return 0;
3772err:
3773 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3774 return status;
3775}
3776
68d7bdcb
SP
3777int be_update_queues(struct be_adapter *adapter)
3778{
3779 struct net_device *netdev = adapter->netdev;
3780 int status;
3781
3782 if (netif_running(netdev))
3783 be_close(netdev);
3784
3785 be_cancel_worker(adapter);
3786
3787 /* If any vectors have been shared with RoCE, we cannot re-program
3788 * the MSIx table.
3789 */
3790 if (!adapter->num_msix_roce_vec)
3791 be_msix_disable(adapter);
3792
3793 be_clear_queues(adapter);
3794
3795 if (!msix_enabled(adapter)) {
3796 status = be_msix_enable(adapter);
3797 if (status)
3798 return status;
3799 }
3800
3801 status = be_setup_queues(adapter);
3802 if (status)
3803 return status;
3804
3805 be_schedule_worker(adapter);
3806
3807 if (netif_running(netdev))
3808 status = be_open(netdev);
3809
3810 return status;
3811}
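/* The flow above: quiesce (be_close() + cancel worker), re-program MSI-X
 * unless vectors are shared with RoCE, destroy and re-create all queues,
 * then resume (reschedule worker + be_open()). This runs under rtnl_lock(),
 * which the ethtool/ndo callers are expected to hold.
 */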
3812
f7062ee5
SP
3813static inline int fw_major_num(const char *fw_ver)
3814{
3815 int fw_major = 0, i;
3816
3817 i = sscanf(fw_ver, "%d.", &fw_major);
3818 if (i != 1)
3819 return 0;
3820
3821 return fw_major;
3822}
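/* For example, fw_major_num("4.6.62.0") returns 4; a version string that
 * does not begin with an integer returns 0.
 */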
3823
7707133c
SP
3824static int be_setup(struct be_adapter *adapter)
3825{
3826 struct device *dev = &adapter->pdev->dev;
7707133c
SP
3827 int status;
3828
3829 be_setup_init(adapter);
3830
3831 if (!lancer_chip(adapter))
3832 be_cmd_req_native_mode(adapter);
3833
3834 status = be_get_config(adapter);
10ef9ab4 3835 if (status)
a54769f5 3836 goto err;
6b7c5b94 3837
7707133c 3838 status = be_msix_enable(adapter);
10ef9ab4 3839 if (status)
a54769f5 3840 goto err;
6b7c5b94 3841
0700d816
KA
3842 status = be_if_create(adapter, &adapter->if_handle,
3843 be_if_cap_flags(adapter), 0);
7707133c 3844 if (status)
a54769f5 3845 goto err;
6b7c5b94 3846
68d7bdcb
SP
3847 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3848 rtnl_lock();
7707133c 3849 status = be_setup_queues(adapter);
68d7bdcb 3850 rtnl_unlock();
95046b92 3851 if (status)
1578e777
PR
3852 goto err;
3853
7707133c 3854 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
7707133c
SP
3855
3856 status = be_mac_setup(adapter);
10ef9ab4
SP
3857 if (status)
3858 goto err;
3859
e97e3cda 3860 be_cmd_get_fw_ver(adapter);
acbafeb1 3861 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
5a56eb10 3862
e9e2a904 3863 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
50762667 3864 dev_err(dev, "Firmware on card is old (%s), IRQs may not work\n",
e9e2a904
SK
3865 adapter->fw_ver);
3866 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3867 }
3868
1d1e9a46 3869 if (adapter->vlans_added)
10329df8 3870 be_vid_config(adapter);
7ab8b0b4 3871
a54769f5 3872 be_set_rx_mode(adapter->netdev);
5fb379ee 3873
76a9e08e
SR
3874 be_cmd_get_acpi_wol_cap(adapter);
3875
00d594c3
KA
3876 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
3877 adapter->rx_fc);
3878 if (status)
3879 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
3880 &adapter->rx_fc);
590c391d 3881
00d594c3
KA
3882 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
3883 adapter->tx_fc, adapter->rx_fc);
2dc1deb6 3884
bdce2ad7
SR
3885 if (be_physfn(adapter))
3886 be_cmd_set_logical_link_config(adapter,
3887 IFLA_VF_LINK_STATE_AUTO, 0);
3888
bec84e6b
VV
3889 if (adapter->num_vfs)
3890 be_vf_setup(adapter);
f9449ab7 3891
f25b119c
PR
3892 status = be_cmd_get_phy_info(adapter);
3893 if (!status && be_pause_supported(adapter))
42f11cf2
AK
3894 adapter->phy.fc_autoneg = 1;
3895
68d7bdcb 3896 be_schedule_worker(adapter);
e1ad8e33 3897 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 3898 return 0;
a54769f5
SP
3899err:
3900 be_clear(adapter);
3901 return status;
3902}
6b7c5b94 3903
66268739
IV
3904#ifdef CONFIG_NET_POLL_CONTROLLER
3905static void be_netpoll(struct net_device *netdev)
3906{
3907 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3908 struct be_eq_obj *eqo;
66268739
IV
3909 int i;
3910
e49cc34f
SP
3911 for_all_evt_queues(adapter, eqo, i) {
3912 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3913 napi_schedule(&eqo->napi);
3914 }
66268739
IV
3915}
3916#endif
3917
96c9b2e4 3918static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
fa9a6fed 3919
306f1348
SP
3920static bool phy_flashing_required(struct be_adapter *adapter)
3921{
e02cfd96 3922 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
42f11cf2 3923 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3924}
3925
c165541e
PR
3926static bool is_comp_in_ufi(struct be_adapter *adapter,
3927 struct flash_section_info *fsec, int type)
3928{
3929 int i = 0, img_type = 0;
3930 struct flash_section_info_g2 *fsec_g2 = NULL;
3931
ca34fe38 3932 if (BE2_chip(adapter))
c165541e
PR
3933 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3934
3935 for (i = 0; i < MAX_FLASH_COMP; i++) {
3936 if (fsec_g2)
3937 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3938 else
3939 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3940
3941 if (img_type == type)
3942 return true;
3943 }
3944 return false;
3945
3946}
3947
4188e7df 3948static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
748b539a
SP
3949 int header_size,
3950 const struct firmware *fw)
c165541e
PR
3951{
3952 struct flash_section_info *fsec = NULL;
3953 const u8 *p = fw->data;
3954
3955 p += header_size;
3956 while (p < (fw->data + fw->size)) {
3957 fsec = (struct flash_section_info *)p;
3958 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3959 return fsec;
3960 p += 32;
3961 }
3962 return NULL;
3963}
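/* Note the 32-byte stride: section headers are assumed to start on 32-byte
 * boundaries within the UFI payload.
 */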
3964
96c9b2e4
VV
3965static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3966 u32 img_offset, u32 img_size, int hdr_size,
3967 u16 img_optype, bool *crc_match)
3968{
3969 u32 crc_offset;
3970 int status;
3971 u8 crc[4];
3972
70a7b525
VV
3973 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
3974 img_size - 4);
96c9b2e4
VV
3975 if (status)
3976 return status;
3977
3978 crc_offset = hdr_size + img_offset + img_size - 4;
3979
3980 /* Skip flashing if the CRC of the flashed region matches */
3981 if (!memcmp(crc, p + crc_offset, 4))
3982 *crc_match = true;
3983 else
3984 *crc_match = false;
3985
3986 return status;
3987}
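/* Each flash region keeps its CRC in its last 4 bytes; the matching bytes in
 * the UFI file sit at hdr_size + img_offset + img_size - 4, which is the
 * offset compared above.
 */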
3988
773a2d7c 3989static int be_flash(struct be_adapter *adapter, const u8 *img,
70a7b525
VV
3990 struct be_dma_mem *flash_cmd, int optype, int img_size,
3991 u32 img_offset)
773a2d7c 3992{
70a7b525 3993 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
773a2d7c 3994 struct be_cmd_write_flashrom *req = flash_cmd->va;
96c9b2e4 3995 int status;
773a2d7c 3996
773a2d7c
PR
3997 while (total_bytes) {
3998 num_bytes = min_t(u32, 32*1024, total_bytes);
3999
4000 total_bytes -= num_bytes;
4001
4002 if (!total_bytes) {
4003 if (optype == OPTYPE_PHY_FW)
4004 flash_op = FLASHROM_OPER_PHY_FLASH;
4005 else
4006 flash_op = FLASHROM_OPER_FLASH;
4007 } else {
4008 if (optype == OPTYPE_PHY_FW)
4009 flash_op = FLASHROM_OPER_PHY_SAVE;
4010 else
4011 flash_op = FLASHROM_OPER_SAVE;
4012 }
4013
be716446 4014 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
4015 img += num_bytes;
4016 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
70a7b525
VV
4017 flash_op, img_offset +
4018 bytes_sent, num_bytes);
4c60005f 4019 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
96c9b2e4
VV
4020 optype == OPTYPE_PHY_FW)
4021 break;
4022 else if (status)
773a2d7c 4023 return status;
70a7b525
VV
4024
4025 bytes_sent += num_bytes;
773a2d7c
PR
4026 }
4027 return 0;
4028}
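/* Chunking scheme: every 32KB chunk except the last is written with a SAVE
 * op; the final chunk uses a FLASH op (or the _PHY variants for PHY images),
 * which commits the accumulated image.
 */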
4029
0ad3157e 4030/* For BE2, BE3 and BE3-R */
ca34fe38 4031static int be_flash_BEx(struct be_adapter *adapter,
748b539a
SP
4032 const struct firmware *fw,
4033 struct be_dma_mem *flash_cmd, int num_of_images)
84517482 4034{
c165541e 4035 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
96c9b2e4 4036 struct device *dev = &adapter->pdev->dev;
c165541e 4037 struct flash_section_info *fsec = NULL;
96c9b2e4
VV
4038 int status, i, filehdr_size, num_comp;
4039 const struct flash_comp *pflashcomp;
4040 bool crc_match;
4041 const u8 *p;
c165541e
PR
4042
4043 struct flash_comp gen3_flash_types[] = {
4044 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
4045 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
4046 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
4047 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
4048 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
4049 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
4050 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
4051 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
4052 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
4053 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
4054 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
4055 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
4056 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
4057 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
4058 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
4059 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
4060 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
4061 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
4062 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
4063 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 4064 };
c165541e
PR
4065
4066 struct flash_comp gen2_flash_types[] = {
4067 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
4068 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
4069 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
4070 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
4071 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
4072 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
4073 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
4074 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
4075 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
4076 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
4077 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
4078 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
4079 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
4080 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
4081 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
4082 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
4083 };
4084
ca34fe38 4085 if (BE3_chip(adapter)) {
3f0d4560
AK
4086 pflashcomp = gen3_flash_types;
4087 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 4088 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
4089 } else {
4090 pflashcomp = gen2_flash_types;
4091 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 4092 num_comp = ARRAY_SIZE(gen2_flash_types);
5d3acd0d 4093 img_hdrs_size = 0;
84517482 4094 }
ca34fe38 4095
c165541e
PR
4096 /* Get flash section info */
4097 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4098 if (!fsec) {
96c9b2e4 4099 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
c165541e
PR
4100 return -1;
4101 }
9fe96934 4102 for (i = 0; i < num_comp; i++) {
c165541e 4103 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 4104 continue;
c165541e
PR
4105
4106 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
4107 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
4108 continue;
4109
773a2d7c
PR
4110 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
4111 !phy_flashing_required(adapter))
306f1348 4112 continue;
c165541e 4113
773a2d7c 4114 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
96c9b2e4
VV
4115 status = be_check_flash_crc(adapter, fw->data,
4116 pflashcomp[i].offset,
4117 pflashcomp[i].size,
4118 filehdr_size +
4119 img_hdrs_size,
4120 OPTYPE_REDBOOT, &crc_match);
4121 if (status) {
4122 dev_err(dev,
4123 "Could not get CRC for 0x%x region\n",
4124 pflashcomp[i].optype);
4125 continue;
4126 }
4127
4128 if (crc_match)
773a2d7c
PR
4129 continue;
4130 }
c165541e 4131
96c9b2e4
VV
4132 p = fw->data + filehdr_size + pflashcomp[i].offset +
4133 img_hdrs_size;
306f1348
SP
4134 if (p + pflashcomp[i].size > fw->data + fw->size)
4135 return -1;
773a2d7c
PR
4136
4137 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
70a7b525 4138 pflashcomp[i].size, 0);
773a2d7c 4139 if (status) {
96c9b2e4 4140 dev_err(dev, "Flashing section type 0x%x failed\n",
773a2d7c
PR
4141 pflashcomp[i].img_type);
4142 return status;
84517482 4143 }
84517482 4144 }
84517482
AK
4145 return 0;
4146}
4147
96c9b2e4
VV
4148static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4149{
4150 u32 img_type = le32_to_cpu(fsec_entry.type);
4151 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4152
4153 if (img_optype != 0xFFFF)
4154 return img_optype;
4155
4156 switch (img_type) {
4157 case IMAGE_FIRMWARE_iSCSI:
4158 img_optype = OPTYPE_ISCSI_ACTIVE;
4159 break;
4160 case IMAGE_BOOT_CODE:
4161 img_optype = OPTYPE_REDBOOT;
4162 break;
4163 case IMAGE_OPTION_ROM_ISCSI:
4164 img_optype = OPTYPE_BIOS;
4165 break;
4166 case IMAGE_OPTION_ROM_PXE:
4167 img_optype = OPTYPE_PXE_BIOS;
4168 break;
4169 case IMAGE_OPTION_ROM_FCoE:
4170 img_optype = OPTYPE_FCOE_BIOS;
4171 break;
4172 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4173 img_optype = OPTYPE_ISCSI_BACKUP;
4174 break;
4175 case IMAGE_NCSI:
4176 img_optype = OPTYPE_NCSI_FW;
4177 break;
4178 case IMAGE_FLASHISM_JUMPVECTOR:
4179 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4180 break;
4181 case IMAGE_FIRMWARE_PHY:
4182 img_optype = OPTYPE_SH_PHY_FW;
4183 break;
4184 case IMAGE_REDBOOT_DIR:
4185 img_optype = OPTYPE_REDBOOT_DIR;
4186 break;
4187 case IMAGE_REDBOOT_CONFIG:
4188 img_optype = OPTYPE_REDBOOT_CONFIG;
4189 break;
4190 case IMAGE_UFI_DIR:
4191 img_optype = OPTYPE_UFI_DIR;
4192 break;
4193 default:
4194 break;
4195 }
4196
4197 return img_optype;
4198}
4199
773a2d7c 4200static int be_flash_skyhawk(struct be_adapter *adapter,
748b539a
SP
4201 const struct firmware *fw,
4202 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 4203{
773a2d7c 4204 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
70a7b525 4205 bool crc_match, old_fw_img, flash_offset_support = true;
96c9b2e4 4206 struct device *dev = &adapter->pdev->dev;
773a2d7c 4207 struct flash_section_info *fsec = NULL;
96c9b2e4 4208 u32 img_offset, img_size, img_type;
70a7b525 4209 u16 img_optype, flash_optype;
96c9b2e4 4210 int status, i, filehdr_size;
96c9b2e4 4211 const u8 *p;
773a2d7c
PR
4212
4213 filehdr_size = sizeof(struct flash_file_hdr_g3);
4214 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4215 if (!fsec) {
96c9b2e4 4216 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
56ace3a0 4217 return -EINVAL;
773a2d7c
PR
4218 }
4219
70a7b525 4220retry_flash:
773a2d7c
PR
4221 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4222 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4223 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
96c9b2e4
VV
4224 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4225 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4226 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
773a2d7c 4227
96c9b2e4 4228 if (img_optype == 0xFFFF)
773a2d7c 4229 continue;
70a7b525
VV
4230
4231 if (flash_offset_support)
4232 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4233 else
4234 flash_optype = img_optype;
4235
96c9b2e4
VV
4236 /* Don't bother verifying CRC if an old FW image is being
4237 * flashed
4238 */
4239 if (old_fw_img)
4240 goto flash;
4241
4242 status = be_check_flash_crc(adapter, fw->data, img_offset,
4243 img_size, filehdr_size +
70a7b525 4244 img_hdrs_size, flash_optype,
96c9b2e4 4245 &crc_match);
4c60005f
KA
4246 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4247 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
70a7b525
VV
4248 /* The current FW image on the card does not support
4249 * OFFSET based flashing. Retry using older mechanism
4250 * of OPTYPE based flashing
4251 */
4252 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4253 flash_offset_support = false;
4254 goto retry_flash;
4255 }
4256
4257 /* The current FW image on the card does not recognize
4258 * the new FLASH op_type. The FW download is partially
4259 * complete. Reboot the server now to enable the FW image
4260 * to recognize the new FLASH op_type. To complete the
4261 * remaining process, download the same FW again after
4262 * the reboot.
4263 */
96c9b2e4
VV
4264 dev_err(dev, "Flash incomplete. Reset the server\n");
4265 dev_err(dev, "Download FW image again after reset\n");
4266 return -EAGAIN;
4267 } else if (status) {
4268 dev_err(dev, "Could not get CRC for 0x%x region\n",
4269 img_optype);
4270 return -EFAULT;
773a2d7c
PR
4271 }
4272
96c9b2e4
VV
4273 if (crc_match)
4274 continue;
773a2d7c 4275
96c9b2e4
VV
4276flash:
4277 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
773a2d7c
PR
4278 if (p + img_size > fw->data + fw->size)
4279 return -1;
4280
70a7b525
VV
4281 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4282 img_offset);
4283
4284 /* The current FW image on the card does not support OFFSET
4285 * based flashing. Retry using older mechanism of OPTYPE based
4286 * flashing
4287 */
4288 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4289 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4290 flash_offset_support = false;
4291 goto retry_flash;
4292 }
4293
96c9b2e4
VV
4294 /* For old FW images, ignore an ILLEGAL_FIELD error or errors on
4295 * the UFI_DIR region
4296 */
4c60005f
KA
4297 if (old_fw_img &&
4298 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4299 (img_optype == OPTYPE_UFI_DIR &&
4300 base_status(status) == MCC_STATUS_FAILED))) {
96c9b2e4
VV
4301 continue;
4302 } else if (status) {
4303 dev_err(dev, "Flashing section type 0x%x failed\n",
4304 img_type);
4305 return -EFAULT;
773a2d7c
PR
4306 }
4307 }
4308 return 0;
3f0d4560
AK
4309}
4310
485bf569 4311static int lancer_fw_download(struct be_adapter *adapter,
748b539a 4312 const struct firmware *fw)
84517482 4313{
485bf569
SN
4314#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4315#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
bb864e07 4316 struct device *dev = &adapter->pdev->dev;
84517482 4317 struct be_dma_mem flash_cmd;
485bf569
SN
4318 const u8 *data_ptr = NULL;
4319 u8 *dest_image_ptr = NULL;
4320 size_t image_size = 0;
4321 u32 chunk_size = 0;
4322 u32 data_written = 0;
4323 u32 offset = 0;
4324 int status = 0;
4325 u8 add_status = 0;
f67ef7ba 4326 u8 change_status;
84517482 4327
485bf569 4328 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
bb864e07 4329 dev_err(dev, "FW image size should be a multiple of 4\n");
3fb8cb80 4330 return -EINVAL;
d9efd2af
SB
4331 }
4332
485bf569
SN
4333 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4334 + LANCER_FW_DOWNLOAD_CHUNK;
bb864e07 4335 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
d0320f75 4336 &flash_cmd.dma, GFP_KERNEL);
3fb8cb80
KA
4337 if (!flash_cmd.va)
4338 return -ENOMEM;
84517482 4339
485bf569
SN
4340 dest_image_ptr = flash_cmd.va +
4341 sizeof(struct lancer_cmd_req_write_object);
4342 image_size = fw->size;
4343 data_ptr = fw->data;
4344
4345 while (image_size) {
4346 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4347
4348 /* Copy the image chunk content. */
4349 memcpy(dest_image_ptr, data_ptr, chunk_size);
4350
4351 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
4352 chunk_size, offset,
4353 LANCER_FW_DOWNLOAD_LOCATION,
4354 &data_written, &change_status,
4355 &add_status);
485bf569
SN
4356 if (status)
4357 break;
4358
4359 offset += data_written;
4360 data_ptr += data_written;
4361 image_size -= data_written;
4362 }
4363
4364 if (!status) {
4365 /* Commit the FW written */
4366 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
4367 0, offset,
4368 LANCER_FW_DOWNLOAD_LOCATION,
4369 &data_written, &change_status,
4370 &add_status);
485bf569
SN
4371 }
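/* The commit is a zero-length write at the end-of-image offset, which
 * signals the FW that all chunks have been transferred.
 */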
4372
bb864e07 4373 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
485bf569 4374 if (status) {
bb864e07 4375 dev_err(dev, "Firmware load error\n");
3fb8cb80 4376 return be_cmd_status(status);
485bf569
SN
4377 }
4378
bb864e07
KA
4379 dev_info(dev, "Firmware flashed successfully\n");
4380
f67ef7ba 4381 if (change_status == LANCER_FW_RESET_NEEDED) {
bb864e07 4382 dev_info(dev, "Resetting adapter to activate new FW\n");
5c510811
SK
4383 status = lancer_physdev_ctrl(adapter,
4384 PHYSDEV_CONTROL_FW_RESET_MASK);
f67ef7ba 4385 if (status) {
bb864e07
KA
4386 dev_err(dev, "Adapter busy, could not reset FW\n");
4387 dev_err(dev, "Reboot server to activate new FW\n");
f67ef7ba
PR
4388 }
4389 } else if (change_status != LANCER_NO_RESET_NEEDED) {
bb864e07 4390 dev_info(dev, "Reboot server to activate new FW\n");
f67ef7ba 4391 }
3fb8cb80
KA
4392
4393 return 0;
485bf569
SN
4394}
4395
5d3acd0d
VV
4396#define BE2_UFI 2
4397#define BE3_UFI 3
4398#define BE3R_UFI 10
4399#define SH_UFI 4
81a9e226 4400#define SH_P2_UFI 11
5d3acd0d 4401
ca34fe38 4402static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 4403 struct flash_file_hdr_g3 *fhdr)
773a2d7c 4404{
5d3acd0d
VV
4405 if (!fhdr) {
4406 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4407 return -1;
4408 }
773a2d7c 4409
5d3acd0d
VV
4410 /* First letter of the build version is used to identify
4411 * which chip this image file is meant for.
4412 */
4413 switch (fhdr->build[0]) {
4414 case BLD_STR_UFI_TYPE_SH:
81a9e226
VV
4415 return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
4416 SH_UFI;
5d3acd0d
VV
4417 case BLD_STR_UFI_TYPE_BE3:
4418 return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
4419 BE3_UFI;
4420 case BLD_STR_UFI_TYPE_BE2:
4421 return BE2_UFI;
4422 default:
4423 return -1;
4424 }
4425}
773a2d7c 4426
5d3acd0d
VV
4427/* Check if the flash image file is compatible with the adapter that
4428 * is being flashed.
4429 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
81a9e226 4430 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
5d3acd0d
VV
4431 */
4432static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4433 struct flash_file_hdr_g3 *fhdr)
4434{
4435 int ufi_type = be_get_ufi_type(adapter, fhdr);
4436
4437 switch (ufi_type) {
81a9e226 4438 case SH_P2_UFI:
5d3acd0d 4439 return skyhawk_chip(adapter);
81a9e226
VV
4440 case SH_UFI:
4441 return (skyhawk_chip(adapter) &&
4442 adapter->asic_rev < ASIC_REV_P2);
5d3acd0d
VV
4443 case BE3R_UFI:
4444 return BE3_chip(adapter);
4445 case BE3_UFI:
4446 return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
4447 case BE2_UFI:
4448 return BE2_chip(adapter);
4449 default:
4450 return false;
4451 }
773a2d7c
PR
4452}
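/* Summary of the mapping above:
 *   SH_P2_UFI -> any Skyhawk     SH_UFI  -> Skyhawk older than P2
 *   BE3R_UFI  -> any BE3         BE3_UFI -> BE3 older than B0
 *   BE2_UFI   -> BE2
 */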
4453
485bf569
SN
4454 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
4455{
5d3acd0d 4456 struct device *dev = &adapter->pdev->dev;
485bf569 4457 struct flash_file_hdr_g3 *fhdr3;
5d3acd0d
VV
4458 struct image_hdr *img_hdr_ptr;
4459 int status = 0, i, num_imgs;
485bf569 4460 struct be_dma_mem flash_cmd;
84517482 4461
5d3acd0d
VV
4462 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
4463 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
4464 dev_err(dev, "Flash image is not compatible with adapter\n");
4465 return -EINVAL;
84517482
AK
4466 }
4467
5d3acd0d
VV
4468 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4469 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4470 GFP_KERNEL);
4471 if (!flash_cmd.va)
4472 return -ENOMEM;
773a2d7c 4473
773a2d7c
PR
4474 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4475 for (i = 0; i < num_imgs; i++) {
4476 img_hdr_ptr = (struct image_hdr *)(fw->data +
4477 (sizeof(struct flash_file_hdr_g3) +
4478 i * sizeof(struct image_hdr)));
5d3acd0d
VV
4479 if (!BE2_chip(adapter) &&
4480 le32_to_cpu(img_hdr_ptr->imageid) != 1)
4481 continue;
84517482 4482
5d3acd0d
VV
4483 if (skyhawk_chip(adapter))
4484 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
4485 num_imgs);
4486 else
4487 status = be_flash_BEx(adapter, fw, &flash_cmd,
4488 num_imgs);
84517482
AK
4489 }
4490
5d3acd0d
VV
4491 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4492 if (!status)
4493 dev_info(dev, "Firmware flashed successfully\n");
84517482 4494
485bf569
SN
4495 return status;
4496}
4497
4498int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4499{
4500 const struct firmware *fw;
4501 int status;
4502
4503 if (!netif_running(adapter->netdev)) {
4504 dev_err(&adapter->pdev->dev,
4505 "Firmware load not allowed (interface is down)\n");
940a3fcd 4506 return -ENETDOWN;
485bf569
SN
4507 }
4508
4509 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4510 if (status)
4511 goto fw_exit;
4512
4513 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4514
4515 if (lancer_chip(adapter))
4516 status = lancer_fw_download(adapter, fw);
4517 else
4518 status = be_fw_download(adapter, fw);
4519
eeb65ced 4520 if (!status)
e97e3cda 4521 be_cmd_get_fw_ver(adapter);
eeb65ced 4522
84517482
AK
4523fw_exit:
4524 release_firmware(fw);
4525 return status;
4526}
4527
add511b3
RP
4528static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4529 u16 flags)
a77dcb8c
AK
4530{
4531 struct be_adapter *adapter = netdev_priv(dev);
4532 struct nlattr *attr, *br_spec;
4533 int rem;
4534 int status = 0;
4535 u16 mode = 0;
4536
4537 if (!sriov_enabled(adapter))
4538 return -EOPNOTSUPP;
4539
4540 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4ea85e83
TG
4541 if (!br_spec)
4542 return -EINVAL;
a77dcb8c
AK
4543
4544 nla_for_each_nested(attr, br_spec, rem) {
4545 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4546 continue;
4547
b7c1a314
TG
4548 if (nla_len(attr) < sizeof(mode))
4549 return -EINVAL;
4550
a77dcb8c
AK
4551 mode = nla_get_u16(attr);
4552 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4553 return -EINVAL;
4554
4555 status = be_cmd_set_hsw_config(adapter, 0, 0,
4556 adapter->if_handle,
4557 mode == BRIDGE_MODE_VEPA ?
4558 PORT_FWD_TYPE_VEPA :
4559 PORT_FWD_TYPE_VEB);
4560 if (status)
4561 goto err;
4562
4563 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4564 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4565
4566 return status;
4567 }
4568err:
4569 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4570 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4571
4572 return status;
4573}
4574
4575static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
748b539a 4576 struct net_device *dev, u32 filter_mask)
a77dcb8c
AK
4577{
4578 struct be_adapter *adapter = netdev_priv(dev);
4579 int status = 0;
4580 u8 hsw_mode;
4581
4582 if (!sriov_enabled(adapter))
4583 return 0;
4584
4585 /* BE and Lancer chips support VEB mode only */
4586 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4587 hsw_mode = PORT_FWD_TYPE_VEB;
4588 } else {
4589 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4590 adapter->if_handle, &hsw_mode);
4591 if (status)
4592 return 0;
4593 }
4594
4595 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4596 hsw_mode == PORT_FWD_TYPE_VEPA ?
2c3c031c
SF
4597 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
4598 0, 0);
a77dcb8c
AK
4599}
4600
c5abe7c0 4601#ifdef CONFIG_BE2NET_VXLAN
630f4b70
SB
4602/* VxLAN offload Notes:
4603 *
4604 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4605 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4606 * is expected to work across all types of IP tunnels once exported. Skyhawk
4607 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
16dde0d6
SB
4608 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4609 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4610 * those other tunnels are unexported on the fly through ndo_features_check().
630f4b70
SB
4611 *
4612 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4613 * adds more than one port, disable offloads and don't re-enable them again
4614 * until after all the tunnels are removed.
4615 */
c9c47142
SP
4616static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4617 __be16 port)
4618{
4619 struct be_adapter *adapter = netdev_priv(netdev);
4620 struct device *dev = &adapter->pdev->dev;
4621 int status;
4622
4623 if (lancer_chip(adapter) || BEx_chip(adapter))
4624 return;
4625
4626 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
c9c47142
SP
4627 dev_info(dev,
4628 "Only one UDP port supported for VxLAN offloads\n");
630f4b70
SB
4629 dev_info(dev, "Disabling VxLAN offloads\n");
4630 adapter->vxlan_port_count++;
4631 goto err;
c9c47142
SP
4632 }
4633
630f4b70
SB
4634 if (adapter->vxlan_port_count++ >= 1)
4635 return;
4636
c9c47142
SP
4637 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4638 OP_CONVERT_NORMAL_TO_TUNNEL);
4639 if (status) {
4640 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4641 goto err;
4642 }
4643
4644 status = be_cmd_set_vxlan_port(adapter, port);
4645 if (status) {
4646 dev_warn(dev, "Failed to add VxLAN port\n");
4647 goto err;
4648 }
4649 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4650 adapter->vxlan_port = port;
4651
630f4b70
SB
4652 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4653 NETIF_F_TSO | NETIF_F_TSO6 |
4654 NETIF_F_GSO_UDP_TUNNEL;
4655 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
ac9a3d84 4656 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
630f4b70 4657
c9c47142
SP
4658 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4659 be16_to_cpu(port));
4660 return;
4661err:
4662 be_disable_vxlan_offloads(adapter);
c9c47142
SP
4663}
4664
4665static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4666 __be16 port)
4667{
4668 struct be_adapter *adapter = netdev_priv(netdev);
4669
4670 if (lancer_chip(adapter) || BEx_chip(adapter))
4671 return;
4672
4673 if (adapter->vxlan_port != port)
630f4b70 4674 goto done;
c9c47142
SP
4675
4676 be_disable_vxlan_offloads(adapter);
4677
4678 dev_info(&adapter->pdev->dev,
4679 "Disabled VxLAN offloads for UDP port %d\n",
4680 be16_to_cpu(port));
630f4b70
SB
4681done:
4682 adapter->vxlan_port_count--;
c9c47142 4683}
725d548f 4684
5f35227e
JG
4685static netdev_features_t be_features_check(struct sk_buff *skb,
4686 struct net_device *dev,
4687 netdev_features_t features)
725d548f 4688{
16dde0d6
SB
4689 struct be_adapter *adapter = netdev_priv(dev);
4690 u8 l4_hdr = 0;
4691
4692 /* The code below restricts offload features for some tunneled packets.
4693 * Offload features for normal (non tunnel) packets are unchanged.
4694 */
4695 if (!skb->encapsulation ||
4696 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
4697 return features;
4698
4699 /* It's an encapsulated packet and VxLAN offloads are enabled. We
4700 * should disable tunnel offload features if it's not a VxLAN packet,
4701 * as tunnel offloads have been enabled only for VxLAN. This is done to
4703 * allow other tunneled traffic like GRE to work fine while VxLAN
4703 * offloads are configured in Skyhawk-R.
4704 */
4705 switch (vlan_get_protocol(skb)) {
4706 case htons(ETH_P_IP):
4707 l4_hdr = ip_hdr(skb)->protocol;
4708 break;
4709 case htons(ETH_P_IPV6):
4710 l4_hdr = ipv6_hdr(skb)->nexthdr;
4711 break;
4712 default:
4713 return features;
4714 }
4715
4716 if (l4_hdr != IPPROTO_UDP ||
4717 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
4718 skb->inner_protocol != htons(ETH_P_TEB) ||
4719 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
4720 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
4721 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
4722
4723 return features;
725d548f 4724}
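/* Example: with VxLAN offloads enabled, a GRE-encapsulated skb reaches this
 * point with l4_hdr != IPPROTO_UDP and so loses the checksum/GSO features,
 * while a well-formed VxLAN frame keeps the full feature set.
 */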
c5abe7c0 4725#endif
c9c47142 4726
e5686ad8 4727static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
4728 .ndo_open = be_open,
4729 .ndo_stop = be_close,
4730 .ndo_start_xmit = be_xmit,
a54769f5 4731 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
4732 .ndo_set_mac_address = be_mac_addr_set,
4733 .ndo_change_mtu = be_change_mtu,
ab1594e9 4734 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 4735 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
4736 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4737 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 4738 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 4739 .ndo_set_vf_vlan = be_set_vf_vlan,
ed616689 4740 .ndo_set_vf_rate = be_set_vf_tx_rate,
66268739 4741 .ndo_get_vf_config = be_get_vf_config,
bdce2ad7 4742 .ndo_set_vf_link_state = be_set_vf_link_state,
66268739
IV
4743#ifdef CONFIG_NET_POLL_CONTROLLER
4744 .ndo_poll_controller = be_netpoll,
4745#endif
a77dcb8c
AK
4746 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4747 .ndo_bridge_getlink = be_ndo_bridge_getlink,
6384a4d0 4748#ifdef CONFIG_NET_RX_BUSY_POLL
c9c47142 4749 .ndo_busy_poll = be_busy_poll,
6384a4d0 4750#endif
c5abe7c0 4751#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
4752 .ndo_add_vxlan_port = be_add_vxlan_port,
4753 .ndo_del_vxlan_port = be_del_vxlan_port,
5f35227e 4754 .ndo_features_check = be_features_check,
c5abe7c0 4755#endif
6b7c5b94
SP
4756};
4757
4758static void be_netdev_init(struct net_device *netdev)
4759{
4760 struct be_adapter *adapter = netdev_priv(netdev);
4761
6332c8d3 4762 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68 4763 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
f646968f 4764 NETIF_F_HW_VLAN_CTAG_TX;
8b8ddc68
MM
4765 if (be_multi_rxq(adapter))
4766 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
4767
4768 netdev->features |= netdev->hw_features |
f646968f 4769 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4b972914 4770
eb8a50d9 4771 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 4772 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 4773
fbc13f01
AK
4774 netdev->priv_flags |= IFF_UNICAST_FLT;
4775
6b7c5b94
SP
4776 netdev->flags |= IFF_MULTICAST;
4777
b7e5887e 4778 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 4779
10ef9ab4 4780 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94 4781
7ad24ea4 4782 netdev->ethtool_ops = &be_ethtool_ops;
6b7c5b94
SP
4783}
4784
4785static void be_unmap_pci_bars(struct be_adapter *adapter)
4786{
c5b3ad4c
SP
4787 if (adapter->csr)
4788 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 4789 if (adapter->db)
ce66f781 4790 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
4791}
4792
ce66f781
SP
4793static int db_bar(struct be_adapter *adapter)
4794{
4795 if (lancer_chip(adapter) || !be_physfn(adapter))
4796 return 0;
4797 else
4798 return 4;
4799}
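/* i.e. the doorbell region is BAR 0 on Lancer and on all VFs, and BAR 4 on
 * every other PF.
 */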
4800
4801static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 4802{
dbf0f2a7 4803 if (skyhawk_chip(adapter)) {
ce66f781
SP
4804 adapter->roce_db.size = 4096;
4805 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4806 db_bar(adapter));
4807 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4808 db_bar(adapter));
4809 }
045508a8 4810 return 0;
6b7c5b94
SP
4811}
4812
4813static int be_map_pci_bars(struct be_adapter *adapter)
4814{
4815 u8 __iomem *addr;
fe6d2a38 4816
c5b3ad4c
SP
4817 if (BEx_chip(adapter) && be_physfn(adapter)) {
4818 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
ddf1169f 4819 if (!adapter->csr)
c5b3ad4c
SP
4820 return -ENOMEM;
4821 }
4822
ce66f781 4823 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
ddf1169f 4824 if (!addr)
6b7c5b94 4825 goto pci_map_err;
ba343c77 4826 adapter->db = addr;
ce66f781
SP
4827
4828 be_roce_map_pci_bars(adapter);
6b7c5b94 4829 return 0;
ce66f781 4830
6b7c5b94 4831pci_map_err:
acbafeb1 4832 dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
6b7c5b94
SP
4833 be_unmap_pci_bars(adapter);
4834 return -ENOMEM;
4835}
4836
6b7c5b94
SP
4837static void be_ctrl_cleanup(struct be_adapter *adapter)
4838{
8788fdc2 4839 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
4840
4841 be_unmap_pci_bars(adapter);
4842
4843 if (mem->va)
2b7bcebf
IV
4844 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4845 mem->dma);
e7b909a6 4846
5b8821b7 4847 mem = &adapter->rx_filter;
e7b909a6 4848 if (mem->va)
2b7bcebf
IV
4849 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4850 mem->dma);
6b7c5b94
SP
4851}
4852
6b7c5b94
SP
4853static int be_ctrl_init(struct be_adapter *adapter)
4854{
8788fdc2
SP
4855 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4856 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 4857 struct be_dma_mem *rx_filter = &adapter->rx_filter;
ce66f781 4858 u32 sli_intf;
6b7c5b94 4859 int status;
6b7c5b94 4860
ce66f781
SP
4861 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4862 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4863 SLI_INTF_FAMILY_SHIFT;
4864 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4865
6b7c5b94
SP
4866 status = be_map_pci_bars(adapter);
4867 if (status)
e7b909a6 4868 goto done;
6b7c5b94
SP
4869
4870 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
4871 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4872 mbox_mem_alloc->size,
4873 &mbox_mem_alloc->dma,
4874 GFP_KERNEL);
6b7c5b94 4875 if (!mbox_mem_alloc->va) {
e7b909a6
SP
4876 status = -ENOMEM;
4877 goto unmap_pci_bars;
6b7c5b94
SP
4878 }
4879 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4880 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4881 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4882 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 4883
5b8821b7 4884 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
ede23fa8
JP
4885 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4886 rx_filter->size, &rx_filter->dma,
4887 GFP_KERNEL);
ddf1169f 4888 if (!rx_filter->va) {
e7b909a6
SP
4889 status = -ENOMEM;
4890 goto free_mbox;
4891 }
1f9061d2 4892
2984961c 4893 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
4894 spin_lock_init(&adapter->mcc_lock);
4895 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 4896
5eeff635 4897 init_completion(&adapter->et_cmd_compl);
cf588477 4898 pci_save_state(adapter->pdev);
6b7c5b94 4899 return 0;
e7b909a6
SP
4900
4901free_mbox:
2b7bcebf
IV
4902 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4903 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
4904
4905unmap_pci_bars:
4906 be_unmap_pci_bars(adapter);
4907
4908done:
4909 return status;
6b7c5b94
SP
4910}
4911
4912static void be_stats_cleanup(struct be_adapter *adapter)
4913{
3abcdeda 4914 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
4915
4916 if (cmd->va)
2b7bcebf
IV
4917 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4918 cmd->va, cmd->dma);
6b7c5b94
SP
4919}
4920
4921static int be_stats_init(struct be_adapter *adapter)
4922{
3abcdeda 4923 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 4924
ca34fe38
SP
4925 if (lancer_chip(adapter))
4926 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4927 else if (BE2_chip(adapter))
89a88ab8 4928 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
61000861 4929 else if (BE3_chip(adapter))
ca34fe38 4930 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
61000861
AK
4931 else
4932 /* ALL non-BE ASICs */
4933 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
ca34fe38 4934
ede23fa8
JP
4935 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4936 GFP_KERNEL);
ddf1169f 4937 if (!cmd->va)
6b568689 4938 return -ENOMEM;
6b7c5b94
SP
4939 return 0;
4940}
4941
3bc6b06c 4942static void be_remove(struct pci_dev *pdev)
6b7c5b94
SP
4943{
4944 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 4945
6b7c5b94
SP
4946 if (!adapter)
4947 return;
4948
045508a8 4949 be_roce_dev_remove(adapter);
8cef7a78 4950 be_intr_set(adapter, false);
045508a8 4951
f67ef7ba
PR
4952 cancel_delayed_work_sync(&adapter->func_recovery_work);
4953
6b7c5b94
SP
4954 unregister_netdev(adapter->netdev);
4955
5fb379ee
SP
4956 be_clear(adapter);
4957
bf99e50d
PR
4958 /* tell fw we're done with firing cmds */
4959 be_cmd_fw_clean(adapter);
4960
6b7c5b94
SP
4961 be_stats_cleanup(adapter);
4962
4963 be_ctrl_cleanup(adapter);
4964
d6b6d987
SP
4965 pci_disable_pcie_error_reporting(pdev);
4966
6b7c5b94
SP
4967 pci_release_regions(pdev);
4968 pci_disable_device(pdev);
4969
4970 free_netdev(adapter->netdev);
4971}
4972
39f1d94d 4973static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 4974{
baaa08d1 4975 int status, level;
6b7c5b94 4976
9e1453c5
AK
4977 status = be_cmd_get_cntl_attributes(adapter);
4978 if (status)
4979 return status;
4980
7aeb2156
PR
4981 /* Must be a power of 2 or else MODULO will BUG_ON */
4982 adapter->be_get_temp_freq = 64;
4983
baaa08d1
VV
4984 if (BEx_chip(adapter)) {
4985 level = be_cmd_get_fw_log_level(adapter);
4986 adapter->msg_enable =
4987 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4988 }
941a77d5 4989
92bf14ab 4990 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
2243e2e9 4991 return 0;
6b7c5b94
SP
4992}
4993
f67ef7ba 4994static int lancer_recover_func(struct be_adapter *adapter)
d8110f62 4995{
01e5b2c4 4996 struct device *dev = &adapter->pdev->dev;
d8110f62 4997 int status;
d8110f62 4998
f67ef7ba
PR
4999 status = lancer_test_and_set_rdy_state(adapter);
5000 if (status)
5001 goto err;
d8110f62 5002
f67ef7ba
PR
5003 if (netif_running(adapter->netdev))
5004 be_close(adapter->netdev);
d8110f62 5005
f67ef7ba
PR
5006 be_clear(adapter);
5007
01e5b2c4 5008 be_clear_all_error(adapter);
f67ef7ba
PR
5009
5010 status = be_setup(adapter);
5011 if (status)
5012 goto err;
d8110f62 5013
f67ef7ba
PR
5014 if (netif_running(adapter->netdev)) {
5015 status = be_open(adapter->netdev);
d8110f62
PR
5016 if (status)
5017 goto err;
f67ef7ba 5018 }
d8110f62 5019
4bebb56a 5020 dev_err(dev, "Adapter recovery successful\n");
f67ef7ba
PR
5021 return 0;
5022err:
01e5b2c4
SK
5023 if (status == -EAGAIN)
5024 dev_err(dev, "Waiting for resource provisioning\n");
5025 else
4bebb56a 5026 dev_err(dev, "Adapter recovery failed\n");
d8110f62 5027
f67ef7ba
PR
5028 return status;
5029}
5030
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}

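/* BE_FLAGS_EVT_INCOMPATIBLE_SFP is set when FW posts a port-misconfig
 * async event; be_worker() then logs the offending module from process
 * context and clears the flag so each event is reported only once.
 */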
static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Unqualified SFP+ detected on %c from %s part no: %s",
			adapter->port_name, adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
}

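/* Once-a-second housekeeping: reap MCC completions while interrupts are
 * disabled, refresh HW stats, poll die temperature, replenish starved RX
 * queues, re-tune EQ delays and report any unqualified SFP+ module.
 */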
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

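	/* be_get_temp_freq is 64, so with the 1 s work period the die
	 * temperature is queried roughly once a minute, and only on the PF.
	 */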
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	be_eqd_update(adapter);

	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

/* If any VFs are already enabled don't FLR the PF */
static bool be_reset_required(struct be_adapter *adapter)
{
	return pci_num_vf(adapter->pdev) ? false : true;
}

static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

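/* Device probe: bring up PCI resources, sync with FW, set up control and
 * stats structures, create queues via be_setup() and register the netdev.
 */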
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

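	/* Prefer a 64-bit DMA mask and advertise NETIF_F_HIGHDMA; fall back
	 * to a 32-bit mask if the platform cannot satisfy the wider one.
	 */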
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

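	/* Error unwind: each label below releases what was acquired after
	 * the previous one, so a failure at any stage falls through cleanly.
	 */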
unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

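/* Legacy PM suspend: arm wake-on-LAN if enabled, quiesce the function and
 * drop the device to the requested PCI power state.
 */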
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	status = be_cmd_reset_function(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

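/* PCI error (EEH/AER) flow: error_detected() quiesces the function,
 * slot_reset() restores PCI state and waits for FW readiness, and
 * resume() rebuilds the function and re-attaches the netdev.
 */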
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

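/* Module entry: validate the rx_frag_size parameter (2048/4096/8192 only)
 * before registering the PCI driver; invalid values fall back to 2048.
 */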
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
		       " : Module param rx_frag_size must be 2048/4096/8192."
		       " Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);