/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

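/* Note (explanatory, not in the original source): the three GET_STATS
 * response layouts (v0/v1/v2) above are all decoded out of the same DMA
 * buffer, adapter->stats_cmd.va; only the struct used to interpret the
 * buffer changes with the ASIC generation. The same pattern repeats in
 * the ERX stats accessor below.
 */
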
/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x) (x & 0xFFFF)
#define hi(x) (x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

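/* Worked example of the wrap handling above (illustrative values, not in
 * the original source): if the accumulator holds 0x0001FFF0 (low 16 bits
 * 0xFFF0) and the HW counter has wrapped around to 0x0005, then
 * wrapped == true and the new accumulator becomes
 * 0x00010000 + 0x0005 + 65536 = 0x00020005; the 32-bit count therefore
 * keeps increasing monotonically across 16-bit HW counter wraps.
 */
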
static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			       u32 wrb_cnt, u32 copied, u32 gso_segs,
			       bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

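/* Illustration for wrb_cnt_for_skb() above (not in the original source):
 * on a BE2/BE3 chip, an skb with a linear head and 2 frags needs
 * 1 (head) + 2 (frags) + 1 (hdr wrb) = 4 entries; 4 is even, so no dummy
 * wrb is added. With 3 frags the count would be 5, so a dummy wrb is
 * appended to keep the posted entry count even (Lancer chips have no
 * such requirement and never get the dummy).
 */
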
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			adapter->recommended_prio;

	return vlan_tag;
}

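/* Worked example for be_get_tx_vlan_tag() above (illustrative, not in the
 * original source): if the stack hands down a tag with priority 5
 * (0xA000 | vid) but vlan_prio_bmap is 0x01 (only priority 0 allowed),
 * bit 5 of the bmap is clear, so the PCP field is overwritten with
 * adapter->recommended_prio while the VID bits of the tag are preserved.
 */
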
/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

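/* Note on the dma_err path above (explanatory, not in the original
 * source): on a DMA mapping failure, txq->head is rewound to map_head
 * and the already-posted wrbs are walked and unmapped one by one;
 * map_single is true only for the first (linear) buffer, so it is
 * cleared after the first unmap and the remaining frags are unmapped as
 * pages. The 0 return tells the caller that nothing was posted.
 */
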
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			return NULL;
		skb->len = 36;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
		if (!skb)
			return NULL;
	}

	return skb;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
	    new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			 "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU,
			 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

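/* Sanity-check arithmetic for be_change_mtu() above (illustrative, and
 * assuming BE_MAX_JUMBO_FRAME_SIZE is 9018 bytes as defined in be.h):
 * the largest accepted MTU is 9018 - (14-byte Ethernet header + 4-byte
 * FCS) = 9000 bytes, i.e. the usual jumbo-frame MTU.
 */
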
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (adapter->vlan_tag[vid])
		return status;

	adapter->vlan_tag[vid] = 1;
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		adapter->vlan_tag[vid] = 0;
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	status = be_vid_config(adapter);
	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_clear_promisc(struct be_adapter *adapter)
{
	adapter->promiscuous = false;
	adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;

	be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev,
			 "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev,
			 "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (!status)
		vf_cfg->vlan_tag = vlan;
	else
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev, int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
	if (status)
		dev_err(&adapter->pdev->dev,
			"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf + 1);
	if (!status)
		adapter->vf_cfg[vf].plink_tracking = link_state;

	return status;
}

static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}

static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65) / 100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}

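/* Rough numbers for the adaptive EQ-delay heuristic above (illustrative,
 * not in the original source): at a combined 600K pkts/s,
 * eqd = (600000 / 15000) << 2 = 160, which is then clamped to
 * [aic->min_eqd, aic->max_eqd]. Below 30K pkts/s the computed eqd falls
 * under 8 and is forced to 0, i.e. no interrupt delay at low traffic.
 * The value handed to the FW is scaled by 65/100 (delay_multiplier).
 */
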
3abcdeda 1466static void be_rx_stats_update(struct be_rx_obj *rxo,
748b539a 1467 struct be_rx_compl_info *rxcp)
4097f663 1468{
ac124ff9 1469 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1470
ab1594e9 1471 u64_stats_update_begin(&stats->sync);
3abcdeda 1472 stats->rx_compl++;
2e588f84 1473 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1474 stats->rx_pkts++;
2e588f84 1475 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1476 stats->rx_mcast_pkts++;
2e588f84 1477 if (rxcp->err)
ac124ff9 1478 stats->rx_compl_err++;
ab1594e9 1479 u64_stats_update_end(&stats->sync);
4097f663
SP
1480}
1481
2e588f84 1482static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1483{
19fad86f 1484 /* L4 checksum is not reliable for non TCP/UDP packets.
c9c47142
SP
1485 * Also ignore ipcksm for ipv6 pkts
1486 */
2e588f84 1487 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 1488 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
728a9972
AK
1489}
1490
0b0ef1d0 1491static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
6b7c5b94 1492{
10ef9ab4 1493 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1494 struct be_rx_page_info *rx_page_info;
3abcdeda 1495 struct be_queue_info *rxq = &rxo->q;
0b0ef1d0 1496 u16 frag_idx = rxq->tail;
6b7c5b94 1497
3abcdeda 1498 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
1499 BUG_ON(!rx_page_info->page);
1500
e50287be 1501 if (rx_page_info->last_frag) {
2b7bcebf
IV
1502 dma_unmap_page(&adapter->pdev->dev,
1503 dma_unmap_addr(rx_page_info, bus),
1504 adapter->big_page_size, DMA_FROM_DEVICE);
e50287be
SP
1505 rx_page_info->last_frag = false;
1506 } else {
1507 dma_sync_single_for_cpu(&adapter->pdev->dev,
1508 dma_unmap_addr(rx_page_info, bus),
1509 rx_frag_size, DMA_FROM_DEVICE);
205859a2 1510 }
6b7c5b94 1511
0b0ef1d0 1512 queue_tail_inc(rxq);
6b7c5b94
SP
1513 atomic_dec(&rxq->used);
1514 return rx_page_info;
1515}
1516
1517/* Throwaway the data in the Rx completion */
10ef9ab4
SP
1518static void be_rx_compl_discard(struct be_rx_obj *rxo,
1519 struct be_rx_compl_info *rxcp)
6b7c5b94 1520{
6b7c5b94 1521 struct be_rx_page_info *page_info;
2e588f84 1522 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1523
e80d9da6 1524 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 1525 page_info = get_rx_page_info(rxo);
e80d9da6
PR
1526 put_page(page_info->page);
1527 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
1528 }
1529}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                             struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(rxo);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                memcpy(skb->data, start, curr_frag_len);
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                hdr_len = ETH_HLEN;
                memcpy(skb->data, start, hdr_len);
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0],
                                  curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}
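
/* Worked example for the coalescing loop above (assuming 4K pages and the
 * default rx_frag_size of 2048, so two frags per big page): an 8000-byte
 * frame arrives as frags A0, A1, B0, B1 on pages A and B. A0 fills skb frag
 * slot 0; A1 has a non-zero page_offset, so it is folded into slot 0 and its
 * extra page reference dropped; B0 opens slot 1 and B1 folds into it. The
 * skb thus ends with one frag slot per physical page.
 */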

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
                                struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        skb_fill_rx_data(rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->protocol = eth_type_trans(skb, netdev);
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (netdev->features & NETIF_F_RXHASH)
                skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

        skb->encapsulation = rxcp->tunneled;
        skb_mark_napi_id(skb, napi);

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

        netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
                                    struct napi_struct *napi,
                                    struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(napi);
        if (!skb) {
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                } else {
                        put_page(page_info->page);
                }
                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

        skb->encapsulation = rxcp->tunneled;
        skb_mark_napi_id(skb, napi);

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

        napi_gro_frags(napi);
}
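
/* The hardware reports RX completions in one of two layouts: v1 when the
 * adapter's be3_native capability is set, v0 otherwise (be_rx_compl_get()
 * below picks the parser). Both are normalized into struct be_rx_compl_info;
 * of the fields parsed here, only v1 carries the tunneled bit and only v0
 * carries the ip_frag bit.
 */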

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
                                 struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
                                               vlan_tag, compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
        rxcp->tunneled =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
                                 struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
                                               vlan_tag, compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
        rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
                                      ip_frag, compl);
}
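
/* Consumer-side ordering for the completion ring, as used below: the valid
 * dword is checked first, and only after the rmb() are the remaining dwords
 * read, so a completion is never parsed before the DMA write that set its
 * valid bit is fully visible. The valid bit is cleared once parsed because
 * the ring wraps and the entry will be reused.
 */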

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is OK to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compl */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(compl, rxcp);
        else
                be_parse_rx_compl_v0(compl, rxcp);

        if (rxcp->ip_frag)
                rxcp->l4_csum = 0;

        if (rxcp->vlanf) {
                /* In QNQ modes, if qnq bit is not set, then the packet was
                 * tagged only with the transparent outer vlan-tag and must
                 * not be treated as a vlan packet by host
                 */
                if (be_is_qnq_mode(adapter) && !rxcp->qnq)
                        rxcp->vlanf = 0;

                if (!lancer_chip(adapter))
                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);

                if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
                    !adapter->vlan_tag[rxcp->vlan_tag])
                        rxcp->vlanf = 0;
        }

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
        u32 order = get_order(size);

        if (order > 0)
                gfp |= __GFP_COMP;
        return alloc_pages(gfp, order);
}
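
/* Frag layout within one "big page" as posted below (e.g. with 4K pages and
 * rx_frag_size = 2048, big_page_size is 4K and holds two frags): each frag's
 * DMA address is page_dmaaddr + page_offset. The frag that exhausts the page
 * (or the last one posted before the RXQ fills) is marked last_frag and
 * records the page-wide DMA address, so get_rx_page_info() knows where to
 * unmap.
 */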

/*
 * Allocate a page, split it into fragments of size rx_frag_size and post
 * them as receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct device *dev = &adapter->pdev->dev;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size, gfp);
                        if (unlikely(!pagep)) {
                                rx_stats(rxo)->rx_post_fail++;
                                break;
                        }
                        page_dmaaddr = dma_map_page(dev, pagep, 0,
                                                    adapter->big_page_size,
                                                    DMA_FROM_DEVICE);
                        if (dma_mapping_error(dev, page_dmaaddr)) {
                                put_page(pagep);
                                pagep = NULL;
                                rx_stats(rxo)->rx_post_fail++;
                                break;
                        }
                        page_offset = 0;
                } else {
                        get_page(pagep);
                        page_offset += rx_frag_size;
                }
                page_info->page_offset = page_offset;
                page_info->page = pagep;

                rxd = queue_head_node(rxq);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_frag = true;
                        dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                } else {
                        dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &rxo->page_info_tbl[rxq->head];
        }

        /* Mark the last frag of a page when we break out of the above loop
         * with no more slots available in the RXQ
         */
        if (pagep) {
                prev_page_info->last_frag = true;
                dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
        }

        if (posted) {
                atomic_add(posted, &rxq->used);
                if (rxo->rx_post_starved)
                        rxo->rx_post_starved = false;
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;
        }
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}
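
/* A TX completion identifies only the *last* wrb of a transmitted skb
 * (wrb_index). be_tx_compl_process() below therefore walks the TXQ from its
 * tail up to that index, unmapping the header wrb once and each fragment
 * wrb in turn, and returns the number of wrbs reclaimed so the caller can
 * subtract them from txq->used.
 */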

static u16 be_tx_compl_process(struct be_adapter *adapter,
                               struct be_tx_obj *txo, u16 last_index)
{
        struct be_queue_info *txq = &txo->q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = txo->sent_skb_list;
        struct sk_buff *sent_skb;
        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
        bool unmap_skb_hdr = true;

        sent_skb = sent_skbs[txq->tail];
        BUG_ON(!sent_skb);
        sent_skbs[txq->tail] = NULL;

        /* skip header wrb */
        queue_tail_inc(txq);

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                unmap_tx_frag(&adapter->pdev->dev, wrb,
                              (unmap_skb_hdr && skb_headlen(sent_skb)));
                unmap_skb_hdr = false;

                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        dev_kfree_skb_any(sent_skb);
        return num_wrbs;
}

/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
        struct be_eq_entry *eqe;
        int num = 0;

        do {
                eqe = queue_tail_node(&eqo->q);
                if (eqe->evt == 0)
                        break;

                rmb();
                eqe->evt = 0;
                num++;
                queue_tail_inc(&eqo->q);
        } while (true);

        return num;
}

/* Leaves the EQ in disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
        int num = events_get(eqo);

        be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
}

static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_rx_compl_info *rxcp;
        struct be_adapter *adapter = rxo->adapter;
        int flush_wait = 0;

        /* Consume pending rx completions.
         * Wait for the flush completion (identified by zero num_rcvd)
         * to arrive. Notify CQ even when there are no more CQ entries
         * for HW to flush partially coalesced CQ entries.
         * In Lancer, there is no need to wait for flush compl.
         */
        for (;;) {
                rxcp = be_rx_compl_get(rxo);
                if (rxcp == NULL) {
                        if (lancer_chip(adapter))
                                break;

                        if (flush_wait++ > 10 || be_hw_error(adapter)) {
                                dev_warn(&adapter->pdev->dev,
                                         "did not receive flush compl\n");
                                break;
                        }
                        be_cq_notify(adapter, rx_cq->id, true, 0);
                        mdelay(1);
                } else {
                        be_rx_compl_discard(rxo, rxcp);
                        be_cq_notify(adapter, rx_cq->id, false, 1);
                        if (rxcp->num_rcvd == 0)
                                break;
                }
        }

        /* After cleanup, leave the CQ in unarmed state */
        be_cq_notify(adapter, rx_cq->id, false, 0);

        /* Then free posted rx buffers that were not used */
        while (atomic_read(&rxq->used) > 0) {
                page_info = get_rx_page_info(rxo);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
        rxq->tail = rxq->head = 0;
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
        struct be_tx_obj *txo;
        struct be_queue_info *txq;
        struct be_eth_tx_compl *txcp;
        u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
        struct sk_buff *sent_skb;
        bool dummy_wrb;
        int i, pending_txqs;

        /* Stop polling for compls when HW has been silent for 10ms */
        do {
                pending_txqs = adapter->num_tx_qs;

                for_all_tx_queues(adapter, txo, i) {
                        cmpl = 0;
                        num_wrbs = 0;
                        txq = &txo->q;
                        while ((txcp = be_tx_compl_get(&txo->cq))) {
                                end_idx =
                                        AMAP_GET_BITS(struct amap_eth_tx_compl,
                                                      wrb_index, txcp);
                                num_wrbs += be_tx_compl_process(adapter, txo,
                                                                end_idx);
                                cmpl++;
                        }
                        if (cmpl) {
                                be_cq_notify(adapter, txo->cq.id, false, cmpl);
                                atomic_sub(num_wrbs, &txq->used);
                                timeo = 0;
                        }
                        if (atomic_read(&txq->used) == 0)
                                pending_txqs--;
                }

                if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
                        break;

                mdelay(1);
        } while (true);

        for_all_tx_queues(adapter, txo, i) {
                txq = &txo->q;
                if (atomic_read(&txq->used))
                        dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
                                atomic_read(&txq->used));

                /* free posted tx for which compls will never arrive */
                while (atomic_read(&txq->used)) {
                        sent_skb = txo->sent_skb_list[txq->tail];
                        end_idx = txq->tail;
                        num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
                                                   &dummy_wrb);
                        index_adv(&end_idx, num_wrbs - 1, txq->len);
                        num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
                        atomic_sub(num_wrbs, &txq->used);
                }
        }
}

static void be_evt_queues_destroy(struct be_adapter *adapter)
{
        struct be_eq_obj *eqo;
        int i;

        for_all_evt_queues(adapter, eqo, i) {
                if (eqo->q.created) {
                        be_eq_clean(eqo);
                        be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
                        napi_hash_del(&eqo->napi);
                        netif_napi_del(&eqo->napi);
                }
                be_queue_free(adapter, &eqo->q);
        }
}

static int be_evt_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq;
        struct be_eq_obj *eqo;
        struct be_aic_obj *aic;
        int i, rc;

        adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
                                    adapter->cfg_num_qs);

        for_all_evt_queues(adapter, eqo, i) {
                netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
                               BE_NAPI_WEIGHT);
                napi_hash_add(&eqo->napi);
                aic = &adapter->aic_obj[i];
                eqo->adapter = adapter;
                eqo->tx_budget = BE_TX_BUDGET;
                eqo->idx = i;
                aic->max_eqd = BE_MAX_EQD;
                aic->enable = true;

                eq = &eqo->q;
                rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
                                    sizeof(struct be_eq_entry));
                if (rc)
                        return rc;

                rc = be_cmd_eq_create(adapter, eqo);
                if (rc)
                        return rc;
        }
        return 0;
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->mcc_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
        be_queue_free(adapter, q);

        q = &adapter->mcc_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;

        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                           sizeof(struct be_mcc_compl)))
                goto err;

        /* Use the default EQ for MCC completions */
        if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
                goto mcc_cq_free;

        q = &adapter->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        if (be_cmd_mccq_create(adapter, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;
        struct be_tx_obj *txo;
        u8 i;

        for_all_tx_queues(adapter, txo, i) {
                q = &txo->q;
                if (q->created)
                        be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
                be_queue_free(adapter, q);

                q = &txo->cq;
                if (q->created)
                        be_cmd_q_destroy(adapter, q, QTYPE_CQ);
                be_queue_free(adapter, q);
        }
}
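
/* Queue-to-EQ fan-out used by the create routines below: each completion
 * queue is bound to event queue "i % adapter->num_evt_qs". TXQs never
 * outnumber EQs (see the min() in be_tx_qs_create()), but RX adds one
 * default queue on top of the RSS rings, so e.g. with 4 EQs the 5th RXQ
 * wraps back onto EQ0 (which is why be_poll() services two RX rings there).
 */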

static int be_tx_qs_create(struct be_adapter *adapter)
{
        struct be_queue_info *cq, *eq;
        struct be_tx_obj *txo;
        int status, i;

        adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

        for_all_tx_queues(adapter, txo, i) {
                cq = &txo->cq;
                status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
                                        sizeof(struct be_eth_tx_compl));
                if (status)
                        return status;

                u64_stats_init(&txo->stats.sync);
                u64_stats_init(&txo->stats.sync_compl);

                /* If num_evt_qs is less than num_tx_qs, then more than
                 * one txq share an eq
                 */
                eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
                status = be_cmd_cq_create(adapter, cq, eq, false, 3);
                if (status)
                        return status;

                status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
                                        sizeof(struct be_eth_wrb));
                if (status)
                        return status;

                status = be_cmd_txq_create(adapter, txo);
                if (status)
                        return status;
        }

        dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
                 adapter->num_tx_qs);
        return 0;
}

static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;
        struct be_rx_obj *rxo;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                q = &rxo->cq;
                if (q->created)
                        be_cmd_q_destroy(adapter, q, QTYPE_CQ);
                be_queue_free(adapter, q);
        }
}

static int be_rx_cqs_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *cq;
        struct be_rx_obj *rxo;
        int rc, i;

        /* We can create as many RSS rings as there are EQs. */
        adapter->num_rx_qs = adapter->num_evt_qs;

        /* We'll use RSS only if at least 2 RSS rings are supported.
         * When RSS is used, we'll need a default RXQ for non-IP traffic.
         */
        if (adapter->num_rx_qs > 1)
                adapter->num_rx_qs++;

        adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
        for_all_rx_queues(adapter, rxo, i) {
                rxo->adapter = adapter;
                cq = &rxo->cq;
                rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
                                    sizeof(struct be_eth_rx_compl));
                if (rc)
                        return rc;

                u64_stats_init(&rxo->stats.sync);
                eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
                rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
                if (rc)
                        return rc;
        }

        dev_info(&adapter->pdev->dev,
                 "created %d RSS queue(s) and 1 default RX queue\n",
                 adapter->num_rx_qs - 1);
        return 0;
}

static irqreturn_t be_intx(int irq, void *dev)
{
        struct be_eq_obj *eqo = dev;
        struct be_adapter *adapter = eqo->adapter;
        int num_evts = 0;

        /* IRQ is not expected when NAPI is scheduled as the EQ
         * will not be armed.
         * But, this can happen on Lancer INTx where it takes
         * a while to de-assert INTx or in BE2 where occasionally
         * an interrupt may be raised even when EQ is unarmed.
         * If NAPI is already scheduled, then counting & notifying
         * events will orphan them.
         */
        if (napi_schedule_prep(&eqo->napi)) {
                num_evts = events_get(eqo);
                __napi_schedule(&eqo->napi);
                if (num_evts)
                        eqo->spurious_intr = 0;
        }
        be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

        /* Return IRQ_HANDLED only for the first spurious intr
         * after a valid intr to stop the kernel from branding
         * this irq as a bad one!
         */
        if (num_evts || eqo->spurious_intr++ == 0)
                return IRQ_HANDLED;
        else
                return IRQ_NONE;
}

static irqreturn_t be_msix(int irq, void *dev)
{
        struct be_eq_obj *eqo = dev;

        be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
        napi_schedule(&eqo->napi);
        return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
        return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
}

static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
                         int budget, int polling)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_rx_compl_info *rxcp;
        u32 work_done;

        for (work_done = 0; work_done < budget; work_done++) {
                rxcp = be_rx_compl_get(rxo);
                if (!rxcp)
                        break;

                /* Is it a flush compl that has no data */
                if (unlikely(rxcp->num_rcvd == 0))
                        goto loop_continue;

                /* Discard compl with partial DMA Lancer B0 */
                if (unlikely(!rxcp->pkt_size)) {
                        be_rx_compl_discard(rxo, rxcp);
                        goto loop_continue;
                }

                /* On BE drop pkts that arrive due to imperfect filtering in
                 * promiscuous mode on some SKUs
                 */
                if (unlikely(rxcp->port != adapter->port_num &&
                             !lancer_chip(adapter))) {
                        be_rx_compl_discard(rxo, rxcp);
                        goto loop_continue;
                }

                /* Don't do gro when we're busy_polling */
                if (do_gro(rxcp) && polling != BUSY_POLLING)
                        be_rx_compl_process_gro(rxo, napi, rxcp);
                else
                        be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
                be_rx_stats_update(rxo, rxcp);
        }

        if (work_done) {
                be_cq_notify(adapter, rx_cq->id, true, work_done);

                /* When an rx-obj gets into post_starved state, just
                 * let be_worker do the posting.
                 */
                if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
                    !rxo->rx_post_starved)
                        be_post_rx_frags(rxo, GFP_ATOMIC);
        }

        return work_done;
}

static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
                          int budget, int idx)
{
        struct be_eth_tx_compl *txcp;
        int num_wrbs = 0, work_done;

        for (work_done = 0; work_done < budget; work_done++) {
                txcp = be_tx_compl_get(&txo->cq);
                if (!txcp)
                        break;
                num_wrbs += be_tx_compl_process(adapter, txo,
                                                AMAP_GET_BITS(struct
                                                              amap_eth_tx_compl,
                                                              wrb_index, txcp));
        }

        if (work_done) {
                be_cq_notify(adapter, txo->cq.id, true, work_done);
                atomic_sub(num_wrbs, &txo->q.used);

                /* As Tx wrbs have been freed up, wake up netdev queue
                 * if it was stopped due to lack of tx wrbs. */
                if (__netif_subqueue_stopped(adapter->netdev, idx) &&
                    atomic_read(&txo->q.used) < txo->q.len / 2) {
                        netif_wake_subqueue(adapter->netdev, idx);
                }

                u64_stats_update_begin(&tx_stats(txo)->sync_compl);
                tx_stats(txo)->tx_compl += work_done;
                u64_stats_update_end(&tx_stats(txo)->sync_compl);
        }
        return (work_done < budget); /* Done */
}
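
/* be_poll() below is the per-EQ NAPI handler. It drains, in order: the TXQs
 * mapped to this EQ, then the RX rings mapped to it (under the NAPI side of
 * the busy-poll lock; if userspace holds the lock, the whole budget is
 * reported consumed so NAPI polls again), then MCC completions if this is
 * the MCC EQ. The EQ is re-armed only when the budget was not exhausted;
 * otherwise events are merely counted and cleared while polling continues.
 */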

int be_poll(struct napi_struct *napi, int budget)
{
        struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
        struct be_adapter *adapter = eqo->adapter;
        int max_work = 0, work, i, num_evts;
        struct be_rx_obj *rxo;
        bool tx_done;

        num_evts = events_get(eqo);

        /* Process all TXQs serviced by this EQ */
        for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
                tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
                                        eqo->tx_budget, i);
                if (!tx_done)
                        max_work = budget;
        }

        if (be_lock_napi(eqo)) {
                /* This loop will iterate twice for EQ0 in which
                 * completions of the last RXQ (default one) are also processed
                 * For other EQs the loop iterates only once
                 */
                for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
                        work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
                        max_work = max(work, max_work);
                }
                be_unlock_napi(eqo);
        } else {
                max_work = budget;
        }

        if (is_mcc_eqo(eqo))
                be_process_mcc(adapter);

        if (max_work < budget) {
                napi_complete(napi);
                be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
        } else {
                /* As we'll continue in polling mode, count and clear events */
                be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
        }
        return max_work;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
static int be_busy_poll(struct napi_struct *napi)
{
        struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
        struct be_adapter *adapter = eqo->adapter;
        struct be_rx_obj *rxo;
        int i, work = 0;

        if (!be_lock_busy_poll(eqo))
                return LL_FLUSH_BUSY;

        for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
                work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
                if (work)
                        break;
        }

        be_unlock_busy_poll(eqo);
        return work;
}
#endif
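
/* be_detect_error() below reads error state from two different places
 * depending on the ASIC: Lancer reports errors through the SLIPORT status
 * registers, while BE2/BE3/Skyhawk expose "unrecoverable error" (UE) bits in
 * PCI config space. Masked UE bits are ignored, and on BE2/BE3 an unmasked
 * UE is logged but does not set hw_error, since some platforms are known to
 * report spurious UEs.
 */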

void be_detect_error(struct be_adapter *adapter)
{
        u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
        u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
        u32 i;
        bool error_detected = false;
        struct device *dev = &adapter->pdev->dev;
        struct net_device *netdev = adapter->netdev;

        if (be_hw_error(adapter))
                return;

        if (lancer_chip(adapter)) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
                        sliport_err1 = ioread32(adapter->db +
                                                SLIPORT_ERROR1_OFFSET);
                        sliport_err2 = ioread32(adapter->db +
                                                SLIPORT_ERROR2_OFFSET);
                        adapter->hw_error = true;
                        /* Do not log error messages if it's a FW reset */
                        if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
                            sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
                                dev_info(dev, "Firmware update in progress\n");
                        } else {
                                error_detected = true;
                                dev_err(dev, "Error detected in the card\n");
                                dev_err(dev, "ERR: sliport status 0x%x\n",
                                        sliport_status);
                                dev_err(dev, "ERR: sliport error1 0x%x\n",
                                        sliport_err1);
                                dev_err(dev, "ERR: sliport error2 0x%x\n",
                                        sliport_err2);
                        }
                }
        } else {
                pci_read_config_dword(adapter->pdev,
                                      PCICFG_UE_STATUS_LOW, &ue_lo);
                pci_read_config_dword(adapter->pdev,
                                      PCICFG_UE_STATUS_HIGH, &ue_hi);
                pci_read_config_dword(adapter->pdev,
                                      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
                pci_read_config_dword(adapter->pdev,
                                      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

                ue_lo = (ue_lo & ~ue_lo_mask);
                ue_hi = (ue_hi & ~ue_hi_mask);

                /* On certain platforms BE hardware can indicate spurious UEs.
                 * Allow HW to stop working completely in case of a real UE.
                 * Hence not setting the hw_error for UE detection.
                 */

                if (ue_lo || ue_hi) {
                        error_detected = true;
                        dev_err(dev,
                                "Unrecoverable Error detected in the adapter");
                        dev_err(dev, "Please reboot server to recover");
                        if (skyhawk_chip(adapter))
                                adapter->hw_error = true;
                        for (i = 0; ue_lo; ue_lo >>= 1, i++) {
                                if (ue_lo & 1)
                                        dev_err(dev, "UE: %s bit set\n",
                                                ue_status_low_desc[i]);
                        }
                        for (i = 0; ue_hi; ue_hi >>= 1, i++) {
                                if (ue_hi & 1)
                                        dev_err(dev, "UE: %s bit set\n",
                                                ue_status_hi_desc[i]);
                        }
                }
        }
        if (error_detected)
                netif_carrier_off(netdev);
}

static void be_msix_disable(struct be_adapter *adapter)
{
        if (msix_enabled(adapter)) {
                pci_disable_msix(adapter->pdev);
                adapter->num_msix_vec = 0;
                adapter->num_msix_roce_vec = 0;
        }
}
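
/* MSI-X sizing example for be_msix_enable() below: on, say, an 8-CPU host
 * with RoCE supported and be_max_eqs() == 8, num_vec starts as
 * min(2 * 8, 2 * 8) = 16; if the PCI core grants all 16, half are set aside
 * for RoCE and the NIC keeps the remaining 8. Without RoCE, the request is
 * simply cfg_num_qs vectors.
 */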

static int be_msix_enable(struct be_adapter *adapter)
{
        int i, num_vec;
        struct device *dev = &adapter->pdev->dev;

        /* If RoCE is supported, program the max number of NIC vectors that
         * may be configured via set-channels, along with vectors needed for
         * RoCE. Else, just program the number we'll use initially.
         */
        if (be_roce_supported(adapter))
                num_vec = min_t(int, 2 * be_max_eqs(adapter),
                                2 * num_online_cpus());
        else
                num_vec = adapter->cfg_num_qs;

        for (i = 0; i < num_vec; i++)
                adapter->msix_entries[i].entry = i;

        num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
                                        MIN_MSIX_VECTORS, num_vec);
        if (num_vec < 0)
                goto fail;

        if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
                adapter->num_msix_roce_vec = num_vec / 2;
                dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
                         adapter->num_msix_roce_vec);
        }

        adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

        dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
                 adapter->num_msix_vec);
        return 0;

fail:
        dev_warn(dev, "MSIx enable failed\n");

        /* INTx is not supported in VFs, so fail probe if enable_msix fails */
        if (!be_physfn(adapter))
                return num_vec;
        return 0;
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
                                  struct be_eq_obj *eqo)
{
        return adapter->msix_entries[eqo->msix_idx].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct be_eq_obj *eqo;
        int status, i, vec;

        for_all_evt_queues(adapter, eqo, i) {
                sprintf(eqo->desc, "%s-q%d", netdev->name, i);
                vec = be_msix_vec_get(adapter, eqo);
                status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
                if (status)
                        goto err_msix;
        }

        return 0;
err_msix:
        for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
                free_irq(be_msix_vec_get(adapter, eqo), eqo);
        dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
                 status);
        be_msix_disable(adapter);
        return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int status;

        if (msix_enabled(adapter)) {
                status = be_msix_register(adapter);
                if (status == 0)
                        goto done;
                /* INTx is not supported for VF */
                if (!be_physfn(adapter))
                        return status;
        }

        /* INTx: only the first EQ is used */
        netdev->irq = adapter->pdev->irq;
        status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
                             &adapter->eq_obj[0]);
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "INTx request IRQ failed - err %d\n", status);
                return status;
        }
done:
        adapter->isr_registered = true;
        return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct be_eq_obj *eqo;
        int i;

        if (!adapter->isr_registered)
                return;

        /* INTx */
        if (!msix_enabled(adapter)) {
                free_irq(netdev->irq, &adapter->eq_obj[0]);
                goto done;
        }

        /* MSIx */
        for_all_evt_queues(adapter, eqo, i)
                free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
        adapter->isr_registered = false;
}

static void be_rx_qs_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;
        struct be_rx_obj *rxo;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                q = &rxo->q;
                if (q->created) {
                        be_cmd_rxq_destroy(adapter, q);
                        be_rx_cq_clean(rxo);
                }
                be_queue_free(adapter, q);
        }
}

static int be_close(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *eqo;
        int i;

        /* This protection is needed as be_close() may be called even when the
         * adapter is in cleared state (after eeh perm failure)
         */
        if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
                return 0;

        be_roce_dev_close(adapter);

        if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
                for_all_evt_queues(adapter, eqo, i) {
                        napi_disable(&eqo->napi);
                        be_disable_busy_poll(eqo);
                }
                adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
        }

        be_async_mcc_disable(adapter);

        /* Wait for all pending tx completions to arrive so that
         * all tx skbs are freed.
         */
        netif_tx_disable(netdev);
        be_tx_compl_clean(adapter);

        be_rx_qs_destroy(adapter);

        for (i = 1; i < (adapter->uc_macs + 1); i++)
                be_cmd_pmac_del(adapter, adapter->if_handle,
                                adapter->pmac_id[i], 0);
        adapter->uc_macs = 0;

        for_all_evt_queues(adapter, eqo, i) {
                if (msix_enabled(adapter))
                        synchronize_irq(be_msix_vec_get(adapter, eqo));
                else
                        synchronize_irq(netdev->irq);
                be_eq_clean(eqo);
        }

        be_irq_unregister(adapter);

        return 0;
}
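
/* be_rx_qs_create() below also programs the RSS indirection table. The table
 * is filled by cycling through the RSS rings: with, say, 4 RSS rings whose
 * ids are r0..r3, rsstable[0..RSS_INDIR_TABLE_LEN-1] becomes r0, r1, r2, r3,
 * r0, r1, ... so flows are spread evenly. The hash key is a fresh random
 * value each time the interface is opened.
 */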

static int be_rx_qs_create(struct be_adapter *adapter)
{
        struct be_rx_obj *rxo;
        int rc, i, j;
        u8 rss_hkey[RSS_HASH_KEY_LEN];
        struct rss_info *rss = &adapter->rss_info;

        for_all_rx_queues(adapter, rxo, i) {
                rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
                                    sizeof(struct be_eth_rx_d));
                if (rc)
                        return rc;
        }

        /* The FW would like the default RXQ to be created first */
        rxo = default_rxo(adapter);
        rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
                               adapter->if_handle, false, &rxo->rss_id);
        if (rc)
                return rc;

        for_all_rss_queues(adapter, rxo, i) {
                rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
                                       rx_frag_size, adapter->if_handle,
                                       true, &rxo->rss_id);
                if (rc)
                        return rc;
        }

        if (be_multi_rxq(adapter)) {
                for (j = 0; j < RSS_INDIR_TABLE_LEN;
                     j += adapter->num_rx_qs - 1) {
                        for_all_rss_queues(adapter, rxo, i) {
                                if ((j + i) >= RSS_INDIR_TABLE_LEN)
                                        break;
                                rss->rsstable[j + i] = rxo->rss_id;
                                rss->rss_queue[j + i] = i;
                        }
                }
                rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
                                 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

                if (!BEx_chip(adapter))
                        rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
                                          RSS_ENABLE_UDP_IPV6;
        } else {
                /* Disable RSS, if only default RX Q is created */
                rss->rss_flags = RSS_ENABLE_NONE;
        }

        get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
        rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
                               128, rss_hkey);
        if (rc) {
                rss->rss_flags = RSS_ENABLE_NONE;
                return rc;
        }

        memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);

        /* First time posting */
        for_all_rx_queues(adapter, rxo, i)
                be_post_rx_frags(rxo, GFP_KERNEL);
        return 0;
}

static int be_open(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *eqo;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u8 link_status;
        int status, i;

        status = be_rx_qs_create(adapter);
        if (status)
                goto err;

        status = be_irq_register(adapter);
        if (status)
                goto err;

        for_all_rx_queues(adapter, rxo, i)
                be_cq_notify(adapter, rxo->cq.id, true, 0);

        for_all_tx_queues(adapter, txo, i)
                be_cq_notify(adapter, txo->cq.id, true, 0);

        be_async_mcc_enable(adapter);

        for_all_evt_queues(adapter, eqo, i) {
                napi_enable(&eqo->napi);
                be_enable_busy_poll(eqo);
                be_eq_notify(adapter, eqo->q.id, true, false, 0);
        }
        adapter->flags |= BE_FLAGS_NAPI_ENABLED;

        status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
        if (!status)
                be_link_status_update(adapter, link_status);

        netif_tx_start_all_queues(netdev);
        be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
        if (skyhawk_chip(adapter))
                vxlan_get_rx_port(netdev);
#endif

        return 0;
err:
        be_close(adapter->netdev);
        return -EIO;
}
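
/* be_setup_wol() below arms or disarms magic-packet wake-up: on enable it
 * sets the PM bits in PCI config space, programs the netdev's current MAC
 * as the magic-packet filter via FW, and enables PCI wake for D3hot/D3cold;
 * on disable it programs a zero MAC and clears the wake enables.
 */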

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
        struct be_dma_mem cmd;
        int status = 0;
        u8 mac[ETH_ALEN];

        memset(mac, 0, ETH_ALEN);

        cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
        cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
                                     GFP_KERNEL);
        if (cmd.va == NULL)
                return -1;

        if (enable) {
                status = pci_write_config_dword(adapter->pdev,
                                                PCICFG_PM_CONTROL_OFFSET,
                                                PCICFG_PM_CONTROL_MASK);
                if (status) {
                        dev_err(&adapter->pdev->dev,
                                "Could not enable Wake-on-lan\n");
                        dma_free_coherent(&adapter->pdev->dev, cmd.size,
                                          cmd.va, cmd.dma);
                        return status;
                }
                status = be_cmd_enable_magic_wol(adapter,
                                                 adapter->netdev->dev_addr,
                                                 &cmd);
                pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
                pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
        } else {
                status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
                pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
                pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
        }

        dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
        return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
        u32 vf;
        int status = 0;
        u8 mac[ETH_ALEN];
        struct be_vf_cfg *vf_cfg;

        be_vf_eth_addr_generate(adapter, mac);

        for_all_vfs(adapter, vf_cfg, vf) {
                if (BEx_chip(adapter))
                        status = be_cmd_pmac_add(adapter, mac,
                                                 vf_cfg->if_handle,
                                                 &vf_cfg->pmac_id, vf + 1);
                else
                        status = be_cmd_set_mac(adapter, mac,
                                                vf_cfg->if_handle, vf + 1);

                if (status)
                        dev_err(&adapter->pdev->dev,
                                "Mac address assignment failed for VF %d\n",
                                vf);
                else
                        memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

                mac[5] += 1;
        }
        return status;
}

static int be_vfs_mac_query(struct be_adapter *adapter)
{
        int status, vf;
        u8 mac[ETH_ALEN];
        struct be_vf_cfg *vf_cfg;

        for_all_vfs(adapter, vf_cfg, vf) {
                status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
                                               mac, vf_cfg->if_handle,
                                               false, vf+1);
                if (status)
                        return status;
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
        }
        return 0;
}

static void be_vf_clear(struct be_adapter *adapter)
{
        struct be_vf_cfg *vf_cfg;
        u32 vf;

        if (pci_vfs_assigned(adapter->pdev)) {
                dev_warn(&adapter->pdev->dev,
                         "VFs are assigned to VMs: not disabling VFs\n");
                goto done;
        }

        pci_disable_sriov(adapter->pdev);

        for_all_vfs(adapter, vf_cfg, vf) {
                if (BEx_chip(adapter))
                        be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                        vf_cfg->pmac_id, vf + 1);
                else
                        be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
                                       vf + 1);

                be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
        }
done:
        kfree(adapter->vf_cfg);
        adapter->num_vfs = 0;
}

static void be_clear_queues(struct be_adapter *adapter)
{
        be_mcc_queues_destroy(adapter);
        be_rx_cqs_destroy(adapter);
        be_tx_queues_destroy(adapter);
        be_evt_queues_destroy(adapter);
}

static void be_cancel_worker(struct be_adapter *adapter)
{
        if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
                cancel_delayed_work_sync(&adapter->work);
                adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
        }
}

static void be_mac_clear(struct be_adapter *adapter)
{
        int i;

        if (adapter->pmac_id) {
                for (i = 0; i < (adapter->uc_macs + 1); i++)
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                adapter->uc_macs = 0;

                kfree(adapter->pmac_id);
                adapter->pmac_id = NULL;
        }
}

#ifdef CONFIG_BE2NET_VXLAN
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
        if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
                be_cmd_manage_iface(adapter, adapter->if_handle,
                                    OP_CONVERT_TUNNEL_TO_NORMAL);

        if (adapter->vxlan_port)
                be_cmd_set_vxlan_port(adapter, 0);

        adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
        adapter->vxlan_port = 0;
}
#endif

static int be_clear(struct be_adapter *adapter)
{
        be_cancel_worker(adapter);

        if (sriov_enabled(adapter))
                be_vf_clear(adapter);

#ifdef CONFIG_BE2NET_VXLAN
        be_disable_vxlan_offloads(adapter);
#endif
        /* delete the primary mac along with the uc-mac list */
        be_mac_clear(adapter);

        be_cmd_if_destroy(adapter, adapter->if_handle, 0);

        be_clear_queues(adapter);

        be_msix_disable(adapter);
        adapter->flags &= ~BE_FLAGS_SETUP_DONE;
        return 0;
}

static int be_vfs_if_create(struct be_adapter *adapter)
{
        struct be_resources res = {0};
        struct be_vf_cfg *vf_cfg;
        u32 cap_flags, en_flags, vf;
        int status = 0;

        cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
                    BE_IF_FLAGS_MULTICAST;

        for_all_vfs(adapter, vf_cfg, vf) {
                if (!BE3_chip(adapter)) {
                        status = be_cmd_get_profile_config(adapter, &res,
                                                           vf + 1);
                        if (!status)
                                cap_flags = res.if_cap_flags;
                }

                /* If a FW profile exists, then cap_flags are updated */
                en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
                                        BE_IF_FLAGS_BROADCAST |
                                        BE_IF_FLAGS_MULTICAST);
                status = be_cmd_if_create(adapter, cap_flags, en_flags,
                                          &vf_cfg->if_handle, vf + 1);
                if (status)
                        goto err;
        }
err:
        return status;
}

static int be_vf_setup_init(struct be_adapter *adapter)
{
        struct be_vf_cfg *vf_cfg;
        int vf;

        adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
                                  GFP_KERNEL);
        if (!adapter->vf_cfg)
                return -ENOMEM;

        for_all_vfs(adapter, vf_cfg, vf) {
                vf_cfg->if_handle = -1;
                vf_cfg->pmac_id = -1;
        }
        return 0;
}
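
/* be_vf_setup() below handles two cases: if VFs are already enabled in the
 * PCI layer (e.g. the driver was reloaded while VFs were assigned), the
 * existing count is reused, their if_handles and MACs are queried back from
 * FW instead of being re-created, and pci_enable_sriov() is skipped;
 * otherwise the interfaces and MAC addresses are created from scratch and
 * SR-IOV is enabled as the last step.
 */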

static int be_vf_setup(struct be_adapter *adapter)
{
        struct device *dev = &adapter->pdev->dev;
        struct be_vf_cfg *vf_cfg;
        int status, old_vfs, vf;
        u32 privileges;
        u16 lnk_speed;

        old_vfs = pci_num_vf(adapter->pdev);
        if (old_vfs) {
                dev_info(dev, "%d VFs are already enabled\n", old_vfs);
                if (old_vfs != num_vfs)
                        dev_warn(dev, "Ignoring num_vfs=%d setting\n",
                                 num_vfs);
                adapter->num_vfs = old_vfs;
        } else {
                if (num_vfs > be_max_vfs(adapter))
                        dev_info(dev, "Device supports %d VFs and not %d\n",
                                 be_max_vfs(adapter), num_vfs);
                adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
                if (!adapter->num_vfs)
                        return 0;
        }

        status = be_vf_setup_init(adapter);
        if (status)
                goto err;

        if (old_vfs) {
                for_all_vfs(adapter, vf_cfg, vf) {
                        status = be_cmd_get_if_id(adapter, vf_cfg, vf);
                        if (status)
                                goto err;
                }
        } else {
                status = be_vfs_if_create(adapter);
                if (status)
                        goto err;
        }

        if (old_vfs) {
                status = be_vfs_mac_query(adapter);
                if (status)
                        goto err;
        } else {
                status = be_vf_eth_addr_config(adapter);
                if (status)
                        goto err;
        }

        for_all_vfs(adapter, vf_cfg, vf) {
                /* Allow VFs to program MAC/VLAN filters */
                status = be_cmd_get_fn_privileges(adapter, &privileges,
                                                  vf + 1);
                if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
                        status = be_cmd_set_fn_privileges(adapter,
                                                          privileges |
                                                          BE_PRIV_FILTMGMT,
                                                          vf + 1);
                        if (!status)
                                dev_info(dev, "VF%d has FILTMGMT privilege\n",
                                         vf);
                }

                /* BE3 FW, by default, caps VF TX-rate to 100mbps.
                 * Allow full available bandwidth
                 */
                if (BE3_chip(adapter) && !old_vfs)
                        be_cmd_config_qos(adapter, 1000, vf + 1);

                status = be_cmd_link_status_query(adapter, &lnk_speed,
                                                  NULL, vf + 1);
                if (!status)
                        vf_cfg->tx_rate = lnk_speed;

                if (!old_vfs) {
                        be_cmd_enable_vf(adapter, vf + 1);
                        be_cmd_set_logical_link_config(adapter,
                                                       IFLA_VF_LINK_STATE_AUTO,
                                                       vf+1);
                }
        }

        if (!old_vfs) {
                status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
                if (status) {
                        dev_err(dev, "SRIOV enable failed\n");
                        adapter->num_vfs = 0;
                        goto err;
                }
        }
        return 0;
err:
        dev_err(dev, "VF setup failed\n");
        be_vf_clear(adapter);
        return status;
}

/* Converting function_mode bits on BE3 to SH mc_type enums */
static u8 be_convert_mc_type(u32 function_mode)
{
        if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
                return vNIC1;
        else if (function_mode & FLEX10_MODE)
                return FLEX10;
        else if (function_mode & VNIC_MODE)
                return vNIC2;
        else if (function_mode & UMC_ENABLED)
                return UMC;
        else
                return MC_NONE;
}
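
/* VLAN budgeting in BEx_get_resources() below reflects multi-channel
 * partitioning: in QNQ mode only an eighth of the VLAN table is available
 * to this function, and in other multi-channel modes a quarter, minus one
 * entry reserved for the pvid.
 */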

/* On BE2/BE3 FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
                              struct be_resources *res)
{
        struct pci_dev *pdev = adapter->pdev;
        bool use_sriov = false;
        int max_vfs = 0;

        if (be_physfn(adapter) && BE3_chip(adapter)) {
                be_cmd_get_profile_config(adapter, res, 0);
                /* Some old versions of BE3 FW don't report max_vfs value */
                if (res->max_vfs == 0) {
                        max_vfs = pci_sriov_get_totalvfs(pdev);
                        res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
                }
                use_sriov = res->max_vfs && sriov_want(adapter);
        }

        if (be_physfn(adapter))
                res->max_uc_mac = BE_UC_PMAC_COUNT;
        else
                res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

        adapter->mc_type = be_convert_mc_type(adapter->function_mode);

        if (be_is_mc(adapter)) {
                /* Assuming that there are 4 channels per port,
                 * when multi-channel is enabled
                 */
                if (be_is_qnq_mode(adapter))
                        res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
                else
                        /* In a non-qnq multichannel mode, the pvid
                         * takes up one vlan entry
                         */
                        res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
        } else {
                res->max_vlans = BE_NUM_VLANS_SUPPORTED;
        }

        res->max_mcast_mac = BE_MAX_MC;

        /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
         * 2) Create multiple TX rings on a BE3-R multi-channel interface
         *    *only* if it is RSS-capable.
         */
        if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
            !be_physfn(adapter) || (be_is_mc(adapter) &&
            !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
                res->max_tx_qs = 1;
        else
                res->max_tx_qs = BE3_MAX_TX_QS;

        if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
            !use_sriov && be_physfn(adapter))
                res->max_rss_qs = (adapter->be3_native) ?
                                        BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
        res->max_rx_qs = res->max_rss_qs + 1;

        if (be_physfn(adapter))
                res->max_evt_qs = (res->max_vfs > 0) ?
                                        BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
        else
                res->max_evt_qs = 1;

        res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
        if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
                res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}

static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per-function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled, stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		if (be_physfn(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, 0);
			if (status)
				return status;
			adapter->res.max_vfs = res.max_vfs;
		}

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}

/* Routine to query per-function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}

static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}

static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}

static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}

int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}

static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old (%s), IRQs may not work.",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (sriov_want(adapter)) {
		if (be_max_vfs(adapter))
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};

static bool be_flash_redboot(struct be_adapter *adapter,
			     const u8 *p, u32 img_start, int image_size,
			     int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc, (image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
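
/* Illustrative note (an assumption about the image layout, not driver
 * code): the computation above treats the last 4 bytes of the image as
 * its CRC, i.e. for an image at offset img_start of length image_size
 * behind a hdr_size-byte header, the CRC sits at
 * hdr_size + img_start + image_size - 4. For example, hdr_size = 32,
 * img_start = 0x8000 and image_size = 0x1000 put the CRC at file
 * offsets 0x901c..0x901f.
 */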

static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}

static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (BE2_chip(adapter))
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}
static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
						int header_size,
						const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}

static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32 * 1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (status) {
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
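
/* A minimal sketch (hypothetical helper, not part of the driver) of the
 * chunking pattern be_flash() uses above: carve the image into 32KB
 * pieces and flag the final piece, which is the one that must be issued
 * as a "flash" operation rather than a "save".
 */
static inline u32 example_next_chunk(u32 total_bytes, bool *is_last)
{
	u32 num_bytes = min_t(u32, 32 * 1024, total_bytes);

	*is_last = (num_bytes == total_bytes);
	return num_bytes;
}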

/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
						   pflashcomp[i].offset,
						   pflashcomp[i].size,
						   filehdr_size +
						   img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}

static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_offset, img_size, img_optype, redboot;
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	const u8 *p = fw->data;
	struct flash_section_info *fsec = NULL;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted?\n");
		return -1;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);

		switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
		case IMAGE_FIRMWARE_iSCSI:
			img_optype = OPTYPE_ISCSI_ACTIVE;
			break;
		case IMAGE_BOOT_CODE:
			img_optype = OPTYPE_REDBOOT;
			break;
		case IMAGE_OPTION_ROM_ISCSI:
			img_optype = OPTYPE_BIOS;
			break;
		case IMAGE_OPTION_ROM_PXE:
			img_optype = OPTYPE_PXE_BIOS;
			break;
		case IMAGE_OPTION_ROM_FCoE:
			img_optype = OPTYPE_FCOE_BIOS;
			break;
		case IMAGE_FIRMWARE_BACKUP_iSCSI:
			img_optype = OPTYPE_ISCSI_BACKUP;
			break;
		case IMAGE_NCSI:
			img_optype = OPTYPE_NCSI_FW;
			break;
		default:
			continue;
		}

		if (img_optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
						   img_offset, img_size,
						   filehdr_size +
						   img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				fsec->fsec_entry[i].type);
			return status;
		}
	}
	return 0;
}

static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) +
			 LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(&adapter->pdev->dev,
			 "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\nNew FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
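
/* A minimal sketch (hypothetical helper, not part of the driver) of the
 * power-of-two alignment test behind the IS_ALIGNED(fw->size, sizeof(u32))
 * check above: a length is 4-byte aligned iff its two low bits are clear.
 */
static inline bool example_is_dword_aligned(size_t len)
{
	return (len & (sizeof(u32) - 1)) == 0;	/* same as len % 4 == 0 */
}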

#define UFI_TYPE2		2
#define UFI_TYPE3		3
#define UFI_TYPE3R		10
#define UFI_TYPE4		4
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (fhdr == NULL)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return UFI_TYPE4;
	else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
		if (fhdr->asic_type_rev == 0x10)
			return UFI_TYPE3R;
		else
			return UFI_TYPE3;
	} else if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter, adapter->fw_ver,
				  adapter->fw_on_flash);

fw_exit:
	release_firmware(fw);
	return status;
}
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}

static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
}
#ifdef CONFIG_BE2NET_VXLAN
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
			 be16_to_cpu(port));
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		return;
	}

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}

static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->vxlan_port != port)
		return;

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
}
#endif

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (skyhawk_chip(adapter)) {
		netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					   NETIF_F_TSO | NETIF_F_TSO6 |
					   NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	}
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				 SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
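
/* A minimal sketch (hypothetical helper, not part of the driver) of the
 * round-up arithmetic behind the PTR_ALIGN(va, 16) calls above; this is
 * why be_ctrl_init() over-allocates the mailbox by 16 bytes, so that a
 * 16-byte aligned mailbox can always be carved out of the buffer.
 */
static inline unsigned long example_align_up(unsigned long addr,
					     unsigned long align)
{
	/* align must be a power of two */
	return (addr + align - 1) & ~(align - 1);
}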

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		/* ALL non-BE ASICs */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v2);

	cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				      GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	return 0;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_initial_config(struct be_adapter *adapter)
{
	int status, level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}
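
/* A minimal sketch (hypothetical helper, not part of the driver) of why
 * be_get_temp_freq must be a power of 2: modulo by 2^n reduces to a mask,
 * which is the fast path a MODULO()-style macro can rely on (and the
 * reason a non-power-of-2 divisor can be rejected with BUG_ON).
 */
static inline u32 example_modulo_pow2(u32 counter, u32 freq)
{
	/* equals counter % freq only when freq is a power of 2 */
	return counter & (freq - 1);
}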

static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}

static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

/* If any VFs are already enabled, don't FLR the PF */
static bool be_reset_required(struct be_adapter *adapter)
{
	return pci_num_vf(adapter->pdev) ? false : true;
}

static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);