net: Replace u64_stats_fetch_begin_bh with u64_stats_fetch_begin_irq

drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}
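
/* Doorbell helpers: each builds a 32-bit doorbell word and posts it with
 * iowrite32(). The wmb() in the RQ/TXULP paths ensures that queue entries
 * written by the CPU are visible to the device before the producer index
 * is rung.
 */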
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
		DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}
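
/* The GET_STATS response layout differs by ASIC generation (BE2 uses v0,
 * BE3 v1, later chips v2); the helpers above return pointers into the
 * version-appropriate spot in the shared response buffer, so each
 * populate_* routine below can stay layout-specific.
 */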
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
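
/* On BEx the ERX no-frags drop counter is only 16 bits wide in hardware;
 * the helper below folds each new 16-bit reading into a 32-bit software
 * accumulator, adding 65536 whenever the hardware value is seen to have
 * wrapped past 0xFFFF.
 */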
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo,
			       u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
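
/* be_get_stats64() snapshots the per-queue 64-bit counters with the
 * u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair (the API
 * this commit switches to from the _bh variants): on 32-bit hosts the
 * seqcount makes the reader retry if a writer updated the counters
 * mid-read, so each pkts/bytes pair stays internally consistent; on
 * 64-bit hosts the reads are atomic and the loop runs once.
 */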
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}
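
/* Writer side of the u64_stats seqcount: the update_begin/update_end pair
 * here (and in be_rx_stats_update() further down) is what the fetch/retry
 * loop in be_get_stats64() synchronizes against on 32-bit hosts.
 */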
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}
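
/* If the 802.1p priority requested by the stack is not in the adapter's
 * allowed priority bitmap, the helper below rewrites the priority bits of
 * the tag to the firmware-recommended priority; the VID bits are left
 * untouched.
 */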
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
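
/* The helper below inserts the VLAN tag(s) into the packet data itself:
 * the inner tag (or pvid), plus the outer QnQ tag when qnq_vid is set.
 * When a tag is inlined this way, *skip_hw_vlan tells the caller to
 * program the Tx WRB with evt = 1, compl = 0 so that HW tagging is
 * skipped.
 */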
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *) (skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
				struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			return NULL;
		skb->len = 36;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
		if (!skb)
			return NULL;
	}

	return skb;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
	    new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
		       (ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			 "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU,
			 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (adapter->vlan_tag[vid])
		return status;

	adapter->vlan_tag[vid] = 1;
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		adapter->vlan_tag[vid] = 0;
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	status = be_vid_config(adapter);
	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_clear_promisc(struct be_adapter *adapter)
{
	adapter->promiscuous = false;
	adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;

	be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			  int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (!status)
		vf_cfg->vlan_tag = vlan;
	else
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			     int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	if (lancer_chip(adapter))
		status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
	else
		status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
			"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (!status)
		adapter->vf_cfg[vf].plink_tracking = link_state;

	return status;
}
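
/* Adaptive interrupt coalescing: be_eqd_update() below snapshots the rx/tx
 * packet counters (again via the _irq fetch/retry pair), converts the delta
 * since the last sample into a packets-per-second rate, derives an EQ delay
 * from that rate, clamps it to the per-EQ min/max, and reprograms the
 * device only for EQs whose delay actually changed.
 */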
e1d18735 1379
2632bafd
SP
1380static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1381 ulong now)
6b7c5b94 1382{
2632bafd
SP
1383 aic->rx_pkts_prev = rx_pkts;
1384 aic->tx_reqs_prev = tx_pkts;
1385 aic->jiffies = now;
1386}
ac124ff9 1387
2632bafd
SP
1388static void be_eqd_update(struct be_adapter *adapter)
1389{
1390 struct be_set_eqd set_eqd[MAX_EVT_QS];
1391 int eqd, i, num = 0, start;
1392 struct be_aic_obj *aic;
1393 struct be_eq_obj *eqo;
1394 struct be_rx_obj *rxo;
1395 struct be_tx_obj *txo;
1396 u64 rx_pkts, tx_pkts;
1397 ulong now;
1398 u32 pps, delta;
10ef9ab4 1399
2632bafd
SP
1400 for_all_evt_queues(adapter, eqo, i) {
1401 aic = &adapter->aic_obj[eqo->idx];
1402 if (!aic->enable) {
1403 if (aic->jiffies)
1404 aic->jiffies = 0;
1405 eqd = aic->et_eqd;
1406 goto modify_eqd;
1407 }
6b7c5b94 1408
2632bafd
SP
1409 rxo = &adapter->rx_obj[eqo->idx];
1410 do {
57a7744e 1411 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
2632bafd 1412 rx_pkts = rxo->stats.rx_pkts;
57a7744e 1413 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
10ef9ab4 1414
2632bafd
SP
1415 txo = &adapter->tx_obj[eqo->idx];
1416 do {
57a7744e 1417 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
2632bafd 1418 tx_pkts = txo->stats.tx_reqs;
57a7744e 1419 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
6b7c5b94 1420
6b7c5b94 1421
2632bafd
SP
1422 /* Skip, if wrapped around or first calculation */
1423 now = jiffies;
1424 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1425 rx_pkts < aic->rx_pkts_prev ||
1426 tx_pkts < aic->tx_reqs_prev) {
1427 be_aic_update(aic, rx_pkts, tx_pkts, now);
1428 continue;
1429 }
1430
1431 delta = jiffies_to_msecs(now - aic->jiffies);
1432 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1433 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1434 eqd = (pps / 15000) << 2;
10ef9ab4 1435
2632bafd
SP
1436 if (eqd < 8)
1437 eqd = 0;
1438 eqd = min_t(u32, eqd, aic->max_eqd);
1439 eqd = max_t(u32, eqd, aic->min_eqd);
1440
1441 be_aic_update(aic, rx_pkts, tx_pkts, now);
10ef9ab4 1442modify_eqd:
2632bafd
SP
1443 if (eqd != aic->prev_eqd) {
1444 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1445 set_eqd[num].eq_id = eqo->q.id;
1446 aic->prev_eqd = eqd;
1447 num++;
1448 }
ac124ff9 1449 }
2632bafd
SP
1450
1451 if (num)
1452 be_cmd_modify_eqd(adapter, set_eqd, num);
6b7c5b94
SP
1453}

static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1626
5be93b9a 1627/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1628static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1629 struct napi_struct *napi,
1630 struct be_rx_compl_info *rxcp)
6b7c5b94 1631{
10ef9ab4 1632 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1633 struct be_rx_page_info *page_info;
5be93b9a 1634 struct sk_buff *skb = NULL;
2e588f84
SP
1635 u16 remaining, curr_frag_len;
1636 u16 i, j;
3968fa1e 1637
10ef9ab4 1638 skb = napi_get_frags(napi);
5be93b9a 1639 if (!skb) {
10ef9ab4 1640 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1641 return;
1642 }
1643
2e588f84
SP
1644 remaining = rxcp->pkt_size;
1645 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1646 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1647
1648 curr_frag_len = min(remaining, rx_frag_size);
1649
bd46cb6c
AK
1650 /* Coalesce all frags from the same physical page in one slot */
1651 if (i == 0 || page_info->page_offset == 0) {
1652 /* First frag or Fresh page */
1653 j++;
b061b39e 1654 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1655 skb_shinfo(skb)->frags[j].page_offset =
1656 page_info->page_offset;
9e903e08 1657 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1658 } else {
1659 put_page(page_info->page);
1660 }
9e903e08 1661 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1662 skb->truesize += rx_frag_size;
bd46cb6c 1663 remaining -= curr_frag_len;
6b7c5b94
SP
1664 memset(page_info, 0, sizeof(*page_info));
1665 }
bd46cb6c 1666 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1667
5be93b9a 1668 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1669 skb->len = rxcp->pkt_size;
1670 skb->data_len = rxcp->pkt_size;
5be93b9a 1671 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1672 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 1673 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 1674 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
6384a4d0 1675 skb_mark_napi_id(skb, napi);
5be93b9a 1676
343e43c0 1677 if (rxcp->vlanf)
86a9bad3 1678 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1679
10ef9ab4 1680 napi_gro_frags(napi);
2e588f84
SP
1681}
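/* Editor's sketch of the napi_get_frags()/napi_gro_frags() contract used
 * above; the helper below is illustrative only and not part of the driver:
 */
static void example_gro_receive(struct napi_struct *napi, struct page *page,
				unsigned int off, unsigned int len)
{
	/* frag-only skb pre-allocated and owned by the NAPI instance */
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb)
		return;		/* caller must release the page itself */

	skb_fill_page_desc(skb, 0, page, off, len);
	skb->len = len;
	skb->data_len = len;
	skb->truesize += PAGE_SIZE;

	napi_gro_frags(napi);	/* consumes the skb; do not free it here */
}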
1682
10ef9ab4
SP
1683static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1684 struct be_rx_compl_info *rxcp)
2e588f84
SP
1685{
1686 rxcp->pkt_size =
1687 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1688 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1689 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1690 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
9ecb42fd 1691 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
2e588f84
SP
1692 rxcp->ip_csum =
1693 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1694 rxcp->l4_csum =
1695 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1696 rxcp->ipv6 =
1697 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
2e588f84
SP
1698 rxcp->num_rcvd =
1699 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1700 rxcp->pkt_type =
1701 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
4b972914 1702 rxcp->rss_hash =
c297977e 1703 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
15d72184 1704 if (rxcp->vlanf) {
f93f160b 1705 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
3c709f8f
DM
1706 compl);
1707 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1708 compl);
15d72184 1709 }
12004ae9 1710 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
2e588f84
SP
1711}
1712
10ef9ab4
SP
1713static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1714 struct be_rx_compl_info *rxcp)
2e588f84
SP
1715{
1716 rxcp->pkt_size =
1717 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1718 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1719 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1720 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
9ecb42fd 1721 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
2e588f84
SP
1722 rxcp->ip_csum =
1723 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1724 rxcp->l4_csum =
1725 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1726 rxcp->ipv6 =
1727 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
2e588f84
SP
1728 rxcp->num_rcvd =
1729 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1730 rxcp->pkt_type =
1731 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
4b972914 1732 rxcp->rss_hash =
c297977e 1733 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
15d72184 1734 if (rxcp->vlanf) {
f93f160b 1735 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
3c709f8f
DM
1736 compl);
1737 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1738 compl);
15d72184 1739 }
12004ae9 1740 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
e38b1706
SK
1741 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1742 ip_frag, compl);
2e588f84
SP
1743}
1744
1745static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1746{
1747 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1748 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1749 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1750
2e588f84
SP
1751 /* For checking the valid bit it is OK to use either definition, as the
 1752 * valid bit is at the same position in both the v0 and v1 Rx compl */
1753 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1754 return NULL;
6b7c5b94 1755
2e588f84
SP
1756 rmb();
1757 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1758
2e588f84 1759 if (adapter->be3_native)
10ef9ab4 1760 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1761 else
10ef9ab4 1762 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1763
e38b1706
SK
1764 if (rxcp->ip_frag)
1765 rxcp->l4_csum = 0;
1766
15d72184 1767 if (rxcp->vlanf) {
f93f160b
VV
1768 /* In QNQ modes, if qnq bit is not set, then the packet was
1769 * tagged only with the transparent outer vlan-tag and must
1770 * not be treated as a vlan packet by host
1771 */
1772 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 1773 rxcp->vlanf = 0;
6b7c5b94 1774
15d72184 1775 if (!lancer_chip(adapter))
3c709f8f 1776 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1777
939cf306 1778 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
3c709f8f 1779 !adapter->vlan_tag[rxcp->vlan_tag])
15d72184
SP
1780 rxcp->vlanf = 0;
1781 }
2e588f84
SP
1782
1783 /* As the compl has been parsed, reset it; we won't touch it again */
1784 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1785
3abcdeda 1786 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1787 return rxcp;
1788}
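/* Editor's sketch -- the generic valid-bit consumption pattern that
 * be_rx_compl_get() follows, shown with a hypothetical entry layout
 * (not a real driver structure):
 */
struct example_cqe {
	u32 valid;		/* written last by HW when the entry lands */
	u32 payload[3];
};

static struct example_cqe *example_cq_get(struct example_cqe *ring,
					  u16 *tail, u16 len)
{
	struct example_cqe *cqe = &ring[*tail];

	if (!cqe->valid)
		return NULL;

	rmb();			/* order the flag read before the payload */
	cqe->valid = 0;		/* reset so a wrapped ring isn't re-consumed */
	*tail = (*tail + 1) % len;
	return cqe;
}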
1789
1829b086 1790static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1791{
6b7c5b94 1792 u32 order = get_order(size);
1829b086 1793
6b7c5b94 1794 if (order > 0)
1829b086
ED
1795 gfp |= __GFP_COMP;
1796 return alloc_pages(gfp, order);
6b7c5b94
SP
1797}
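/* Editor's note (4K PAGE_SIZE assumed): get_order(2048) == 0, so the default
 * rx_frag_size allocates plain order-0 pages and __GFP_COMP is not needed.
 * A 16K big_page_size would give get_order(16384) == 2, and __GFP_COMP makes
 * that order-2 allocation a compound page so that get_page()/put_page()
 * reference the whole 4-page unit rather than individual tail pages.
 */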
1798
1799/*
1800 * Allocate a page, split it to fragments of size rx_frag_size and post as
1801 * receive buffers to BE
1802 */
1829b086 1803static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1804{
3abcdeda 1805 struct be_adapter *adapter = rxo->adapter;
26d92f92 1806 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1807 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1808 struct page *pagep = NULL;
ba42fad0 1809 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
1810 struct be_eth_rx_d *rxd;
1811 u64 page_dmaaddr = 0, frag_dmaaddr;
1812 u32 posted, page_offset = 0;
1813
3abcdeda 1814 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1815 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1816 if (!pagep) {
1829b086 1817 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1818 if (unlikely(!pagep)) {
ac124ff9 1819 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1820 break;
1821 }
ba42fad0
IV
1822 page_dmaaddr = dma_map_page(dev, pagep, 0,
1823 adapter->big_page_size,
2b7bcebf 1824 DMA_FROM_DEVICE);
ba42fad0
IV
1825 if (dma_mapping_error(dev, page_dmaaddr)) {
1826 put_page(pagep);
1827 pagep = NULL;
1828 rx_stats(rxo)->rx_post_fail++;
1829 break;
1830 }
e50287be 1831 page_offset = 0;
6b7c5b94
SP
1832 } else {
1833 get_page(pagep);
e50287be 1834 page_offset += rx_frag_size;
6b7c5b94 1835 }
e50287be 1836 page_info->page_offset = page_offset;
6b7c5b94 1837 page_info->page = pagep;
6b7c5b94
SP
1838
1839 rxd = queue_head_node(rxq);
e50287be 1840 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
1841 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1842 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1843
1844 /* Any space left in the current big page for another frag? */
1845 if ((page_offset + rx_frag_size + rx_frag_size) >
1846 adapter->big_page_size) {
1847 pagep = NULL;
e50287be
SP
1848 page_info->last_frag = true;
1849 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1850 } else {
1851 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 1852 }
26d92f92
SP
1853
1854 prev_page_info = page_info;
1855 queue_head_inc(rxq);
10ef9ab4 1856 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 1857 }
e50287be
SP
1858
1859 /* Mark the last frag of a page when we break out of the above loop
1860 * with no more slots available in the RXQ
1861 */
1862 if (pagep) {
1863 prev_page_info->last_frag = true;
1864 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
1865 }
6b7c5b94
SP
1866
1867 if (posted) {
6b7c5b94 1868 atomic_add(posted, &rxq->used);
6384a4d0
SP
1869 if (rxo->rx_post_starved)
1870 rxo->rx_post_starved = false;
8788fdc2 1871 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1872 } else if (atomic_read(&rxq->used) == 0) {
1873 /* Let be_worker replenish when memory is available */
3abcdeda 1874 rxo->rx_post_starved = true;
6b7c5b94 1875 }
6b7c5b94
SP
1876}
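/* Editor's note -- fragment-carving arithmetic in the loop above (sizes
 * assumed): big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE is
 * set in be_rx_cqs_create(). With rx_frag_size = 2048 and 4K pages that is
 * 4096, so each page yields two RX descriptors at offsets 0 and 2048. The
 * "page_offset + 2 * rx_frag_size > big_page_size" test then forces a fresh
 * page after the second carve, and only the last frag carved from a page
 * records the page-wide DMA address used for unmapping.
 */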
1877
5fb379ee 1878static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1879{
6b7c5b94
SP
1880 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1881
1882 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1883 return NULL;
1884
f3eb62d2 1885 rmb();
6b7c5b94
SP
1886 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1887
1888 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1889
1890 queue_tail_inc(tx_cq);
1891 return txcp;
1892}
1893
3c8def97
SP
1894static u16 be_tx_compl_process(struct be_adapter *adapter,
1895 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1896{
3c8def97 1897 struct be_queue_info *txq = &txo->q;
a73b796e 1898 struct be_eth_wrb *wrb;
3c8def97 1899 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1900 struct sk_buff *sent_skb;
ec43b1a6
SP
1901 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1902 bool unmap_skb_hdr = true;
6b7c5b94 1903
ec43b1a6 1904 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1905 BUG_ON(!sent_skb);
ec43b1a6
SP
1906 sent_skbs[txq->tail] = NULL;
1907
1908 /* skip header wrb */
a73b796e 1909 queue_tail_inc(txq);
6b7c5b94 1910
ec43b1a6 1911 do {
6b7c5b94 1912 cur_index = txq->tail;
a73b796e 1913 wrb = queue_tail_node(txq);
2b7bcebf
IV
1914 unmap_tx_frag(&adapter->pdev->dev, wrb,
1915 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1916 unmap_skb_hdr = false;
1917
6b7c5b94
SP
1918 num_wrbs++;
1919 queue_tail_inc(txq);
ec43b1a6 1920 } while (cur_index != last_index);
6b7c5b94 1921
d8ec2c02 1922 dev_kfree_skb_any(sent_skb);
4d586b82 1923 return num_wrbs;
6b7c5b94
SP
1924}
1925
10ef9ab4
SP
1926/* Return the number of events in the event queue */
1927static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1928{
10ef9ab4
SP
1929 struct be_eq_entry *eqe;
1930 int num = 0;
859b1e4e 1931
10ef9ab4
SP
1932 do {
1933 eqe = queue_tail_node(&eqo->q);
1934 if (eqe->evt == 0)
1935 break;
859b1e4e 1936
10ef9ab4
SP
1937 rmb();
1938 eqe->evt = 0;
1939 num++;
1940 queue_tail_inc(&eqo->q);
1941 } while (true);
1942
1943 return num;
859b1e4e
SP
1944}
1945
10ef9ab4
SP
1946 /* Leaves the EQ in a disarmed state */
1947static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1948{
10ef9ab4 1949 int num = events_get(eqo);
859b1e4e 1950
10ef9ab4 1951 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1952}
1953
10ef9ab4 1954static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
1955{
1956 struct be_rx_page_info *page_info;
3abcdeda
SP
1957 struct be_queue_info *rxq = &rxo->q;
1958 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1959 struct be_rx_compl_info *rxcp;
d23e946c
SP
1960 struct be_adapter *adapter = rxo->adapter;
1961 int flush_wait = 0;
6b7c5b94 1962
d23e946c
SP
1963 /* Consume pending rx completions.
1964 * Wait for the flush completion (identified by zero num_rcvd)
1965 * to arrive. Notify CQ even when there are no more CQ entries
1966 * for HW to flush partially coalesced CQ entries.
1967 * In Lancer, there is no need to wait for flush compl.
1968 */
1969 for (;;) {
1970 rxcp = be_rx_compl_get(rxo);
1971 if (rxcp == NULL) {
1972 if (lancer_chip(adapter))
1973 break;
1974
1975 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1976 dev_warn(&adapter->pdev->dev,
1977 "did not receive flush compl\n");
1978 break;
1979 }
1980 be_cq_notify(adapter, rx_cq->id, true, 0);
1981 mdelay(1);
1982 } else {
1983 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 1984 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
1985 if (rxcp->num_rcvd == 0)
1986 break;
1987 }
6b7c5b94
SP
1988 }
1989
d23e946c
SP
1990 /* After cleanup, leave the CQ in unarmed state */
1991 be_cq_notify(adapter, rx_cq->id, false, 0);
1992
1993 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
1994 while (atomic_read(&rxq->used) > 0) {
1995 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1996 put_page(page_info->page);
1997 memset(page_info, 0, sizeof(*page_info));
1998 }
1999 BUG_ON(atomic_read(&rxq->used));
482c9e79 2000 rxq->tail = rxq->head = 0;
6b7c5b94
SP
2001}
2002
0ae57bb3 2003static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2004{
0ae57bb3
SP
2005 struct be_tx_obj *txo;
2006 struct be_queue_info *txq;
a8e9179a 2007 struct be_eth_tx_compl *txcp;
4d586b82 2008 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
2009 struct sk_buff *sent_skb;
2010 bool dummy_wrb;
0ae57bb3 2011 int i, pending_txqs;
a8e9179a
SP
2012
2013 /* Wait for a max of 200ms for all the tx-completions to arrive. */
2014 do {
0ae57bb3
SP
2015 pending_txqs = adapter->num_tx_qs;
2016
2017 for_all_tx_queues(adapter, txo, i) {
2018 txq = &txo->q;
2019 while ((txcp = be_tx_compl_get(&txo->cq))) {
2020 end_idx =
2021 AMAP_GET_BITS(struct amap_eth_tx_compl,
2022 wrb_index, txcp);
2023 num_wrbs += be_tx_compl_process(adapter, txo,
2024 end_idx);
2025 cmpl++;
2026 }
2027 if (cmpl) {
2028 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2029 atomic_sub(num_wrbs, &txq->used);
2030 cmpl = 0;
2031 num_wrbs = 0;
2032 }
2033 if (atomic_read(&txq->used) == 0)
2034 pending_txqs--;
a8e9179a
SP
2035 }
2036
0ae57bb3 2037 if (pending_txqs == 0 || ++timeo > 200)
a8e9179a
SP
2038 break;
2039
2040 mdelay(1);
2041 } while (true);
2042
0ae57bb3
SP
2043 for_all_tx_queues(adapter, txo, i) {
2044 txq = &txo->q;
2045 if (atomic_read(&txq->used))
2046 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2047 atomic_read(&txq->used));
2048
2049 /* free posted tx for which compls will never arrive */
2050 while (atomic_read(&txq->used)) {
2051 sent_skb = txo->sent_skb_list[txq->tail];
2052 end_idx = txq->tail;
2053 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2054 &dummy_wrb);
2055 index_adv(&end_idx, num_wrbs - 1, txq->len);
2056 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2057 atomic_sub(num_wrbs, &txq->used);
2058 }
b03388d6 2059 }
6b7c5b94
SP
2060}
2061
10ef9ab4
SP
2062static void be_evt_queues_destroy(struct be_adapter *adapter)
2063{
2064 struct be_eq_obj *eqo;
2065 int i;
2066
2067 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2068 if (eqo->q.created) {
2069 be_eq_clean(eqo);
10ef9ab4 2070 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2071 napi_hash_del(&eqo->napi);
68d7bdcb 2072 netif_napi_del(&eqo->napi);
19d59aa7 2073 }
10ef9ab4
SP
2074 be_queue_free(adapter, &eqo->q);
2075 }
2076}
2077
2078static int be_evt_queues_create(struct be_adapter *adapter)
2079{
2080 struct be_queue_info *eq;
2081 struct be_eq_obj *eqo;
2632bafd 2082 struct be_aic_obj *aic;
10ef9ab4
SP
2083 int i, rc;
2084
92bf14ab
SP
2085 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2086 adapter->cfg_num_qs);
10ef9ab4
SP
2087
2088 for_all_evt_queues(adapter, eqo, i) {
68d7bdcb
SP
2089 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2090 BE_NAPI_WEIGHT);
6384a4d0 2091 napi_hash_add(&eqo->napi);
2632bafd 2092 aic = &adapter->aic_obj[i];
10ef9ab4
SP
2093 eqo->adapter = adapter;
2094 eqo->tx_budget = BE_TX_BUDGET;
2095 eqo->idx = i;
2632bafd
SP
2096 aic->max_eqd = BE_MAX_EQD;
2097 aic->enable = true;
10ef9ab4
SP
2098
2099 eq = &eqo->q;
2100 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2101 sizeof(struct be_eq_entry));
2102 if (rc)
2103 return rc;
2104
f2f781a7 2105 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2106 if (rc)
2107 return rc;
2108 }
1cfafab9 2109 return 0;
10ef9ab4
SP
2110}
2111
5fb379ee
SP
2112static void be_mcc_queues_destroy(struct be_adapter *adapter)
2113{
2114 struct be_queue_info *q;
5fb379ee 2115
8788fdc2 2116 q = &adapter->mcc_obj.q;
5fb379ee 2117 if (q->created)
8788fdc2 2118 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2119 be_queue_free(adapter, q);
2120
8788fdc2 2121 q = &adapter->mcc_obj.cq;
5fb379ee 2122 if (q->created)
8788fdc2 2123 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2124 be_queue_free(adapter, q);
2125}
2126
2127/* Must be called only after TX qs are created as MCC shares TX EQ */
2128static int be_mcc_queues_create(struct be_adapter *adapter)
2129{
2130 struct be_queue_info *q, *cq;
5fb379ee 2131
8788fdc2 2132 cq = &adapter->mcc_obj.cq;
5fb379ee 2133 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 2134 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2135 goto err;
2136
10ef9ab4
SP
2137 /* Use the default EQ for MCC completions */
2138 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2139 goto mcc_cq_free;
2140
8788fdc2 2141 q = &adapter->mcc_obj.q;
5fb379ee
SP
2142 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2143 goto mcc_cq_destroy;
2144
8788fdc2 2145 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2146 goto mcc_q_free;
2147
2148 return 0;
2149
2150mcc_q_free:
2151 be_queue_free(adapter, q);
2152mcc_cq_destroy:
8788fdc2 2153 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2154mcc_cq_free:
2155 be_queue_free(adapter, cq);
2156err:
2157 return -1;
2158}
2159
6b7c5b94
SP
2160static void be_tx_queues_destroy(struct be_adapter *adapter)
2161{
2162 struct be_queue_info *q;
3c8def97
SP
2163 struct be_tx_obj *txo;
2164 u8 i;
6b7c5b94 2165
3c8def97
SP
2166 for_all_tx_queues(adapter, txo, i) {
2167 q = &txo->q;
2168 if (q->created)
2169 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2170 be_queue_free(adapter, q);
6b7c5b94 2171
3c8def97
SP
2172 q = &txo->cq;
2173 if (q->created)
2174 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2175 be_queue_free(adapter, q);
2176 }
6b7c5b94
SP
2177}
2178
7707133c 2179static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2180{
10ef9ab4 2181 struct be_queue_info *cq, *eq;
3c8def97 2182 struct be_tx_obj *txo;
92bf14ab 2183 int status, i;
6b7c5b94 2184
92bf14ab 2185 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2186
10ef9ab4
SP
2187 for_all_tx_queues(adapter, txo, i) {
2188 cq = &txo->cq;
2189 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2190 sizeof(struct be_eth_tx_compl));
2191 if (status)
2192 return status;
3c8def97 2193
827da44c
JS
2194 u64_stats_init(&txo->stats.sync);
2195 u64_stats_init(&txo->stats.sync_compl);
2196
10ef9ab4
SP
2197 /* If num_evt_qs is less than num_tx_qs, then more than
2198 * one txq share an eq
2199 */
2200 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2201 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2202 if (status)
2203 return status;
6b7c5b94 2204
10ef9ab4
SP
2205 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2206 sizeof(struct be_eth_wrb));
2207 if (status)
2208 return status;
6b7c5b94 2209
94d73aaa 2210 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2211 if (status)
2212 return status;
3c8def97 2213 }
6b7c5b94 2214
d379142b
SP
2215 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2216 adapter->num_tx_qs);
10ef9ab4 2217 return 0;
6b7c5b94
SP
2218}
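/* Editor's note -- a worked example of the "i % adapter->num_evt_qs" CQ-to-EQ
 * binding above (queue counts assumed): with num_tx_qs = 8 and num_evt_qs = 4,
 * TXQs 0 and 4 share EQ0, TXQs 1 and 5 share EQ1, and so on. be_poll() walks
 * the same stride (i = eqo->idx; i += num_evt_qs), so each EQ services exactly
 * the TX completion queues bound to it here.
 */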
2219
10ef9ab4 2220static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2221{
2222 struct be_queue_info *q;
3abcdeda
SP
2223 struct be_rx_obj *rxo;
2224 int i;
2225
2226 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2227 q = &rxo->cq;
2228 if (q->created)
2229 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2230 be_queue_free(adapter, q);
ac6a0c4a
SP
2231 }
2232}
2233
10ef9ab4 2234static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2235{
10ef9ab4 2236 struct be_queue_info *eq, *cq;
3abcdeda
SP
2237 struct be_rx_obj *rxo;
2238 int rc, i;
6b7c5b94 2239
92bf14ab
SP
2240 /* We can create as many RSS rings as there are EQs. */
2241 adapter->num_rx_qs = adapter->num_evt_qs;
2242
2243 /* We'll use RSS only if at least 2 RSS rings are supported.
2244 * When RSS is used, we'll need a default RXQ for non-IP traffic.
10ef9ab4 2245 */
92bf14ab
SP
2246 if (adapter->num_rx_qs > 1)
2247 adapter->num_rx_qs++;
2248
6b7c5b94 2249 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2250 for_all_rx_queues(adapter, rxo, i) {
2251 rxo->adapter = adapter;
3abcdeda
SP
2252 cq = &rxo->cq;
2253 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2254 sizeof(struct be_eth_rx_compl));
2255 if (rc)
10ef9ab4 2256 return rc;
3abcdeda 2257
827da44c 2258 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2259 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2260 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2261 if (rc)
10ef9ab4 2262 return rc;
3abcdeda 2263 }
6b7c5b94 2264
d379142b
SP
2265 dev_info(&adapter->pdev->dev,
2266 "created %d RSS queue(s) and 1 default RX queue\n",
2267 adapter->num_rx_qs - 1);
10ef9ab4 2268 return 0;
b628bde2
SP
2269}
2270
6b7c5b94
SP
2271static irqreturn_t be_intx(int irq, void *dev)
2272{
e49cc34f
SP
2273 struct be_eq_obj *eqo = dev;
2274 struct be_adapter *adapter = eqo->adapter;
2275 int num_evts = 0;
6b7c5b94 2276
d0b9cec3
SP
2277 /* IRQ is not expected when NAPI is scheduled as the EQ
2278 * will not be armed.
2279 * But, this can happen on Lancer INTx where it takes
2280 * a while to de-assert INTx or in BE2 where occasionally
2281 * an interrupt may be raised even when EQ is unarmed.
2282 * If NAPI is already scheduled, then counting & notifying
2283 * events will orphan them.
e49cc34f 2284 */
d0b9cec3 2285 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2286 num_evts = events_get(eqo);
d0b9cec3
SP
2287 __napi_schedule(&eqo->napi);
2288 if (num_evts)
2289 eqo->spurious_intr = 0;
2290 }
2291 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2292
d0b9cec3
SP
2293 /* Return IRQ_HANDLED only for the first spurious intr
2294 * after a valid intr to stop the kernel from branding
2295 * this irq as a bad one!
e49cc34f 2296 */
d0b9cec3
SP
2297 if (num_evts || eqo->spurious_intr++ == 0)
2298 return IRQ_HANDLED;
2299 else
2300 return IRQ_NONE;
6b7c5b94
SP
2301}
2302
10ef9ab4 2303static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2304{
10ef9ab4 2305 struct be_eq_obj *eqo = dev;
6b7c5b94 2306
0b545a62
SP
2307 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2308 napi_schedule(&eqo->napi);
6b7c5b94
SP
2309 return IRQ_HANDLED;
2310}
2311
2e588f84 2312static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2313{
e38b1706 2314 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2315}
2316
10ef9ab4 2317static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
6384a4d0 2318 int budget, int polling)
6b7c5b94 2319{
3abcdeda
SP
2320 struct be_adapter *adapter = rxo->adapter;
2321 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2322 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
2323 u32 work_done;
2324
2325 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2326 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2327 if (!rxcp)
2328 break;
2329
12004ae9
SP
2330 /* Is it a flush compl that has no data? */
2331 if (unlikely(rxcp->num_rcvd == 0))
2332 goto loop_continue;
2333
2334 /* Discard compl with partial DMA Lancer B0 */
2335 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2336 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2337 goto loop_continue;
2338 }
2339
2340 /* On BE drop pkts that arrive due to imperfect filtering in
2341 * promiscuous mode on some SKUs
2342 */
2343 if (unlikely(rxcp->port != adapter->port_num &&
2344 !lancer_chip(adapter))) {
10ef9ab4 2345 be_rx_compl_discard(rxo, rxcp);
12004ae9 2346 goto loop_continue;
64642811 2347 }
009dd872 2348
6384a4d0
SP
2349 /* Don't do gro when we're busy_polling */
2350 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2351 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2352 else
6384a4d0
SP
2353 be_rx_compl_process(rxo, napi, rxcp);
2354
12004ae9 2355loop_continue:
2e588f84 2356 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2357 }
2358
10ef9ab4
SP
2359 if (work_done) {
2360 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2361
6384a4d0
SP
2362 /* When an rx-obj gets into post_starved state, just
2363 * let be_worker do the posting.
2364 */
2365 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2366 !rxo->rx_post_starved)
10ef9ab4 2367 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 2368 }
10ef9ab4 2369
6b7c5b94
SP
2370 return work_done;
2371}
2372
10ef9ab4
SP
2373static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2374 int budget, int idx)
6b7c5b94 2375{
6b7c5b94 2376 struct be_eth_tx_compl *txcp;
10ef9ab4 2377 int num_wrbs = 0, work_done;
3c8def97 2378
10ef9ab4
SP
2379 for (work_done = 0; work_done < budget; work_done++) {
2380 txcp = be_tx_compl_get(&txo->cq);
2381 if (!txcp)
2382 break;
2383 num_wrbs += be_tx_compl_process(adapter, txo,
3c8def97
SP
2384 AMAP_GET_BITS(struct amap_eth_tx_compl,
2385 wrb_index, txcp));
10ef9ab4 2386 }
6b7c5b94 2387
10ef9ab4
SP
2388 if (work_done) {
2389 be_cq_notify(adapter, txo->cq.id, true, work_done);
2390 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2391
10ef9ab4
SP
2392 /* As Tx wrbs have been freed up, wake up netdev queue
2393 * if it was stopped due to lack of tx wrbs. */
2394 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2395 atomic_read(&txo->q.used) < txo->q.len / 2) {
2396 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2397 }
10ef9ab4
SP
2398
2399 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2400 tx_stats(txo)->tx_compl += work_done;
2401 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2402 }
10ef9ab4
SP
2403 return (work_done < budget); /* Done */
2404}
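/* Editor's sketch -- the reader that pairs with the u64_stats_update_begin()/
 * u64_stats_update_end() writer above retries on a torn 64-bit read using the
 * seqcount helpers from <linux/u64_stats_sync.h>:
 */
static u64 example_read_tx_compls(struct be_tx_obj *txo)
{
	unsigned int start;
	u64 compls;

	do {
		start = u64_stats_fetch_begin_irq(&tx_stats(txo)->sync_compl);
		compls = tx_stats(txo)->tx_compl;
	} while (u64_stats_fetch_retry_irq(&tx_stats(txo)->sync_compl, start));

	return compls;
}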
6b7c5b94 2405
68d7bdcb 2406int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2407{
2408 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2409 struct be_adapter *adapter = eqo->adapter;
0b545a62 2410 int max_work = 0, work, i, num_evts;
6384a4d0 2411 struct be_rx_obj *rxo;
10ef9ab4 2412 bool tx_done;
f31e50a8 2413
0b545a62
SP
2414 num_evts = events_get(eqo);
2415
10ef9ab4
SP
2416 /* Process all TXQs serviced by this EQ */
2417 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2418 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2419 eqo->tx_budget, i);
2420 if (!tx_done)
2421 max_work = budget;
f31e50a8
SP
2422 }
2423
6384a4d0
SP
2424 if (be_lock_napi(eqo)) {
2425 /* This loop will iterate twice for EQ0 in which
2426 * completions of the last RXQ (default one) are also processed.
 2427 * For other EQs the loop iterates only once.
2428 */
2429 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2430 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2431 max_work = max(work, max_work);
2432 }
2433 be_unlock_napi(eqo);
2434 } else {
2435 max_work = budget;
10ef9ab4 2436 }
6b7c5b94 2437
10ef9ab4
SP
2438 if (is_mcc_eqo(eqo))
2439 be_process_mcc(adapter);
93c86700 2440
10ef9ab4
SP
2441 if (max_work < budget) {
2442 napi_complete(napi);
0b545a62 2443 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2444 } else {
2445 /* As we'll continue in polling mode, count and clear events */
0b545a62 2446 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2447 }
10ef9ab4 2448 return max_work;
6b7c5b94
SP
2449}
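/* Editor's note on the NAPI contract observed in be_poll(): returning less
 * than budget tells the core the queues are drained, so napi_complete() runs
 * and the EQ is re-armed via be_eq_notify(); returning the full budget keeps
 * the instance scheduled, so events are counted and cleared but the EQ is
 * deliberately left unarmed to stay in polling mode.
 */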
2450
6384a4d0
SP
2451#ifdef CONFIG_NET_RX_BUSY_POLL
2452static int be_busy_poll(struct napi_struct *napi)
2453{
2454 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2455 struct be_adapter *adapter = eqo->adapter;
2456 struct be_rx_obj *rxo;
2457 int i, work = 0;
2458
2459 if (!be_lock_busy_poll(eqo))
2460 return LL_FLUSH_BUSY;
2461
2462 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2463 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2464 if (work)
2465 break;
2466 }
2467
2468 be_unlock_busy_poll(eqo);
2469 return work;
2470}
2471#endif
2472
f67ef7ba 2473void be_detect_error(struct be_adapter *adapter)
7c185276 2474{
e1cfb67a
PR
2475 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2476 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 2477 u32 i;
eb0eecc1
SK
2478 bool error_detected = false;
2479 struct device *dev = &adapter->pdev->dev;
2480 struct net_device *netdev = adapter->netdev;
7c185276 2481
d23e946c 2482 if (be_hw_error(adapter))
72f02485
SP
2483 return;
2484
e1cfb67a
PR
2485 if (lancer_chip(adapter)) {
2486 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2487 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2488 sliport_err1 = ioread32(adapter->db +
2489 SLIPORT_ERROR1_OFFSET);
2490 sliport_err2 = ioread32(adapter->db +
2491 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
2492 adapter->hw_error = true;
2493 /* Do not log error messages if it's a FW reset */
2494 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2495 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2496 dev_info(dev, "Firmware update in progress\n");
2497 } else {
2498 error_detected = true;
2499 dev_err(dev, "Error detected in the card\n");
2500 dev_err(dev, "ERR: sliport status 0x%x\n",
2501 sliport_status);
2502 dev_err(dev, "ERR: sliport error1 0x%x\n",
2503 sliport_err1);
2504 dev_err(dev, "ERR: sliport error2 0x%x\n",
2505 sliport_err2);
2506 }
e1cfb67a
PR
2507 }
2508 } else {
2509 pci_read_config_dword(adapter->pdev,
2510 PCICFG_UE_STATUS_LOW, &ue_lo);
2511 pci_read_config_dword(adapter->pdev,
2512 PCICFG_UE_STATUS_HIGH, &ue_hi);
2513 pci_read_config_dword(adapter->pdev,
2514 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2515 pci_read_config_dword(adapter->pdev,
2516 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2517
f67ef7ba
PR
2518 ue_lo = (ue_lo & ~ue_lo_mask);
2519 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 2520
eb0eecc1
SK
2521 /* On certain platforms BE hardware can indicate spurious UEs.
2522 * Allow HW to stop working completely in case of a real UE.
2523 * Hence not setting the hw_error for UE detection.
2524 */
f67ef7ba 2525
eb0eecc1
SK
2526 if (ue_lo || ue_hi) {
2527 error_detected = true;
2528 dev_err(dev,
2529 "Unrecoverable Error detected in the adapter");
2530 dev_err(dev, "Please reboot server to recover");
2531 if (skyhawk_chip(adapter))
2532 adapter->hw_error = true;
2533 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2534 if (ue_lo & 1)
2535 dev_err(dev, "UE: %s bit set\n",
2536 ue_status_low_desc[i]);
2537 }
2538 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2539 if (ue_hi & 1)
2540 dev_err(dev, "UE: %s bit set\n",
2541 ue_status_hi_desc[i]);
2542 }
7c185276
AK
2543 }
2544 }
eb0eecc1
SK
2545 if (error_detected)
2546 netif_carrier_off(netdev);
7c185276
AK
2547}
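/* Editor's note -- the UE decode loops above visit one bit per iteration:
 * for an assumed ue_lo of 0x00000005, bits 0 and 2 are set, so
 * ue_status_low_desc[0] and ue_status_low_desc[2] are printed, and the
 * "ue_lo >>= 1" shift ends the loop once no higher bits remain.
 */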
2548
8d56ff11
SP
2549static void be_msix_disable(struct be_adapter *adapter)
2550{
ac6a0c4a 2551 if (msix_enabled(adapter)) {
8d56ff11 2552 pci_disable_msix(adapter->pdev);
ac6a0c4a 2553 adapter->num_msix_vec = 0;
68d7bdcb 2554 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2555 }
2556}
2557
c2bba3df 2558static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2559{
7dc4c064 2560 int i, num_vec;
d379142b 2561 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2562
92bf14ab
SP
2563 /* If RoCE is supported, program the max number of NIC vectors that
2564 * may be configured via set-channels, along with vectors needed for
2565 * RoCE. Else, just program the number we'll use initially.
2566 */
2567 if (be_roce_supported(adapter))
2568 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2569 2 * num_online_cpus());
2570 else
2571 num_vec = adapter->cfg_num_qs;
3abcdeda 2572
ac6a0c4a 2573 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2574 adapter->msix_entries[i].entry = i;
2575
7dc4c064
AG
2576 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2577 MIN_MSIX_VECTORS, num_vec);
2578 if (num_vec < 0)
2579 goto fail;
92bf14ab 2580
92bf14ab
SP
2581 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2582 adapter->num_msix_roce_vec = num_vec / 2;
2583 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2584 adapter->num_msix_roce_vec);
2585 }
2586
2587 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2588
2589 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2590 adapter->num_msix_vec);
c2bba3df 2591 return 0;
7dc4c064
AG
2592
2593fail:
2594 dev_warn(dev, "MSIx enable failed\n");
2595
2596 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2597 if (!be_physfn(adapter))
2598 return num_vec;
2599 return 0;
6b7c5b94
SP
2600}
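/* Editor's note on pci_enable_msix_range() as used above: it enables between
 * MIN_MSIX_VECTORS and num_vec vectors and returns the count actually granted,
 * or a negative errno if even the minimum is unavailable -- hence the single
 * "num_vec < 0" check with no manual retry loop. Worked example (counts
 * assumed): with RoCE, 8 max EQs and 16 CPUs request min(16, 32) = 16
 * vectors; if only 10 are granted, num_vec / 2 = 5 go to RoCE and the
 * remaining 5 are left for NIC queues.
 */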
2601
fe6d2a38 2602static inline int be_msix_vec_get(struct be_adapter *adapter,
10ef9ab4 2603 struct be_eq_obj *eqo)
b628bde2 2604{
f2f781a7 2605 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2606}
6b7c5b94 2607
b628bde2
SP
2608static int be_msix_register(struct be_adapter *adapter)
2609{
10ef9ab4
SP
2610 struct net_device *netdev = adapter->netdev;
2611 struct be_eq_obj *eqo;
2612 int status, i, vec;
6b7c5b94 2613
10ef9ab4
SP
2614 for_all_evt_queues(adapter, eqo, i) {
2615 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2616 vec = be_msix_vec_get(adapter, eqo);
2617 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2618 if (status)
2619 goto err_msix;
2620 }
b628bde2 2621
6b7c5b94 2622 return 0;
3abcdeda 2623err_msix:
10ef9ab4
SP
2624 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2625 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2626 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2627 status);
ac6a0c4a 2628 be_msix_disable(adapter);
6b7c5b94
SP
2629 return status;
2630}
2631
2632static int be_irq_register(struct be_adapter *adapter)
2633{
2634 struct net_device *netdev = adapter->netdev;
2635 int status;
2636
ac6a0c4a 2637 if (msix_enabled(adapter)) {
6b7c5b94
SP
2638 status = be_msix_register(adapter);
2639 if (status == 0)
2640 goto done;
ba343c77
SB
2641 /* INTx is not supported for VF */
2642 if (!be_physfn(adapter))
2643 return status;
6b7c5b94
SP
2644 }
2645
e49cc34f 2646 /* INTx: only the first EQ is used */
6b7c5b94
SP
2647 netdev->irq = adapter->pdev->irq;
2648 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2649 &adapter->eq_obj[0]);
6b7c5b94
SP
2650 if (status) {
2651 dev_err(&adapter->pdev->dev,
2652 "INTx request IRQ failed - err %d\n", status);
2653 return status;
2654 }
2655done:
2656 adapter->isr_registered = true;
2657 return 0;
2658}
2659
2660static void be_irq_unregister(struct be_adapter *adapter)
2661{
2662 struct net_device *netdev = adapter->netdev;
10ef9ab4 2663 struct be_eq_obj *eqo;
3abcdeda 2664 int i;
6b7c5b94
SP
2665
2666 if (!adapter->isr_registered)
2667 return;
2668
2669 /* INTx */
ac6a0c4a 2670 if (!msix_enabled(adapter)) {
e49cc34f 2671 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2672 goto done;
2673 }
2674
2675 /* MSIx */
10ef9ab4
SP
2676 for_all_evt_queues(adapter, eqo, i)
2677 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2678
6b7c5b94
SP
2679done:
2680 adapter->isr_registered = false;
6b7c5b94
SP
2681}
2682
10ef9ab4 2683static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2684{
2685 struct be_queue_info *q;
2686 struct be_rx_obj *rxo;
2687 int i;
2688
2689 for_all_rx_queues(adapter, rxo, i) {
2690 q = &rxo->q;
2691 if (q->created) {
2692 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2693 be_rx_cq_clean(rxo);
482c9e79 2694 }
10ef9ab4 2695 be_queue_free(adapter, q);
482c9e79
SP
2696 }
2697}
2698
889cd4b2
SP
2699static int be_close(struct net_device *netdev)
2700{
2701 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2702 struct be_eq_obj *eqo;
2703 int i;
889cd4b2 2704
045508a8
PP
2705 be_roce_dev_close(adapter);
2706
dff345c5
IV
2707 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2708 for_all_evt_queues(adapter, eqo, i) {
04d3d624 2709 napi_disable(&eqo->napi);
6384a4d0
SP
2710 be_disable_busy_poll(eqo);
2711 }
71237b6f 2712 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 2713 }
a323d9bf
SP
2714
2715 be_async_mcc_disable(adapter);
2716
2717 /* Wait for all pending tx completions to arrive so that
2718 * all tx skbs are freed.
2719 */
fba87559 2720 netif_tx_disable(netdev);
6e1f9975 2721 be_tx_compl_clean(adapter);
a323d9bf
SP
2722
2723 be_rx_qs_destroy(adapter);
2724
d11a347d
AK
2725 for (i = 1; i < (adapter->uc_macs + 1); i++)
2726 be_cmd_pmac_del(adapter, adapter->if_handle,
2727 adapter->pmac_id[i], 0);
2728 adapter->uc_macs = 0;
2729
a323d9bf 2730 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2731 if (msix_enabled(adapter))
2732 synchronize_irq(be_msix_vec_get(adapter, eqo));
2733 else
2734 synchronize_irq(netdev->irq);
2735 be_eq_clean(eqo);
63fcb27f
PR
2736 }
2737
889cd4b2
SP
2738 be_irq_unregister(adapter);
2739
482c9e79
SP
2740 return 0;
2741}
2742
10ef9ab4 2743static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2744{
2745 struct be_rx_obj *rxo;
e9008ee9
PR
2746 int rc, i, j;
2747 u8 rsstable[128];
482c9e79
SP
2748
2749 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2750 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2751 sizeof(struct be_eth_rx_d));
2752 if (rc)
2753 return rc;
2754 }
2755
2756 /* The FW would like the default RXQ to be created first */
2757 rxo = default_rxo(adapter);
2758 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2759 adapter->if_handle, false, &rxo->rss_id);
2760 if (rc)
2761 return rc;
2762
2763 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2764 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2765 rx_frag_size, adapter->if_handle,
2766 true, &rxo->rss_id);
482c9e79
SP
2767 if (rc)
2768 return rc;
2769 }
2770
2771 if (be_multi_rxq(adapter)) {
e9008ee9
PR
2772 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2773 for_all_rss_queues(adapter, rxo, i) {
2774 if ((j + i) >= 128)
2775 break;
2776 rsstable[j + i] = rxo->rss_id;
2777 }
2778 }
594ad54a
SR
2779 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2780 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2781
2782 if (!BEx_chip(adapter))
2783 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2784 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
2785 } else {
2786 /* Disable RSS, if only default RX Q is created */
2787 adapter->rss_flags = RSS_ENABLE_NONE;
2788 }
594ad54a 2789
da1388d6
VV
2790 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2791 128);
2792 if (rc) {
2793 adapter->rss_flags = RSS_ENABLE_NONE;
2794 return rc;
482c9e79
SP
2795 }
2796
2797 /* First time posting */
10ef9ab4 2798 for_all_rx_queues(adapter, rxo, i)
482c9e79 2799 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2800 return 0;
2801}
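/* Editor's note -- a worked example of the rsstable fill above (ring count
 * assumed): with num_rx_qs = 5 (4 RSS rings plus the default RXQ), the outer
 * loop strides j by 4, so the 128-entry table becomes
 * { id0, id1, id2, id3, id0, id1, ... }, spreading flow hashes evenly across
 * the RSS rings while the default queue stays out of the table.
 */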
2802
6b7c5b94
SP
2803static int be_open(struct net_device *netdev)
2804{
2805 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2806 struct be_eq_obj *eqo;
3abcdeda 2807 struct be_rx_obj *rxo;
10ef9ab4 2808 struct be_tx_obj *txo;
b236916a 2809 u8 link_status;
3abcdeda 2810 int status, i;
5fb379ee 2811
10ef9ab4 2812 status = be_rx_qs_create(adapter);
482c9e79
SP
2813 if (status)
2814 goto err;
2815
c2bba3df
SK
2816 status = be_irq_register(adapter);
2817 if (status)
2818 goto err;
5fb379ee 2819
10ef9ab4 2820 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2821 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2822
10ef9ab4
SP
2823 for_all_tx_queues(adapter, txo, i)
2824 be_cq_notify(adapter, txo->cq.id, true, 0);
2825
7a1e9b20
SP
2826 be_async_mcc_enable(adapter);
2827
10ef9ab4
SP
2828 for_all_evt_queues(adapter, eqo, i) {
2829 napi_enable(&eqo->napi);
6384a4d0 2830 be_enable_busy_poll(eqo);
10ef9ab4
SP
2831 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2832 }
04d3d624 2833 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 2834
323ff71e 2835 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
2836 if (!status)
2837 be_link_status_update(adapter, link_status);
2838
fba87559 2839 netif_tx_start_all_queues(netdev);
045508a8 2840 be_roce_dev_open(adapter);
889cd4b2
SP
2841 return 0;
2842err:
2843 be_close(adapter->netdev);
2844 return -EIO;
5fb379ee
SP
2845}
2846
71d8d1b5
AK
2847static int be_setup_wol(struct be_adapter *adapter, bool enable)
2848{
2849 struct be_dma_mem cmd;
2850 int status = 0;
2851 u8 mac[ETH_ALEN];
2852
2853 memset(mac, 0, ETH_ALEN);
2854
2855 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
2856 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2857 GFP_KERNEL);
71d8d1b5
AK
2858 if (cmd.va == NULL)
2859 return -1;
71d8d1b5
AK
2860
2861 if (enable) {
2862 status = pci_write_config_dword(adapter->pdev,
2863 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2864 if (status) {
2865 dev_err(&adapter->pdev->dev,
2381a55c 2866 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2867 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2868 cmd.dma);
71d8d1b5
AK
2869 return status;
2870 }
2871 status = be_cmd_enable_magic_wol(adapter,
2872 adapter->netdev->dev_addr, &cmd);
2873 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2874 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2875 } else {
2876 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2877 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2878 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2879 }
2880
2b7bcebf 2881 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2882 return status;
2883}
2884
6d87f5c3
AK
2885/*
2886 * Generate a seed MAC address from the PF MAC Address using jhash.
2887 * MAC addresses for VFs are assigned incrementally starting from the seed.
2888 * These addresses are programmed in the ASIC by the PF and the VF driver
2889 * queries for the MAC address during its probe.
2890 */
4c876616 2891static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 2892{
f9449ab7 2893 u32 vf;
3abcdeda 2894 int status = 0;
6d87f5c3 2895 u8 mac[ETH_ALEN];
11ac75ed 2896 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2897
2898 be_vf_eth_addr_generate(adapter, mac);
2899
11ac75ed 2900 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 2901 if (BEx_chip(adapter))
590c391d 2902 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
2903 vf_cfg->if_handle,
2904 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
2905 else
2906 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2907 vf + 1);
590c391d 2908
6d87f5c3
AK
2909 if (status)
2910 dev_err(&adapter->pdev->dev,
590c391d 2911 "Mac address assignment failed for VF %d\n", vf);
6d87f5c3 2912 else
11ac75ed 2913 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
2914
2915 mac[5] += 1;
2916 }
2917 return status;
2918}
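/* Editor's note -- an illustrative run of the loop above (seed bytes assumed;
 * the real seed comes from be_vf_eth_addr_generate()): with a seed ending in
 * :10, VF0 is given ...:10, VF1 ...:11, and so on via "mac[5] += 1". The u8
 * increment varies only the last octet, so the loop implicitly assumes the
 * seed leaves enough room below 0xff for the configured number of VFs.
 */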
2919
4c876616
SP
2920static int be_vfs_mac_query(struct be_adapter *adapter)
2921{
2922 int status, vf;
2923 u8 mac[ETH_ALEN];
2924 struct be_vf_cfg *vf_cfg;
4c876616
SP
2925
2926 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
2927 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2928 mac, vf_cfg->if_handle,
2929 false, vf+1);
4c876616
SP
2930 if (status)
2931 return status;
2932 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2933 }
2934 return 0;
2935}
2936
f9449ab7 2937static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 2938{
11ac75ed 2939 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2940 u32 vf;
2941
257a3feb 2942 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
2943 dev_warn(&adapter->pdev->dev,
2944 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
2945 goto done;
2946 }
2947
b4c1df93
SP
2948 pci_disable_sriov(adapter->pdev);
2949
11ac75ed 2950 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 2951 if (BEx_chip(adapter))
11ac75ed
SP
2952 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2953 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
2954 else
2955 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2956 vf + 1);
f9449ab7 2957
11ac75ed
SP
2958 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2959 }
39f1d94d
SP
2960done:
2961 kfree(adapter->vf_cfg);
2962 adapter->num_vfs = 0;
6d87f5c3
AK
2963}
2964
7707133c
SP
2965static void be_clear_queues(struct be_adapter *adapter)
2966{
2967 be_mcc_queues_destroy(adapter);
2968 be_rx_cqs_destroy(adapter);
2969 be_tx_queues_destroy(adapter);
2970 be_evt_queues_destroy(adapter);
2971}
2972
68d7bdcb 2973static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 2974{
191eb756
SP
2975 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2976 cancel_delayed_work_sync(&adapter->work);
2977 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2978 }
68d7bdcb
SP
2979}
2980
b05004ad 2981static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb
SP
2982{
2983 int i;
2984
b05004ad
SK
2985 if (adapter->pmac_id) {
2986 for (i = 0; i < (adapter->uc_macs + 1); i++)
2987 be_cmd_pmac_del(adapter, adapter->if_handle,
2988 adapter->pmac_id[i], 0);
2989 adapter->uc_macs = 0;
2990
2991 kfree(adapter->pmac_id);
2992 adapter->pmac_id = NULL;
2993 }
2994}
2995
2996static int be_clear(struct be_adapter *adapter)
2997{
68d7bdcb 2998 be_cancel_worker(adapter);
191eb756 2999
11ac75ed 3000 if (sriov_enabled(adapter))
f9449ab7
SP
3001 be_vf_clear(adapter);
3002
2d17f403 3003 /* delete the primary mac along with the uc-mac list */
b05004ad 3004 be_mac_clear(adapter);
fbc13f01 3005
f9449ab7 3006 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 3007
7707133c 3008 be_clear_queues(adapter);
a54769f5 3009
10ef9ab4 3010 be_msix_disable(adapter);
a54769f5
SP
3011 return 0;
3012}
3013
4c876616 3014static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3015{
92bf14ab 3016 struct be_resources res = {0};
4c876616
SP
3017 struct be_vf_cfg *vf_cfg;
3018 u32 cap_flags, en_flags, vf;
922bbe88 3019 int status = 0;
abb93951 3020
4c876616
SP
3021 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3022 BE_IF_FLAGS_MULTICAST;
abb93951 3023
4c876616 3024 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
3025 if (!BE3_chip(adapter)) {
3026 status = be_cmd_get_profile_config(adapter, &res,
3027 vf + 1);
3028 if (!status)
3029 cap_flags = res.if_cap_flags;
3030 }
4c876616
SP
3031
3032 /* If a FW profile exists, then cap_flags are updated */
3033 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3034 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
3035 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3036 &vf_cfg->if_handle, vf + 1);
3037 if (status)
3038 goto err;
3039 }
3040err:
3041 return status;
abb93951
PR
3042}
3043
39f1d94d 3044static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3045{
11ac75ed 3046 struct be_vf_cfg *vf_cfg;
30128031
SP
3047 int vf;
3048
39f1d94d
SP
3049 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3050 GFP_KERNEL);
3051 if (!adapter->vf_cfg)
3052 return -ENOMEM;
3053
11ac75ed
SP
3054 for_all_vfs(adapter, vf_cfg, vf) {
3055 vf_cfg->if_handle = -1;
3056 vf_cfg->pmac_id = -1;
30128031 3057 }
39f1d94d 3058 return 0;
30128031
SP
3059}
3060
f9449ab7
SP
3061static int be_vf_setup(struct be_adapter *adapter)
3062{
c502224e 3063 struct device *dev = &adapter->pdev->dev;
11ac75ed 3064 struct be_vf_cfg *vf_cfg;
4c876616 3065 int status, old_vfs, vf;
04a06028 3066 u32 privileges;
c502224e 3067 u16 lnk_speed;
39f1d94d 3068
257a3feb 3069 old_vfs = pci_num_vf(adapter->pdev);
4c876616
SP
3070 if (old_vfs) {
3071 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3072 if (old_vfs != num_vfs)
3073 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3074 adapter->num_vfs = old_vfs;
39f1d94d 3075 } else {
92bf14ab 3076 if (num_vfs > be_max_vfs(adapter))
4c876616 3077 dev_info(dev, "Device supports %d VFs and not %d\n",
92bf14ab
SP
3078 be_max_vfs(adapter), num_vfs);
3079 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
b4c1df93 3080 if (!adapter->num_vfs)
4c876616 3081 return 0;
39f1d94d
SP
3082 }
3083
3084 status = be_vf_setup_init(adapter);
3085 if (status)
3086 goto err;
30128031 3087
4c876616
SP
3088 if (old_vfs) {
3089 for_all_vfs(adapter, vf_cfg, vf) {
3090 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3091 if (status)
3092 goto err;
3093 }
3094 } else {
3095 status = be_vfs_if_create(adapter);
f9449ab7
SP
3096 if (status)
3097 goto err;
f9449ab7
SP
3098 }
3099
4c876616
SP
3100 if (old_vfs) {
3101 status = be_vfs_mac_query(adapter);
3102 if (status)
3103 goto err;
3104 } else {
39f1d94d
SP
3105 status = be_vf_eth_addr_config(adapter);
3106 if (status)
3107 goto err;
3108 }
f9449ab7 3109
11ac75ed 3110 for_all_vfs(adapter, vf_cfg, vf) {
04a06028
SP
3111 /* Allow VFs to program MAC/VLAN filters */
3112 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3113 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3114 status = be_cmd_set_fn_privileges(adapter,
3115 privileges |
3116 BE_PRIV_FILTMGMT,
3117 vf + 1);
3118 if (!status)
3119 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3120 vf);
3121 }
3122
4c876616
SP
3123 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
3124 * Allow full available bandwidth
3125 */
3126 if (BE3_chip(adapter) && !old_vfs)
3127 be_cmd_set_qos(adapter, 1000, vf+1);
3128
3129 status = be_cmd_link_status_query(adapter, &lnk_speed,
3130 NULL, vf + 1);
3131 if (!status)
3132 vf_cfg->tx_rate = lnk_speed;
f1f3ee1b 3133
bdce2ad7 3134 if (!old_vfs) {
0599863d 3135 be_cmd_enable_vf(adapter, vf + 1);
bdce2ad7
SR
3136 be_cmd_set_logical_link_config(adapter,
3137 IFLA_VF_LINK_STATE_AUTO,
3138 vf+1);
3139 }
f9449ab7 3140 }
b4c1df93
SP
3141
3142 if (!old_vfs) {
3143 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3144 if (status) {
3145 dev_err(dev, "SRIOV enable failed\n");
3146 adapter->num_vfs = 0;
3147 goto err;
3148 }
3149 }
f9449ab7
SP
3150 return 0;
3151err:
4c876616
SP
3152 dev_err(dev, "VF setup failed\n");
3153 be_vf_clear(adapter);
f9449ab7
SP
3154 return status;
3155}
3156
f93f160b
VV
3157/* Converting function_mode bits on BE3 to SH mc_type enums */
3158
3159static u8 be_convert_mc_type(u32 function_mode)
3160{
3161 if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
3162 return vNIC1;
3163 else if (function_mode & FLEX10_MODE)
3164 return FLEX10;
3165 else if (function_mode & VNIC_MODE)
3166 return vNIC2;
3167 else if (function_mode & UMC_ENABLED)
3168 return UMC;
3169 else
3170 return MC_NONE;
3171}
3172
92bf14ab
SP
3173 /* On BE2/BE3, the FW does not report the supported limits */
3174static void BEx_get_resources(struct be_adapter *adapter,
3175 struct be_resources *res)
3176{
3177 struct pci_dev *pdev = adapter->pdev;
3178 bool use_sriov = false;
ecf1f6e1
SR
3179 int max_vfs = 0;
3180
3181 if (be_physfn(adapter) && BE3_chip(adapter)) {
3182 be_cmd_get_profile_config(adapter, res, 0);
3183 /* Some old versions of BE3 FW don't report max_vfs value */
3184 if (res->max_vfs == 0) {
3185 max_vfs = pci_sriov_get_totalvfs(pdev);
3186 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3187 }
3188 use_sriov = res->max_vfs && sriov_want(adapter);
92bf14ab
SP
3189 }
3190
3191 if (be_physfn(adapter))
3192 res->max_uc_mac = BE_UC_PMAC_COUNT;
3193 else
3194 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3195
f93f160b
VV
3196 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3197
3198 if (be_is_mc(adapter)) {
3199 /* Assuming that there are 4 channels per port,
3200 * when multi-channel is enabled
3201 */
3202 if (be_is_qnq_mode(adapter))
3203 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3204 else
3205 /* In a non-qnq multichannel mode, the pvid
3206 * takes up one vlan entry
3207 */
3208 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3209 } else {
92bf14ab 3210 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
f93f160b
VV
3211 }
3212
92bf14ab
SP
3213 res->max_mcast_mac = BE_MAX_MC;
3214
a5243dab
VV
3215 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3216 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3217 * *only* if it is RSS-capable.
3218 */
3219 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3220 !be_physfn(adapter) || (be_is_mc(adapter) &&
3221 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
92bf14ab
SP
3222 res->max_tx_qs = 1;
3223 else
3224 res->max_tx_qs = BE3_MAX_TX_QS;
3225
3226 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3227 !use_sriov && be_physfn(adapter))
3228 res->max_rss_qs = (adapter->be3_native) ?
3229 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3230 res->max_rx_qs = res->max_rss_qs + 1;
3231
e3dc867c 3232 if (be_physfn(adapter))
ecf1f6e1 3233 res->max_evt_qs = (res->max_vfs > 0) ?
e3dc867c
SR
3234 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3235 else
3236 res->max_evt_qs = 1;
92bf14ab
SP
3237
3238 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3239 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3240 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3241}
3242
30128031
SP
3243static void be_setup_init(struct be_adapter *adapter)
3244{
3245 adapter->vlan_prio_bmap = 0xff;
42f11cf2 3246 adapter->phy.link_speed = -1;
30128031
SP
3247 adapter->if_handle = -1;
3248 adapter->be3_native = false;
3249 adapter->promiscuous = false;
f25b119c
PR
3250 if (be_physfn(adapter))
3251 adapter->cmd_privileges = MAX_PRIVILEGES;
3252 else
3253 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
3254}
3255
92bf14ab 3256static int be_get_resources(struct be_adapter *adapter)
abb93951 3257{
92bf14ab
SP
3258 struct device *dev = &adapter->pdev->dev;
3259 struct be_resources res = {0};
3260 int status;
abb93951 3261
92bf14ab
SP
3262 if (BEx_chip(adapter)) {
3263 BEx_get_resources(adapter, &res);
3264 adapter->res = res;
abb93951
PR
3265 }
3266
92bf14ab
SP
3267 /* For Lancer, SH etc., read per-function resource limits from FW.
 3268 * GET_FUNC_CONFIG returns per-function guaranteed limits.
 3269 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
3270 */
3271 if (!BEx_chip(adapter)) {
3272 status = be_cmd_get_func_config(adapter, &res);
3273 if (status)
3274 return status;
abb93951 3275
92bf14ab
SP
3276 /* If RoCE may be enabled stash away half the EQs for RoCE */
3277 if (be_roce_supported(adapter))
3278 res.max_evt_qs /= 2;
3279 adapter->res = res;
abb93951 3280
92bf14ab
SP
3281 if (be_physfn(adapter)) {
3282 status = be_cmd_get_profile_config(adapter, &res, 0);
3283 if (status)
3284 return status;
3285 adapter->res.max_vfs = res.max_vfs;
3286 }
abb93951 3287
92bf14ab
SP
3288 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3289 be_max_txqs(adapter), be_max_rxqs(adapter),
3290 be_max_rss(adapter), be_max_eqs(adapter),
3291 be_max_vfs(adapter));
3292 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3293 be_max_uc(adapter), be_max_mc(adapter),
3294 be_max_vlans(adapter));
abb93951 3295 }
4c876616 3296
92bf14ab 3297 return 0;
abb93951
PR
3298}
3299
39f1d94d
SP
3300 /* Routine to query per-function resource limits */
3301static int be_get_config(struct be_adapter *adapter)
3302{
542963b7 3303 u16 profile_id;
4c876616 3304 int status;
39f1d94d 3305
abb93951
PR
3306 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3307 &adapter->function_mode,
0ad3157e
VV
3308 &adapter->function_caps,
3309 &adapter->asic_rev);
abb93951 3310 if (status)
92bf14ab 3311 return status;
abb93951 3312
542963b7
VV
3313 if (be_physfn(adapter)) {
3314 status = be_cmd_get_active_profile(adapter, &profile_id);
3315 if (!status)
3316 dev_info(&adapter->pdev->dev,
3317 "Using profile 0x%x\n", profile_id);
3318 }
3319
92bf14ab
SP
3320 status = be_get_resources(adapter);
3321 if (status)
3322 return status;
abb93951 3323
46ee9c14
RN
3324 adapter->pmac_id = kcalloc(be_max_uc(adapter),
3325 sizeof(*adapter->pmac_id), GFP_KERNEL);
92bf14ab
SP
3326 if (!adapter->pmac_id)
3327 return -ENOMEM;
abb93951 3328
92bf14ab
SP
3329 /* Sanitize cfg_num_qs based on HW and platform limits */
3330 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3331
3332 return 0;
39f1d94d
SP
3333}
3334
95046b92
SP
3335static int be_mac_setup(struct be_adapter *adapter)
3336{
3337 u8 mac[ETH_ALEN];
3338 int status;
3339
3340 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3341 status = be_cmd_get_perm_mac(adapter, mac);
3342 if (status)
3343 return status;
3344
3345 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3346 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3347 } else {
3348 /* Maybe the HW was reset; dev_addr must be re-programmed */
3349 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3350 }
3351
2c7a9dc1
AK
3352 /* For BE3-R VFs, the PF programs the initial MAC address */
3353 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3354 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3355 &adapter->pmac_id[0], 0);
95046b92
SP
3356 return 0;
3357}
3358
68d7bdcb
SP
3359static void be_schedule_worker(struct be_adapter *adapter)
3360{
3361 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3362 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3363}
3364
7707133c 3365static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 3366{
68d7bdcb 3367 struct net_device *netdev = adapter->netdev;
10ef9ab4 3368 int status;
ba343c77 3369
7707133c 3370 status = be_evt_queues_create(adapter);
abb93951
PR
3371 if (status)
3372 goto err;
73d540f2 3373
7707133c 3374 status = be_tx_qs_create(adapter);
c2bba3df
SK
3375 if (status)
3376 goto err;
10ef9ab4 3377
7707133c 3378 status = be_rx_cqs_create(adapter);
10ef9ab4 3379 if (status)
a54769f5 3380 goto err;
6b7c5b94 3381
7707133c 3382 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
3383 if (status)
3384 goto err;
3385
68d7bdcb
SP
3386 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3387 if (status)
3388 goto err;
3389
3390 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3391 if (status)
3392 goto err;
3393
7707133c
SP
3394 return 0;
3395err:
3396 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3397 return status;
3398}
3399
68d7bdcb
SP
3400int be_update_queues(struct be_adapter *adapter)
3401{
3402 struct net_device *netdev = adapter->netdev;
3403 int status;
3404
3405 if (netif_running(netdev))
3406 be_close(netdev);
3407
3408 be_cancel_worker(adapter);
3409
3410 /* If any vectors have been shared with RoCE we cannot re-program
3411 * the MSIx table.
3412 */
3413 if (!adapter->num_msix_roce_vec)
3414 be_msix_disable(adapter);
3415
3416 be_clear_queues(adapter);
3417
3418 if (!msix_enabled(adapter)) {
3419 status = be_msix_enable(adapter);
3420 if (status)
3421 return status;
3422 }
3423
3424 status = be_setup_queues(adapter);
3425 if (status)
3426 return status;
3427
3428 be_schedule_worker(adapter);
3429
3430 if (netif_running(netdev))
3431 status = be_open(netdev);
3432
3433 return status;
3434}
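
be_update_queues() relies on a strict close -> tear down -> re-create -> reopen ordering so queue counts can change while the netdev stays registered. A sketch of that ordering with hypothetical callbacks (not the driver's types):

/* Generic quiesce-reconfigure-resume skeleton; every step returns
 * 0 on success, mirroring the flow of be_update_queues() above.
 */
struct reconfig_ops {
	int  (*close)(void *dev);	/* stop traffic first */
	void (*teardown)(void *dev);	/* free old queues/vectors */
	int  (*setup)(void *dev);	/* allocate with new counts */
	int  (*open)(void *dev);	/* restart traffic */
};

static int reconfig(const struct reconfig_ops *ops, void *dev, int running)
{
	int err;

	if (running)
		ops->close(dev);
	ops->teardown(dev);
	err = ops->setup(dev);
	if (err)
		return err;
	return running ? ops->open(dev) : 0;
}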
3435
7707133c
SP
3436static int be_setup(struct be_adapter *adapter)
3437{
3438 struct device *dev = &adapter->pdev->dev;
3439 u32 tx_fc, rx_fc, en_flags;
3440 int status;
3441
3442 be_setup_init(adapter);
3443
3444 if (!lancer_chip(adapter))
3445 be_cmd_req_native_mode(adapter);
3446
3447 status = be_get_config(adapter);
10ef9ab4 3448 if (status)
a54769f5 3449 goto err;
6b7c5b94 3450
7707133c 3451 status = be_msix_enable(adapter);
10ef9ab4 3452 if (status)
a54769f5 3453 goto err;
6b7c5b94 3454
f9449ab7 3455 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
7707133c 3456 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
abb93951 3457 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
f9449ab7 3458 en_flags |= BE_IF_FLAGS_RSS;
92bf14ab
SP
3459 en_flags = en_flags & be_if_cap_flags(adapter);
3460 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
1578e777 3461 &adapter->if_handle, 0);
7707133c 3462 if (status)
a54769f5 3463 goto err;
6b7c5b94 3464
68d7bdcb
SP
3465 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3466 rtnl_lock();
7707133c 3467 status = be_setup_queues(adapter);
68d7bdcb 3468 rtnl_unlock();
95046b92 3469 if (status)
1578e777
PR
3470 goto err;
3471
7707133c 3472 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
7707133c
SP
3473
3474 status = be_mac_setup(adapter);
10ef9ab4
SP
3475 if (status)
3476 goto err;
3477
eeb65ced 3478 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
5a56eb10 3479
e9e2a904
SK
3480 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3481 dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
3482 adapter->fw_ver);
3483 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3484 }
3485
1d1e9a46 3486 if (adapter->vlans_added)
10329df8 3487 be_vid_config(adapter);
7ab8b0b4 3488
a54769f5 3489 be_set_rx_mode(adapter->netdev);
5fb379ee 3490
76a9e08e
SR
3491 be_cmd_get_acpi_wol_cap(adapter);
3492
ddc3f5cb 3493 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 3494
ddc3f5cb
AK
3495 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3496 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 3497 adapter->rx_fc);
2dc1deb6 3498
bdce2ad7
SR
3499 if (be_physfn(adapter))
3500 be_cmd_set_logical_link_config(adapter,
3501 IFLA_VF_LINK_STATE_AUTO, 0);
3502
b905b5d4 3503 if (sriov_want(adapter)) {
92bf14ab 3504 if (be_max_vfs(adapter))
39f1d94d
SP
3505 be_vf_setup(adapter);
3506 else
3507 dev_warn(dev, "device doesn't support SRIOV\n");
f9449ab7
SP
3508 }
3509
f25b119c
PR
3510 status = be_cmd_get_phy_info(adapter);
3511 if (!status && be_pause_supported(adapter))
42f11cf2
AK
3512 adapter->phy.fc_autoneg = 1;
3513
68d7bdcb 3514 be_schedule_worker(adapter);
f9449ab7 3515 return 0;
a54769f5
SP
3516err:
3517 be_clear(adapter);
3518 return status;
3519}
6b7c5b94 3520
66268739
IV
3521#ifdef CONFIG_NET_POLL_CONTROLLER
3522static void be_netpoll(struct net_device *netdev)
3523{
3524 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3525 struct be_eq_obj *eqo;
66268739
IV
3526 int i;
3527
e49cc34f
SP
3528 for_all_evt_queues(adapter, eqo, i) {
3529 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3530 napi_schedule(&eqo->napi);
3531 }
10ef9ab4
SP
3532
3533 return;
66268739
IV
3534}
3535#endif
3536
84517482 3537#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
4188e7df 3538static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
c165541e 3539
fa9a6fed 3540static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
3541 const u8 *p, u32 img_start, int image_size,
3542 int hdr_size)
fa9a6fed
SB
3543{
3544 u32 crc_offset;
3545 u8 flashed_crc[4];
3546 int status;
3f0d4560
AK
3547
3548 crc_offset = hdr_size + img_start + image_size - 4;
3549
fa9a6fed 3550 p += crc_offset;
3f0d4560
AK
3551
3552 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 3553 (image_size - 4));
fa9a6fed
SB
3554 if (status) {
3555 dev_err(&adapter->pdev->dev,
3556 "could not get crc from flash, not flashing redboot\n");
3557 return false;
3558 }
3559
3560 /* update redboot only if crc does not match */
3561 if (!memcmp(flashed_crc, p, 4))
3562 return false;
3563 else
3564 return true;
fa9a6fed
SB
3565}
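
be_flash_redboot() gates the boot-code write on the trailing 4-byte CRC: if the CRC read back from flash equals the one at the end of the new image, the write is skipped. The same check in self-contained form (flashed_crc would come from a command like be_cmd_get_flash_crc()):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Returns true when the image should be flashed, i.e. when its
 * trailing 4-byte CRC differs from what is already on flash.
 */
static bool image_needs_flash(const uint8_t *img, size_t img_size,
			      const uint8_t flashed_crc[4])
{
	if (img_size < 4)
		return false;	/* malformed image: nothing to compare */
	return memcmp(flashed_crc, img + img_size - 4, 4) != 0;
}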
3566
306f1348
SP
3567static bool phy_flashing_required(struct be_adapter *adapter)
3568{
42f11cf2
AK
3569 return (adapter->phy.phy_type == TN_8022 &&
3570 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3571}
3572
c165541e
PR
3573static bool is_comp_in_ufi(struct be_adapter *adapter,
3574 struct flash_section_info *fsec, int type)
3575{
3576 int i = 0, img_type = 0;
3577 struct flash_section_info_g2 *fsec_g2 = NULL;
3578
ca34fe38 3579 if (BE2_chip(adapter))
c165541e
PR
3580 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3581
3582 for (i = 0; i < MAX_FLASH_COMP; i++) {
3583 if (fsec_g2)
3584 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3585 else
3586 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3587
3588 if (img_type == type)
3589 return true;
3590 }
3591 return false;
3592
3593}
3594
4188e7df 3595static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
c165541e
PR
3596 int header_size,
3597 const struct firmware *fw)
3598{
3599 struct flash_section_info *fsec = NULL;
3600 const u8 *p = fw->data;
3601
3602 p += header_size;
3603 while (p < (fw->data + fw->size)) {
3604 fsec = (struct flash_section_info *)p;
3605 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3606 return fsec;
3607 p += 32;
3608 }
3609 return NULL;
3610}
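
get_fsec_info() scans the UFI payload in fixed 32-byte steps until it finds the flash-directory cookie. The scan loop in isolation (stride and cookie are parameters here; in the driver they are the 32-byte step and flash_cookie):

#include <stddef.h>
#include <string.h>

/* Return the first position in buf that begins with the cookie,
 * advancing stride bytes per step; NULL if it is never found.
 */
static const void *find_cookie(const void *buf, size_t len,
			       const void *cookie, size_t cookie_len,
			       size_t stride)
{
	const char *p = buf;
	const char *end = p + len;

	while (p + cookie_len <= end) {
		if (!memcmp(p, cookie, cookie_len))
			return p;
		p += stride;
	}
	return NULL;
}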
3611
773a2d7c
PR
3612static int be_flash(struct be_adapter *adapter, const u8 *img,
3613 struct be_dma_mem *flash_cmd, int optype, int img_size)
3614{
3615 u32 total_bytes = 0, flash_op, num_bytes = 0;
3616 int status = 0;
3617 struct be_cmd_write_flashrom *req = flash_cmd->va;
3618
3619 total_bytes = img_size;
3620 while (total_bytes) {
3621 num_bytes = min_t(u32, 32*1024, total_bytes);
3622
3623 total_bytes -= num_bytes;
3624
3625 if (!total_bytes) {
3626 if (optype == OPTYPE_PHY_FW)
3627 flash_op = FLASHROM_OPER_PHY_FLASH;
3628 else
3629 flash_op = FLASHROM_OPER_FLASH;
3630 } else {
3631 if (optype == OPTYPE_PHY_FW)
3632 flash_op = FLASHROM_OPER_PHY_SAVE;
3633 else
3634 flash_op = FLASHROM_OPER_SAVE;
3635 }
3636
be716446 3637 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
3638 img += num_bytes;
3639 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3640 flash_op, num_bytes);
3641 if (status) {
3642 if (status == ILLEGAL_IOCTL_REQ &&
3643 optype == OPTYPE_PHY_FW)
3644 break;
3645 dev_err(&adapter->pdev->dev,
3646 "cmd to write to flash rom failed.\n");
3647 return status;
3648 }
3649 }
3650 return 0;
3651}
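
be_flash() pushes an image down in 32 KB chunks and only issues the "flash" op on the final chunk; earlier chunks use the "save" op so the controller buffers them. A compressed sketch of that decision (the op names are assumed stand-ins for the FLASHROM_OPER_* codes):

#include <stddef.h>
#include <stdint.h>

enum flash_op { OP_SAVE, OP_FLASH };	/* assumed stand-ins, for illustration */

#define CHUNK (32 * 1024)

/* write_fn stands in for be_cmd_write_flashrom(); it receives the op
 * for this chunk and the number of bytes to transfer.
 */
static int flash_image(const uint8_t *img, size_t size,
		       int (*write_fn)(enum flash_op, const uint8_t *, size_t))
{
	while (size) {
		size_t n = size < CHUNK ? size : CHUNK;
		enum flash_op op = (size == n) ? OP_FLASH : OP_SAVE;
		int err = write_fn(op, img, n);

		if (err)
			return err;
		img += n;
		size -= n;
	}
	return 0;
}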
3652
0ad3157e 3653/* For BE2, BE3 and BE3-R */
ca34fe38 3654static int be_flash_BEx(struct be_adapter *adapter,
c165541e
PR
3655 const struct firmware *fw,
3656 struct be_dma_mem *flash_cmd,
3657 int num_of_images)
3f0d4560 3658
84517482 3659{
3f0d4560 3660 int status = 0, i, filehdr_size = 0;
c165541e 3661 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
84517482 3662 const u8 *p = fw->data;
215faf9c 3663 const struct flash_comp *pflashcomp;
773a2d7c 3664 int num_comp, redboot;
c165541e
PR
3665 struct flash_section_info *fsec = NULL;
3666
3667 struct flash_comp gen3_flash_types[] = {
3668 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3669 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3670 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3671 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3672 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3673 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3674 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3675 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3676 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3677 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3678 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3679 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3680 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3681 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3682 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3683 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3684 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3685 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3686 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3687 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 3688 };
c165541e
PR
3689
3690 struct flash_comp gen2_flash_types[] = {
3691 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3692 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3693 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3694 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3695 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3696 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3697 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3698 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3699 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3700 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3701 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3702 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3703 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3704 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3705 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3706 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
3707 };
3708
ca34fe38 3709 if (BE3_chip(adapter)) {
3f0d4560
AK
3710 pflashcomp = gen3_flash_types;
3711 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 3712 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
3713 } else {
3714 pflashcomp = gen2_flash_types;
3715 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 3716 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 3717 }
ca34fe38 3718
c165541e
PR
3719 /* Get flash section info*/
3720 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3721 if (!fsec) {
3722 dev_err(&adapter->pdev->dev,
3723 "Invalid Cookie. UFI corrupted ?\n");
3724 return -1;
3725 }
9fe96934 3726 for (i = 0; i < num_comp; i++) {
c165541e 3727 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 3728 continue;
c165541e
PR
3729
3730 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3731 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3732 continue;
3733
773a2d7c
PR
3734 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3735 !phy_flashing_required(adapter))
306f1348 3736 continue;
c165541e 3737
773a2d7c
PR
3738 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3739 redboot = be_flash_redboot(adapter, fw->data,
3740 pflashcomp[i].offset, pflashcomp[i].size,
3741 filehdr_size + img_hdrs_size);
3742 if (!redboot)
3743 continue;
3744 }
c165541e 3745
3f0d4560 3746 p = fw->data;
c165541e 3747 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
306f1348
SP
3748 if (p + pflashcomp[i].size > fw->data + fw->size)
3749 return -1;
773a2d7c
PR
3750
3751 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3752 pflashcomp[i].size);
3753 if (status) {
3754 dev_err(&adapter->pdev->dev,
3755 "Flashing section type %d failed.\n",
3756 pflashcomp[i].img_type);
3757 return status;
84517482 3758 }
84517482 3759 }
84517482
AK
3760 return 0;
3761}
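
be_flash_BEx() is table-driven: each entry records where a component lives in the UFI, how many bytes to write, and which opcode writes it. A reduced sketch of the loop (the callbacks stand in for is_comp_in_ufi() and be_flash()):

#include <stddef.h>
#include <stdint.h>

struct comp {
	uint32_t offset;	/* location of the image within the UFI */
	uint32_t size;		/* bytes to write */
	int optype;		/* opcode understood by the flash command */
};

static int flash_components(const struct comp *tbl, size_t n,
			    const uint8_t *ufi, size_t ufi_size,
			    int (*present)(const struct comp *),
			    int (*flash_one)(const struct comp *, const uint8_t *))
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (!present(&tbl[i]))
			continue;
		if (tbl[i].offset + tbl[i].size > ufi_size)
			return -1;	/* image would run past the file */
		if (flash_one(&tbl[i], ufi + tbl[i].offset))
			return -1;
	}
	return 0;
}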
3762
773a2d7c
PR
3763static int be_flash_skyhawk(struct be_adapter *adapter,
3764 const struct firmware *fw,
3765 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 3766{
773a2d7c
PR
3767 int status = 0, i, filehdr_size = 0;
3768 int img_offset, img_size, img_optype, redboot;
3769 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3770 const u8 *p = fw->data;
3771 struct flash_section_info *fsec = NULL;
3772
3773 filehdr_size = sizeof(struct flash_file_hdr_g3);
3774 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3775 if (!fsec) {
3776 dev_err(&adapter->pdev->dev,
3777 "Invalid Cookie. UFI corrupted ?\n");
3778 return -1;
3779 }
3780
3781 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3782 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3783 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3784
3785 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3786 case IMAGE_FIRMWARE_iSCSI:
3787 img_optype = OPTYPE_ISCSI_ACTIVE;
3788 break;
3789 case IMAGE_BOOT_CODE:
3790 img_optype = OPTYPE_REDBOOT;
3791 break;
3792 case IMAGE_OPTION_ROM_ISCSI:
3793 img_optype = OPTYPE_BIOS;
3794 break;
3795 case IMAGE_OPTION_ROM_PXE:
3796 img_optype = OPTYPE_PXE_BIOS;
3797 break;
3798 case IMAGE_OPTION_ROM_FCoE:
3799 img_optype = OPTYPE_FCOE_BIOS;
3800 break;
3801 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3802 img_optype = OPTYPE_ISCSI_BACKUP;
3803 break;
3804 case IMAGE_NCSI:
3805 img_optype = OPTYPE_NCSI_FW;
3806 break;
3807 default:
3808 continue;
3809 }
3810
3811 if (img_optype == OPTYPE_REDBOOT) {
3812 redboot = be_flash_redboot(adapter, fw->data,
3813 img_offset, img_size,
3814 filehdr_size + img_hdrs_size);
3815 if (!redboot)
3816 continue;
3817 }
3818
3819 p = fw->data;
3820 p += filehdr_size + img_offset + img_hdrs_size;
3821 if (p + img_size > fw->data + fw->size)
3822 return -1;
3823
3824 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3825 if (status) {
3826 dev_err(&adapter->pdev->dev,
3827 "Flashing section type %d failed.\n",
3828 le32_to_cpu(fsec->fsec_entry[i].type));
3829 return status;
3830 }
3831 }
3832 return 0;
3f0d4560
AK
3833}
3834
485bf569
SN
3835static int lancer_fw_download(struct be_adapter *adapter,
3836 const struct firmware *fw)
84517482 3837{
485bf569
SN
3838#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3839#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 3840 struct be_dma_mem flash_cmd;
485bf569
SN
3841 const u8 *data_ptr = NULL;
3842 u8 *dest_image_ptr = NULL;
3843 size_t image_size = 0;
3844 u32 chunk_size = 0;
3845 u32 data_written = 0;
3846 u32 offset = 0;
3847 int status = 0;
3848 u8 add_status = 0;
f67ef7ba 3849 u8 change_status;
84517482 3850
485bf569 3851 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 3852 dev_err(&adapter->pdev->dev,
485bf569
SN
3853 "FW Image not properly aligned. "
3854 "Length must be 4 byte aligned.\n");
3855 status = -EINVAL;
3856 goto lancer_fw_exit;
d9efd2af
SB
3857 }
3858
485bf569
SN
3859 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3860 + LANCER_FW_DOWNLOAD_CHUNK;
3861 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
d0320f75 3862 &flash_cmd.dma, GFP_KERNEL);
485bf569
SN
3863 if (!flash_cmd.va) {
3864 status = -ENOMEM;
485bf569
SN
3865 goto lancer_fw_exit;
3866 }
84517482 3867
485bf569
SN
3868 dest_image_ptr = flash_cmd.va +
3869 sizeof(struct lancer_cmd_req_write_object);
3870 image_size = fw->size;
3871 data_ptr = fw->data;
3872
3873 while (image_size) {
3874 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3875
3876 /* Copy the image chunk content. */
3877 memcpy(dest_image_ptr, data_ptr, chunk_size);
3878
3879 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3880 chunk_size, offset,
3881 LANCER_FW_DOWNLOAD_LOCATION,
3882 &data_written, &change_status,
3883 &add_status);
485bf569
SN
3884 if (status)
3885 break;
3886
3887 offset += data_written;
3888 data_ptr += data_written;
3889 image_size -= data_written;
3890 }
3891
3892 if (!status) {
3893 /* Commit the FW written */
3894 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3895 0, offset,
3896 LANCER_FW_DOWNLOAD_LOCATION,
3897 &data_written, &change_status,
3898 &add_status);
485bf569
SN
3899 }
3900
3901 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3902 flash_cmd.dma);
3903 if (status) {
3904 dev_err(&adapter->pdev->dev,
3905 "Firmware load error. "
3906 "Status code: 0x%x Additional Status: 0x%x\n",
3907 status, add_status);
3908 goto lancer_fw_exit;
3909 }
3910
f67ef7ba 3911 if (change_status == LANCER_FW_RESET_NEEDED) {
4bebb56a
SK
3912 dev_info(&adapter->pdev->dev,
3913 "Resetting adapter to activate new FW\n");
5c510811
SK
3914 status = lancer_physdev_ctrl(adapter,
3915 PHYSDEV_CONTROL_FW_RESET_MASK);
f67ef7ba
PR
3916 if (status) {
3917 dev_err(&adapter->pdev->dev,
3918 "Adapter busy for FW reset.\n"
3919 "New FW will not be active.\n");
3920 goto lancer_fw_exit;
3921 }
3922 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3923 dev_err(&adapter->pdev->dev,
3924 "System reboot required for new FW"
3925 " to be active\n");
3926 }
3927
485bf569
SN
3928 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3929lancer_fw_exit:
3930 return status;
3931}
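
lancer_fw_download() streams the image through lancer_cmd_write_object() and then commits it with a zero-length write at the end-of-image offset. The offset bookkeeping, compressed (the function-pointer signature is a simplification of the real command):

#include <stddef.h>
#include <stdint.h>

#define DL_CHUNK (32 * 1024)

static int download_and_commit(const uint8_t *img, size_t size,
			       int (*write_object)(const uint8_t *buf, uint32_t len,
						   uint32_t offset, uint32_t *written))
{
	uint32_t offset = 0, written;
	int err;

	while (size) {
		uint32_t len = size < DL_CHUNK ? (uint32_t)size : DL_CHUNK;

		err = write_object(img, len, offset, &written);
		if (err)
			return err;
		/* the device may accept fewer bytes than requested */
		offset += written;
		img += written;
		size -= written;
	}
	/* a zero-length write at the final offset commits the new FW */
	return write_object(NULL, 0, offset, &written);
}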
3932
ca34fe38
SP
3933#define UFI_TYPE2 2
3934#define UFI_TYPE3 3
0ad3157e 3935#define UFI_TYPE3R 10
ca34fe38
SP
3936#define UFI_TYPE4 4
3937static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 3938 struct flash_file_hdr_g3 *fhdr)
773a2d7c
PR
3939{
3940 if (fhdr == NULL)
3941 goto be_get_ufi_exit;
3942
ca34fe38
SP
3943 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3944 return UFI_TYPE4;
0ad3157e
VV
3945 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3946 if (fhdr->asic_type_rev == 0x10)
3947 return UFI_TYPE3R;
3948 else
3949 return UFI_TYPE3;
3950 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
ca34fe38 3951 return UFI_TYPE2;
773a2d7c
PR
3952
3953be_get_ufi_exit:
3954 dev_err(&adapter->pdev->dev,
3955 "UFI and Interface are not compatible for flashing\n");
3956 return -1;
3957}
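
be_get_ufi_type() keys compatibility off the first character of the UFI build string plus the ASIC revision. The same decision as a standalone sketch (the chip codes and enum shape are assumptions for illustration):

enum ufi_type { UFI_BAD = -1, UFI_2 = 2, UFI_3 = 3, UFI_4 = 4, UFI_3R = 10 };

/* chip: 2 for BE2, 3 for BE3, 4 for Skyhawk; build0 is fhdr->build[0];
 * asic_rev 0x10 marks the BE3-R variant.
 */
static enum ufi_type classify_ufi(int chip, char build0, int asic_rev)
{
	if (chip == 4 && build0 == '4')
		return UFI_4;
	if (chip == 3 && build0 == '3')
		return asic_rev == 0x10 ? UFI_3R : UFI_3;
	if (chip == 2 && build0 == '2')
		return UFI_2;
	return UFI_BAD;
}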
3958
485bf569
SN
3959 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3960{
485bf569
SN
3961 struct flash_file_hdr_g3 *fhdr3;
3962 struct image_hdr *img_hdr_ptr = NULL;
3963 struct be_dma_mem flash_cmd;
3964 const u8 *p;
773a2d7c 3965 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
84517482 3966
be716446 3967 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
2b7bcebf
IV
3968 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3969 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
3970 if (!flash_cmd.va) {
3971 status = -ENOMEM;
485bf569 3972 goto be_fw_exit;
84517482
AK
3973 }
3974
773a2d7c 3975 p = fw->data;
0ad3157e 3976 fhdr3 = (struct flash_file_hdr_g3 *)p;
773a2d7c 3977
0ad3157e 3978 ufi_type = be_get_ufi_type(adapter, fhdr3);
773a2d7c 3979
773a2d7c
PR
3980 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3981 for (i = 0; i < num_imgs; i++) {
3982 img_hdr_ptr = (struct image_hdr *)(fw->data +
3983 (sizeof(struct flash_file_hdr_g3) +
3984 i * sizeof(struct image_hdr)));
3985 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
0ad3157e
VV
3986 switch (ufi_type) {
3987 case UFI_TYPE4:
773a2d7c
PR
3988 status = be_flash_skyhawk(adapter, fw,
3989 &flash_cmd, num_imgs);
0ad3157e
VV
3990 break;
3991 case UFI_TYPE3R:
ca34fe38
SP
3992 status = be_flash_BEx(adapter, fw, &flash_cmd,
3993 num_imgs);
0ad3157e
VV
3994 break;
3995 case UFI_TYPE3:
3996 /* Do not flash this ufi on BE3-R cards */
3997 if (adapter->asic_rev < 0x10)
3998 status = be_flash_BEx(adapter, fw,
3999 &flash_cmd,
4000 num_imgs);
4001 else {
4002 status = -1;
4003 dev_err(&adapter->pdev->dev,
4004 "Can't load BE3 UFI on BE3R\n");
4005 }
4006 }
3f0d4560 4007 }
773a2d7c
PR
4008 }
4009
ca34fe38
SP
4010 if (ufi_type == UFI_TYPE2)
4011 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
773a2d7c 4012 else if (ufi_type == -1)
3f0d4560 4013 status = -1;
84517482 4014
2b7bcebf
IV
4015 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
4016 flash_cmd.dma);
84517482
AK
4017 if (status) {
4018 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 4019 goto be_fw_exit;
84517482
AK
4020 }
4021
af901ca1 4022 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 4023
485bf569
SN
4024be_fw_exit:
4025 return status;
4026}
4027
4028int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4029{
4030 const struct firmware *fw;
4031 int status;
4032
4033 if (!netif_running(adapter->netdev)) {
4034 dev_err(&adapter->pdev->dev,
4035 "Firmware load not allowed (interface is down)\n");
4036 return -1;
4037 }
4038
4039 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4040 if (status)
4041 goto fw_exit;
4042
4043 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4044
4045 if (lancer_chip(adapter))
4046 status = lancer_fw_download(adapter, fw);
4047 else
4048 status = be_fw_download(adapter, fw);
4049
eeb65ced
SK
4050 if (!status)
4051 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
4052 adapter->fw_on_flash);
4053
84517482
AK
4054fw_exit:
4055 release_firmware(fw);
4056 return status;
4057}
4058
a77dcb8c
AK
4059static int be_ndo_bridge_setlink(struct net_device *dev,
4060 struct nlmsghdr *nlh)
4061{
4062 struct be_adapter *adapter = netdev_priv(dev);
4063 struct nlattr *attr, *br_spec;
4064 int rem;
4065 int status = 0;
4066 u16 mode = 0;
4067
4068 if (!sriov_enabled(adapter))
4069 return -EOPNOTSUPP;
4070
4071 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4072
4073 nla_for_each_nested(attr, br_spec, rem) {
4074 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4075 continue;
4076
4077 mode = nla_get_u16(attr);
4078 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4079 return -EINVAL;
4080
4081 status = be_cmd_set_hsw_config(adapter, 0, 0,
4082 adapter->if_handle,
4083 mode == BRIDGE_MODE_VEPA ?
4084 PORT_FWD_TYPE_VEPA :
4085 PORT_FWD_TYPE_VEB);
4086 if (status)
4087 goto err;
4088
4089 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4090 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4091
4092 return status;
4093 }
4094err:
4095 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4096 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4097
4098 return status;
4099}
4100
4101static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4102 struct net_device *dev,
4103 u32 filter_mask)
4104{
4105 struct be_adapter *adapter = netdev_priv(dev);
4106 int status = 0;
4107 u8 hsw_mode;
4108
4109 if (!sriov_enabled(adapter))
4110 return 0;
4111
4112 /* BE and Lancer chips support VEB mode only */
4113 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4114 hsw_mode = PORT_FWD_TYPE_VEB;
4115 } else {
4116 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4117 adapter->if_handle, &hsw_mode);
4118 if (status)
4119 return 0;
4120 }
4121
4122 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4123 hsw_mode == PORT_FWD_TYPE_VEPA ?
4124 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4125}
4126
e5686ad8 4127static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
4128 .ndo_open = be_open,
4129 .ndo_stop = be_close,
4130 .ndo_start_xmit = be_xmit,
a54769f5 4131 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
4132 .ndo_set_mac_address = be_mac_addr_set,
4133 .ndo_change_mtu = be_change_mtu,
ab1594e9 4134 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 4135 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
4136 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4137 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 4138 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 4139 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 4140 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
66268739 4141 .ndo_get_vf_config = be_get_vf_config,
bdce2ad7 4142 .ndo_set_vf_link_state = be_set_vf_link_state,
66268739
IV
4143#ifdef CONFIG_NET_POLL_CONTROLLER
4144 .ndo_poll_controller = be_netpoll,
4145#endif
a77dcb8c
AK
4146 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4147 .ndo_bridge_getlink = be_ndo_bridge_getlink,
6384a4d0
SP
4148#ifdef CONFIG_NET_RX_BUSY_POLL
4149 .ndo_busy_poll = be_busy_poll
4150#endif
6b7c5b94
SP
4151};
4152
4153static void be_netdev_init(struct net_device *netdev)
4154{
4155 struct be_adapter *adapter = netdev_priv(netdev);
4156
6332c8d3 4157 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68 4158 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
f646968f 4159 NETIF_F_HW_VLAN_CTAG_TX;
8b8ddc68
MM
4160 if (be_multi_rxq(adapter))
4161 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
4162
4163 netdev->features |= netdev->hw_features |
f646968f 4164 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4b972914 4165
eb8a50d9 4166 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 4167 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 4168
fbc13f01
AK
4169 netdev->priv_flags |= IFF_UNICAST_FLT;
4170
6b7c5b94
SP
4171 netdev->flags |= IFF_MULTICAST;
4172
b7e5887e 4173 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 4174
10ef9ab4 4175 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94
SP
4176
4177 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
6b7c5b94
SP
4178}
4179
4180static void be_unmap_pci_bars(struct be_adapter *adapter)
4181{
c5b3ad4c
SP
4182 if (adapter->csr)
4183 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 4184 if (adapter->db)
ce66f781 4185 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
4186}
4187
ce66f781
SP
4188static int db_bar(struct be_adapter *adapter)
4189{
4190 if (lancer_chip(adapter) || !be_physfn(adapter))
4191 return 0;
4192 else
4193 return 4;
4194}
4195
4196static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 4197{
dbf0f2a7 4198 if (skyhawk_chip(adapter)) {
ce66f781
SP
4199 adapter->roce_db.size = 4096;
4200 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4201 db_bar(adapter));
4202 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4203 db_bar(adapter));
4204 }
045508a8 4205 return 0;
6b7c5b94
SP
4206}
4207
4208static int be_map_pci_bars(struct be_adapter *adapter)
4209{
4210 u8 __iomem *addr;
fe6d2a38 4211
c5b3ad4c
SP
4212 if (BEx_chip(adapter) && be_physfn(adapter)) {
4213 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4214 if (adapter->csr == NULL)
4215 return -ENOMEM;
4216 }
4217
ce66f781 4218 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
6b7c5b94
SP
4219 if (addr == NULL)
4220 goto pci_map_err;
ba343c77 4221 adapter->db = addr;
ce66f781
SP
4222
4223 be_roce_map_pci_bars(adapter);
6b7c5b94 4224 return 0;
ce66f781 4225
6b7c5b94
SP
4226pci_map_err:
4227 be_unmap_pci_bars(adapter);
4228 return -ENOMEM;
4229}
4230
6b7c5b94
SP
4231static void be_ctrl_cleanup(struct be_adapter *adapter)
4232{
8788fdc2 4233 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
4234
4235 be_unmap_pci_bars(adapter);
4236
4237 if (mem->va)
2b7bcebf
IV
4238 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4239 mem->dma);
e7b909a6 4240
5b8821b7 4241 mem = &adapter->rx_filter;
e7b909a6 4242 if (mem->va)
2b7bcebf
IV
4243 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4244 mem->dma);
6b7c5b94
SP
4245}
4246
6b7c5b94
SP
4247static int be_ctrl_init(struct be_adapter *adapter)
4248{
8788fdc2
SP
4249 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4250 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 4251 struct be_dma_mem *rx_filter = &adapter->rx_filter;
ce66f781 4252 u32 sli_intf;
6b7c5b94 4253 int status;
6b7c5b94 4254
ce66f781
SP
4255 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4256 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4257 SLI_INTF_FAMILY_SHIFT;
4258 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4259
6b7c5b94
SP
4260 status = be_map_pci_bars(adapter);
4261 if (status)
e7b909a6 4262 goto done;
6b7c5b94
SP
4263
4264 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
4265 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4266 mbox_mem_alloc->size,
4267 &mbox_mem_alloc->dma,
4268 GFP_KERNEL);
6b7c5b94 4269 if (!mbox_mem_alloc->va) {
e7b909a6
SP
4270 status = -ENOMEM;
4271 goto unmap_pci_bars;
6b7c5b94
SP
4272 }
4273 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4274 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4275 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4276 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 4277
5b8821b7 4278 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
ede23fa8
JP
4279 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4280 rx_filter->size, &rx_filter->dma,
4281 GFP_KERNEL);
5b8821b7 4282 if (rx_filter->va == NULL) {
e7b909a6
SP
4283 status = -ENOMEM;
4284 goto free_mbox;
4285 }
1f9061d2 4286
2984961c 4287 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
4288 spin_lock_init(&adapter->mcc_lock);
4289 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 4290
5eeff635 4291 init_completion(&adapter->et_cmd_compl);
cf588477 4292 pci_save_state(adapter->pdev);
6b7c5b94 4293 return 0;
e7b909a6
SP
4294
4295free_mbox:
2b7bcebf
IV
4296 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4297 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
4298
4299unmap_pci_bars:
4300 be_unmap_pci_bars(adapter);
4301
4302done:
4303 return status;
6b7c5b94
SP
4304}
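
be_ctrl_init() over-allocates the mailbox by 16 bytes and then uses PTR_ALIGN so the address handed to hardware is 16-byte aligned. The same over-allocate-and-align idiom in plain C (userspace stand-ins for the DMA helpers):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct aligned_buf {
	void *raw;	/* keep for free() */
	void *aligned;	/* aligned view, like mbox_mem_align->va */
};

/* Allocate obj_size bytes whose start is aligned to 'align',
 * which must be a power of two.
 */
static int alloc_aligned(struct aligned_buf *b, size_t obj_size, size_t align)
{
	b->raw = malloc(obj_size + align);
	if (!b->raw)
		return -1;
	b->aligned = (void *)(((uintptr_t)b->raw + align - 1) &
			      ~(uintptr_t)(align - 1));
	memset(b->aligned, 0, obj_size);
	return 0;
}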
4305
4306static void be_stats_cleanup(struct be_adapter *adapter)
4307{
3abcdeda 4308 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
4309
4310 if (cmd->va)
2b7bcebf
IV
4311 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4312 cmd->va, cmd->dma);
6b7c5b94
SP
4313}
4314
4315static int be_stats_init(struct be_adapter *adapter)
4316{
3abcdeda 4317 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 4318
ca34fe38
SP
4319 if (lancer_chip(adapter))
4320 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4321 else if (BE2_chip(adapter))
89a88ab8 4322 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
61000861 4323 else if (BE3_chip(adapter))
ca34fe38 4324 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
61000861
AK
4325 else
4326 /* ALL non-BE ASICs */
4327 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
ca34fe38 4328
ede23fa8
JP
4329 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4330 GFP_KERNEL);
6b7c5b94
SP
4331 if (cmd->va == NULL)
4332 return -1;
4333 return 0;
4334}
4335
3bc6b06c 4336static void be_remove(struct pci_dev *pdev)
6b7c5b94
SP
4337{
4338 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 4339
6b7c5b94
SP
4340 if (!adapter)
4341 return;
4342
045508a8 4343 be_roce_dev_remove(adapter);
8cef7a78 4344 be_intr_set(adapter, false);
045508a8 4345
f67ef7ba
PR
4346 cancel_delayed_work_sync(&adapter->func_recovery_work);
4347
6b7c5b94
SP
4348 unregister_netdev(adapter->netdev);
4349
5fb379ee
SP
4350 be_clear(adapter);
4351
bf99e50d
PR
4352 /* tell fw we're done with firing cmds */
4353 be_cmd_fw_clean(adapter);
4354
6b7c5b94
SP
4355 be_stats_cleanup(adapter);
4356
4357 be_ctrl_cleanup(adapter);
4358
d6b6d987
SP
4359 pci_disable_pcie_error_reporting(pdev);
4360
6b7c5b94
SP
4361 pci_release_regions(pdev);
4362 pci_disable_device(pdev);
4363
4364 free_netdev(adapter->netdev);
4365}
4366
39f1d94d 4367static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 4368{
baaa08d1 4369 int status, level;
6b7c5b94 4370
9e1453c5
AK
4371 status = be_cmd_get_cntl_attributes(adapter);
4372 if (status)
4373 return status;
4374
7aeb2156
PR
4375 /* Must be a power of 2 or else MODULO will BUG_ON */
4376 adapter->be_get_temp_freq = 64;
4377
baaa08d1
VV
4378 if (BEx_chip(adapter)) {
4379 level = be_cmd_get_fw_log_level(adapter);
4380 adapter->msg_enable =
4381 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4382 }
941a77d5 4383
92bf14ab 4384 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
2243e2e9 4385 return 0;
6b7c5b94
SP
4386}
4387
f67ef7ba 4388static int lancer_recover_func(struct be_adapter *adapter)
d8110f62 4389{
01e5b2c4 4390 struct device *dev = &adapter->pdev->dev;
d8110f62 4391 int status;
d8110f62 4392
f67ef7ba
PR
4393 status = lancer_test_and_set_rdy_state(adapter);
4394 if (status)
4395 goto err;
d8110f62 4396
f67ef7ba
PR
4397 if (netif_running(adapter->netdev))
4398 be_close(adapter->netdev);
d8110f62 4399
f67ef7ba
PR
4400 be_clear(adapter);
4401
01e5b2c4 4402 be_clear_all_error(adapter);
f67ef7ba
PR
4403
4404 status = be_setup(adapter);
4405 if (status)
4406 goto err;
d8110f62 4407
f67ef7ba
PR
4408 if (netif_running(adapter->netdev)) {
4409 status = be_open(adapter->netdev);
d8110f62
PR
4410 if (status)
4411 goto err;
f67ef7ba 4412 }
d8110f62 4413
4bebb56a 4414 dev_err(dev, "Adapter recovery successful\n");
f67ef7ba
PR
4415 return 0;
4416err:
01e5b2c4
SK
4417 if (status == -EAGAIN)
4418 dev_err(dev, "Waiting for resource provisioning\n");
4419 else
4bebb56a 4420 dev_err(dev, "Adapter recovery failed\n");
d8110f62 4421
f67ef7ba
PR
4422 return status;
4423}
4424
4425static void be_func_recovery_task(struct work_struct *work)
4426{
4427 struct be_adapter *adapter =
4428 container_of(work, struct be_adapter, func_recovery_work.work);
01e5b2c4 4429 int status = 0;
d8110f62 4430
f67ef7ba 4431 be_detect_error(adapter);
d8110f62 4432
f67ef7ba 4433 if (adapter->hw_error && lancer_chip(adapter)) {
d8110f62 4434
f67ef7ba
PR
4435 rtnl_lock();
4436 netif_device_detach(adapter->netdev);
4437 rtnl_unlock();
d8110f62 4438
f67ef7ba 4439 status = lancer_recover_func(adapter);
f67ef7ba
PR
4440 if (!status)
4441 netif_device_attach(adapter->netdev);
d8110f62 4442 }
f67ef7ba 4443
01e5b2c4
SK
4444 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4445 * no need to attempt further recovery.
4446 */
4447 if (!status || status == -EAGAIN)
4448 schedule_delayed_work(&adapter->func_recovery_work,
4449 msecs_to_jiffies(1000));
d8110f62
PR
4450}
4451
4452static void be_worker(struct work_struct *work)
4453{
4454 struct be_adapter *adapter =
4455 container_of(work, struct be_adapter, work.work);
4456 struct be_rx_obj *rxo;
4457 int i;
4458
d8110f62
PR
4459 /* when interrupts are not yet enabled, just reap any pending
4460 * mcc completions */
4461 if (!netif_running(adapter->netdev)) {
072a9c48 4462 local_bh_disable();
10ef9ab4 4463 be_process_mcc(adapter);
072a9c48 4464 local_bh_enable();
d8110f62
PR
4465 goto reschedule;
4466 }
4467
4468 if (!adapter->stats_cmd_sent) {
4469 if (lancer_chip(adapter))
4470 lancer_cmd_get_pport_stats(adapter,
4471 &adapter->stats_cmd);
4472 else
4473 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4474 }
4475
d696b5e2
VV
4476 if (be_physfn(adapter) &&
4477 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
7aeb2156
PR
4478 be_cmd_get_die_temperature(adapter);
4479
d8110f62 4480 for_all_rx_queues(adapter, rxo, i) {
6384a4d0
SP
4481 /* Replenish RX-queues starved due to memory
4482 * allocation failures.
4483 */
4484 if (rxo->rx_post_starved)
d8110f62 4485 be_post_rx_frags(rxo, GFP_KERNEL);
d8110f62
PR
4486 }
4487
2632bafd 4488 be_eqd_update(adapter);
10ef9ab4 4489
d8110f62
PR
4490reschedule:
4491 adapter->work_counter++;
4492 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4493}
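
be_worker() is self-rearming: every pass finishes by scheduling itself another second out, so periodic housekeeping runs without a dedicated thread. The minimal form of that pattern, a kernel-context fragment (struct my_dev is hypothetical):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_dev {
	struct delayed_work work;
	/* ... device state ... */
};

static void my_worker(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, work.work);

	/* per-tick housekeeping would go here */

	schedule_delayed_work(&dev->work, msecs_to_jiffies(1000));
}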
4494
257a3feb 4495/* If any VFs are already enabled don't FLR the PF */
39f1d94d
SP
4496static bool be_reset_required(struct be_adapter *adapter)
4497{
257a3feb 4498 return pci_num_vf(adapter->pdev) ? false : true;
39f1d94d
SP
4499}
4500
d379142b
SP
4501static char *mc_name(struct be_adapter *adapter)
4502{
f93f160b
VV
4503 char *str = ""; /* default */
4504
4505 switch (adapter->mc_type) {
4506 case UMC:
4507 str = "UMC";
4508 break;
4509 case FLEX10:
4510 str = "FLEX10";
4511 break;
4512 case vNIC1:
4513 str = "vNIC-1";
4514 break;
4515 case nPAR:
4516 str = "nPAR";
4517 break;
4518 case UFP:
4519 str = "UFP";
4520 break;
4521 case vNIC2:
4522 str = "vNIC-2";
4523 break;
4524 default:
4525 str = "";
4526 }
4527
4528 return str;
d379142b
SP
4529}
4530
4531static inline char *func_name(struct be_adapter *adapter)
4532{
4533 return be_physfn(adapter) ? "PF" : "VF";
4534}
4535
1dd06ae8 4536static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
6b7c5b94
SP
4537{
4538 int status = 0;
4539 struct be_adapter *adapter;
4540 struct net_device *netdev;
b4e32a71 4541 char port_name;
6b7c5b94
SP
4542
4543 status = pci_enable_device(pdev);
4544 if (status)
4545 goto do_none;
4546
4547 status = pci_request_regions(pdev, DRV_NAME);
4548 if (status)
4549 goto disable_dev;
4550 pci_set_master(pdev);
4551
7f640062 4552 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
6b7c5b94
SP
4553 if (netdev == NULL) {
4554 status = -ENOMEM;
4555 goto rel_reg;
4556 }
4557 adapter = netdev_priv(netdev);
4558 adapter->pdev = pdev;
4559 pci_set_drvdata(pdev, adapter);
4560 adapter->netdev = netdev;
2243e2e9 4561 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 4562
4c15c243 4563 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
4564 if (!status) {
4565 netdev->features |= NETIF_F_HIGHDMA;
4566 } else {
4c15c243 4567 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
4568 if (status) {
4569 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4570 goto free_netdev;
4571 }
4572 }
4573
ea58c180
AK
4574 if (be_physfn(adapter)) {
4575 status = pci_enable_pcie_error_reporting(pdev);
4576 if (!status)
4577 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4578 }
d6b6d987 4579
6b7c5b94
SP
4580 status = be_ctrl_init(adapter);
4581 if (status)
39f1d94d 4582 goto free_netdev;
6b7c5b94 4583
2243e2e9 4584 /* sync up with fw's ready state */
ba343c77 4585 if (be_physfn(adapter)) {
bf99e50d 4586 status = be_fw_wait_ready(adapter);
ba343c77
SB
4587 if (status)
4588 goto ctrl_clean;
ba343c77 4589 }
6b7c5b94 4590
39f1d94d
SP
4591 if (be_reset_required(adapter)) {
4592 status = be_cmd_reset_function(adapter);
4593 if (status)
4594 goto ctrl_clean;
556ae191 4595
2d177be8
KA
4596 /* Wait for interrupts to quiesce after an FLR */
4597 msleep(100);
4598 }
8cef7a78
SK
4599
4600 /* Allow interrupts for other ULPs running on NIC function */
4601 be_intr_set(adapter, true);
10ef9ab4 4602
2d177be8
KA
4603 /* tell fw we're ready to fire cmds */
4604 status = be_cmd_fw_init(adapter);
4605 if (status)
4606 goto ctrl_clean;
4607
2243e2e9
SP
4608 status = be_stats_init(adapter);
4609 if (status)
4610 goto ctrl_clean;
4611
39f1d94d 4612 status = be_get_initial_config(adapter);
6b7c5b94
SP
4613 if (status)
4614 goto stats_clean;
6b7c5b94
SP
4615
4616 INIT_DELAYED_WORK(&adapter->work, be_worker);
f67ef7ba 4617 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
a54769f5 4618 adapter->rx_fc = adapter->tx_fc = true;
6b7c5b94 4619
5fb379ee
SP
4620 status = be_setup(adapter);
4621 if (status)
55f5c3c5 4622 goto stats_clean;
2243e2e9 4623
3abcdeda 4624 be_netdev_init(netdev);
6b7c5b94
SP
4625 status = register_netdev(netdev);
4626 if (status != 0)
5fb379ee 4627 goto unsetup;
6b7c5b94 4628
045508a8
PP
4629 be_roce_dev_add(adapter);
4630
f67ef7ba
PR
4631 schedule_delayed_work(&adapter->func_recovery_work,
4632 msecs_to_jiffies(1000));
b4e32a71
PR
4633
4634 be_cmd_query_port_name(adapter, &port_name);
4635
d379142b
SP
4636 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4637 func_name(adapter), mc_name(adapter), port_name);
34b1ef04 4638
6b7c5b94
SP
4639 return 0;
4640
5fb379ee
SP
4641unsetup:
4642 be_clear(adapter);
6b7c5b94
SP
4643stats_clean:
4644 be_stats_cleanup(adapter);
4645ctrl_clean:
4646 be_ctrl_cleanup(adapter);
f9449ab7 4647free_netdev:
fe6d2a38 4648 free_netdev(netdev);
6b7c5b94
SP
4649rel_reg:
4650 pci_release_regions(pdev);
4651disable_dev:
4652 pci_disable_device(pdev);
4653do_none:
c4ca2374 4654 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
4655 return status;
4656}
4657
4658static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4659{
4660 struct be_adapter *adapter = pci_get_drvdata(pdev);
4661 struct net_device *netdev = adapter->netdev;
4662
76a9e08e 4663 if (adapter->wol_en)
71d8d1b5
AK
4664 be_setup_wol(adapter, true);
4665
d4360d6f 4666 be_intr_set(adapter, false);
f67ef7ba
PR
4667 cancel_delayed_work_sync(&adapter->func_recovery_work);
4668
6b7c5b94
SP
4669 netif_device_detach(netdev);
4670 if (netif_running(netdev)) {
4671 rtnl_lock();
4672 be_close(netdev);
4673 rtnl_unlock();
4674 }
9b0365f1 4675 be_clear(adapter);
6b7c5b94
SP
4676
4677 pci_save_state(pdev);
4678 pci_disable_device(pdev);
4679 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4680 return 0;
4681}
4682
4683static int be_resume(struct pci_dev *pdev)
4684{
4685 int status = 0;
4686 struct be_adapter *adapter = pci_get_drvdata(pdev);
4687 struct net_device *netdev = adapter->netdev;
4688
4689 netif_device_detach(netdev);
4690
4691 status = pci_enable_device(pdev);
4692 if (status)
4693 return status;
4694
1ca01512 4695 pci_set_power_state(pdev, PCI_D0);
6b7c5b94
SP
4696 pci_restore_state(pdev);
4697
dd5746bf
SB
4698 status = be_fw_wait_ready(adapter);
4699 if (status)
4700 return status;
4701
d4360d6f 4702 be_intr_set(adapter, true);
2243e2e9
SP
4703 /* tell fw we're ready to fire cmds */
4704 status = be_cmd_fw_init(adapter);
4705 if (status)
4706 return status;
4707
9b0365f1 4708 be_setup(adapter);
6b7c5b94
SP
4709 if (netif_running(netdev)) {
4710 rtnl_lock();
4711 be_open(netdev);
4712 rtnl_unlock();
4713 }
f67ef7ba
PR
4714
4715 schedule_delayed_work(&adapter->func_recovery_work,
4716 msecs_to_jiffies(1000));
6b7c5b94 4717 netif_device_attach(netdev);
71d8d1b5 4718
76a9e08e 4719 if (adapter->wol_en)
71d8d1b5 4720 be_setup_wol(adapter, false);
a4ca055f 4721
6b7c5b94
SP
4722 return 0;
4723}
4724
82456b03
SP
4725/*
4726 * An FLR will stop BE from DMAing any data.
4727 */
4728static void be_shutdown(struct pci_dev *pdev)
4729{
4730 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 4731
2d5d4154
AK
4732 if (!adapter)
4733 return;
82456b03 4734
0f4a6828 4735 cancel_delayed_work_sync(&adapter->work);
f67ef7ba 4736 cancel_delayed_work_sync(&adapter->func_recovery_work);
a4ca055f 4737
2d5d4154 4738 netif_device_detach(adapter->netdev);
82456b03 4739
57841869
AK
4740 be_cmd_reset_function(adapter);
4741
82456b03 4742 pci_disable_device(pdev);
82456b03
SP
4743}
4744
cf588477
SP
4745static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4746 pci_channel_state_t state)
4747{
4748 struct be_adapter *adapter = pci_get_drvdata(pdev);
4749 struct net_device *netdev = adapter->netdev;
4750
4751 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4752
01e5b2c4
SK
4753 if (!adapter->eeh_error) {
4754 adapter->eeh_error = true;
cf588477 4755
01e5b2c4 4756 cancel_delayed_work_sync(&adapter->func_recovery_work);
cf588477 4757
cf588477 4758 rtnl_lock();
01e5b2c4
SK
4759 netif_device_detach(netdev);
4760 if (netif_running(netdev))
4761 be_close(netdev);
cf588477 4762 rtnl_unlock();
01e5b2c4
SK
4763
4764 be_clear(adapter);
cf588477 4765 }
cf588477
SP
4766
4767 if (state == pci_channel_io_perm_failure)
4768 return PCI_ERS_RESULT_DISCONNECT;
4769
4770 pci_disable_device(pdev);
4771
eeb7fc7b
SK
4772 /* The error could cause the FW to trigger a flash debug dump.
4773 * Resetting the card while flash dump is in progress
c8a54163
PR
4774 * can cause it not to recover; wait for it to finish.
4775 * Wait only for first function as it is needed only once per
4776 * adapter.
eeb7fc7b 4777 */
c8a54163
PR
4778 if (pdev->devfn == 0)
4779 ssleep(30);
4780
cf588477
SP
4781 return PCI_ERS_RESULT_NEED_RESET;
4782}
4783
4784static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4785{
4786 struct be_adapter *adapter = pci_get_drvdata(pdev);
4787 int status;
4788
4789 dev_info(&adapter->pdev->dev, "EEH reset\n");
cf588477
SP
4790
4791 status = pci_enable_device(pdev);
4792 if (status)
4793 return PCI_ERS_RESULT_DISCONNECT;
4794
4795 pci_set_master(pdev);
1ca01512 4796 pci_set_power_state(pdev, PCI_D0);
cf588477
SP
4797 pci_restore_state(pdev);
4798
4799 /* Check if card is ok and fw is ready */
c5b3ad4c
SP
4800 dev_info(&adapter->pdev->dev,
4801 "Waiting for FW to be ready after EEH reset\n");
bf99e50d 4802 status = be_fw_wait_ready(adapter);
cf588477
SP
4803 if (status)
4804 return PCI_ERS_RESULT_DISCONNECT;
4805
d6b6d987 4806 pci_cleanup_aer_uncorrect_error_status(pdev);
01e5b2c4 4807 be_clear_all_error(adapter);
cf588477
SP
4808 return PCI_ERS_RESULT_RECOVERED;
4809}
4810
4811static void be_eeh_resume(struct pci_dev *pdev)
4812{
4813 int status = 0;
4814 struct be_adapter *adapter = pci_get_drvdata(pdev);
4815 struct net_device *netdev = adapter->netdev;
4816
4817 dev_info(&adapter->pdev->dev, "EEH resume\n");
4818
4819 pci_save_state(pdev);
4820
2d177be8 4821 status = be_cmd_reset_function(adapter);
cf588477
SP
4822 if (status)
4823 goto err;
4824
2d177be8
KA
4825 /* tell fw we're ready to fire cmds */
4826 status = be_cmd_fw_init(adapter);
bf99e50d
PR
4827 if (status)
4828 goto err;
4829
cf588477
SP
4830 status = be_setup(adapter);
4831 if (status)
4832 goto err;
4833
4834 if (netif_running(netdev)) {
4835 status = be_open(netdev);
4836 if (status)
4837 goto err;
4838 }
f67ef7ba
PR
4839
4840 schedule_delayed_work(&adapter->func_recovery_work,
4841 msecs_to_jiffies(1000));
cf588477
SP
4842 netif_device_attach(netdev);
4843 return;
4844err:
4845 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
4846}
4847
3646f0e5 4848static const struct pci_error_handlers be_eeh_handlers = {
cf588477
SP
4849 .error_detected = be_eeh_err_detected,
4850 .slot_reset = be_eeh_reset,
4851 .resume = be_eeh_resume,
4852};
4853
6b7c5b94
SP
4854static struct pci_driver be_driver = {
4855 .name = DRV_NAME,
4856 .id_table = be_dev_ids,
4857 .probe = be_probe,
4858 .remove = be_remove,
4859 .suspend = be_suspend,
cf588477 4860 .resume = be_resume,
82456b03 4861 .shutdown = be_shutdown,
cf588477 4862 .err_handler = &be_eeh_handlers
6b7c5b94
SP
4863};
4864
4865static int __init be_init_module(void)
4866{
8e95a202
JP
4867 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4868 rx_frag_size != 2048) {
6b7c5b94
SP
4869 printk(KERN_WARNING DRV_NAME
4870 " : Module param rx_frag_size must be 2048/4096/8192."
4871 " Using 2048\n");
4872 rx_frag_size = 2048;
4873 }
6b7c5b94
SP
4874
4875 return pci_register_driver(&be_driver);
4876}
4877module_init(be_init_module);
4878
4879static void __exit be_exit_module(void)
4880{
4881 pci_unregister_driver(&be_driver);
4882}
4883module_exit(be_exit_module);