be2net: Report a "link down" to the stack when a fatal error or fw reset happens.
[deliverable/linux.git] / drivers / net / ethernet / emulex / benet / be_main.c
CommitLineData
6b7c5b94 1/*
d19261b8 2 * Copyright (C) 2005 - 2015 Emulex
6b7c5b94
SP
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
d2145cde 11 * linux-drivers@emulex.com
6b7c5b94 12 *
d2145cde
AK
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
6b7c5b94
SP
16 */
17
70c71606 18#include <linux/prefetch.h>
9d9779e7 19#include <linux/module.h>
6b7c5b94 20#include "be.h"
8788fdc2 21#include "be_cmds.h"
65f71b8b 22#include <asm/div64.h>
d6b6d987 23#include <linux/aer.h>
a77dcb8c 24#include <linux/if_bridge.h>
6384a4d0 25#include <net/busy_poll.h>
c9c47142 26#include <net/vxlan.h>
6b7c5b94
SP
27
28MODULE_VERSION(DRV_VER);
6b7c5b94 29MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
00d3d51e 30MODULE_AUTHOR("Emulex Corporation");
6b7c5b94
SP
31MODULE_LICENSE("GPL");
32
ace40aff
VV
33/* num_vfs module param is obsolete.
34 * Use sysfs method to enable/disable VFs.
35 */
ba343c77 36static unsigned int num_vfs;
ba343c77 37module_param(num_vfs, uint, S_IRUGO);
ba343c77 38MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
6b7c5b94 39
11ac75ed
SP
40static ushort rx_frag_size = 2048;
41module_param(rx_frag_size, ushort, S_IRUGO);
42MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
9baa3c34 44static const struct pci_device_id be_dev_ids[] = {
c4ca2374 45 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
59fd5d87 46 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
c4ca2374
AK
47 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
48 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
fe6d2a38 49 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
12f4d0a8 50 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
ecedb6ae 51 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
76b73530 52 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
6b7c5b94
SP
53 { 0 }
54};
55MODULE_DEVICE_TABLE(pci, be_dev_ids);
7c185276 56/* UE Status Low CSR */
42c8b11e 57static const char * const ue_status_low_desc[] = {
7c185276
AK
58 "CEV",
59 "CTX",
60 "DBUF",
61 "ERX",
62 "Host",
63 "MPU",
64 "NDMA",
65 "PTC ",
66 "RDMA ",
67 "RXF ",
68 "RXIPS ",
69 "RXULP0 ",
70 "RXULP1 ",
71 "RXULP2 ",
72 "TIM ",
73 "TPOST ",
74 "TPRE ",
75 "TXIPS ",
76 "TXULP0 ",
77 "TXULP1 ",
78 "UC ",
79 "WDMA ",
80 "TXULP2 ",
81 "HOST1 ",
82 "P0_OB_LINK ",
83 "P1_OB_LINK ",
84 "HOST_GPIO ",
85 "MBOX ",
6bdf8f55
VV
86 "ERX2 ",
87 "SPARE ",
88 "JTAG ",
89 "MPU_INTPEND "
7c185276 90};
e2fb1afa 91
7c185276 92/* UE Status High CSR */
42c8b11e 93static const char * const ue_status_hi_desc[] = {
7c185276
AK
94 "LPCMEMHOST",
95 "MGMT_MAC",
96 "PCS0ONLINE",
97 "MPU_IRAM",
98 "PCS1ONLINE",
99 "PCTL0",
100 "PCTL1",
101 "PMEM",
102 "RR",
103 "TXPB",
104 "RXPP",
105 "XAUI",
106 "TXP",
107 "ARM",
108 "IPC",
109 "HOST2",
110 "HOST3",
111 "HOST4",
112 "HOST5",
113 "HOST6",
114 "HOST7",
6bdf8f55
VV
115 "ECRC",
116 "Poison TLP",
42c8b11e 117 "NETC",
6bdf8f55
VV
118 "PERIPH",
119 "LLTXULP",
120 "D2P",
121 "RCON",
122 "LDMA",
123 "LLTXP",
124 "LLTXPB",
7c185276
AK
125 "Unknown"
126};
6b7c5b94
SP
127
128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
03d28ffe 131
1cfafab9 132 if (mem->va) {
2b7bcebf
IV
133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
1cfafab9
SP
135 mem->va = NULL;
136 }
6b7c5b94
SP
137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
748b539a 140 u16 len, u16 entry_size)
6b7c5b94
SP
141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
ede23fa8
JP
148 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
6b7c5b94 150 if (!mem->va)
10ef9ab4 151 return -ENOMEM;
6b7c5b94
SP
152 return 0;
153}
154
68c45a2d 155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
6b7c5b94 156{
db3ea781 157 u32 reg, enabled;
5f0b849e 158
db3ea781 159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
748b539a 160 &reg);
db3ea781
SP
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
5f0b849e 163 if (!enabled && enable)
6b7c5b94 164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 165 else if (enabled && !enable)
6b7c5b94 166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 167 else
6b7c5b94 168 return;
5f0b849e 169
db3ea781 170 pci_write_config_dword(adapter->pdev,
748b539a 171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
6b7c5b94
SP
172}
173
68c45a2d
SK
174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
954f6825 182 if (be_check_error(adapter, BE_ERROR_EEH))
68c45a2d
SK
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
8788fdc2 190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
191{
192 u32 val = 0;
03d28ffe 193
954f6825
VD
194 if (be_check_error(adapter, BE_ERROR_HW))
195 return;
196
6b7c5b94
SP
197 val |= qid & DB_RQ_RING_ID_MASK;
198 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
f3eb62d2
SP
199
200 wmb();
8788fdc2 201 iowrite32(val, adapter->db + DB_RQ_OFFSET);
6b7c5b94
SP
202}
203
94d73aaa
VV
204static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
205 u16 posted)
6b7c5b94
SP
206{
207 u32 val = 0;
03d28ffe 208
954f6825
VD
209 if (be_check_error(adapter, BE_ERROR_HW))
210 return;
211
94d73aaa 212 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
6b7c5b94 213 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
f3eb62d2
SP
214
215 wmb();
94d73aaa 216 iowrite32(val, adapter->db + txo->db_offset);
6b7c5b94
SP
217}
218
8788fdc2 219static void be_eq_notify(struct be_adapter *adapter, u16 qid,
20947770
PR
220 bool arm, bool clear_int, u16 num_popped,
221 u32 eq_delay_mult_enc)
6b7c5b94
SP
222{
223 u32 val = 0;
03d28ffe 224
6b7c5b94 225 val |= qid & DB_EQ_RING_ID_MASK;
748b539a 226 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
cf588477 227
954f6825 228 if (be_check_error(adapter, BE_ERROR_HW))
cf588477
SP
229 return;
230
6b7c5b94
SP
231 if (arm)
232 val |= 1 << DB_EQ_REARM_SHIFT;
233 if (clear_int)
234 val |= 1 << DB_EQ_CLR_SHIFT;
235 val |= 1 << DB_EQ_EVNT_SHIFT;
236 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
20947770 237 val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
8788fdc2 238 iowrite32(val, adapter->db + DB_EQ_OFFSET);
6b7c5b94
SP
239}
240
8788fdc2 241void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
6b7c5b94
SP
242{
243 u32 val = 0;
03d28ffe 244
6b7c5b94 245 val |= qid & DB_CQ_RING_ID_MASK;
fe6d2a38
SP
246 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
247 DB_CQ_RING_ID_EXT_MASK_SHIFT);
cf588477 248
954f6825 249 if (be_check_error(adapter, BE_ERROR_HW))
cf588477
SP
250 return;
251
6b7c5b94
SP
252 if (arm)
253 val |= 1 << DB_CQ_REARM_SHIFT;
254 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
8788fdc2 255 iowrite32(val, adapter->db + DB_CQ_OFFSET);
6b7c5b94
SP
256}
257
6b7c5b94
SP
258static int be_mac_addr_set(struct net_device *netdev, void *p)
259{
260 struct be_adapter *adapter = netdev_priv(netdev);
5a712c13 261 struct device *dev = &adapter->pdev->dev;
6b7c5b94 262 struct sockaddr *addr = p;
5a712c13
SP
263 int status;
264 u8 mac[ETH_ALEN];
265 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
6b7c5b94 266
ca9e4988
AK
267 if (!is_valid_ether_addr(addr->sa_data))
268 return -EADDRNOTAVAIL;
269
ff32f8ab
VV
270 /* Proceed further only if, User provided MAC is different
271 * from active MAC
272 */
273 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
274 return 0;
275
5a712c13
SP
276 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
277 * privilege or if PF did not provision the new MAC address.
278 * On BE3, this cmd will always fail if the VF doesn't have the
279 * FILTMGMT privilege. This failure is OK, only if the PF programmed
280 * the MAC for the VF.
704e4c88 281 */
5a712c13
SP
282 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
283 adapter->if_handle, &adapter->pmac_id[0], 0);
284 if (!status) {
285 curr_pmac_id = adapter->pmac_id[0];
286
287 /* Delete the old programmed MAC. This call may fail if the
288 * old MAC was already deleted by the PF driver.
289 */
290 if (adapter->pmac_id[0] != old_pmac_id)
291 be_cmd_pmac_del(adapter, adapter->if_handle,
292 old_pmac_id, 0);
704e4c88
PR
293 }
294
5a712c13
SP
295 /* Decide if the new MAC is successfully activated only after
296 * querying the FW
704e4c88 297 */
b188f090
SR
298 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
299 adapter->if_handle, true, 0);
a65027e4 300 if (status)
e3a7ae2c 301 goto err;
6b7c5b94 302
5a712c13
SP
303 /* The MAC change did not happen, either due to lack of privilege
304 * or PF didn't pre-provision.
305 */
61d23e9f 306 if (!ether_addr_equal(addr->sa_data, mac)) {
5a712c13
SP
307 status = -EPERM;
308 goto err;
309 }
310
e3a7ae2c 311 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
5a712c13 312 dev_info(dev, "MAC address changed to %pM\n", mac);
e3a7ae2c
SK
313 return 0;
314err:
5a712c13 315 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
6b7c5b94
SP
316 return status;
317}
318
ca34fe38
SP
319/* BE2 supports only v0 cmd */
320static void *hw_stats_from_cmd(struct be_adapter *adapter)
321{
322 if (BE2_chip(adapter)) {
323 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
324
325 return &cmd->hw_stats;
61000861 326 } else if (BE3_chip(adapter)) {
ca34fe38
SP
327 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
328
61000861
AK
329 return &cmd->hw_stats;
330 } else {
331 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
332
ca34fe38
SP
333 return &cmd->hw_stats;
334 }
335}
336
337/* BE2 supports only v0 cmd */
338static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
339{
340 if (BE2_chip(adapter)) {
341 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
342
343 return &hw_stats->erx;
61000861 344 } else if (BE3_chip(adapter)) {
ca34fe38
SP
345 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
346
61000861
AK
347 return &hw_stats->erx;
348 } else {
349 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
350
ca34fe38
SP
351 return &hw_stats->erx;
352 }
353}
354
355static void populate_be_v0_stats(struct be_adapter *adapter)
89a88ab8 356{
ac124ff9
SP
357 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
358 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
359 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
89a88ab8 360 struct be_port_rxf_stats_v0 *port_stats =
ac124ff9
SP
361 &rxf_stats->port[adapter->port_num];
362 struct be_drv_stats *drvs = &adapter->drv_stats;
89a88ab8 363
ac124ff9 364 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
89a88ab8
AK
365 drvs->rx_pause_frames = port_stats->rx_pause_frames;
366 drvs->rx_crc_errors = port_stats->rx_crc_errors;
367 drvs->rx_control_frames = port_stats->rx_control_frames;
368 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
369 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
370 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
371 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
372 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
373 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
374 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
375 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
376 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
377 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
378 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
ac124ff9 379 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
89a88ab8
AK
380 drvs->rx_dropped_header_too_small =
381 port_stats->rx_dropped_header_too_small;
18fb06a1
SR
382 drvs->rx_address_filtered =
383 port_stats->rx_address_filtered +
384 port_stats->rx_vlan_filtered;
89a88ab8
AK
385 drvs->rx_alignment_symbol_errors =
386 port_stats->rx_alignment_symbol_errors;
387
388 drvs->tx_pauseframes = port_stats->tx_pauseframes;
389 drvs->tx_controlframes = port_stats->tx_controlframes;
390
391 if (adapter->port_num)
ac124ff9 392 drvs->jabber_events = rxf_stats->port1_jabber_events;
89a88ab8 393 else
ac124ff9 394 drvs->jabber_events = rxf_stats->port0_jabber_events;
89a88ab8 395 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
89a88ab8 396 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
89a88ab8
AK
397 drvs->forwarded_packets = rxf_stats->forwarded_packets;
398 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
ac124ff9
SP
399 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
400 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
89a88ab8
AK
401 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
402}
403
ca34fe38 404static void populate_be_v1_stats(struct be_adapter *adapter)
89a88ab8 405{
ac124ff9
SP
406 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
407 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
408 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
89a88ab8 409 struct be_port_rxf_stats_v1 *port_stats =
ac124ff9
SP
410 &rxf_stats->port[adapter->port_num];
411 struct be_drv_stats *drvs = &adapter->drv_stats;
89a88ab8 412
ac124ff9 413 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
02fe7027
AK
414 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
415 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
89a88ab8
AK
416 drvs->rx_pause_frames = port_stats->rx_pause_frames;
417 drvs->rx_crc_errors = port_stats->rx_crc_errors;
418 drvs->rx_control_frames = port_stats->rx_control_frames;
419 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
420 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
421 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
422 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
423 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
424 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
425 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
426 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
427 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
428 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
429 drvs->rx_dropped_header_too_small =
430 port_stats->rx_dropped_header_too_small;
431 drvs->rx_input_fifo_overflow_drop =
432 port_stats->rx_input_fifo_overflow_drop;
18fb06a1 433 drvs->rx_address_filtered = port_stats->rx_address_filtered;
89a88ab8
AK
434 drvs->rx_alignment_symbol_errors =
435 port_stats->rx_alignment_symbol_errors;
ac124ff9 436 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
89a88ab8
AK
437 drvs->tx_pauseframes = port_stats->tx_pauseframes;
438 drvs->tx_controlframes = port_stats->tx_controlframes;
b5adffc4 439 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
89a88ab8
AK
440 drvs->jabber_events = port_stats->jabber_events;
441 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
89a88ab8 442 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
89a88ab8
AK
443 drvs->forwarded_packets = rxf_stats->forwarded_packets;
444 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
ac124ff9
SP
445 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
446 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
89a88ab8
AK
447 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
448}
449
61000861
AK
450static void populate_be_v2_stats(struct be_adapter *adapter)
451{
452 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
453 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
454 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
455 struct be_port_rxf_stats_v2 *port_stats =
456 &rxf_stats->port[adapter->port_num];
457 struct be_drv_stats *drvs = &adapter->drv_stats;
458
459 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
460 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
461 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
462 drvs->rx_pause_frames = port_stats->rx_pause_frames;
463 drvs->rx_crc_errors = port_stats->rx_crc_errors;
464 drvs->rx_control_frames = port_stats->rx_control_frames;
465 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
466 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
467 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
468 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
469 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
470 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
471 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
472 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
473 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
474 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
475 drvs->rx_dropped_header_too_small =
476 port_stats->rx_dropped_header_too_small;
477 drvs->rx_input_fifo_overflow_drop =
478 port_stats->rx_input_fifo_overflow_drop;
479 drvs->rx_address_filtered = port_stats->rx_address_filtered;
480 drvs->rx_alignment_symbol_errors =
481 port_stats->rx_alignment_symbol_errors;
482 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
483 drvs->tx_pauseframes = port_stats->tx_pauseframes;
484 drvs->tx_controlframes = port_stats->tx_controlframes;
485 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
486 drvs->jabber_events = port_stats->jabber_events;
487 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
488 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
489 drvs->forwarded_packets = rxf_stats->forwarded_packets;
490 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
491 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
492 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
493 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
748b539a 494 if (be_roce_supported(adapter)) {
461ae379
AK
495 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
496 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
497 drvs->rx_roce_frames = port_stats->roce_frames_received;
498 drvs->roce_drops_crc = port_stats->roce_drops_crc;
499 drvs->roce_drops_payload_len =
500 port_stats->roce_drops_payload_len;
501 }
61000861
AK
502}
503
005d5696
SX
504static void populate_lancer_stats(struct be_adapter *adapter)
505{
005d5696 506 struct be_drv_stats *drvs = &adapter->drv_stats;
748b539a 507 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
ac124ff9
SP
508
509 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
510 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
511 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
512 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
005d5696 513 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
ac124ff9 514 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
005d5696
SX
515 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
516 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
517 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
518 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
519 drvs->rx_dropped_tcp_length =
520 pport_stats->rx_dropped_invalid_tcp_length;
521 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
522 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
523 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
524 drvs->rx_dropped_header_too_small =
525 pport_stats->rx_dropped_header_too_small;
526 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
18fb06a1
SR
527 drvs->rx_address_filtered =
528 pport_stats->rx_address_filtered +
529 pport_stats->rx_vlan_filtered;
ac124ff9 530 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
005d5696 531 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
ac124ff9
SP
532 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
533 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
005d5696 534 drvs->jabber_events = pport_stats->rx_jabbers;
ac124ff9
SP
535 drvs->forwarded_packets = pport_stats->num_forwards_lo;
536 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
005d5696 537 drvs->rx_drops_too_many_frags =
ac124ff9 538 pport_stats->rx_drops_too_many_frags_lo;
005d5696 539}
89a88ab8 540
09c1c68f
SP
541static void accumulate_16bit_val(u32 *acc, u16 val)
542{
543#define lo(x) (x & 0xFFFF)
544#define hi(x) (x & 0xFFFF0000)
545 bool wrapped = val < lo(*acc);
546 u32 newacc = hi(*acc) + val;
547
548 if (wrapped)
549 newacc += 65536;
550 ACCESS_ONCE(*acc) = newacc;
551}
552
4188e7df 553static void populate_erx_stats(struct be_adapter *adapter,
748b539a 554 struct be_rx_obj *rxo, u32 erx_stat)
a6c578ef
AK
555{
556 if (!BEx_chip(adapter))
557 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
558 else
559 /* below erx HW counter can actually wrap around after
560 * 65535. Driver accumulates a 32-bit value
561 */
562 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
563 (u16)erx_stat);
564}
565
89a88ab8
AK
566void be_parse_stats(struct be_adapter *adapter)
567{
61000861 568 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
ac124ff9
SP
569 struct be_rx_obj *rxo;
570 int i;
a6c578ef 571 u32 erx_stat;
ac124ff9 572
ca34fe38
SP
573 if (lancer_chip(adapter)) {
574 populate_lancer_stats(adapter);
005d5696 575 } else {
ca34fe38
SP
576 if (BE2_chip(adapter))
577 populate_be_v0_stats(adapter);
61000861
AK
578 else if (BE3_chip(adapter))
579 /* for BE3 */
ca34fe38 580 populate_be_v1_stats(adapter);
61000861
AK
581 else
582 populate_be_v2_stats(adapter);
d51ebd33 583
61000861 584 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
ca34fe38 585 for_all_rx_queues(adapter, rxo, i) {
a6c578ef
AK
586 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
587 populate_erx_stats(adapter, rxo, erx_stat);
ca34fe38 588 }
09c1c68f 589 }
89a88ab8
AK
590}
591
ab1594e9 592static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
748b539a 593 struct rtnl_link_stats64 *stats)
6b7c5b94 594{
ab1594e9 595 struct be_adapter *adapter = netdev_priv(netdev);
89a88ab8 596 struct be_drv_stats *drvs = &adapter->drv_stats;
3abcdeda 597 struct be_rx_obj *rxo;
3c8def97 598 struct be_tx_obj *txo;
ab1594e9
SP
599 u64 pkts, bytes;
600 unsigned int start;
3abcdeda 601 int i;
6b7c5b94 602
3abcdeda 603 for_all_rx_queues(adapter, rxo, i) {
ab1594e9 604 const struct be_rx_stats *rx_stats = rx_stats(rxo);
03d28ffe 605
ab1594e9 606 do {
57a7744e 607 start = u64_stats_fetch_begin_irq(&rx_stats->sync);
ab1594e9
SP
608 pkts = rx_stats(rxo)->rx_pkts;
609 bytes = rx_stats(rxo)->rx_bytes;
57a7744e 610 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
ab1594e9
SP
611 stats->rx_packets += pkts;
612 stats->rx_bytes += bytes;
613 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
614 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
615 rx_stats(rxo)->rx_drops_no_frags;
3abcdeda
SP
616 }
617
3c8def97 618 for_all_tx_queues(adapter, txo, i) {
ab1594e9 619 const struct be_tx_stats *tx_stats = tx_stats(txo);
03d28ffe 620
ab1594e9 621 do {
57a7744e 622 start = u64_stats_fetch_begin_irq(&tx_stats->sync);
ab1594e9
SP
623 pkts = tx_stats(txo)->tx_pkts;
624 bytes = tx_stats(txo)->tx_bytes;
57a7744e 625 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
ab1594e9
SP
626 stats->tx_packets += pkts;
627 stats->tx_bytes += bytes;
3c8def97 628 }
6b7c5b94
SP
629
630 /* bad pkts received */
ab1594e9 631 stats->rx_errors = drvs->rx_crc_errors +
89a88ab8
AK
632 drvs->rx_alignment_symbol_errors +
633 drvs->rx_in_range_errors +
634 drvs->rx_out_range_errors +
635 drvs->rx_frame_too_long +
636 drvs->rx_dropped_too_small +
637 drvs->rx_dropped_too_short +
638 drvs->rx_dropped_header_too_small +
639 drvs->rx_dropped_tcp_length +
ab1594e9 640 drvs->rx_dropped_runt;
68110868 641
6b7c5b94 642 /* detailed rx errors */
ab1594e9 643 stats->rx_length_errors = drvs->rx_in_range_errors +
89a88ab8
AK
644 drvs->rx_out_range_errors +
645 drvs->rx_frame_too_long;
68110868 646
ab1594e9 647 stats->rx_crc_errors = drvs->rx_crc_errors;
6b7c5b94
SP
648
649 /* frame alignment errors */
ab1594e9 650 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
68110868 651
6b7c5b94
SP
652 /* receiver fifo overrun */
653 /* drops_no_pbuf is no per i/f, it's per BE card */
ab1594e9 654 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
89a88ab8
AK
655 drvs->rx_input_fifo_overflow_drop +
656 drvs->rx_drops_no_pbuf;
ab1594e9 657 return stats;
6b7c5b94
SP
658}
659
b236916a 660void be_link_status_update(struct be_adapter *adapter, u8 link_status)
6b7c5b94 661{
6b7c5b94
SP
662 struct net_device *netdev = adapter->netdev;
663
b236916a 664 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
ea172a01 665 netif_carrier_off(netdev);
b236916a 666 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
6b7c5b94 667 }
b236916a 668
bdce2ad7 669 if (link_status)
b236916a
AK
670 netif_carrier_on(netdev);
671 else
672 netif_carrier_off(netdev);
18824894
IV
673
674 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
6b7c5b94
SP
675}
676
5f07b3c5 677static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
6b7c5b94 678{
3c8def97
SP
679 struct be_tx_stats *stats = tx_stats(txo);
680
ab1594e9 681 u64_stats_update_begin(&stats->sync);
ac124ff9 682 stats->tx_reqs++;
5f07b3c5
SP
683 stats->tx_bytes += skb->len;
684 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
ab1594e9 685 u64_stats_update_end(&stats->sync);
6b7c5b94
SP
686}
687
5f07b3c5
SP
688/* Returns number of WRBs needed for the skb */
689static u32 skb_wrb_cnt(struct sk_buff *skb)
6b7c5b94 690{
5f07b3c5
SP
691 /* +1 for the header wrb */
692 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
6b7c5b94
SP
693}
694
695static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
696{
f986afcb
SP
697 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
698 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
699 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
700 wrb->rsvd0 = 0;
701}
702
703/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
704 * to avoid the swap and shift/mask operations in wrb_fill().
705 */
706static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
707{
708 wrb->frag_pa_hi = 0;
709 wrb->frag_pa_lo = 0;
710 wrb->frag_len = 0;
89b1f496 711 wrb->rsvd0 = 0;
6b7c5b94
SP
712}
713
1ded132d 714static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
748b539a 715 struct sk_buff *skb)
1ded132d
AK
716{
717 u8 vlan_prio;
718 u16 vlan_tag;
719
df8a39de 720 vlan_tag = skb_vlan_tag_get(skb);
1ded132d
AK
721 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
722 /* If vlan priority provided by OS is NOT in available bmap */
723 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
724 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
725 adapter->recommended_prio;
726
727 return vlan_tag;
728}
729
c9c47142
SP
730/* Used only for IP tunnel packets */
731static u16 skb_inner_ip_proto(struct sk_buff *skb)
732{
733 return (inner_ip_hdr(skb)->version == 4) ?
734 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
735}
736
737static u16 skb_ip_proto(struct sk_buff *skb)
738{
739 return (ip_hdr(skb)->version == 4) ?
740 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
741}
742
cf5671e6
SB
743static inline bool be_is_txq_full(struct be_tx_obj *txo)
744{
745 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
746}
747
748static inline bool be_can_txq_wake(struct be_tx_obj *txo)
749{
750 return atomic_read(&txo->q.used) < txo->q.len / 2;
751}
752
753static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
754{
755 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
756}
757
804abcdb
SB
758static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
759 struct sk_buff *skb,
760 struct be_wrb_params *wrb_params)
6b7c5b94 761{
804abcdb 762 u16 proto;
6b7c5b94 763
49e4b847 764 if (skb_is_gso(skb)) {
804abcdb
SB
765 BE_WRB_F_SET(wrb_params->features, LSO, 1);
766 wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
fe6d2a38 767 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
804abcdb 768 BE_WRB_F_SET(wrb_params->features, LSO6, 1);
6b7c5b94 769 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
c9c47142 770 if (skb->encapsulation) {
804abcdb 771 BE_WRB_F_SET(wrb_params->features, IPCS, 1);
c9c47142
SP
772 proto = skb_inner_ip_proto(skb);
773 } else {
774 proto = skb_ip_proto(skb);
775 }
776 if (proto == IPPROTO_TCP)
804abcdb 777 BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
c9c47142 778 else if (proto == IPPROTO_UDP)
804abcdb 779 BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
6b7c5b94
SP
780 }
781
df8a39de 782 if (skb_vlan_tag_present(skb)) {
804abcdb
SB
783 BE_WRB_F_SET(wrb_params->features, VLAN, 1);
784 wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
6b7c5b94
SP
785 }
786
804abcdb
SB
787 BE_WRB_F_SET(wrb_params->features, CRC, 1);
788}
5f07b3c5 789
804abcdb
SB
790static void wrb_fill_hdr(struct be_adapter *adapter,
791 struct be_eth_hdr_wrb *hdr,
792 struct be_wrb_params *wrb_params,
793 struct sk_buff *skb)
794{
795 memset(hdr, 0, sizeof(*hdr));
796
797 SET_TX_WRB_HDR_BITS(crc, hdr,
798 BE_WRB_F_GET(wrb_params->features, CRC));
799 SET_TX_WRB_HDR_BITS(ipcs, hdr,
800 BE_WRB_F_GET(wrb_params->features, IPCS));
801 SET_TX_WRB_HDR_BITS(tcpcs, hdr,
802 BE_WRB_F_GET(wrb_params->features, TCPCS));
803 SET_TX_WRB_HDR_BITS(udpcs, hdr,
804 BE_WRB_F_GET(wrb_params->features, UDPCS));
805
806 SET_TX_WRB_HDR_BITS(lso, hdr,
807 BE_WRB_F_GET(wrb_params->features, LSO));
808 SET_TX_WRB_HDR_BITS(lso6, hdr,
809 BE_WRB_F_GET(wrb_params->features, LSO6));
810 SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);
811
812 /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
813 * hack is not needed, the evt bit is set while ringing DB.
5f07b3c5 814 */
804abcdb
SB
815 SET_TX_WRB_HDR_BITS(event, hdr,
816 BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
817 SET_TX_WRB_HDR_BITS(vlan, hdr,
818 BE_WRB_F_GET(wrb_params->features, VLAN));
819 SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);
820
821 SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
822 SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
6b7c5b94
SP
823}
824
2b7bcebf 825static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
748b539a 826 bool unmap_single)
7101e111
SP
827{
828 dma_addr_t dma;
f986afcb 829 u32 frag_len = le32_to_cpu(wrb->frag_len);
7101e111 830
7101e111 831
f986afcb
SP
832 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
833 (u64)le32_to_cpu(wrb->frag_pa_lo);
834 if (frag_len) {
7101e111 835 if (unmap_single)
f986afcb 836 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
7101e111 837 else
f986afcb 838 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
7101e111
SP
839 }
840}
6b7c5b94 841
79a0d7d8
SB
842/* Grab a WRB header for xmit */
843static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
844{
845 u16 head = txo->q.head;
846
847 queue_head_inc(&txo->q);
848 return head;
849}
850
851/* Set up the WRB header for xmit */
852static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
853 struct be_tx_obj *txo,
854 struct be_wrb_params *wrb_params,
855 struct sk_buff *skb, u16 head)
856{
857 u32 num_frags = skb_wrb_cnt(skb);
858 struct be_queue_info *txq = &txo->q;
859 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);
860
861 wrb_fill_hdr(adapter, hdr, wrb_params, skb);
862 be_dws_cpu_to_le(hdr, sizeof(*hdr));
863
864 BUG_ON(txo->sent_skb_list[head]);
865 txo->sent_skb_list[head] = skb;
866 txo->last_req_hdr = head;
867 atomic_add(num_frags, &txq->used);
868 txo->last_req_wrb_cnt = num_frags;
869 txo->pend_wrb_cnt += num_frags;
870}
871
872/* Setup a WRB fragment (buffer descriptor) for xmit */
873static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
874 int len)
875{
876 struct be_eth_wrb *wrb;
877 struct be_queue_info *txq = &txo->q;
878
879 wrb = queue_head_node(txq);
880 wrb_fill(wrb, busaddr, len);
881 queue_head_inc(txq);
882}
883
884/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
885 * was invoked. The producer index is restored to the previous packet and the
886 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
887 */
888static void be_xmit_restore(struct be_adapter *adapter,
889 struct be_tx_obj *txo, u16 head, bool map_single,
890 u32 copied)
891{
892 struct device *dev;
893 struct be_eth_wrb *wrb;
894 struct be_queue_info *txq = &txo->q;
895
896 dev = &adapter->pdev->dev;
897 txq->head = head;
898
899 /* skip the first wrb (hdr); it's not mapped */
900 queue_head_inc(txq);
901 while (copied) {
902 wrb = queue_head_node(txq);
903 unmap_tx_frag(dev, wrb, map_single);
904 map_single = false;
905 copied -= le32_to_cpu(wrb->frag_len);
906 queue_head_inc(txq);
907 }
908
909 txq->head = head;
910}
911
912/* Enqueue the given packet for transmit. This routine allocates WRBs for the
913 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
914 * of WRBs used up by the packet.
915 */
5f07b3c5 916static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
804abcdb
SB
917 struct sk_buff *skb,
918 struct be_wrb_params *wrb_params)
6b7c5b94 919{
5f07b3c5 920 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
2b7bcebf 921 struct device *dev = &adapter->pdev->dev;
5f07b3c5 922 struct be_queue_info *txq = &txo->q;
7101e111 923 bool map_single = false;
5f07b3c5 924 u16 head = txq->head;
79a0d7d8
SB
925 dma_addr_t busaddr;
926 int len;
6b7c5b94 927
79a0d7d8 928 head = be_tx_get_wrb_hdr(txo);
6b7c5b94 929
ebc8d2ab 930 if (skb->len > skb->data_len) {
79a0d7d8 931 len = skb_headlen(skb);
03d28ffe 932
2b7bcebf
IV
933 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
934 if (dma_mapping_error(dev, busaddr))
7101e111
SP
935 goto dma_err;
936 map_single = true;
79a0d7d8 937 be_tx_setup_wrb_frag(txo, busaddr, len);
ebc8d2ab
DM
938 copied += len;
939 }
6b7c5b94 940
ebc8d2ab 941 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
748b539a 942 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
79a0d7d8 943 len = skb_frag_size(frag);
03d28ffe 944
79a0d7d8 945 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
2b7bcebf 946 if (dma_mapping_error(dev, busaddr))
7101e111 947 goto dma_err;
79a0d7d8
SB
948 be_tx_setup_wrb_frag(txo, busaddr, len);
949 copied += len;
6b7c5b94
SP
950 }
951
79a0d7d8 952 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
6b7c5b94 953
5f07b3c5
SP
954 be_tx_stats_update(txo, skb);
955 return wrb_cnt;
6b7c5b94 956
7101e111 957dma_err:
79a0d7d8
SB
958 adapter->drv_stats.dma_map_errors++;
959 be_xmit_restore(adapter, txo, head, map_single, copied);
7101e111 960 return 0;
6b7c5b94
SP
961}
962
f7062ee5
SP
963static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
964{
965 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
966}
967
93040ae5 968static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
bc0c3405 969 struct sk_buff *skb,
804abcdb
SB
970 struct be_wrb_params
971 *wrb_params)
93040ae5
SK
972{
973 u16 vlan_tag = 0;
974
975 skb = skb_share_check(skb, GFP_ATOMIC);
976 if (unlikely(!skb))
977 return skb;
978
df8a39de 979 if (skb_vlan_tag_present(skb))
93040ae5 980 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
52fe29e4
SB
981
982 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
983 if (!vlan_tag)
984 vlan_tag = adapter->pvid;
985 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
986 * skip VLAN insertion
987 */
804abcdb 988 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
52fe29e4 989 }
bc0c3405
AK
990
991 if (vlan_tag) {
62749e2c
JP
992 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
993 vlan_tag);
bc0c3405
AK
994 if (unlikely(!skb))
995 return skb;
bc0c3405
AK
996 skb->vlan_tci = 0;
997 }
998
999 /* Insert the outer VLAN, if any */
1000 if (adapter->qnq_vid) {
1001 vlan_tag = adapter->qnq_vid;
62749e2c
JP
1002 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1003 vlan_tag);
bc0c3405
AK
1004 if (unlikely(!skb))
1005 return skb;
804abcdb 1006 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
bc0c3405
AK
1007 }
1008
93040ae5
SK
1009 return skb;
1010}
1011
bc0c3405
AK
1012static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1013{
1014 struct ethhdr *eh = (struct ethhdr *)skb->data;
1015 u16 offset = ETH_HLEN;
1016
1017 if (eh->h_proto == htons(ETH_P_IPV6)) {
1018 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1019
1020 offset += sizeof(struct ipv6hdr);
1021 if (ip6h->nexthdr != NEXTHDR_TCP &&
1022 ip6h->nexthdr != NEXTHDR_UDP) {
1023 struct ipv6_opt_hdr *ehdr =
504fbf1e 1024 (struct ipv6_opt_hdr *)(skb->data + offset);
bc0c3405
AK
1025
1026 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1027 if (ehdr->hdrlen == 0xff)
1028 return true;
1029 }
1030 }
1031 return false;
1032}
1033
1034static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1035{
df8a39de 1036 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
bc0c3405
AK
1037}
1038
748b539a 1039static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
bc0c3405 1040{
ee9c799c 1041 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
bc0c3405
AK
1042}
1043
ec495fac
VV
1044static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
1045 struct sk_buff *skb,
804abcdb
SB
1046 struct be_wrb_params
1047 *wrb_params)
6b7c5b94 1048{
d2cb6ce7 1049 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
ee9c799c
SP
1050 unsigned int eth_hdr_len;
1051 struct iphdr *ip;
93040ae5 1052
1297f9db
AK
1053 /* For padded packets, BE HW modifies tot_len field in IP header
1054 * incorrecly when VLAN tag is inserted by HW.
3904dcc4 1055 * For padded packets, Lancer computes incorrect checksum.
1ded132d 1056 */
ee9c799c
SP
1057 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
1058 VLAN_ETH_HLEN : ETH_HLEN;
3904dcc4 1059 if (skb->len <= 60 &&
df8a39de 1060 (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
ee9c799c 1061 is_ipv4_pkt(skb)) {
93040ae5
SK
1062 ip = (struct iphdr *)ip_hdr(skb);
1063 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
1064 }
1ded132d 1065
d2cb6ce7 1066 /* If vlan tag is already inlined in the packet, skip HW VLAN
f93f160b 1067 * tagging in pvid-tagging mode
d2cb6ce7 1068 */
f93f160b 1069 if (be_pvid_tagging_enabled(adapter) &&
d2cb6ce7 1070 veh->h_vlan_proto == htons(ETH_P_8021Q))
804abcdb 1071 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
d2cb6ce7 1072
93040ae5
SK
1073 /* HW has a bug wherein it will calculate CSUM for VLAN
1074 * pkts even though it is disabled.
1075 * Manually insert VLAN in pkt.
1076 */
1077 if (skb->ip_summed != CHECKSUM_PARTIAL &&
df8a39de 1078 skb_vlan_tag_present(skb)) {
804abcdb 1079 skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
bc0c3405 1080 if (unlikely(!skb))
c9128951 1081 goto err;
bc0c3405
AK
1082 }
1083
1084 /* HW may lockup when VLAN HW tagging is requested on
1085 * certain ipv6 packets. Drop such pkts if the HW workaround to
1086 * skip HW tagging is not enabled by FW.
1087 */
1088 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
cd3307aa
KA
1089 (adapter->pvid || adapter->qnq_vid) &&
1090 !qnq_async_evt_rcvd(adapter)))
bc0c3405
AK
1091 goto tx_drop;
1092
1093 /* Manual VLAN tag insertion to prevent:
1094 * ASIC lockup when the ASIC inserts VLAN tag into
1095 * certain ipv6 packets. Insert VLAN tags in driver,
1096 * and set event, completion, vlan bits accordingly
1097 * in the Tx WRB.
1098 */
1099 if (be_ipv6_tx_stall_chk(adapter, skb) &&
1100 be_vlan_tag_tx_chk(adapter, skb)) {
804abcdb 1101 skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
1ded132d 1102 if (unlikely(!skb))
c9128951 1103 goto err;
1ded132d
AK
1104 }
1105
ee9c799c
SP
1106 return skb;
1107tx_drop:
1108 dev_kfree_skb_any(skb);
c9128951 1109err:
ee9c799c
SP
1110 return NULL;
1111}
1112
ec495fac
VV
1113static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1114 struct sk_buff *skb,
804abcdb 1115 struct be_wrb_params *wrb_params)
ec495fac
VV
1116{
1117 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1118 * less may cause a transmit stall on that port. So the work-around is
1119 * to pad short packets (<= 32 bytes) to a 36-byte length.
1120 */
1121 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
74b6939d 1122 if (skb_put_padto(skb, 36))
ec495fac 1123 return NULL;
ec495fac
VV
1124 }
1125
1126 if (BEx_chip(adapter) || lancer_chip(adapter)) {
804abcdb 1127 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
ec495fac
VV
1128 if (!skb)
1129 return NULL;
1130 }
1131
1132 return skb;
1133}
1134
5f07b3c5
SP
1135static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
1136{
1137 struct be_queue_info *txq = &txo->q;
1138 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);
1139
1140 /* Mark the last request eventable if it hasn't been marked already */
1141 if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
1142 hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);
1143
1144 /* compose a dummy wrb if there are odd set of wrbs to notify */
1145 if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
f986afcb 1146 wrb_fill_dummy(queue_head_node(txq));
5f07b3c5
SP
1147 queue_head_inc(txq);
1148 atomic_inc(&txq->used);
1149 txo->pend_wrb_cnt++;
1150 hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
1151 TX_HDR_WRB_NUM_SHIFT);
1152 hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
1153 TX_HDR_WRB_NUM_SHIFT);
1154 }
1155 be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
1156 txo->pend_wrb_cnt = 0;
1157}
1158
ee9c799c
SP
1159static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1160{
1161 struct be_adapter *adapter = netdev_priv(netdev);
5f07b3c5
SP
1162 u16 q_idx = skb_get_queue_mapping(skb);
1163 struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
804abcdb 1164 struct be_wrb_params wrb_params = { 0 };
804abcdb 1165 bool flush = !skb->xmit_more;
5f07b3c5 1166 u16 wrb_cnt;
ee9c799c 1167
804abcdb 1168 skb = be_xmit_workarounds(adapter, skb, &wrb_params);
5f07b3c5
SP
1169 if (unlikely(!skb))
1170 goto drop;
6b7c5b94 1171
804abcdb
SB
1172 be_get_wrb_params_from_skb(adapter, skb, &wrb_params);
1173
1174 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
5f07b3c5
SP
1175 if (unlikely(!wrb_cnt)) {
1176 dev_kfree_skb_any(skb);
1177 goto drop;
1178 }
cd8f76c0 1179
cf5671e6 1180 if (be_is_txq_full(txo)) {
5f07b3c5
SP
1181 netif_stop_subqueue(netdev, q_idx);
1182 tx_stats(txo)->tx_stops++;
1183 }
c190e3c8 1184
5f07b3c5
SP
1185 if (flush || __netif_subqueue_stopped(netdev, q_idx))
1186 be_xmit_flush(adapter, txo);
6b7c5b94 1187
5f07b3c5
SP
1188 return NETDEV_TX_OK;
1189drop:
1190 tx_stats(txo)->tx_drv_drops++;
1191 /* Flush the already enqueued tx requests */
1192 if (flush && txo->pend_wrb_cnt)
1193 be_xmit_flush(adapter, txo);
6b7c5b94 1194
6b7c5b94
SP
1195 return NETDEV_TX_OK;
1196}
1197
1198static int be_change_mtu(struct net_device *netdev, int new_mtu)
1199{
1200 struct be_adapter *adapter = netdev_priv(netdev);
0d3f5cce
KA
1201 struct device *dev = &adapter->pdev->dev;
1202
1203 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1204 dev_info(dev, "MTU must be between %d and %d bytes\n",
1205 BE_MIN_MTU, BE_MAX_MTU);
6b7c5b94
SP
1206 return -EINVAL;
1207 }
0d3f5cce
KA
1208
1209 dev_info(dev, "MTU changed from %d to %d bytes\n",
748b539a 1210 netdev->mtu, new_mtu);
6b7c5b94
SP
1211 netdev->mtu = new_mtu;
1212 return 0;
1213}
1214
f66b7cfd
SP
1215static inline bool be_in_all_promisc(struct be_adapter *adapter)
1216{
1217 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1218 BE_IF_FLAGS_ALL_PROMISCUOUS;
1219}
1220
1221static int be_set_vlan_promisc(struct be_adapter *adapter)
1222{
1223 struct device *dev = &adapter->pdev->dev;
1224 int status;
1225
1226 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1227 return 0;
1228
1229 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1230 if (!status) {
1231 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1232 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1233 } else {
1234 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1235 }
1236 return status;
1237}
1238
1239static int be_clear_vlan_promisc(struct be_adapter *adapter)
1240{
1241 struct device *dev = &adapter->pdev->dev;
1242 int status;
1243
1244 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1245 if (!status) {
1246 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1247 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1248 }
1249 return status;
1250}
1251
6b7c5b94 1252/*
82903e4b
AK
1253 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1254 * If the user configures more, place BE in vlan promiscuous mode.
6b7c5b94 1255 */
10329df8 1256static int be_vid_config(struct be_adapter *adapter)
6b7c5b94 1257{
50762667 1258 struct device *dev = &adapter->pdev->dev;
10329df8 1259 u16 vids[BE_NUM_VLANS_SUPPORTED];
f6cbd364 1260 u16 num = 0, i = 0;
82903e4b 1261 int status = 0;
1da87b7f 1262
c0e64ef4 1263 /* No need to further configure vids if in promiscuous mode */
f66b7cfd 1264 if (be_in_all_promisc(adapter))
c0e64ef4
SP
1265 return 0;
1266
92bf14ab 1267 if (adapter->vlans_added > be_max_vlans(adapter))
f66b7cfd 1268 return be_set_vlan_promisc(adapter);
0fc16ebf
PR
1269
1270 /* Construct VLAN Table to give to HW */
f6cbd364
RN
1271 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1272 vids[num++] = cpu_to_le16(i);
0fc16ebf 1273
435452aa 1274 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
0fc16ebf 1275 if (status) {
f66b7cfd 1276 dev_err(dev, "Setting HW VLAN filtering failed\n");
d9d604f8 1277 /* Set to VLAN promisc mode as setting VLAN filter failed */
77be8c1c
KA
1278 if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
1279 addl_status(status) ==
4c60005f 1280 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
f66b7cfd
SP
1281 return be_set_vlan_promisc(adapter);
1282 } else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
1283 status = be_clear_vlan_promisc(adapter);
6b7c5b94 1284 }
0fc16ebf 1285 return status;
6b7c5b94
SP
1286}
1287
80d5c368 1288static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
6b7c5b94
SP
1289{
1290 struct be_adapter *adapter = netdev_priv(netdev);
80817cbf 1291 int status = 0;
6b7c5b94 1292
a85e9986
PR
1293 /* Packets with VID 0 are always received by Lancer by default */
1294 if (lancer_chip(adapter) && vid == 0)
48291c22
VV
1295 return status;
1296
f6cbd364 1297 if (test_bit(vid, adapter->vids))
48291c22 1298 return status;
a85e9986 1299
f6cbd364 1300 set_bit(vid, adapter->vids);
a6b74e01 1301 adapter->vlans_added++;
8e586137 1302
a6b74e01
SK
1303 status = be_vid_config(adapter);
1304 if (status) {
1305 adapter->vlans_added--;
f6cbd364 1306 clear_bit(vid, adapter->vids);
a6b74e01 1307 }
48291c22 1308
80817cbf 1309 return status;
6b7c5b94
SP
1310}
1311
80d5c368 1312static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
6b7c5b94
SP
1313{
1314 struct be_adapter *adapter = netdev_priv(netdev);
1315
a85e9986
PR
1316 /* Packets with VID 0 are always received by Lancer by default */
1317 if (lancer_chip(adapter) && vid == 0)
9d4dfe4a 1318 return 0;
a85e9986 1319
f6cbd364 1320 clear_bit(vid, adapter->vids);
9d4dfe4a
KA
1321 adapter->vlans_added--;
1322
1323 return be_vid_config(adapter);
6b7c5b94
SP
1324}
1325
f66b7cfd 1326static void be_clear_all_promisc(struct be_adapter *adapter)
7ad09458 1327{
ac34b743 1328 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
f66b7cfd 1329 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
7ad09458
S
1330}
1331
f66b7cfd
SP
1332static void be_set_all_promisc(struct be_adapter *adapter)
1333{
1334 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1335 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1336}
1337
1338static void be_set_mc_promisc(struct be_adapter *adapter)
6b7c5b94 1339{
0fc16ebf 1340 int status;
6b7c5b94 1341
f66b7cfd
SP
1342 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1343 return;
6b7c5b94 1344
f66b7cfd
SP
1345 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1346 if (!status)
1347 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1348}
1349
1350static void be_set_mc_list(struct be_adapter *adapter)
1351{
1352 int status;
1353
1354 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1355 if (!status)
1356 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1357 else
1358 be_set_mc_promisc(adapter);
1359}
1360
1361static void be_set_uc_list(struct be_adapter *adapter)
1362{
1363 struct netdev_hw_addr *ha;
1364 int i = 1; /* First slot is claimed by the Primary MAC */
1365
1366 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
1367 be_cmd_pmac_del(adapter, adapter->if_handle,
1368 adapter->pmac_id[i], 0);
1369
1370 if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
1371 be_set_all_promisc(adapter);
1372 return;
6b7c5b94
SP
1373 }
1374
f66b7cfd
SP
1375 netdev_for_each_uc_addr(ha, adapter->netdev) {
1376 adapter->uc_macs++; /* First slot is for Primary MAC */
1377 be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
1378 &adapter->pmac_id[adapter->uc_macs], 0);
1379 }
1380}
6b7c5b94 1381
f66b7cfd
SP
1382static void be_clear_uc_list(struct be_adapter *adapter)
1383{
1384 int i;
fbc13f01 1385
f66b7cfd
SP
1386 for (i = 1; i < (adapter->uc_macs + 1); i++)
1387 be_cmd_pmac_del(adapter, adapter->if_handle,
1388 adapter->pmac_id[i], 0);
1389 adapter->uc_macs = 0;
1390}
fbc13f01 1391
f66b7cfd
SP
1392static void be_set_rx_mode(struct net_device *netdev)
1393{
1394 struct be_adapter *adapter = netdev_priv(netdev);
fbc13f01 1395
f66b7cfd
SP
1396 if (netdev->flags & IFF_PROMISC) {
1397 be_set_all_promisc(adapter);
1398 return;
fbc13f01
AK
1399 }
1400
f66b7cfd
SP
1401 /* Interface was previously in promiscuous mode; disable it */
1402 if (be_in_all_promisc(adapter)) {
1403 be_clear_all_promisc(adapter);
1404 if (adapter->vlans_added)
1405 be_vid_config(adapter);
0fc16ebf 1406 }
a0794885 1407
f66b7cfd
SP
1408 /* Enable multicast promisc if num configured exceeds what we support */
1409 if (netdev->flags & IFF_ALLMULTI ||
1410 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1411 be_set_mc_promisc(adapter);
a0794885 1412 return;
f66b7cfd 1413 }
a0794885 1414
f66b7cfd
SP
1415 if (netdev_uc_count(netdev) != adapter->uc_macs)
1416 be_set_uc_list(adapter);
1417
1418 be_set_mc_list(adapter);
6b7c5b94
SP
1419}
1420
ba343c77
SB
1421static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1422{
1423 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1424 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
ba343c77
SB
1425 int status;
1426
11ac75ed 1427 if (!sriov_enabled(adapter))
ba343c77
SB
1428 return -EPERM;
1429
11ac75ed 1430 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
ba343c77
SB
1431 return -EINVAL;
1432
3c31aaf3
VV
1433 /* Proceed further only if user provided MAC is different
1434 * from active MAC
1435 */
1436 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1437 return 0;
1438
3175d8c2
SP
1439 if (BEx_chip(adapter)) {
1440 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1441 vf + 1);
ba343c77 1442
11ac75ed
SP
1443 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1444 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
1445 } else {
1446 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1447 vf + 1);
590c391d
PR
1448 }
1449
abccf23e
KA
1450 if (status) {
1451 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1452 mac, vf, status);
1453 return be_cmd_status(status);
1454 }
64600ea5 1455
abccf23e
KA
1456 ether_addr_copy(vf_cfg->mac_addr, mac);
1457
1458 return 0;
ba343c77
SB
1459}
1460
64600ea5 1461static int be_get_vf_config(struct net_device *netdev, int vf,
748b539a 1462 struct ifla_vf_info *vi)
64600ea5
AK
1463{
1464 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1465 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1466
11ac75ed 1467 if (!sriov_enabled(adapter))
64600ea5
AK
1468 return -EPERM;
1469
11ac75ed 1470 if (vf >= adapter->num_vfs)
64600ea5
AK
1471 return -EINVAL;
1472
1473 vi->vf = vf;
ed616689
SC
1474 vi->max_tx_rate = vf_cfg->tx_rate;
1475 vi->min_tx_rate = 0;
a60b3a13
AK
1476 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1477 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
11ac75ed 1478 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
bdce2ad7 1479 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
e7bcbd7b 1480 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
64600ea5
AK
1481
1482 return 0;
1483}
1484
435452aa
VV
1485static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1486{
1487 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1488 u16 vids[BE_NUM_VLANS_SUPPORTED];
1489 int vf_if_id = vf_cfg->if_handle;
1490 int status;
1491
1492 /* Enable Transparent VLAN Tagging */
e7bcbd7b 1493 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
435452aa
VV
1494 if (status)
1495 return status;
1496
1497 /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
1498 vids[0] = 0;
1499 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1500 if (!status)
1501 dev_info(&adapter->pdev->dev,
1502 "Cleared guest VLANs on VF%d", vf);
1503
1504 /* After TVT is enabled, disallow VFs to program VLAN filters */
1505 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1506 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1507 ~BE_PRIV_FILTMGMT, vf + 1);
1508 if (!status)
1509 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1510 }
1511 return 0;
1512}
1513
1514static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1515{
1516 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1517 struct device *dev = &adapter->pdev->dev;
1518 int status;
1519
1520 /* Reset Transparent VLAN Tagging. */
1521 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
e7bcbd7b 1522 vf_cfg->if_handle, 0, 0);
435452aa
VV
1523 if (status)
1524 return status;
1525
1526 /* Allow VFs to program VLAN filtering */
1527 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1528 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1529 BE_PRIV_FILTMGMT, vf + 1);
1530 if (!status) {
1531 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1532 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1533 }
1534 }
1535
1536 dev_info(dev,
1537 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1538 return 0;
1539}
1540
748b539a 1541static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1da87b7f
AK
1542{
1543 struct be_adapter *adapter = netdev_priv(netdev);
b9fc0e53 1544 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
435452aa 1545 int status;
1da87b7f 1546
11ac75ed 1547 if (!sriov_enabled(adapter))
1da87b7f
AK
1548 return -EPERM;
1549
b9fc0e53 1550 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1da87b7f
AK
1551 return -EINVAL;
1552
b9fc0e53
AK
1553 if (vlan || qos) {
1554 vlan |= qos << VLAN_PRIO_SHIFT;
435452aa 1555 status = be_set_vf_tvt(adapter, vf, vlan);
1da87b7f 1556 } else {
435452aa 1557 status = be_clear_vf_tvt(adapter, vf);
1da87b7f
AK
1558 }
1559
abccf23e
KA
1560 if (status) {
1561 dev_err(&adapter->pdev->dev,
435452aa
VV
1562 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1563 status);
abccf23e
KA
1564 return be_cmd_status(status);
1565 }
1566
1567 vf_cfg->vlan_tag = vlan;
abccf23e 1568 return 0;
1da87b7f
AK
1569}
1570
ed616689
SC
1571static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1572 int min_tx_rate, int max_tx_rate)
e1d18735
AK
1573{
1574 struct be_adapter *adapter = netdev_priv(netdev);
0f77ba73
RN
1575 struct device *dev = &adapter->pdev->dev;
1576 int percent_rate, status = 0;
1577 u16 link_speed = 0;
1578 u8 link_status;
e1d18735 1579
11ac75ed 1580 if (!sriov_enabled(adapter))
e1d18735
AK
1581 return -EPERM;
1582
94f434c2 1583 if (vf >= adapter->num_vfs)
e1d18735
AK
1584 return -EINVAL;
1585
ed616689
SC
1586 if (min_tx_rate)
1587 return -EINVAL;
1588
0f77ba73
RN
1589 if (!max_tx_rate)
1590 goto config_qos;
1591
1592 status = be_cmd_link_status_query(adapter, &link_speed,
1593 &link_status, 0);
1594 if (status)
1595 goto err;
1596
1597 if (!link_status) {
1598 dev_err(dev, "TX-rate setting not allowed when link is down\n");
940a3fcd 1599 status = -ENETDOWN;
0f77ba73
RN
1600 goto err;
1601 }
1602
1603 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1604 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1605 link_speed);
1606 status = -EINVAL;
1607 goto err;
1608 }
1609
1610 /* On Skyhawk the QOS setting must be done only as a % value */
1611 percent_rate = link_speed / 100;
1612 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1613 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1614 percent_rate);
1615 status = -EINVAL;
1616 goto err;
94f434c2 1617 }
e1d18735 1618
0f77ba73
RN
1619config_qos:
1620 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
e1d18735 1621 if (status)
0f77ba73
RN
1622 goto err;
1623
1624 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1625 return 0;
1626
1627err:
1628 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
1629 max_tx_rate, vf);
abccf23e 1630 return be_cmd_status(status);
e1d18735 1631}
e2fb1afa 1632
bdce2ad7
SR
1633static int be_set_vf_link_state(struct net_device *netdev, int vf,
1634 int link_state)
1635{
1636 struct be_adapter *adapter = netdev_priv(netdev);
1637 int status;
1638
1639 if (!sriov_enabled(adapter))
1640 return -EPERM;
1641
1642 if (vf >= adapter->num_vfs)
1643 return -EINVAL;
1644
1645 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
abccf23e
KA
1646 if (status) {
1647 dev_err(&adapter->pdev->dev,
1648 "Link state change on VF %d failed: %#x\n", vf, status);
1649 return be_cmd_status(status);
1650 }
bdce2ad7 1651
abccf23e
KA
1652 adapter->vf_cfg[vf].plink_tracking = link_state;
1653
1654 return 0;
bdce2ad7 1655}
e1d18735 1656
e7bcbd7b
KA
1657static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
1658{
1659 struct be_adapter *adapter = netdev_priv(netdev);
1660 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1661 u8 spoofchk;
1662 int status;
1663
1664 if (!sriov_enabled(adapter))
1665 return -EPERM;
1666
1667 if (vf >= adapter->num_vfs)
1668 return -EINVAL;
1669
1670 if (BEx_chip(adapter))
1671 return -EOPNOTSUPP;
1672
1673 if (enable == vf_cfg->spoofchk)
1674 return 0;
1675
1676 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
1677
1678 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
1679 0, spoofchk);
1680 if (status) {
1681 dev_err(&adapter->pdev->dev,
1682 "Spoofchk change on VF %d failed: %#x\n", vf, status);
1683 return be_cmd_status(status);
1684 }
1685
1686 vf_cfg->spoofchk = enable;
1687 return 0;
1688}
1689
2632bafd
SP
1690static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1691 ulong now)
6b7c5b94 1692{
2632bafd
SP
1693 aic->rx_pkts_prev = rx_pkts;
1694 aic->tx_reqs_prev = tx_pkts;
1695 aic->jiffies = now;
1696}
ac124ff9 1697
20947770 1698static int be_get_new_eqd(struct be_eq_obj *eqo)
2632bafd 1699{
20947770
PR
1700 struct be_adapter *adapter = eqo->adapter;
1701 int eqd, start;
2632bafd 1702 struct be_aic_obj *aic;
2632bafd
SP
1703 struct be_rx_obj *rxo;
1704 struct be_tx_obj *txo;
20947770 1705 u64 rx_pkts = 0, tx_pkts = 0;
2632bafd
SP
1706 ulong now;
1707 u32 pps, delta;
20947770 1708 int i;
10ef9ab4 1709
20947770
PR
1710 aic = &adapter->aic_obj[eqo->idx];
1711 if (!aic->enable) {
1712 if (aic->jiffies)
1713 aic->jiffies = 0;
1714 eqd = aic->et_eqd;
1715 return eqd;
1716 }
6b7c5b94 1717
20947770 1718 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2632bafd 1719 do {
57a7744e 1720 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
20947770 1721 rx_pkts += rxo->stats.rx_pkts;
57a7744e 1722 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
20947770 1723 }
10ef9ab4 1724
20947770 1725 for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
2632bafd 1726 do {
57a7744e 1727 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
20947770 1728 tx_pkts += txo->stats.tx_reqs;
57a7744e 1729 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
20947770 1730 }
6b7c5b94 1731
20947770
PR
1732 /* Skip, if wrapped around or first calculation */
1733 now = jiffies;
1734 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1735 rx_pkts < aic->rx_pkts_prev ||
1736 tx_pkts < aic->tx_reqs_prev) {
1737 be_aic_update(aic, rx_pkts, tx_pkts, now);
1738 return aic->prev_eqd;
1739 }
2632bafd 1740
20947770
PR
1741 delta = jiffies_to_msecs(now - aic->jiffies);
1742 if (delta == 0)
1743 return aic->prev_eqd;
10ef9ab4 1744
20947770
PR
1745 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1746 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1747 eqd = (pps / 15000) << 2;
2632bafd 1748
20947770
PR
1749 if (eqd < 8)
1750 eqd = 0;
1751 eqd = min_t(u32, eqd, aic->max_eqd);
1752 eqd = max_t(u32, eqd, aic->min_eqd);
1753
1754 be_aic_update(aic, rx_pkts, tx_pkts, now);
1755
1756 return eqd;
1757}
1758
1759/* For Skyhawk-R only */
1760static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
1761{
1762 struct be_adapter *adapter = eqo->adapter;
1763 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
1764 ulong now = jiffies;
1765 int eqd;
1766 u32 mult_enc;
1767
1768 if (!aic->enable)
1769 return 0;
1770
1771 if (time_before_eq(now, aic->jiffies) ||
1772 jiffies_to_msecs(now - aic->jiffies) < 1)
1773 eqd = aic->prev_eqd;
1774 else
1775 eqd = be_get_new_eqd(eqo);
1776
1777 if (eqd > 100)
1778 mult_enc = R2I_DLY_ENC_1;
1779 else if (eqd > 60)
1780 mult_enc = R2I_DLY_ENC_2;
1781 else if (eqd > 20)
1782 mult_enc = R2I_DLY_ENC_3;
1783 else
1784 mult_enc = R2I_DLY_ENC_0;
1785
1786 aic->prev_eqd = eqd;
1787
1788 return mult_enc;
1789}
1790
1791void be_eqd_update(struct be_adapter *adapter, bool force_update)
1792{
1793 struct be_set_eqd set_eqd[MAX_EVT_QS];
1794 struct be_aic_obj *aic;
1795 struct be_eq_obj *eqo;
1796 int i, num = 0, eqd;
1797
1798 for_all_evt_queues(adapter, eqo, i) {
1799 aic = &adapter->aic_obj[eqo->idx];
1800 eqd = be_get_new_eqd(eqo);
1801 if (force_update || eqd != aic->prev_eqd) {
2632bafd
SP
1802 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1803 set_eqd[num].eq_id = eqo->q.id;
1804 aic->prev_eqd = eqd;
1805 num++;
1806 }
ac124ff9 1807 }
2632bafd
SP
1808
1809 if (num)
1810 be_cmd_modify_eqd(adapter, set_eqd, num);
6b7c5b94
SP
1811}
1812
3abcdeda 1813static void be_rx_stats_update(struct be_rx_obj *rxo,
748b539a 1814 struct be_rx_compl_info *rxcp)
4097f663 1815{
ac124ff9 1816 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1817
ab1594e9 1818 u64_stats_update_begin(&stats->sync);
3abcdeda 1819 stats->rx_compl++;
2e588f84 1820 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1821 stats->rx_pkts++;
2e588f84 1822 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1823 stats->rx_mcast_pkts++;
2e588f84 1824 if (rxcp->err)
ac124ff9 1825 stats->rx_compl_err++;
ab1594e9 1826 u64_stats_update_end(&stats->sync);
4097f663
SP
1827}
1828
2e588f84 1829static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1830{
19fad86f 1831 /* L4 checksum is not reliable for non TCP/UDP packets.
c9c47142
SP
1832 * Also ignore ipcksm for ipv6 pkts
1833 */
2e588f84 1834 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 1835 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
728a9972
AK
1836}
1837
0b0ef1d0 1838static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
6b7c5b94 1839{
10ef9ab4 1840 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1841 struct be_rx_page_info *rx_page_info;
3abcdeda 1842 struct be_queue_info *rxq = &rxo->q;
0b0ef1d0 1843 u16 frag_idx = rxq->tail;
6b7c5b94 1844
3abcdeda 1845 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
1846 BUG_ON(!rx_page_info->page);
1847
e50287be 1848 if (rx_page_info->last_frag) {
2b7bcebf
IV
1849 dma_unmap_page(&adapter->pdev->dev,
1850 dma_unmap_addr(rx_page_info, bus),
1851 adapter->big_page_size, DMA_FROM_DEVICE);
e50287be
SP
1852 rx_page_info->last_frag = false;
1853 } else {
1854 dma_sync_single_for_cpu(&adapter->pdev->dev,
1855 dma_unmap_addr(rx_page_info, bus),
1856 rx_frag_size, DMA_FROM_DEVICE);
205859a2 1857 }
6b7c5b94 1858
0b0ef1d0 1859 queue_tail_inc(rxq);
6b7c5b94
SP
1860 atomic_dec(&rxq->used);
1861 return rx_page_info;
1862}
1863
1864/* Throwaway the data in the Rx completion */
10ef9ab4
SP
1865static void be_rx_compl_discard(struct be_rx_obj *rxo,
1866 struct be_rx_compl_info *rxcp)
6b7c5b94 1867{
6b7c5b94 1868 struct be_rx_page_info *page_info;
2e588f84 1869 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1870
e80d9da6 1871 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 1872 page_info = get_rx_page_info(rxo);
e80d9da6
PR
1873 put_page(page_info->page);
1874 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
1875 }
1876}
1877
1878/*
1879 * skb_fill_rx_data forms a complete skb for an ether frame
1880 * indicated by rxcp.
1881 */
10ef9ab4
SP
1882static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1883 struct be_rx_compl_info *rxcp)
6b7c5b94 1884{
6b7c5b94 1885 struct be_rx_page_info *page_info;
2e588f84
SP
1886 u16 i, j;
1887 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1888 u8 *start;
6b7c5b94 1889
0b0ef1d0 1890 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1891 start = page_address(page_info->page) + page_info->page_offset;
1892 prefetch(start);
1893
1894 /* Copy data in the first descriptor of this completion */
2e588f84 1895 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 1896
6b7c5b94
SP
1897 skb->len = curr_frag_len;
1898 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 1899 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
1900 /* Complete packet has now been moved to data */
1901 put_page(page_info->page);
1902 skb->data_len = 0;
1903 skb->tail += curr_frag_len;
1904 } else {
ac1ae5f3
ED
1905 hdr_len = ETH_HLEN;
1906 memcpy(skb->data, start, hdr_len);
6b7c5b94 1907 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1908 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1909 skb_shinfo(skb)->frags[0].page_offset =
1910 page_info->page_offset + hdr_len;
748b539a
SP
1911 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1912 curr_frag_len - hdr_len);
6b7c5b94 1913 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1914 skb->truesize += rx_frag_size;
6b7c5b94
SP
1915 skb->tail += hdr_len;
1916 }
205859a2 1917 page_info->page = NULL;
6b7c5b94 1918
2e588f84
SP
1919 if (rxcp->pkt_size <= rx_frag_size) {
1920 BUG_ON(rxcp->num_rcvd != 1);
1921 return;
6b7c5b94
SP
1922 }
1923
1924 /* More frags present for this completion */
2e588f84
SP
1925 remaining = rxcp->pkt_size - curr_frag_len;
1926 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1927 page_info = get_rx_page_info(rxo);
2e588f84 1928 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1929
bd46cb6c
AK
1930 /* Coalesce all frags from the same physical page in one slot */
1931 if (page_info->page_offset == 0) {
1932 /* Fresh page */
1933 j++;
b061b39e 1934 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1935 skb_shinfo(skb)->frags[j].page_offset =
1936 page_info->page_offset;
9e903e08 1937 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1938 skb_shinfo(skb)->nr_frags++;
1939 } else {
1940 put_page(page_info->page);
1941 }
1942
9e903e08 1943 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1944 skb->len += curr_frag_len;
1945 skb->data_len += curr_frag_len;
bdb28a97 1946 skb->truesize += rx_frag_size;
2e588f84 1947 remaining -= curr_frag_len;
205859a2 1948 page_info->page = NULL;
6b7c5b94 1949 }
bd46cb6c 1950 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1951}
1952
5be93b9a 1953/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 1954static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 1955 struct be_rx_compl_info *rxcp)
6b7c5b94 1956{
10ef9ab4 1957 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1958 struct net_device *netdev = adapter->netdev;
6b7c5b94 1959 struct sk_buff *skb;
89420424 1960
bb349bb4 1961 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1962 if (unlikely(!skb)) {
ac124ff9 1963 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1964 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1965 return;
1966 }
1967
10ef9ab4 1968 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1969
6332c8d3 1970 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1971 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1972 else
1973 skb_checksum_none_assert(skb);
6b7c5b94 1974
6332c8d3 1975 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1976 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1977 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 1978 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1979
b6c0e89d 1980 skb->csum_level = rxcp->tunneled;
6384a4d0 1981 skb_mark_napi_id(skb, napi);
6b7c5b94 1982
343e43c0 1983 if (rxcp->vlanf)
86a9bad3 1984 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
1985
1986 netif_receive_skb(skb);
6b7c5b94
SP
1987}
1988
5be93b9a 1989/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1990static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1991 struct napi_struct *napi,
1992 struct be_rx_compl_info *rxcp)
6b7c5b94 1993{
10ef9ab4 1994 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1995 struct be_rx_page_info *page_info;
5be93b9a 1996 struct sk_buff *skb = NULL;
2e588f84
SP
1997 u16 remaining, curr_frag_len;
1998 u16 i, j;
3968fa1e 1999
10ef9ab4 2000 skb = napi_get_frags(napi);
5be93b9a 2001 if (!skb) {
10ef9ab4 2002 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
2003 return;
2004 }
2005
2e588f84
SP
2006 remaining = rxcp->pkt_size;
2007 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 2008 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2009
2010 curr_frag_len = min(remaining, rx_frag_size);
2011
bd46cb6c
AK
2012 /* Coalesce all frags from the same physical page in one slot */
2013 if (i == 0 || page_info->page_offset == 0) {
2014 /* First frag or Fresh page */
2015 j++;
b061b39e 2016 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
2017 skb_shinfo(skb)->frags[j].page_offset =
2018 page_info->page_offset;
9e903e08 2019 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
2020 } else {
2021 put_page(page_info->page);
2022 }
9e903e08 2023 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 2024 skb->truesize += rx_frag_size;
bd46cb6c 2025 remaining -= curr_frag_len;
6b7c5b94
SP
2026 memset(page_info, 0, sizeof(*page_info));
2027 }
bd46cb6c 2028 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 2029
5be93b9a 2030 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
2031 skb->len = rxcp->pkt_size;
2032 skb->data_len = rxcp->pkt_size;
5be93b9a 2033 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 2034 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 2035 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 2036 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 2037
b6c0e89d 2038 skb->csum_level = rxcp->tunneled;
6384a4d0 2039 skb_mark_napi_id(skb, napi);
5be93b9a 2040
343e43c0 2041 if (rxcp->vlanf)
86a9bad3 2042 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 2043
10ef9ab4 2044 napi_gro_frags(napi);
2e588f84
SP
2045}
2046
10ef9ab4
SP
2047static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2048 struct be_rx_compl_info *rxcp)
2e588f84 2049{
c3c18bc1
SP
2050 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2051 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2052 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2053 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2054 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2055 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2056 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2057 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2058 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2059 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2060 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 2061 if (rxcp->vlanf) {
c3c18bc1
SP
2062 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2063 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 2064 }
c3c18bc1 2065 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 2066 rxcp->tunneled =
c3c18bc1 2067 GET_RX_COMPL_V1_BITS(tunneled, compl);
2e588f84
SP
2068}
2069
10ef9ab4
SP
2070static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2071 struct be_rx_compl_info *rxcp)
2e588f84 2072{
c3c18bc1
SP
2073 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2074 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2075 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2076 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2077 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2078 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2079 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2080 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2081 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2082 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2083 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 2084 if (rxcp->vlanf) {
c3c18bc1
SP
2085 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2086 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 2087 }
c3c18bc1
SP
2088 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2089 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2e588f84
SP
2090}
2091
2092static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
2093{
2094 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2095 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2096 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2097
2e588f84
SP
2098 /* For checking the valid bit it is Ok to use either definition as the
2099 * valid bit is at the same position in both v0 and v1 Rx compl */
2100 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
2101 return NULL;
6b7c5b94 2102
2e588f84
SP
2103 rmb();
2104 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2105
2e588f84 2106 if (adapter->be3_native)
10ef9ab4 2107 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 2108 else
10ef9ab4 2109 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 2110
e38b1706
SK
2111 if (rxcp->ip_frag)
2112 rxcp->l4_csum = 0;
2113
15d72184 2114 if (rxcp->vlanf) {
f93f160b
VV
2115 /* In QNQ modes, if qnq bit is not set, then the packet was
2116 * tagged only with the transparent outer vlan-tag and must
2117 * not be treated as a vlan packet by host
2118 */
2119 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 2120 rxcp->vlanf = 0;
6b7c5b94 2121
15d72184 2122 if (!lancer_chip(adapter))
3c709f8f 2123 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 2124
939cf306 2125 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 2126 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
2127 rxcp->vlanf = 0;
2128 }
2e588f84
SP
2129
2130 /* As the compl has been parsed, reset it; we wont touch it again */
2131 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 2132
3abcdeda 2133 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
2134 return rxcp;
2135}
2136
1829b086 2137static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 2138{
6b7c5b94 2139 u32 order = get_order(size);
1829b086 2140
6b7c5b94 2141 if (order > 0)
1829b086
ED
2142 gfp |= __GFP_COMP;
2143 return alloc_pages(gfp, order);
6b7c5b94
SP
2144}
2145
2146/*
2147 * Allocate a page, split it to fragments of size rx_frag_size and post as
2148 * receive buffers to BE
2149 */
c30d7266 2150static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
6b7c5b94 2151{
3abcdeda 2152 struct be_adapter *adapter = rxo->adapter;
26d92f92 2153 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 2154 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 2155 struct page *pagep = NULL;
ba42fad0 2156 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
2157 struct be_eth_rx_d *rxd;
2158 u64 page_dmaaddr = 0, frag_dmaaddr;
c30d7266 2159 u32 posted, page_offset = 0, notify = 0;
6b7c5b94 2160
3abcdeda 2161 page_info = &rxo->page_info_tbl[rxq->head];
c30d7266 2162 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
6b7c5b94 2163 if (!pagep) {
1829b086 2164 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 2165 if (unlikely(!pagep)) {
ac124ff9 2166 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
2167 break;
2168 }
ba42fad0
IV
2169 page_dmaaddr = dma_map_page(dev, pagep, 0,
2170 adapter->big_page_size,
2b7bcebf 2171 DMA_FROM_DEVICE);
ba42fad0
IV
2172 if (dma_mapping_error(dev, page_dmaaddr)) {
2173 put_page(pagep);
2174 pagep = NULL;
d3de1540 2175 adapter->drv_stats.dma_map_errors++;
ba42fad0
IV
2176 break;
2177 }
e50287be 2178 page_offset = 0;
6b7c5b94
SP
2179 } else {
2180 get_page(pagep);
e50287be 2181 page_offset += rx_frag_size;
6b7c5b94 2182 }
e50287be 2183 page_info->page_offset = page_offset;
6b7c5b94 2184 page_info->page = pagep;
6b7c5b94
SP
2185
2186 rxd = queue_head_node(rxq);
e50287be 2187 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
2188 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2189 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
2190
2191 /* Any space left in the current big page for another frag? */
2192 if ((page_offset + rx_frag_size + rx_frag_size) >
2193 adapter->big_page_size) {
2194 pagep = NULL;
e50287be
SP
2195 page_info->last_frag = true;
2196 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2197 } else {
2198 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 2199 }
26d92f92
SP
2200
2201 prev_page_info = page_info;
2202 queue_head_inc(rxq);
10ef9ab4 2203 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 2204 }
e50287be
SP
2205
2206 /* Mark the last frag of a page when we break out of the above loop
2207 * with no more slots available in the RXQ
2208 */
2209 if (pagep) {
2210 prev_page_info->last_frag = true;
2211 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2212 }
6b7c5b94
SP
2213
2214 if (posted) {
6b7c5b94 2215 atomic_add(posted, &rxq->used);
6384a4d0
SP
2216 if (rxo->rx_post_starved)
2217 rxo->rx_post_starved = false;
c30d7266 2218 do {
69304cc9 2219 notify = min(MAX_NUM_POST_ERX_DB, posted);
c30d7266
AK
2220 be_rxq_notify(adapter, rxq->id, notify);
2221 posted -= notify;
2222 } while (posted);
ea1dae11
SP
2223 } else if (atomic_read(&rxq->used) == 0) {
2224 /* Let be_worker replenish when memory is available */
3abcdeda 2225 rxo->rx_post_starved = true;
6b7c5b94 2226 }
6b7c5b94
SP
2227}
2228
152ffe5b 2229static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
6b7c5b94 2230{
152ffe5b
SB
2231 struct be_queue_info *tx_cq = &txo->cq;
2232 struct be_tx_compl_info *txcp = &txo->txcp;
2233 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
6b7c5b94 2234
152ffe5b 2235 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
6b7c5b94
SP
2236 return NULL;
2237
152ffe5b 2238 /* Ensure load ordering of valid bit dword and other dwords below */
f3eb62d2 2239 rmb();
152ffe5b 2240 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2241
152ffe5b
SB
2242 txcp->status = GET_TX_COMPL_BITS(status, compl);
2243 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
6b7c5b94 2244
152ffe5b 2245 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
6b7c5b94
SP
2246 queue_tail_inc(tx_cq);
2247 return txcp;
2248}
2249
3c8def97 2250static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 2251 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 2252{
5f07b3c5 2253 struct sk_buff **sent_skbs = txo->sent_skb_list;
3c8def97 2254 struct be_queue_info *txq = &txo->q;
5f07b3c5
SP
2255 u16 frag_index, num_wrbs = 0;
2256 struct sk_buff *skb = NULL;
2257 bool unmap_skb_hdr = false;
a73b796e 2258 struct be_eth_wrb *wrb;
6b7c5b94 2259
ec43b1a6 2260 do {
5f07b3c5
SP
2261 if (sent_skbs[txq->tail]) {
2262 /* Free skb from prev req */
2263 if (skb)
2264 dev_consume_skb_any(skb);
2265 skb = sent_skbs[txq->tail];
2266 sent_skbs[txq->tail] = NULL;
2267 queue_tail_inc(txq); /* skip hdr wrb */
2268 num_wrbs++;
2269 unmap_skb_hdr = true;
2270 }
a73b796e 2271 wrb = queue_tail_node(txq);
5f07b3c5 2272 frag_index = txq->tail;
2b7bcebf 2273 unmap_tx_frag(&adapter->pdev->dev, wrb,
5f07b3c5 2274 (unmap_skb_hdr && skb_headlen(skb)));
ec43b1a6 2275 unmap_skb_hdr = false;
6b7c5b94 2276 queue_tail_inc(txq);
5f07b3c5
SP
2277 num_wrbs++;
2278 } while (frag_index != last_index);
2279 dev_consume_skb_any(skb);
6b7c5b94 2280
4d586b82 2281 return num_wrbs;
6b7c5b94
SP
2282}
2283
10ef9ab4
SP
2284/* Return the number of events in the event queue */
2285static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 2286{
10ef9ab4
SP
2287 struct be_eq_entry *eqe;
2288 int num = 0;
859b1e4e 2289
10ef9ab4
SP
2290 do {
2291 eqe = queue_tail_node(&eqo->q);
2292 if (eqe->evt == 0)
2293 break;
859b1e4e 2294
10ef9ab4
SP
2295 rmb();
2296 eqe->evt = 0;
2297 num++;
2298 queue_tail_inc(&eqo->q);
2299 } while (true);
2300
2301 return num;
859b1e4e
SP
2302}
2303
10ef9ab4
SP
2304/* Leaves the EQ is disarmed state */
2305static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2306{
10ef9ab4 2307 int num = events_get(eqo);
859b1e4e 2308
20947770 2309 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
859b1e4e
SP
2310}
2311
10ef9ab4 2312static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
2313{
2314 struct be_rx_page_info *page_info;
3abcdeda
SP
2315 struct be_queue_info *rxq = &rxo->q;
2316 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2317 struct be_rx_compl_info *rxcp;
d23e946c
SP
2318 struct be_adapter *adapter = rxo->adapter;
2319 int flush_wait = 0;
6b7c5b94 2320
d23e946c
SP
2321 /* Consume pending rx completions.
2322 * Wait for the flush completion (identified by zero num_rcvd)
2323 * to arrive. Notify CQ even when there are no more CQ entries
2324 * for HW to flush partially coalesced CQ entries.
2325 * In Lancer, there is no need to wait for flush compl.
2326 */
2327 for (;;) {
2328 rxcp = be_rx_compl_get(rxo);
ddf1169f 2329 if (!rxcp) {
d23e946c
SP
2330 if (lancer_chip(adapter))
2331 break;
2332
954f6825
VD
2333 if (flush_wait++ > 50 ||
2334 be_check_error(adapter,
2335 BE_ERROR_HW)) {
d23e946c
SP
2336 dev_warn(&adapter->pdev->dev,
2337 "did not receive flush compl\n");
2338 break;
2339 }
2340 be_cq_notify(adapter, rx_cq->id, true, 0);
2341 mdelay(1);
2342 } else {
2343 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2344 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2345 if (rxcp->num_rcvd == 0)
2346 break;
2347 }
6b7c5b94
SP
2348 }
2349
d23e946c
SP
2350 /* After cleanup, leave the CQ in unarmed state */
2351 be_cq_notify(adapter, rx_cq->id, false, 0);
2352
2353 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
2354 while (atomic_read(&rxq->used) > 0) {
2355 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2356 put_page(page_info->page);
2357 memset(page_info, 0, sizeof(*page_info));
2358 }
2359 BUG_ON(atomic_read(&rxq->used));
5f820b6c
KA
2360 rxq->tail = 0;
2361 rxq->head = 0;
6b7c5b94
SP
2362}
2363
0ae57bb3 2364static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2365{
5f07b3c5
SP
2366 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2367 struct device *dev = &adapter->pdev->dev;
152ffe5b 2368 struct be_tx_compl_info *txcp;
0ae57bb3 2369 struct be_queue_info *txq;
152ffe5b 2370 struct be_tx_obj *txo;
0ae57bb3 2371 int i, pending_txqs;
a8e9179a 2372
1a3d0717 2373 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2374 do {
0ae57bb3
SP
2375 pending_txqs = adapter->num_tx_qs;
2376
2377 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2378 cmpl = 0;
2379 num_wrbs = 0;
0ae57bb3 2380 txq = &txo->q;
152ffe5b
SB
2381 while ((txcp = be_tx_compl_get(txo))) {
2382 num_wrbs +=
2383 be_tx_compl_process(adapter, txo,
2384 txcp->end_index);
0ae57bb3
SP
2385 cmpl++;
2386 }
2387 if (cmpl) {
2388 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2389 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2390 timeo = 0;
0ae57bb3 2391 }
cf5671e6 2392 if (!be_is_tx_compl_pending(txo))
0ae57bb3 2393 pending_txqs--;
a8e9179a
SP
2394 }
2395
954f6825
VD
2396 if (pending_txqs == 0 || ++timeo > 10 ||
2397 be_check_error(adapter, BE_ERROR_HW))
a8e9179a
SP
2398 break;
2399
2400 mdelay(1);
2401 } while (true);
2402
5f07b3c5 2403 /* Free enqueued TX that was never notified to HW */
0ae57bb3
SP
2404 for_all_tx_queues(adapter, txo, i) {
2405 txq = &txo->q;
0ae57bb3 2406
5f07b3c5
SP
2407 if (atomic_read(&txq->used)) {
2408 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2409 i, atomic_read(&txq->used));
2410 notified_idx = txq->tail;
0ae57bb3 2411 end_idx = txq->tail;
5f07b3c5
SP
2412 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2413 txq->len);
2414 /* Use the tx-compl process logic to handle requests
2415 * that were not sent to the HW.
2416 */
0ae57bb3
SP
2417 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2418 atomic_sub(num_wrbs, &txq->used);
5f07b3c5
SP
2419 BUG_ON(atomic_read(&txq->used));
2420 txo->pend_wrb_cnt = 0;
2421 /* Since hw was never notified of these requests,
2422 * reset TXQ indices
2423 */
2424 txq->head = notified_idx;
2425 txq->tail = notified_idx;
0ae57bb3 2426 }
b03388d6 2427 }
6b7c5b94
SP
2428}
2429
10ef9ab4
SP
2430static void be_evt_queues_destroy(struct be_adapter *adapter)
2431{
2432 struct be_eq_obj *eqo;
2433 int i;
2434
2435 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2436 if (eqo->q.created) {
2437 be_eq_clean(eqo);
10ef9ab4 2438 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2439 napi_hash_del(&eqo->napi);
68d7bdcb 2440 netif_napi_del(&eqo->napi);
19d59aa7 2441 }
d658d98a 2442 free_cpumask_var(eqo->affinity_mask);
10ef9ab4
SP
2443 be_queue_free(adapter, &eqo->q);
2444 }
2445}
2446
2447static int be_evt_queues_create(struct be_adapter *adapter)
2448{
2449 struct be_queue_info *eq;
2450 struct be_eq_obj *eqo;
2632bafd 2451 struct be_aic_obj *aic;
10ef9ab4
SP
2452 int i, rc;
2453
92bf14ab
SP
2454 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2455 adapter->cfg_num_qs);
10ef9ab4
SP
2456
2457 for_all_evt_queues(adapter, eqo, i) {
d658d98a
PR
2458 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2459 return -ENOMEM;
2460 cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev),
2461 eqo->affinity_mask);
2462
68d7bdcb
SP
2463 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2464 BE_NAPI_WEIGHT);
6384a4d0 2465 napi_hash_add(&eqo->napi);
2632bafd 2466 aic = &adapter->aic_obj[i];
10ef9ab4 2467 eqo->adapter = adapter;
10ef9ab4 2468 eqo->idx = i;
2632bafd
SP
2469 aic->max_eqd = BE_MAX_EQD;
2470 aic->enable = true;
10ef9ab4
SP
2471
2472 eq = &eqo->q;
2473 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2474 sizeof(struct be_eq_entry));
10ef9ab4
SP
2475 if (rc)
2476 return rc;
2477
f2f781a7 2478 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2479 if (rc)
2480 return rc;
2481 }
1cfafab9 2482 return 0;
10ef9ab4
SP
2483}
2484
5fb379ee
SP
2485static void be_mcc_queues_destroy(struct be_adapter *adapter)
2486{
2487 struct be_queue_info *q;
5fb379ee 2488
8788fdc2 2489 q = &adapter->mcc_obj.q;
5fb379ee 2490 if (q->created)
8788fdc2 2491 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2492 be_queue_free(adapter, q);
2493
8788fdc2 2494 q = &adapter->mcc_obj.cq;
5fb379ee 2495 if (q->created)
8788fdc2 2496 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2497 be_queue_free(adapter, q);
2498}
2499
2500/* Must be called only after TX qs are created as MCC shares TX EQ */
2501static int be_mcc_queues_create(struct be_adapter *adapter)
2502{
2503 struct be_queue_info *q, *cq;
5fb379ee 2504
8788fdc2 2505 cq = &adapter->mcc_obj.cq;
5fb379ee 2506 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2507 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2508 goto err;
2509
10ef9ab4
SP
2510 /* Use the default EQ for MCC completions */
2511 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2512 goto mcc_cq_free;
2513
8788fdc2 2514 q = &adapter->mcc_obj.q;
5fb379ee
SP
2515 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2516 goto mcc_cq_destroy;
2517
8788fdc2 2518 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2519 goto mcc_q_free;
2520
2521 return 0;
2522
2523mcc_q_free:
2524 be_queue_free(adapter, q);
2525mcc_cq_destroy:
8788fdc2 2526 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2527mcc_cq_free:
2528 be_queue_free(adapter, cq);
2529err:
2530 return -1;
2531}
2532
6b7c5b94
SP
2533static void be_tx_queues_destroy(struct be_adapter *adapter)
2534{
2535 struct be_queue_info *q;
3c8def97
SP
2536 struct be_tx_obj *txo;
2537 u8 i;
6b7c5b94 2538
3c8def97
SP
2539 for_all_tx_queues(adapter, txo, i) {
2540 q = &txo->q;
2541 if (q->created)
2542 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2543 be_queue_free(adapter, q);
6b7c5b94 2544
3c8def97
SP
2545 q = &txo->cq;
2546 if (q->created)
2547 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2548 be_queue_free(adapter, q);
2549 }
6b7c5b94
SP
2550}
2551
7707133c 2552static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2553{
73f394e6 2554 struct be_queue_info *cq;
3c8def97 2555 struct be_tx_obj *txo;
73f394e6 2556 struct be_eq_obj *eqo;
92bf14ab 2557 int status, i;
6b7c5b94 2558
92bf14ab 2559 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2560
10ef9ab4
SP
2561 for_all_tx_queues(adapter, txo, i) {
2562 cq = &txo->cq;
2563 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2564 sizeof(struct be_eth_tx_compl));
2565 if (status)
2566 return status;
3c8def97 2567
827da44c
JS
2568 u64_stats_init(&txo->stats.sync);
2569 u64_stats_init(&txo->stats.sync_compl);
2570
10ef9ab4
SP
2571 /* If num_evt_qs is less than num_tx_qs, then more than
2572 * one txq share an eq
2573 */
73f394e6
SP
2574 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2575 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
10ef9ab4
SP
2576 if (status)
2577 return status;
6b7c5b94 2578
10ef9ab4
SP
2579 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2580 sizeof(struct be_eth_wrb));
2581 if (status)
2582 return status;
6b7c5b94 2583
94d73aaa 2584 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2585 if (status)
2586 return status;
73f394e6
SP
2587
2588 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
2589 eqo->idx);
3c8def97 2590 }
6b7c5b94 2591
d379142b
SP
2592 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2593 adapter->num_tx_qs);
10ef9ab4 2594 return 0;
6b7c5b94
SP
2595}
2596
10ef9ab4 2597static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2598{
2599 struct be_queue_info *q;
3abcdeda
SP
2600 struct be_rx_obj *rxo;
2601 int i;
2602
2603 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2604 q = &rxo->cq;
2605 if (q->created)
2606 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2607 be_queue_free(adapter, q);
ac6a0c4a
SP
2608 }
2609}
2610
10ef9ab4 2611static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2612{
10ef9ab4 2613 struct be_queue_info *eq, *cq;
3abcdeda
SP
2614 struct be_rx_obj *rxo;
2615 int rc, i;
6b7c5b94 2616
92bf14ab 2617 /* We can create as many RSS rings as there are EQs. */
71bb8bd0 2618 adapter->num_rss_qs = adapter->num_evt_qs;
92bf14ab 2619
71bb8bd0
VV
2620 /* We'll use RSS only if atleast 2 RSS rings are supported. */
2621 if (adapter->num_rss_qs <= 1)
2622 adapter->num_rss_qs = 0;
2623
2624 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
2625
2626 /* When the interface is not capable of RSS rings (and there is no
2627 * need to create a default RXQ) we'll still need one RXQ
10ef9ab4 2628 */
71bb8bd0
VV
2629 if (adapter->num_rx_qs == 0)
2630 adapter->num_rx_qs = 1;
92bf14ab 2631
6b7c5b94 2632 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2633 for_all_rx_queues(adapter, rxo, i) {
2634 rxo->adapter = adapter;
3abcdeda
SP
2635 cq = &rxo->cq;
2636 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2637 sizeof(struct be_eth_rx_compl));
3abcdeda 2638 if (rc)
10ef9ab4 2639 return rc;
3abcdeda 2640
827da44c 2641 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2642 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2643 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2644 if (rc)
10ef9ab4 2645 return rc;
3abcdeda 2646 }
6b7c5b94 2647
d379142b 2648 dev_info(&adapter->pdev->dev,
71bb8bd0 2649 "created %d RX queue(s)\n", adapter->num_rx_qs);
10ef9ab4 2650 return 0;
b628bde2
SP
2651}
2652
6b7c5b94
SP
2653static irqreturn_t be_intx(int irq, void *dev)
2654{
e49cc34f
SP
2655 struct be_eq_obj *eqo = dev;
2656 struct be_adapter *adapter = eqo->adapter;
2657 int num_evts = 0;
6b7c5b94 2658
d0b9cec3
SP
2659 /* IRQ is not expected when NAPI is scheduled as the EQ
2660 * will not be armed.
2661 * But, this can happen on Lancer INTx where it takes
2662 * a while to de-assert INTx or in BE2 where occasionaly
2663 * an interrupt may be raised even when EQ is unarmed.
2664 * If NAPI is already scheduled, then counting & notifying
2665 * events will orphan them.
e49cc34f 2666 */
d0b9cec3 2667 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2668 num_evts = events_get(eqo);
d0b9cec3
SP
2669 __napi_schedule(&eqo->napi);
2670 if (num_evts)
2671 eqo->spurious_intr = 0;
2672 }
20947770 2673 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
e49cc34f 2674
d0b9cec3
SP
2675 /* Return IRQ_HANDLED only for the the first spurious intr
2676 * after a valid intr to stop the kernel from branding
2677 * this irq as a bad one!
e49cc34f 2678 */
d0b9cec3
SP
2679 if (num_evts || eqo->spurious_intr++ == 0)
2680 return IRQ_HANDLED;
2681 else
2682 return IRQ_NONE;
6b7c5b94
SP
2683}
2684
10ef9ab4 2685static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2686{
10ef9ab4 2687 struct be_eq_obj *eqo = dev;
6b7c5b94 2688
20947770 2689 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
0b545a62 2690 napi_schedule(&eqo->napi);
6b7c5b94
SP
2691 return IRQ_HANDLED;
2692}
2693
2e588f84 2694static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2695{
e38b1706 2696 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2697}
2698
10ef9ab4 2699static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2700 int budget, int polling)
6b7c5b94 2701{
3abcdeda
SP
2702 struct be_adapter *adapter = rxo->adapter;
2703 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2704 struct be_rx_compl_info *rxcp;
6b7c5b94 2705 u32 work_done;
c30d7266 2706 u32 frags_consumed = 0;
6b7c5b94
SP
2707
2708 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2709 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2710 if (!rxcp)
2711 break;
2712
12004ae9
SP
2713 /* Is it a flush compl that has no data */
2714 if (unlikely(rxcp->num_rcvd == 0))
2715 goto loop_continue;
2716
2717 /* Discard compl with partial DMA Lancer B0 */
2718 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2719 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2720 goto loop_continue;
2721 }
2722
2723 /* On BE drop pkts that arrive due to imperfect filtering in
2724 * promiscuous mode on some skews
2725 */
2726 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2727 !lancer_chip(adapter))) {
10ef9ab4 2728 be_rx_compl_discard(rxo, rxcp);
12004ae9 2729 goto loop_continue;
64642811 2730 }
009dd872 2731
6384a4d0
SP
2732 /* Don't do gro when we're busy_polling */
2733 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2734 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2735 else
6384a4d0
SP
2736 be_rx_compl_process(rxo, napi, rxcp);
2737
12004ae9 2738loop_continue:
c30d7266 2739 frags_consumed += rxcp->num_rcvd;
2e588f84 2740 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2741 }
2742
10ef9ab4
SP
2743 if (work_done) {
2744 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2745
6384a4d0
SP
2746 /* When an rx-obj gets into post_starved state, just
2747 * let be_worker do the posting.
2748 */
2749 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2750 !rxo->rx_post_starved)
c30d7266
AK
2751 be_post_rx_frags(rxo, GFP_ATOMIC,
2752 max_t(u32, MAX_RX_POST,
2753 frags_consumed));
6b7c5b94 2754 }
10ef9ab4 2755
6b7c5b94
SP
2756 return work_done;
2757}
2758
152ffe5b 2759static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
512bb8a2
KA
2760{
2761 switch (status) {
2762 case BE_TX_COMP_HDR_PARSE_ERR:
2763 tx_stats(txo)->tx_hdr_parse_err++;
2764 break;
2765 case BE_TX_COMP_NDMA_ERR:
2766 tx_stats(txo)->tx_dma_err++;
2767 break;
2768 case BE_TX_COMP_ACL_ERR:
2769 tx_stats(txo)->tx_spoof_check_err++;
2770 break;
2771 }
2772}
2773
152ffe5b 2774static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
512bb8a2
KA
2775{
2776 switch (status) {
2777 case LANCER_TX_COMP_LSO_ERR:
2778 tx_stats(txo)->tx_tso_err++;
2779 break;
2780 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2781 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2782 tx_stats(txo)->tx_spoof_check_err++;
2783 break;
2784 case LANCER_TX_COMP_QINQ_ERR:
2785 tx_stats(txo)->tx_qinq_err++;
2786 break;
2787 case LANCER_TX_COMP_PARITY_ERR:
2788 tx_stats(txo)->tx_internal_parity_err++;
2789 break;
2790 case LANCER_TX_COMP_DMA_ERR:
2791 tx_stats(txo)->tx_dma_err++;
2792 break;
2793 }
2794}
2795
c8f64615
SP
2796static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2797 int idx)
6b7c5b94 2798{
c8f64615 2799 int num_wrbs = 0, work_done = 0;
152ffe5b 2800 struct be_tx_compl_info *txcp;
c8f64615 2801
152ffe5b
SB
2802 while ((txcp = be_tx_compl_get(txo))) {
2803 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
c8f64615 2804 work_done++;
3c8def97 2805
152ffe5b 2806 if (txcp->status) {
512bb8a2 2807 if (lancer_chip(adapter))
152ffe5b 2808 lancer_update_tx_err(txo, txcp->status);
512bb8a2 2809 else
152ffe5b 2810 be_update_tx_err(txo, txcp->status);
512bb8a2 2811 }
10ef9ab4 2812 }
6b7c5b94 2813
10ef9ab4
SP
2814 if (work_done) {
2815 be_cq_notify(adapter, txo->cq.id, true, work_done);
2816 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2817
10ef9ab4
SP
2818 /* As Tx wrbs have been freed up, wake up netdev queue
2819 * if it was stopped due to lack of tx wrbs. */
2820 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
cf5671e6 2821 be_can_txq_wake(txo)) {
10ef9ab4 2822 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2823 }
10ef9ab4
SP
2824
2825 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2826 tx_stats(txo)->tx_compl += work_done;
2827 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2828 }
10ef9ab4 2829}
6b7c5b94 2830
f7062ee5
SP
2831#ifdef CONFIG_NET_RX_BUSY_POLL
2832static inline bool be_lock_napi(struct be_eq_obj *eqo)
2833{
2834 bool status = true;
2835
2836 spin_lock(&eqo->lock); /* BH is already disabled */
2837 if (eqo->state & BE_EQ_LOCKED) {
2838 WARN_ON(eqo->state & BE_EQ_NAPI);
2839 eqo->state |= BE_EQ_NAPI_YIELD;
2840 status = false;
2841 } else {
2842 eqo->state = BE_EQ_NAPI;
2843 }
2844 spin_unlock(&eqo->lock);
2845 return status;
2846}
2847
2848static inline void be_unlock_napi(struct be_eq_obj *eqo)
2849{
2850 spin_lock(&eqo->lock); /* BH is already disabled */
2851
2852 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
2853 eqo->state = BE_EQ_IDLE;
2854
2855 spin_unlock(&eqo->lock);
2856}
2857
2858static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2859{
2860 bool status = true;
2861
2862 spin_lock_bh(&eqo->lock);
2863 if (eqo->state & BE_EQ_LOCKED) {
2864 eqo->state |= BE_EQ_POLL_YIELD;
2865 status = false;
2866 } else {
2867 eqo->state |= BE_EQ_POLL;
2868 }
2869 spin_unlock_bh(&eqo->lock);
2870 return status;
2871}
2872
2873static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2874{
2875 spin_lock_bh(&eqo->lock);
2876
2877 WARN_ON(eqo->state & (BE_EQ_NAPI));
2878 eqo->state = BE_EQ_IDLE;
2879
2880 spin_unlock_bh(&eqo->lock);
2881}
2882
2883static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2884{
2885 spin_lock_init(&eqo->lock);
2886 eqo->state = BE_EQ_IDLE;
2887}
2888
2889static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2890{
2891 local_bh_disable();
2892
2893 /* It's enough to just acquire napi lock on the eqo to stop
2894 * be_busy_poll() from processing any queueus.
2895 */
2896 while (!be_lock_napi(eqo))
2897 mdelay(1);
2898
2899 local_bh_enable();
2900}
2901
2902#else /* CONFIG_NET_RX_BUSY_POLL */
2903
2904static inline bool be_lock_napi(struct be_eq_obj *eqo)
2905{
2906 return true;
2907}
2908
2909static inline void be_unlock_napi(struct be_eq_obj *eqo)
2910{
2911}
2912
2913static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2914{
2915 return false;
2916}
2917
2918static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2919{
2920}
2921
2922static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2923{
2924}
2925
2926static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2927{
2928}
2929#endif /* CONFIG_NET_RX_BUSY_POLL */
2930
68d7bdcb 2931int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2932{
2933 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2934 struct be_adapter *adapter = eqo->adapter;
0b545a62 2935 int max_work = 0, work, i, num_evts;
6384a4d0 2936 struct be_rx_obj *rxo;
a4906ea0 2937 struct be_tx_obj *txo;
20947770 2938 u32 mult_enc = 0;
f31e50a8 2939
0b545a62
SP
2940 num_evts = events_get(eqo);
2941
a4906ea0
SP
2942 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
2943 be_process_tx(adapter, txo, i);
f31e50a8 2944
6384a4d0
SP
2945 if (be_lock_napi(eqo)) {
2946 /* This loop will iterate twice for EQ0 in which
2947 * completions of the last RXQ (default one) are also processed
2948 * For other EQs the loop iterates only once
2949 */
2950 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2951 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2952 max_work = max(work, max_work);
2953 }
2954 be_unlock_napi(eqo);
2955 } else {
2956 max_work = budget;
10ef9ab4 2957 }
6b7c5b94 2958
10ef9ab4
SP
2959 if (is_mcc_eqo(eqo))
2960 be_process_mcc(adapter);
93c86700 2961
10ef9ab4
SP
2962 if (max_work < budget) {
2963 napi_complete(napi);
20947770
PR
2964
2965 /* Skyhawk EQ_DB has a provision to set the rearm to interrupt
2966 * delay via a delay multiplier encoding value
2967 */
2968 if (skyhawk_chip(adapter))
2969 mult_enc = be_get_eq_delay_mult_enc(eqo);
2970
2971 be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
2972 mult_enc);
10ef9ab4
SP
2973 } else {
2974 /* As we'll continue in polling mode, count and clear events */
20947770 2975 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
93c86700 2976 }
10ef9ab4 2977 return max_work;
6b7c5b94
SP
2978}
2979
6384a4d0
SP
2980#ifdef CONFIG_NET_RX_BUSY_POLL
2981static int be_busy_poll(struct napi_struct *napi)
2982{
2983 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2984 struct be_adapter *adapter = eqo->adapter;
2985 struct be_rx_obj *rxo;
2986 int i, work = 0;
2987
2988 if (!be_lock_busy_poll(eqo))
2989 return LL_FLUSH_BUSY;
2990
2991 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2992 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2993 if (work)
2994 break;
2995 }
2996
2997 be_unlock_busy_poll(eqo);
2998 return work;
2999}
3000#endif
3001
f67ef7ba 3002void be_detect_error(struct be_adapter *adapter)
7c185276 3003{
e1cfb67a
PR
3004 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3005 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 3006 u32 i;
eb0eecc1 3007 struct device *dev = &adapter->pdev->dev;
7c185276 3008
954f6825 3009 if (be_check_error(adapter, BE_ERROR_HW))
72f02485
SP
3010 return;
3011
e1cfb67a
PR
3012 if (lancer_chip(adapter)) {
3013 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3014 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
954f6825 3015 be_set_error(adapter, BE_ERROR_UE);
e1cfb67a 3016 sliport_err1 = ioread32(adapter->db +
748b539a 3017 SLIPORT_ERROR1_OFFSET);
e1cfb67a 3018 sliport_err2 = ioread32(adapter->db +
748b539a 3019 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
3020 /* Do not log error messages if its a FW reset */
3021 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3022 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3023 dev_info(dev, "Firmware update in progress\n");
3024 } else {
eb0eecc1
SK
3025 dev_err(dev, "Error detected in the card\n");
3026 dev_err(dev, "ERR: sliport status 0x%x\n",
3027 sliport_status);
3028 dev_err(dev, "ERR: sliport error1 0x%x\n",
3029 sliport_err1);
3030 dev_err(dev, "ERR: sliport error2 0x%x\n",
3031 sliport_err2);
3032 }
e1cfb67a
PR
3033 }
3034 } else {
25848c90
SR
3035 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3036 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3037 ue_lo_mask = ioread32(adapter->pcicfg +
3038 PCICFG_UE_STATUS_LOW_MASK);
3039 ue_hi_mask = ioread32(adapter->pcicfg +
3040 PCICFG_UE_STATUS_HI_MASK);
e1cfb67a 3041
f67ef7ba
PR
3042 ue_lo = (ue_lo & ~ue_lo_mask);
3043 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 3044
eb0eecc1
SK
3045 /* On certain platforms BE hardware can indicate spurious UEs.
3046 * Allow HW to stop working completely in case of a real UE.
3047 * Hence not setting the hw_error for UE detection.
3048 */
f67ef7ba 3049
eb0eecc1 3050 if (ue_lo || ue_hi) {
eb0eecc1
SK
3051 dev_err(dev,
3052 "Unrecoverable Error detected in the adapter");
3053 dev_err(dev, "Please reboot server to recover");
3054 if (skyhawk_chip(adapter))
954f6825
VD
3055 be_set_error(adapter, BE_ERROR_UE);
3056
eb0eecc1
SK
3057 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3058 if (ue_lo & 1)
3059 dev_err(dev, "UE: %s bit set\n",
3060 ue_status_low_desc[i]);
3061 }
3062 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3063 if (ue_hi & 1)
3064 dev_err(dev, "UE: %s bit set\n",
3065 ue_status_hi_desc[i]);
3066 }
7c185276
AK
3067 }
3068 }
7c185276
AK
3069}
3070
8d56ff11
SP
3071static void be_msix_disable(struct be_adapter *adapter)
3072{
ac6a0c4a 3073 if (msix_enabled(adapter)) {
8d56ff11 3074 pci_disable_msix(adapter->pdev);
ac6a0c4a 3075 adapter->num_msix_vec = 0;
68d7bdcb 3076 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
3077 }
3078}
3079
c2bba3df 3080static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 3081{
7dc4c064 3082 int i, num_vec;
d379142b 3083 struct device *dev = &adapter->pdev->dev;
6b7c5b94 3084
92bf14ab
SP
3085 /* If RoCE is supported, program the max number of NIC vectors that
3086 * may be configured via set-channels, along with vectors needed for
3087 * RoCe. Else, just program the number we'll use initially.
3088 */
3089 if (be_roce_supported(adapter))
3090 num_vec = min_t(int, 2 * be_max_eqs(adapter),
3091 2 * num_online_cpus());
3092 else
3093 num_vec = adapter->cfg_num_qs;
3abcdeda 3094
ac6a0c4a 3095 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
3096 adapter->msix_entries[i].entry = i;
3097
7dc4c064
AG
3098 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3099 MIN_MSIX_VECTORS, num_vec);
3100 if (num_vec < 0)
3101 goto fail;
92bf14ab 3102
92bf14ab
SP
3103 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3104 adapter->num_msix_roce_vec = num_vec / 2;
3105 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3106 adapter->num_msix_roce_vec);
3107 }
3108
3109 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3110
3111 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3112 adapter->num_msix_vec);
c2bba3df 3113 return 0;
7dc4c064
AG
3114
3115fail:
3116 dev_warn(dev, "MSIx enable failed\n");
3117
3118 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
18c57c74 3119 if (be_virtfn(adapter))
7dc4c064
AG
3120 return num_vec;
3121 return 0;
6b7c5b94
SP
3122}
3123
fe6d2a38 3124static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 3125 struct be_eq_obj *eqo)
b628bde2 3126{
f2f781a7 3127 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 3128}
6b7c5b94 3129
b628bde2
SP
3130static int be_msix_register(struct be_adapter *adapter)
3131{
10ef9ab4
SP
3132 struct net_device *netdev = adapter->netdev;
3133 struct be_eq_obj *eqo;
3134 int status, i, vec;
6b7c5b94 3135
10ef9ab4
SP
3136 for_all_evt_queues(adapter, eqo, i) {
3137 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3138 vec = be_msix_vec_get(adapter, eqo);
3139 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
3140 if (status)
3141 goto err_msix;
d658d98a
PR
3142
3143 irq_set_affinity_hint(vec, eqo->affinity_mask);
3abcdeda 3144 }
b628bde2 3145
6b7c5b94 3146 return 0;
3abcdeda 3147err_msix:
10ef9ab4
SP
3148 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
3149 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3150 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 3151 status);
ac6a0c4a 3152 be_msix_disable(adapter);
6b7c5b94
SP
3153 return status;
3154}
3155
3156static int be_irq_register(struct be_adapter *adapter)
3157{
3158 struct net_device *netdev = adapter->netdev;
3159 int status;
3160
ac6a0c4a 3161 if (msix_enabled(adapter)) {
6b7c5b94
SP
3162 status = be_msix_register(adapter);
3163 if (status == 0)
3164 goto done;
ba343c77 3165 /* INTx is not supported for VF */
18c57c74 3166 if (be_virtfn(adapter))
ba343c77 3167 return status;
6b7c5b94
SP
3168 }
3169
e49cc34f 3170 /* INTx: only the first EQ is used */
6b7c5b94
SP
3171 netdev->irq = adapter->pdev->irq;
3172 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 3173 &adapter->eq_obj[0]);
6b7c5b94
SP
3174 if (status) {
3175 dev_err(&adapter->pdev->dev,
3176 "INTx request IRQ failed - err %d\n", status);
3177 return status;
3178 }
3179done:
3180 adapter->isr_registered = true;
3181 return 0;
3182}
3183
3184static void be_irq_unregister(struct be_adapter *adapter)
3185{
3186 struct net_device *netdev = adapter->netdev;
10ef9ab4 3187 struct be_eq_obj *eqo;
d658d98a 3188 int i, vec;
6b7c5b94
SP
3189
3190 if (!adapter->isr_registered)
3191 return;
3192
3193 /* INTx */
ac6a0c4a 3194 if (!msix_enabled(adapter)) {
e49cc34f 3195 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
3196 goto done;
3197 }
3198
3199 /* MSIx */
d658d98a
PR
3200 for_all_evt_queues(adapter, eqo, i) {
3201 vec = be_msix_vec_get(adapter, eqo);
3202 irq_set_affinity_hint(vec, NULL);
3203 free_irq(vec, eqo);
3204 }
3abcdeda 3205
6b7c5b94
SP
3206done:
3207 adapter->isr_registered = false;
6b7c5b94
SP
3208}
3209
10ef9ab4 3210static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
3211{
3212 struct be_queue_info *q;
3213 struct be_rx_obj *rxo;
3214 int i;
3215
3216 for_all_rx_queues(adapter, rxo, i) {
3217 q = &rxo->q;
3218 if (q->created) {
3219 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 3220 be_rx_cq_clean(rxo);
482c9e79 3221 }
10ef9ab4 3222 be_queue_free(adapter, q);
482c9e79
SP
3223 }
3224}
3225
889cd4b2
SP
3226static int be_close(struct net_device *netdev)
3227{
3228 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
3229 struct be_eq_obj *eqo;
3230 int i;
889cd4b2 3231
e1ad8e33
KA
3232 /* This protection is needed as be_close() may be called even when the
3233 * adapter is in cleared state (after eeh perm failure)
3234 */
3235 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3236 return 0;
3237
045508a8
PP
3238 be_roce_dev_close(adapter);
3239
dff345c5
IV
3240 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3241 for_all_evt_queues(adapter, eqo, i) {
04d3d624 3242 napi_disable(&eqo->napi);
6384a4d0
SP
3243 be_disable_busy_poll(eqo);
3244 }
71237b6f 3245 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 3246 }
a323d9bf
SP
3247
3248 be_async_mcc_disable(adapter);
3249
3250 /* Wait for all pending tx completions to arrive so that
3251 * all tx skbs are freed.
3252 */
fba87559 3253 netif_tx_disable(netdev);
6e1f9975 3254 be_tx_compl_clean(adapter);
a323d9bf
SP
3255
3256 be_rx_qs_destroy(adapter);
f66b7cfd 3257 be_clear_uc_list(adapter);
d11a347d 3258
a323d9bf 3259 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
3260 if (msix_enabled(adapter))
3261 synchronize_irq(be_msix_vec_get(adapter, eqo));
3262 else
3263 synchronize_irq(netdev->irq);
3264 be_eq_clean(eqo);
63fcb27f
PR
3265 }
3266
889cd4b2
SP
3267 be_irq_unregister(adapter);
3268
482c9e79
SP
3269 return 0;
3270}
3271
10ef9ab4 3272static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 3273{
1dcf7b1c
ED
3274 struct rss_info *rss = &adapter->rss_info;
3275 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 3276 struct be_rx_obj *rxo;
e9008ee9 3277 int rc, i, j;
482c9e79
SP
3278
3279 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
3280 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3281 sizeof(struct be_eth_rx_d));
3282 if (rc)
3283 return rc;
3284 }
3285
71bb8bd0
VV
3286 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3287 rxo = default_rxo(adapter);
3288 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3289 rx_frag_size, adapter->if_handle,
3290 false, &rxo->rss_id);
3291 if (rc)
3292 return rc;
3293 }
10ef9ab4
SP
3294
3295 for_all_rss_queues(adapter, rxo, i) {
482c9e79 3296 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
3297 rx_frag_size, adapter->if_handle,
3298 true, &rxo->rss_id);
482c9e79
SP
3299 if (rc)
3300 return rc;
3301 }
3302
3303 if (be_multi_rxq(adapter)) {
71bb8bd0 3304 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
e9008ee9 3305 for_all_rss_queues(adapter, rxo, i) {
e2557877 3306 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 3307 break;
e2557877
VD
3308 rss->rsstable[j + i] = rxo->rss_id;
3309 rss->rss_queue[j + i] = i;
e9008ee9
PR
3310 }
3311 }
e2557877
VD
3312 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3313 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
3314
3315 if (!BEx_chip(adapter))
e2557877
VD
3316 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3317 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
3318 } else {
3319 /* Disable RSS, if only default RX Q is created */
e2557877 3320 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3321 }
594ad54a 3322
1dcf7b1c 3323 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
748b539a 3324 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
1dcf7b1c 3325 128, rss_key);
da1388d6 3326 if (rc) {
e2557877 3327 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3328 return rc;
482c9e79
SP
3329 }
3330
1dcf7b1c 3331 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
e2557877 3332
b02e60c8
SR
3333 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3334 * which is a queue empty condition
3335 */
10ef9ab4 3336 for_all_rx_queues(adapter, rxo, i)
b02e60c8
SR
3337 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3338
889cd4b2
SP
3339 return 0;
3340}
3341
6b7c5b94
SP
3342static int be_open(struct net_device *netdev)
3343{
3344 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3345 struct be_eq_obj *eqo;
3abcdeda 3346 struct be_rx_obj *rxo;
10ef9ab4 3347 struct be_tx_obj *txo;
b236916a 3348 u8 link_status;
3abcdeda 3349 int status, i;
5fb379ee 3350
10ef9ab4 3351 status = be_rx_qs_create(adapter);
482c9e79
SP
3352 if (status)
3353 goto err;
3354
c2bba3df
SK
3355 status = be_irq_register(adapter);
3356 if (status)
3357 goto err;
5fb379ee 3358
10ef9ab4 3359 for_all_rx_queues(adapter, rxo, i)
3abcdeda 3360 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 3361
10ef9ab4
SP
3362 for_all_tx_queues(adapter, txo, i)
3363 be_cq_notify(adapter, txo->cq.id, true, 0);
3364
7a1e9b20
SP
3365 be_async_mcc_enable(adapter);
3366
10ef9ab4
SP
3367 for_all_evt_queues(adapter, eqo, i) {
3368 napi_enable(&eqo->napi);
6384a4d0 3369 be_enable_busy_poll(eqo);
20947770 3370 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
10ef9ab4 3371 }
04d3d624 3372 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 3373
323ff71e 3374 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
3375 if (!status)
3376 be_link_status_update(adapter, link_status);
3377
fba87559 3378 netif_tx_start_all_queues(netdev);
045508a8 3379 be_roce_dev_open(adapter);
c9c47142 3380
c5abe7c0 3381#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3382 if (skyhawk_chip(adapter))
3383 vxlan_get_rx_port(netdev);
c5abe7c0
SP
3384#endif
3385
889cd4b2
SP
3386 return 0;
3387err:
3388 be_close(adapter->netdev);
3389 return -EIO;
5fb379ee
SP
3390}
3391
71d8d1b5
AK
3392static int be_setup_wol(struct be_adapter *adapter, bool enable)
3393{
3394 struct be_dma_mem cmd;
3395 int status = 0;
3396 u8 mac[ETH_ALEN];
3397
c7bf7169 3398 eth_zero_addr(mac);
71d8d1b5
AK
3399
3400 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
3401 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3402 GFP_KERNEL);
ddf1169f 3403 if (!cmd.va)
6b568689 3404 return -ENOMEM;
71d8d1b5
AK
3405
3406 if (enable) {
3407 status = pci_write_config_dword(adapter->pdev,
748b539a
SP
3408 PCICFG_PM_CONTROL_OFFSET,
3409 PCICFG_PM_CONTROL_MASK);
71d8d1b5
AK
3410 if (status) {
3411 dev_err(&adapter->pdev->dev,
2381a55c 3412 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
3413 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3414 cmd.dma);
71d8d1b5
AK
3415 return status;
3416 }
3417 status = be_cmd_enable_magic_wol(adapter,
748b539a
SP
3418 adapter->netdev->dev_addr,
3419 &cmd);
71d8d1b5
AK
3420 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3421 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3422 } else {
3423 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3424 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3425 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3426 }
3427
2b7bcebf 3428 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
3429 return status;
3430}
3431
f7062ee5
SP
3432static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3433{
3434 u32 addr;
3435
3436 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3437
3438 mac[5] = (u8)(addr & 0xFF);
3439 mac[4] = (u8)((addr >> 8) & 0xFF);
3440 mac[3] = (u8)((addr >> 16) & 0xFF);
3441 /* Use the OUI from the current MAC address */
3442 memcpy(mac, adapter->netdev->dev_addr, 3);
3443}
3444
6d87f5c3
AK
3445/*
3446 * Generate a seed MAC address from the PF MAC Address using jhash.
3447 * MAC Address for VFs are assigned incrementally starting from the seed.
3448 * These addresses are programmed in the ASIC by the PF and the VF driver
3449 * queries for the MAC address during its probe.
3450 */
4c876616 3451static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3452{
f9449ab7 3453 u32 vf;
3abcdeda 3454 int status = 0;
6d87f5c3 3455 u8 mac[ETH_ALEN];
11ac75ed 3456 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3457
3458 be_vf_eth_addr_generate(adapter, mac);
3459
11ac75ed 3460 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3461 if (BEx_chip(adapter))
590c391d 3462 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3463 vf_cfg->if_handle,
3464 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3465 else
3466 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3467 vf + 1);
590c391d 3468
6d87f5c3
AK
3469 if (status)
3470 dev_err(&adapter->pdev->dev,
748b539a
SP
3471 "Mac address assignment failed for VF %d\n",
3472 vf);
6d87f5c3 3473 else
11ac75ed 3474 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3475
3476 mac[5] += 1;
3477 }
3478 return status;
3479}
3480
4c876616
SP
3481static int be_vfs_mac_query(struct be_adapter *adapter)
3482{
3483 int status, vf;
3484 u8 mac[ETH_ALEN];
3485 struct be_vf_cfg *vf_cfg;
4c876616
SP
3486
3487 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3488 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3489 mac, vf_cfg->if_handle,
3490 false, vf+1);
4c876616
SP
3491 if (status)
3492 return status;
3493 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3494 }
3495 return 0;
3496}
3497
f9449ab7 3498static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3499{
11ac75ed 3500 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3501 u32 vf;
3502
257a3feb 3503 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
3504 dev_warn(&adapter->pdev->dev,
3505 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
3506 goto done;
3507 }
3508
b4c1df93
SP
3509 pci_disable_sriov(adapter->pdev);
3510
11ac75ed 3511 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3512 if (BEx_chip(adapter))
11ac75ed
SP
3513 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3514 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3515 else
3516 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3517 vf + 1);
f9449ab7 3518
11ac75ed
SP
3519 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3520 }
39f1d94d
SP
3521done:
3522 kfree(adapter->vf_cfg);
3523 adapter->num_vfs = 0;
f174c7ec 3524 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
6d87f5c3
AK
3525}
3526
7707133c
SP
3527static void be_clear_queues(struct be_adapter *adapter)
3528{
3529 be_mcc_queues_destroy(adapter);
3530 be_rx_cqs_destroy(adapter);
3531 be_tx_queues_destroy(adapter);
3532 be_evt_queues_destroy(adapter);
3533}
3534
68d7bdcb 3535static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3536{
191eb756
SP
3537 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3538 cancel_delayed_work_sync(&adapter->work);
3539 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3540 }
68d7bdcb
SP
3541}
3542
eb7dd46c
SP
3543static void be_cancel_err_detection(struct be_adapter *adapter)
3544{
3545 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3546 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3547 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3548 }
3549}
3550
b05004ad 3551static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb 3552{
b05004ad 3553 if (adapter->pmac_id) {
f66b7cfd
SP
3554 be_cmd_pmac_del(adapter, adapter->if_handle,
3555 adapter->pmac_id[0], 0);
b05004ad
SK
3556 kfree(adapter->pmac_id);
3557 adapter->pmac_id = NULL;
3558 }
3559}
3560
c5abe7c0 3561#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3562static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3563{
630f4b70
SB
3564 struct net_device *netdev = adapter->netdev;
3565
c9c47142
SP
3566 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3567 be_cmd_manage_iface(adapter, adapter->if_handle,
3568 OP_CONVERT_TUNNEL_TO_NORMAL);
3569
3570 if (adapter->vxlan_port)
3571 be_cmd_set_vxlan_port(adapter, 0);
3572
3573 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3574 adapter->vxlan_port = 0;
630f4b70
SB
3575
3576 netdev->hw_enc_features = 0;
3577 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
ac9a3d84 3578 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
c9c47142 3579}
c5abe7c0 3580#endif
c9c47142 3581
f2858738
VV
3582static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3583{
3584 struct be_resources res = adapter->pool_res;
3585 u16 num_vf_qs = 1;
3586
3587 /* Distribute the queue resources equally among the PF and it's VFs
3588 * Do not distribute queue resources in multi-channel configuration.
3589 */
3590 if (num_vfs && !be_is_mc(adapter)) {
3591 /* If number of VFs requested is 8 less than max supported,
3592 * assign 8 queue pairs to the PF and divide the remaining
3593 * resources evenly among the VFs
3594 */
3595 if (num_vfs < (be_max_vfs(adapter) - 8))
3596 num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
3597 else
3598 num_vf_qs = res.max_rss_qs / num_vfs;
3599
3600 /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
3601 * interfaces per port. Provide RSS on VFs, only if number
3602 * of VFs requested is less than MAX_RSS_IFACES limit.
3603 */
3604 if (num_vfs >= MAX_RSS_IFACES)
3605 num_vf_qs = 1;
3606 }
3607 return num_vf_qs;
3608}
3609
b05004ad
SK
3610static int be_clear(struct be_adapter *adapter)
3611{
f2858738
VV
3612 struct pci_dev *pdev = adapter->pdev;
3613 u16 num_vf_qs;
3614
68d7bdcb 3615 be_cancel_worker(adapter);
191eb756 3616
11ac75ed 3617 if (sriov_enabled(adapter))
f9449ab7
SP
3618 be_vf_clear(adapter);
3619
bec84e6b
VV
3620 /* Re-configure FW to distribute resources evenly across max-supported
3621 * number of VFs, only when VFs are not already enabled.
3622 */
ace40aff
VV
3623 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
3624 !pci_vfs_assigned(pdev)) {
f2858738
VV
3625 num_vf_qs = be_calculate_vf_qs(adapter,
3626 pci_sriov_get_totalvfs(pdev));
bec84e6b 3627 be_cmd_set_sriov_config(adapter, adapter->pool_res,
f2858738
VV
3628 pci_sriov_get_totalvfs(pdev),
3629 num_vf_qs);
3630 }
bec84e6b 3631
c5abe7c0 3632#ifdef CONFIG_BE2NET_VXLAN
c9c47142 3633 be_disable_vxlan_offloads(adapter);
c5abe7c0 3634#endif
2d17f403 3635 /* delete the primary mac along with the uc-mac list */
b05004ad 3636 be_mac_clear(adapter);
fbc13f01 3637
f9449ab7 3638 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 3639
7707133c 3640 be_clear_queues(adapter);
a54769f5 3641
10ef9ab4 3642 be_msix_disable(adapter);
e1ad8e33 3643 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
a54769f5
SP
3644 return 0;
3645}
3646
0700d816
KA
3647static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3648 u32 cap_flags, u32 vf)
3649{
3650 u32 en_flags;
0700d816
KA
3651
3652 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3653 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
71bb8bd0 3654 BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
0700d816
KA
3655
3656 en_flags &= cap_flags;
3657
435452aa 3658 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
0700d816
KA
3659}
3660
4c876616 3661static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3662{
92bf14ab 3663 struct be_resources res = {0};
4c876616 3664 struct be_vf_cfg *vf_cfg;
0700d816
KA
3665 u32 cap_flags, vf;
3666 int status;
abb93951 3667
0700d816 3668 /* If a FW profile exists, then cap_flags are updated */
4c876616 3669 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
0ed7d749 3670 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
abb93951 3671
4c876616 3672 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
3673 if (!BE3_chip(adapter)) {
3674 status = be_cmd_get_profile_config(adapter, &res,
f2858738 3675 RESOURCE_LIMITS,
92bf14ab 3676 vf + 1);
435452aa 3677 if (!status) {
92bf14ab 3678 cap_flags = res.if_cap_flags;
435452aa
VV
3679 /* Prevent VFs from enabling VLAN promiscuous
3680 * mode
3681 */
3682 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3683 }
92bf14ab 3684 }
4c876616 3685
0700d816
KA
3686 status = be_if_create(adapter, &vf_cfg->if_handle,
3687 cap_flags, vf + 1);
4c876616 3688 if (status)
0700d816 3689 return status;
4c876616 3690 }
0700d816
KA
3691
3692 return 0;
abb93951
PR
3693}
3694
39f1d94d 3695static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3696{
11ac75ed 3697 struct be_vf_cfg *vf_cfg;
30128031
SP
3698 int vf;
3699
39f1d94d
SP
3700 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3701 GFP_KERNEL);
3702 if (!adapter->vf_cfg)
3703 return -ENOMEM;
3704
11ac75ed
SP
3705 for_all_vfs(adapter, vf_cfg, vf) {
3706 vf_cfg->if_handle = -1;
3707 vf_cfg->pmac_id = -1;
30128031 3708 }
39f1d94d 3709 return 0;
30128031
SP
3710}
3711
f9449ab7
SP
3712static int be_vf_setup(struct be_adapter *adapter)
3713{
c502224e 3714 struct device *dev = &adapter->pdev->dev;
11ac75ed 3715 struct be_vf_cfg *vf_cfg;
4c876616 3716 int status, old_vfs, vf;
e7bcbd7b 3717 bool spoofchk;
39f1d94d 3718
257a3feb 3719 old_vfs = pci_num_vf(adapter->pdev);
39f1d94d
SP
3720
3721 status = be_vf_setup_init(adapter);
3722 if (status)
3723 goto err;
30128031 3724
4c876616
SP
3725 if (old_vfs) {
3726 for_all_vfs(adapter, vf_cfg, vf) {
3727 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3728 if (status)
3729 goto err;
3730 }
f9449ab7 3731
4c876616
SP
3732 status = be_vfs_mac_query(adapter);
3733 if (status)
3734 goto err;
3735 } else {
bec84e6b
VV
3736 status = be_vfs_if_create(adapter);
3737 if (status)
3738 goto err;
3739
39f1d94d
SP
3740 status = be_vf_eth_addr_config(adapter);
3741 if (status)
3742 goto err;
3743 }
f9449ab7 3744
11ac75ed 3745 for_all_vfs(adapter, vf_cfg, vf) {
04a06028 3746 /* Allow VFs to programs MAC/VLAN filters */
435452aa
VV
3747 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
3748 vf + 1);
3749 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
04a06028 3750 status = be_cmd_set_fn_privileges(adapter,
435452aa 3751 vf_cfg->privileges |
04a06028
SP
3752 BE_PRIV_FILTMGMT,
3753 vf + 1);
435452aa
VV
3754 if (!status) {
3755 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
04a06028
SP
3756 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3757 vf);
435452aa 3758 }
04a06028
SP
3759 }
3760
0f77ba73
RN
3761 /* Allow full available bandwidth */
3762 if (!old_vfs)
3763 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 3764
e7bcbd7b
KA
3765 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
3766 vf_cfg->if_handle, NULL,
3767 &spoofchk);
3768 if (!status)
3769 vf_cfg->spoofchk = spoofchk;
3770
bdce2ad7 3771 if (!old_vfs) {
0599863d 3772 be_cmd_enable_vf(adapter, vf + 1);
bdce2ad7
SR
3773 be_cmd_set_logical_link_config(adapter,
3774 IFLA_VF_LINK_STATE_AUTO,
3775 vf+1);
3776 }
f9449ab7 3777 }
b4c1df93
SP
3778
3779 if (!old_vfs) {
3780 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3781 if (status) {
3782 dev_err(dev, "SRIOV enable failed\n");
3783 adapter->num_vfs = 0;
3784 goto err;
3785 }
3786 }
f174c7ec
VV
3787
3788 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
f9449ab7
SP
3789 return 0;
3790err:
4c876616
SP
3791 dev_err(dev, "VF setup failed\n");
3792 be_vf_clear(adapter);
f9449ab7
SP
3793 return status;
3794}
3795
f93f160b
VV
3796/* Converting function_mode bits on BE3 to SH mc_type enums */
3797
3798static u8 be_convert_mc_type(u32 function_mode)
3799{
66064dbc 3800 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 3801 return vNIC1;
66064dbc 3802 else if (function_mode & QNQ_MODE)
f93f160b
VV
3803 return FLEX10;
3804 else if (function_mode & VNIC_MODE)
3805 return vNIC2;
3806 else if (function_mode & UMC_ENABLED)
3807 return UMC;
3808 else
3809 return MC_NONE;
3810}
3811
92bf14ab
SP
3812/* On BE2/BE3 FW does not suggest the supported limits */
3813static void BEx_get_resources(struct be_adapter *adapter,
3814 struct be_resources *res)
3815{
bec84e6b 3816 bool use_sriov = adapter->num_vfs ? 1 : 0;
92bf14ab
SP
3817
3818 if (be_physfn(adapter))
3819 res->max_uc_mac = BE_UC_PMAC_COUNT;
3820 else
3821 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3822
f93f160b
VV
3823 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3824
3825 if (be_is_mc(adapter)) {
3826 /* Assuming that there are 4 channels per port,
3827 * when multi-channel is enabled
3828 */
3829 if (be_is_qnq_mode(adapter))
3830 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3831 else
3832 /* In a non-qnq multichannel mode, the pvid
3833 * takes up one vlan entry
3834 */
3835 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3836 } else {
92bf14ab 3837 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
f93f160b
VV
3838 }
3839
92bf14ab
SP
3840 res->max_mcast_mac = BE_MAX_MC;
3841
a5243dab
VV
3842 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3843 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3844 * *only* if it is RSS-capable.
3845 */
3846 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
18c57c74
KA
3847 be_virtfn(adapter) ||
3848 (be_is_mc(adapter) &&
3849 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
92bf14ab 3850 res->max_tx_qs = 1;
a28277dc
SR
3851 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
3852 struct be_resources super_nic_res = {0};
3853
3854 /* On a SuperNIC profile, the driver needs to use the
3855 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
3856 */
f2858738
VV
3857 be_cmd_get_profile_config(adapter, &super_nic_res,
3858 RESOURCE_LIMITS, 0);
a28277dc
SR
3859 /* Some old versions of BE3 FW don't report max_tx_qs value */
3860 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
3861 } else {
92bf14ab 3862 res->max_tx_qs = BE3_MAX_TX_QS;
a28277dc 3863 }
92bf14ab
SP
3864
3865 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3866 !use_sriov && be_physfn(adapter))
3867 res->max_rss_qs = (adapter->be3_native) ?
3868 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3869 res->max_rx_qs = res->max_rss_qs + 1;
3870
e3dc867c 3871 if (be_physfn(adapter))
d3518e21 3872 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
e3dc867c
SR
3873 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3874 else
3875 res->max_evt_qs = 1;
92bf14ab
SP
3876
3877 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
71bb8bd0 3878 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
92bf14ab
SP
3879 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3880 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3881}
3882
30128031
SP
3883static void be_setup_init(struct be_adapter *adapter)
3884{
3885 adapter->vlan_prio_bmap = 0xff;
42f11cf2 3886 adapter->phy.link_speed = -1;
30128031
SP
3887 adapter->if_handle = -1;
3888 adapter->be3_native = false;
f66b7cfd 3889 adapter->if_flags = 0;
f25b119c
PR
3890 if (be_physfn(adapter))
3891 adapter->cmd_privileges = MAX_PRIVILEGES;
3892 else
3893 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
3894}
3895
bec84e6b
VV
3896static int be_get_sriov_config(struct be_adapter *adapter)
3897{
bec84e6b 3898 struct be_resources res = {0};
d3d18312 3899 int max_vfs, old_vfs;
bec84e6b 3900
f2858738 3901 be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
d3d18312 3902
ace40aff 3903 /* Some old versions of BE3 FW don't report max_vfs value */
bec84e6b
VV
3904 if (BE3_chip(adapter) && !res.max_vfs) {
3905 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
3906 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3907 }
3908
d3d18312 3909 adapter->pool_res = res;
bec84e6b 3910
ace40aff
VV
3911 /* If during previous unload of the driver, the VFs were not disabled,
3912 * then we cannot rely on the PF POOL limits for the TotalVFs value.
3913 * Instead use the TotalVFs value stored in the pci-dev struct.
3914 */
bec84e6b
VV
3915 old_vfs = pci_num_vf(adapter->pdev);
3916 if (old_vfs) {
ace40aff
VV
3917 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
3918 old_vfs);
3919
3920 adapter->pool_res.max_vfs =
3921 pci_sriov_get_totalvfs(adapter->pdev);
bec84e6b 3922 adapter->num_vfs = old_vfs;
bec84e6b
VV
3923 }
3924
3925 return 0;
3926}
3927
ace40aff
VV
3928static void be_alloc_sriov_res(struct be_adapter *adapter)
3929{
3930 int old_vfs = pci_num_vf(adapter->pdev);
3931 u16 num_vf_qs;
3932 int status;
3933
3934 be_get_sriov_config(adapter);
3935
3936 if (!old_vfs)
3937 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
3938
3939 /* When the HW is in SRIOV capable configuration, the PF-pool
3940 * resources are given to PF during driver load, if there are no
3941 * old VFs. This facility is not available in BE3 FW.
3942 * Also, this is done by FW in Lancer chip.
3943 */
3944 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
3945 num_vf_qs = be_calculate_vf_qs(adapter, 0);
3946 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
3947 num_vf_qs);
3948 if (status)
3949 dev_err(&adapter->pdev->dev,
3950 "Failed to optimize SRIOV resources\n");
3951 }
3952}
3953
92bf14ab 3954static int be_get_resources(struct be_adapter *adapter)
abb93951 3955{
92bf14ab
SP
3956 struct device *dev = &adapter->pdev->dev;
3957 struct be_resources res = {0};
3958 int status;
abb93951 3959
92bf14ab
SP
3960 if (BEx_chip(adapter)) {
3961 BEx_get_resources(adapter, &res);
3962 adapter->res = res;
abb93951
PR
3963 }
3964
92bf14ab
SP
3965 /* For Lancer, SH etc read per-function resource limits from FW.
3966 * GET_FUNC_CONFIG returns per function guaranteed limits.
3967 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
3968 */
3969 if (!BEx_chip(adapter)) {
3970 status = be_cmd_get_func_config(adapter, &res);
3971 if (status)
3972 return status;
abb93951 3973
71bb8bd0
VV
3974 /* If a deafault RXQ must be created, we'll use up one RSSQ*/
3975 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
3976 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
3977 res.max_rss_qs -= 1;
3978
92bf14ab
SP
3979 /* If RoCE may be enabled stash away half the EQs for RoCE */
3980 if (be_roce_supported(adapter))
3981 res.max_evt_qs /= 2;
3982 adapter->res = res;
abb93951 3983 }
4c876616 3984
71bb8bd0
VV
3985 /* If FW supports RSS default queue, then skip creating non-RSS
3986 * queue for non-IP traffic.
3987 */
3988 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
3989 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
3990
acbafeb1
SP
3991 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3992 be_max_txqs(adapter), be_max_rxqs(adapter),
3993 be_max_rss(adapter), be_max_eqs(adapter),
3994 be_max_vfs(adapter));
3995 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3996 be_max_uc(adapter), be_max_mc(adapter),
3997 be_max_vlans(adapter));
3998
ace40aff
VV
3999 /* Sanitize cfg_num_qs based on HW and platform limits */
4000 adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
4001 be_max_qs(adapter));
92bf14ab 4002 return 0;
abb93951
PR
4003}
4004
39f1d94d
SP
4005static int be_get_config(struct be_adapter *adapter)
4006{
6b085ba9 4007 int status, level;
542963b7 4008 u16 profile_id;
6b085ba9
SP
4009
4010 status = be_cmd_get_cntl_attributes(adapter);
4011 if (status)
4012 return status;
39f1d94d 4013
e97e3cda 4014 status = be_cmd_query_fw_cfg(adapter);
abb93951 4015 if (status)
92bf14ab 4016 return status;
abb93951 4017
6b085ba9
SP
4018 if (BEx_chip(adapter)) {
4019 level = be_cmd_get_fw_log_level(adapter);
4020 adapter->msg_enable =
4021 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4022 }
4023
4024 be_cmd_get_acpi_wol_cap(adapter);
4025
21252377
VV
4026 be_cmd_query_port_name(adapter);
4027
4028 if (be_physfn(adapter)) {
542963b7
VV
4029 status = be_cmd_get_active_profile(adapter, &profile_id);
4030 if (!status)
4031 dev_info(&adapter->pdev->dev,
4032 "Using profile 0x%x\n", profile_id);
962bcb75 4033 }
bec84e6b 4034
92bf14ab
SP
4035 status = be_get_resources(adapter);
4036 if (status)
4037 return status;
abb93951 4038
46ee9c14
RN
4039 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4040 sizeof(*adapter->pmac_id), GFP_KERNEL);
92bf14ab
SP
4041 if (!adapter->pmac_id)
4042 return -ENOMEM;
abb93951 4043
92bf14ab 4044 return 0;
39f1d94d
SP
4045}
4046
95046b92
SP
4047static int be_mac_setup(struct be_adapter *adapter)
4048{
4049 u8 mac[ETH_ALEN];
4050 int status;
4051
4052 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4053 status = be_cmd_get_perm_mac(adapter, mac);
4054 if (status)
4055 return status;
4056
4057 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4058 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4059 } else {
4060 /* Maybe the HW was reset; dev_addr must be re-programmed */
4061 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
4062 }
4063
2c7a9dc1
AK
4064 /* For BE3-R VFs, the PF programs the initial MAC address */
4065 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
4066 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
4067 &adapter->pmac_id[0], 0);
95046b92
SP
4068 return 0;
4069}
4070
68d7bdcb
SP
4071static void be_schedule_worker(struct be_adapter *adapter)
4072{
4073 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4074 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4075}
4076
eb7dd46c
SP
4077static void be_schedule_err_detection(struct be_adapter *adapter)
4078{
4079 schedule_delayed_work(&adapter->be_err_detection_work,
4080 msecs_to_jiffies(1000));
4081 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4082}
4083
7707133c 4084static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 4085{
68d7bdcb 4086 struct net_device *netdev = adapter->netdev;
10ef9ab4 4087 int status;
ba343c77 4088
7707133c 4089 status = be_evt_queues_create(adapter);
abb93951
PR
4090 if (status)
4091 goto err;
73d540f2 4092
7707133c 4093 status = be_tx_qs_create(adapter);
c2bba3df
SK
4094 if (status)
4095 goto err;
10ef9ab4 4096
7707133c 4097 status = be_rx_cqs_create(adapter);
10ef9ab4 4098 if (status)
a54769f5 4099 goto err;
6b7c5b94 4100
7707133c 4101 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
4102 if (status)
4103 goto err;
4104
68d7bdcb
SP
4105 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4106 if (status)
4107 goto err;
4108
4109 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4110 if (status)
4111 goto err;
4112
7707133c
SP
4113 return 0;
4114err:
4115 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4116 return status;
4117}
4118
68d7bdcb
SP
4119int be_update_queues(struct be_adapter *adapter)
4120{
4121 struct net_device *netdev = adapter->netdev;
4122 int status;
4123
4124 if (netif_running(netdev))
4125 be_close(netdev);
4126
4127 be_cancel_worker(adapter);
4128
4129 /* If any vectors have been shared with RoCE we cannot re-program
4130 * the MSIx table.
4131 */
4132 if (!adapter->num_msix_roce_vec)
4133 be_msix_disable(adapter);
4134
4135 be_clear_queues(adapter);
4136
4137 if (!msix_enabled(adapter)) {
4138 status = be_msix_enable(adapter);
4139 if (status)
4140 return status;
4141 }
4142
4143 status = be_setup_queues(adapter);
4144 if (status)
4145 return status;
4146
4147 be_schedule_worker(adapter);
4148
4149 if (netif_running(netdev))
4150 status = be_open(netdev);
4151
4152 return status;
4153}
4154
f7062ee5
SP
4155static inline int fw_major_num(const char *fw_ver)
4156{
4157 int fw_major = 0, i;
4158
4159 i = sscanf(fw_ver, "%d.", &fw_major);
4160 if (i != 1)
4161 return 0;
4162
4163 return fw_major;
4164}
4165
f962f840
SP
4166/* If any VFs are already enabled don't FLR the PF */
4167static bool be_reset_required(struct be_adapter *adapter)
4168{
4169 return pci_num_vf(adapter->pdev) ? false : true;
4170}
4171
4172/* Wait for the FW to be ready and perform the required initialization */
4173static int be_func_init(struct be_adapter *adapter)
4174{
4175 int status;
4176
4177 status = be_fw_wait_ready(adapter);
4178 if (status)
4179 return status;
4180
4181 if (be_reset_required(adapter)) {
4182 status = be_cmd_reset_function(adapter);
4183 if (status)
4184 return status;
4185
4186 /* Wait for interrupts to quiesce after an FLR */
4187 msleep(100);
4188
4189 /* We can clear all errors when function reset succeeds */
954f6825 4190 be_clear_error(adapter, BE_CLEAR_ALL);
f962f840
SP
4191 }
4192
4193 /* Tell FW we're ready to fire cmds */
4194 status = be_cmd_fw_init(adapter);
4195 if (status)
4196 return status;
4197
4198 /* Allow interrupts for other ULPs running on NIC function */
4199 be_intr_set(adapter, true);
4200
4201 return 0;
4202}
4203
7707133c
SP
4204static int be_setup(struct be_adapter *adapter)
4205{
4206 struct device *dev = &adapter->pdev->dev;
7707133c
SP
4207 int status;
4208
f962f840
SP
4209 status = be_func_init(adapter);
4210 if (status)
4211 return status;
4212
7707133c
SP
4213 be_setup_init(adapter);
4214
4215 if (!lancer_chip(adapter))
4216 be_cmd_req_native_mode(adapter);
4217
ace40aff
VV
4218 if (!BE2_chip(adapter) && be_physfn(adapter))
4219 be_alloc_sriov_res(adapter);
4220
7707133c 4221 status = be_get_config(adapter);
10ef9ab4 4222 if (status)
a54769f5 4223 goto err;
6b7c5b94 4224
7707133c 4225 status = be_msix_enable(adapter);
10ef9ab4 4226 if (status)
a54769f5 4227 goto err;
6b7c5b94 4228
0700d816
KA
4229 status = be_if_create(adapter, &adapter->if_handle,
4230 be_if_cap_flags(adapter), 0);
7707133c 4231 if (status)
a54769f5 4232 goto err;
6b7c5b94 4233
68d7bdcb
SP
4234 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4235 rtnl_lock();
7707133c 4236 status = be_setup_queues(adapter);
68d7bdcb 4237 rtnl_unlock();
95046b92 4238 if (status)
1578e777
PR
4239 goto err;
4240
7707133c 4241 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
7707133c
SP
4242
4243 status = be_mac_setup(adapter);
10ef9ab4
SP
4244 if (status)
4245 goto err;
4246
e97e3cda 4247 be_cmd_get_fw_ver(adapter);
acbafeb1 4248 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
5a56eb10 4249
e9e2a904 4250 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
50762667 4251 dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
e9e2a904
SK
4252 adapter->fw_ver);
4253 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4254 }
4255
1d1e9a46 4256 if (adapter->vlans_added)
10329df8 4257 be_vid_config(adapter);
7ab8b0b4 4258
a54769f5 4259 be_set_rx_mode(adapter->netdev);
5fb379ee 4260
00d594c3
KA
4261 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4262 adapter->rx_fc);
4263 if (status)
4264 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4265 &adapter->rx_fc);
590c391d 4266
00d594c3
KA
4267 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4268 adapter->tx_fc, adapter->rx_fc);
2dc1deb6 4269
bdce2ad7
SR
4270 if (be_physfn(adapter))
4271 be_cmd_set_logical_link_config(adapter,
4272 IFLA_VF_LINK_STATE_AUTO, 0);
4273
bec84e6b
VV
4274 if (adapter->num_vfs)
4275 be_vf_setup(adapter);
f9449ab7 4276
f25b119c
PR
4277 status = be_cmd_get_phy_info(adapter);
4278 if (!status && be_pause_supported(adapter))
42f11cf2
AK
4279 adapter->phy.fc_autoneg = 1;
4280
68d7bdcb 4281 be_schedule_worker(adapter);
e1ad8e33 4282 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 4283 return 0;
a54769f5
SP
4284err:
4285 be_clear(adapter);
4286 return status;
4287}
6b7c5b94 4288
66268739
IV
4289#ifdef CONFIG_NET_POLL_CONTROLLER
4290static void be_netpoll(struct net_device *netdev)
4291{
4292 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 4293 struct be_eq_obj *eqo;
66268739
IV
4294 int i;
4295
e49cc34f 4296 for_all_evt_queues(adapter, eqo, i) {
20947770 4297 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
e49cc34f
SP
4298 napi_schedule(&eqo->napi);
4299 }
66268739
IV
4300}
4301#endif
4302
96c9b2e4 4303static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
fa9a6fed 4304
306f1348
SP
4305static bool phy_flashing_required(struct be_adapter *adapter)
4306{
e02cfd96 4307 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
42f11cf2 4308 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
4309}
4310
c165541e
PR
4311static bool is_comp_in_ufi(struct be_adapter *adapter,
4312 struct flash_section_info *fsec, int type)
4313{
4314 int i = 0, img_type = 0;
4315 struct flash_section_info_g2 *fsec_g2 = NULL;
4316
ca34fe38 4317 if (BE2_chip(adapter))
c165541e
PR
4318 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4319
4320 for (i = 0; i < MAX_FLASH_COMP; i++) {
4321 if (fsec_g2)
4322 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4323 else
4324 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4325
4326 if (img_type == type)
4327 return true;
4328 }
4329 return false;
4330
4331}
4332
4188e7df 4333static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
748b539a
SP
4334 int header_size,
4335 const struct firmware *fw)
c165541e
PR
4336{
4337 struct flash_section_info *fsec = NULL;
4338 const u8 *p = fw->data;
4339
4340 p += header_size;
4341 while (p < (fw->data + fw->size)) {
4342 fsec = (struct flash_section_info *)p;
4343 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4344 return fsec;
4345 p += 32;
4346 }
4347 return NULL;
4348}
4349
96c9b2e4
VV
4350static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4351 u32 img_offset, u32 img_size, int hdr_size,
4352 u16 img_optype, bool *crc_match)
4353{
4354 u32 crc_offset;
4355 int status;
4356 u8 crc[4];
4357
70a7b525
VV
4358 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4359 img_size - 4);
96c9b2e4
VV
4360 if (status)
4361 return status;
4362
4363 crc_offset = hdr_size + img_offset + img_size - 4;
4364
4365 /* Skip flashing, if crc of flashed region matches */
4366 if (!memcmp(crc, p + crc_offset, 4))
4367 *crc_match = true;
4368 else
4369 *crc_match = false;
4370
4371 return status;
4372}
4373
773a2d7c 4374static int be_flash(struct be_adapter *adapter, const u8 *img,
70a7b525
VV
4375 struct be_dma_mem *flash_cmd, int optype, int img_size,
4376 u32 img_offset)
773a2d7c 4377{
70a7b525 4378 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
773a2d7c 4379 struct be_cmd_write_flashrom *req = flash_cmd->va;
96c9b2e4 4380 int status;
773a2d7c 4381
773a2d7c
PR
4382 while (total_bytes) {
4383 num_bytes = min_t(u32, 32*1024, total_bytes);
4384
4385 total_bytes -= num_bytes;
4386
4387 if (!total_bytes) {
4388 if (optype == OPTYPE_PHY_FW)
4389 flash_op = FLASHROM_OPER_PHY_FLASH;
4390 else
4391 flash_op = FLASHROM_OPER_FLASH;
4392 } else {
4393 if (optype == OPTYPE_PHY_FW)
4394 flash_op = FLASHROM_OPER_PHY_SAVE;
4395 else
4396 flash_op = FLASHROM_OPER_SAVE;
4397 }
4398
be716446 4399 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
4400 img += num_bytes;
4401 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
70a7b525
VV
4402 flash_op, img_offset +
4403 bytes_sent, num_bytes);
4c60005f 4404 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
96c9b2e4
VV
4405 optype == OPTYPE_PHY_FW)
4406 break;
4407 else if (status)
773a2d7c 4408 return status;
70a7b525
VV
4409
4410 bytes_sent += num_bytes;
773a2d7c
PR
4411 }
4412 return 0;
4413}
4414
0ad3157e 4415/* For BE2, BE3 and BE3-R */
ca34fe38 4416static int be_flash_BEx(struct be_adapter *adapter,
748b539a
SP
4417 const struct firmware *fw,
4418 struct be_dma_mem *flash_cmd, int num_of_images)
84517482 4419{
c165541e 4420 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
96c9b2e4 4421 struct device *dev = &adapter->pdev->dev;
c165541e 4422 struct flash_section_info *fsec = NULL;
96c9b2e4
VV
4423 int status, i, filehdr_size, num_comp;
4424 const struct flash_comp *pflashcomp;
4425 bool crc_match;
4426 const u8 *p;
c165541e
PR
4427
4428 struct flash_comp gen3_flash_types[] = {
4429 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
4430 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
4431 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
4432 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
4433 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
4434 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
4435 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
4436 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
4437 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
4438 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
4439 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
4440 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
4441 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
4442 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
4443 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
4444 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
4445 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
4446 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
4447 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
4448 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 4449 };
c165541e
PR
4450
4451 struct flash_comp gen2_flash_types[] = {
4452 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
4453 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
4454 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
4455 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
4456 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
4457 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
4458 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
4459 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
4460 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
4461 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
4462 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
4463 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
4464 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
4465 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
4466 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
4467 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
4468 };
4469
ca34fe38 4470 if (BE3_chip(adapter)) {
3f0d4560
AK
4471 pflashcomp = gen3_flash_types;
4472 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 4473 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
4474 } else {
4475 pflashcomp = gen2_flash_types;
4476 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 4477 num_comp = ARRAY_SIZE(gen2_flash_types);
5d3acd0d 4478 img_hdrs_size = 0;
84517482 4479 }
ca34fe38 4480
c165541e
PR
4481 /* Get flash section info*/
4482 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4483 if (!fsec) {
96c9b2e4 4484 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
c165541e
PR
4485 return -1;
4486 }
9fe96934 4487 for (i = 0; i < num_comp; i++) {
c165541e 4488 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 4489 continue;
c165541e
PR
4490
4491 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
4492 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
4493 continue;
4494
773a2d7c
PR
4495 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
4496 !phy_flashing_required(adapter))
306f1348 4497 continue;
c165541e 4498
773a2d7c 4499 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
96c9b2e4
VV
4500 status = be_check_flash_crc(adapter, fw->data,
4501 pflashcomp[i].offset,
4502 pflashcomp[i].size,
4503 filehdr_size +
4504 img_hdrs_size,
4505 OPTYPE_REDBOOT, &crc_match);
4506 if (status) {
4507 dev_err(dev,
4508 "Could not get CRC for 0x%x region\n",
4509 pflashcomp[i].optype);
4510 continue;
4511 }
4512
4513 if (crc_match)
773a2d7c
PR
4514 continue;
4515 }
c165541e 4516
96c9b2e4
VV
4517 p = fw->data + filehdr_size + pflashcomp[i].offset +
4518 img_hdrs_size;
306f1348
SP
4519 if (p + pflashcomp[i].size > fw->data + fw->size)
4520 return -1;
773a2d7c
PR
4521
4522 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
70a7b525 4523 pflashcomp[i].size, 0);
773a2d7c 4524 if (status) {
96c9b2e4 4525 dev_err(dev, "Flashing section type 0x%x failed\n",
773a2d7c
PR
4526 pflashcomp[i].img_type);
4527 return status;
84517482 4528 }
84517482 4529 }
84517482
AK
4530 return 0;
4531}
4532
96c9b2e4
VV
4533static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4534{
4535 u32 img_type = le32_to_cpu(fsec_entry.type);
4536 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4537
4538 if (img_optype != 0xFFFF)
4539 return img_optype;
4540
4541 switch (img_type) {
4542 case IMAGE_FIRMWARE_iSCSI:
4543 img_optype = OPTYPE_ISCSI_ACTIVE;
4544 break;
4545 case IMAGE_BOOT_CODE:
4546 img_optype = OPTYPE_REDBOOT;
4547 break;
4548 case IMAGE_OPTION_ROM_ISCSI:
4549 img_optype = OPTYPE_BIOS;
4550 break;
4551 case IMAGE_OPTION_ROM_PXE:
4552 img_optype = OPTYPE_PXE_BIOS;
4553 break;
4554 case IMAGE_OPTION_ROM_FCoE:
4555 img_optype = OPTYPE_FCOE_BIOS;
4556 break;
4557 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4558 img_optype = OPTYPE_ISCSI_BACKUP;
4559 break;
4560 case IMAGE_NCSI:
4561 img_optype = OPTYPE_NCSI_FW;
4562 break;
4563 case IMAGE_FLASHISM_JUMPVECTOR:
4564 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4565 break;
4566 case IMAGE_FIRMWARE_PHY:
4567 img_optype = OPTYPE_SH_PHY_FW;
4568 break;
4569 case IMAGE_REDBOOT_DIR:
4570 img_optype = OPTYPE_REDBOOT_DIR;
4571 break;
4572 case IMAGE_REDBOOT_CONFIG:
4573 img_optype = OPTYPE_REDBOOT_CONFIG;
4574 break;
4575 case IMAGE_UFI_DIR:
4576 img_optype = OPTYPE_UFI_DIR;
4577 break;
4578 default:
4579 break;
4580 }
4581
4582 return img_optype;
4583}
4584
773a2d7c 4585static int be_flash_skyhawk(struct be_adapter *adapter,
748b539a
SP
4586 const struct firmware *fw,
4587 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 4588{
773a2d7c 4589 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
70a7b525 4590 bool crc_match, old_fw_img, flash_offset_support = true;
96c9b2e4 4591 struct device *dev = &adapter->pdev->dev;
773a2d7c 4592 struct flash_section_info *fsec = NULL;
96c9b2e4 4593 u32 img_offset, img_size, img_type;
70a7b525 4594 u16 img_optype, flash_optype;
96c9b2e4 4595 int status, i, filehdr_size;
96c9b2e4 4596 const u8 *p;
773a2d7c
PR
4597
4598 filehdr_size = sizeof(struct flash_file_hdr_g3);
4599 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4600 if (!fsec) {
96c9b2e4 4601 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
56ace3a0 4602 return -EINVAL;
773a2d7c
PR
4603 }
4604
70a7b525 4605retry_flash:
773a2d7c
PR
4606 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4607 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4608 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
96c9b2e4
VV
4609 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4610 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4611 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
773a2d7c 4612
96c9b2e4 4613 if (img_optype == 0xFFFF)
773a2d7c 4614 continue;
70a7b525
VV
4615
4616 if (flash_offset_support)
4617 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4618 else
4619 flash_optype = img_optype;
4620
96c9b2e4
VV
4621 /* Don't bother verifying CRC if an old FW image is being
4622 * flashed
4623 */
4624 if (old_fw_img)
4625 goto flash;
4626
4627 status = be_check_flash_crc(adapter, fw->data, img_offset,
4628 img_size, filehdr_size +
70a7b525 4629 img_hdrs_size, flash_optype,
96c9b2e4 4630 &crc_match);
4c60005f
KA
4631 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4632 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
70a7b525
VV
4633 /* The current FW image on the card does not support
4634 * OFFSET based flashing. Retry using older mechanism
4635 * of OPTYPE based flashing
4636 */
4637 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4638 flash_offset_support = false;
4639 goto retry_flash;
4640 }
4641
4642 /* The current FW image on the card does not recognize
4643 * the new FLASH op_type. The FW download is partially
4644 * complete. Reboot the server now to enable FW image
4645 * to recognize the new FLASH op_type. To complete the
4646 * remaining process, download the same FW again after
4647 * the reboot.
4648 */
96c9b2e4
VV
4649 dev_err(dev, "Flash incomplete. Reset the server\n");
4650 dev_err(dev, "Download FW image again after reset\n");
4651 return -EAGAIN;
4652 } else if (status) {
4653 dev_err(dev, "Could not get CRC for 0x%x region\n",
4654 img_optype);
4655 return -EFAULT;
773a2d7c
PR
4656 }
4657
96c9b2e4
VV
4658 if (crc_match)
4659 continue;
773a2d7c 4660
96c9b2e4
VV
4661flash:
4662 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
773a2d7c
PR
4663 if (p + img_size > fw->data + fw->size)
4664 return -1;
4665
70a7b525
VV
4666 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4667 img_offset);
4668
4669 /* The current FW image on the card does not support OFFSET
4670 * based flashing. Retry using older mechanism of OPTYPE based
4671 * flashing
4672 */
4673 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4674 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4675 flash_offset_support = false;
4676 goto retry_flash;
4677 }
4678
96c9b2e4
VV
4679 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4680 * UFI_DIR region
4681 */
4c60005f
KA
4682 if (old_fw_img &&
4683 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4684 (img_optype == OPTYPE_UFI_DIR &&
4685 base_status(status) == MCC_STATUS_FAILED))) {
96c9b2e4
VV
4686 continue;
4687 } else if (status) {
4688 dev_err(dev, "Flashing section type 0x%x failed\n",
4689 img_type);
4690 return -EFAULT;
773a2d7c
PR
4691 }
4692 }
4693 return 0;
3f0d4560
AK
4694}
4695
485bf569 4696static int lancer_fw_download(struct be_adapter *adapter,
748b539a 4697 const struct firmware *fw)
84517482 4698{
485bf569
SN
4699#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4700#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
bb864e07 4701 struct device *dev = &adapter->pdev->dev;
84517482 4702 struct be_dma_mem flash_cmd;
485bf569
SN
4703 const u8 *data_ptr = NULL;
4704 u8 *dest_image_ptr = NULL;
4705 size_t image_size = 0;
4706 u32 chunk_size = 0;
4707 u32 data_written = 0;
4708 u32 offset = 0;
4709 int status = 0;
4710 u8 add_status = 0;
f67ef7ba 4711 u8 change_status;
84517482 4712
485bf569 4713 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
bb864e07 4714 dev_err(dev, "FW image size should be multiple of 4\n");
3fb8cb80 4715 return -EINVAL;
d9efd2af
SB
4716 }
4717
485bf569
SN
4718 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4719 + LANCER_FW_DOWNLOAD_CHUNK;
bb864e07 4720 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
d0320f75 4721 &flash_cmd.dma, GFP_KERNEL);
3fb8cb80
KA
4722 if (!flash_cmd.va)
4723 return -ENOMEM;
84517482 4724
485bf569
SN
4725 dest_image_ptr = flash_cmd.va +
4726 sizeof(struct lancer_cmd_req_write_object);
4727 image_size = fw->size;
4728 data_ptr = fw->data;
4729
4730 while (image_size) {
4731 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4732
4733 /* Copy the image chunk content. */
4734 memcpy(dest_image_ptr, data_ptr, chunk_size);
4735
4736 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
4737 chunk_size, offset,
4738 LANCER_FW_DOWNLOAD_LOCATION,
4739 &data_written, &change_status,
4740 &add_status);
485bf569
SN
4741 if (status)
4742 break;
4743
4744 offset += data_written;
4745 data_ptr += data_written;
4746 image_size -= data_written;
4747 }
4748
4749 if (!status) {
4750 /* Commit the FW written */
4751 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
4752 0, offset,
4753 LANCER_FW_DOWNLOAD_LOCATION,
4754 &data_written, &change_status,
4755 &add_status);
485bf569
SN
4756 }
4757
bb864e07 4758 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
485bf569 4759 if (status) {
bb864e07 4760 dev_err(dev, "Firmware load error\n");
3fb8cb80 4761 return be_cmd_status(status);
485bf569
SN
4762 }
4763
bb864e07
KA
4764 dev_info(dev, "Firmware flashed successfully\n");
4765
f67ef7ba 4766 if (change_status == LANCER_FW_RESET_NEEDED) {
bb864e07 4767 dev_info(dev, "Resetting adapter to activate new FW\n");
5c510811
SK
4768 status = lancer_physdev_ctrl(adapter,
4769 PHYSDEV_CONTROL_FW_RESET_MASK);
f67ef7ba 4770 if (status) {
bb864e07
KA
4771 dev_err(dev, "Adapter busy, could not reset FW\n");
4772 dev_err(dev, "Reboot server to activate new FW\n");
f67ef7ba
PR
4773 }
4774 } else if (change_status != LANCER_NO_RESET_NEEDED) {
bb864e07 4775 dev_info(dev, "Reboot server to activate new FW\n");
f67ef7ba 4776 }
3fb8cb80
KA
4777
4778 return 0;
485bf569
SN
4779}
4780
a6e6ff6e
VV
4781/* Check if the flash image file is compatible with the adapter that
4782 * is being flashed.
4783 */
4784static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4785 struct flash_file_hdr_g3 *fhdr)
773a2d7c 4786{
5d3acd0d
VV
4787 if (!fhdr) {
4788 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4789 return -1;
4790 }
773a2d7c 4791
5d3acd0d
VV
4792 /* First letter of the build version is used to identify
4793 * which chip this image file is meant for.
4794 */
4795 switch (fhdr->build[0]) {
4796 case BLD_STR_UFI_TYPE_SH:
a6e6ff6e
VV
4797 if (!skyhawk_chip(adapter))
4798 return false;
4799 break;
5d3acd0d 4800 case BLD_STR_UFI_TYPE_BE3:
a6e6ff6e
VV
4801 if (!BE3_chip(adapter))
4802 return false;
4803 break;
5d3acd0d 4804 case BLD_STR_UFI_TYPE_BE2:
a6e6ff6e
VV
4805 if (!BE2_chip(adapter))
4806 return false;
4807 break;
5d3acd0d
VV
4808 default:
4809 return false;
4810 }
a6e6ff6e
VV
4811
4812 return (fhdr->asic_type_rev >= adapter->asic_rev);
773a2d7c
PR
4813}
4814
485bf569
SN
4815static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4816{
5d3acd0d 4817 struct device *dev = &adapter->pdev->dev;
485bf569 4818 struct flash_file_hdr_g3 *fhdr3;
5d3acd0d
VV
4819 struct image_hdr *img_hdr_ptr;
4820 int status = 0, i, num_imgs;
485bf569 4821 struct be_dma_mem flash_cmd;
84517482 4822
5d3acd0d
VV
4823 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
4824 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
4825 dev_err(dev, "Flash image is not compatible with adapter\n");
4826 return -EINVAL;
84517482
AK
4827 }
4828
5d3acd0d
VV
4829 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4830 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4831 GFP_KERNEL);
4832 if (!flash_cmd.va)
4833 return -ENOMEM;
773a2d7c 4834
773a2d7c
PR
4835 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4836 for (i = 0; i < num_imgs; i++) {
4837 img_hdr_ptr = (struct image_hdr *)(fw->data +
4838 (sizeof(struct flash_file_hdr_g3) +
4839 i * sizeof(struct image_hdr)));
5d3acd0d
VV
4840 if (!BE2_chip(adapter) &&
4841 le32_to_cpu(img_hdr_ptr->imageid) != 1)
4842 continue;
84517482 4843
5d3acd0d
VV
4844 if (skyhawk_chip(adapter))
4845 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
4846 num_imgs);
4847 else
4848 status = be_flash_BEx(adapter, fw, &flash_cmd,
4849 num_imgs);
84517482
AK
4850 }
4851
5d3acd0d
VV
4852 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4853 if (!status)
4854 dev_info(dev, "Firmware flashed successfully\n");
84517482 4855
485bf569
SN
4856 return status;
4857}
4858
4859int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4860{
4861 const struct firmware *fw;
4862 int status;
4863
4864 if (!netif_running(adapter->netdev)) {
4865 dev_err(&adapter->pdev->dev,
4866 "Firmware load not allowed (interface is down)\n");
940a3fcd 4867 return -ENETDOWN;
485bf569
SN
4868 }
4869
4870 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4871 if (status)
4872 goto fw_exit;
4873
4874 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4875
4876 if (lancer_chip(adapter))
4877 status = lancer_fw_download(adapter, fw);
4878 else
4879 status = be_fw_download(adapter, fw);
4880
eeb65ced 4881 if (!status)
e97e3cda 4882 be_cmd_get_fw_ver(adapter);
eeb65ced 4883
84517482
AK
4884fw_exit:
4885 release_firmware(fw);
4886 return status;
4887}
4888
add511b3
RP
4889static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4890 u16 flags)
a77dcb8c
AK
4891{
4892 struct be_adapter *adapter = netdev_priv(dev);
4893 struct nlattr *attr, *br_spec;
4894 int rem;
4895 int status = 0;
4896 u16 mode = 0;
4897
4898 if (!sriov_enabled(adapter))
4899 return -EOPNOTSUPP;
4900
4901 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4ea85e83
TG
4902 if (!br_spec)
4903 return -EINVAL;
a77dcb8c
AK
4904
4905 nla_for_each_nested(attr, br_spec, rem) {
4906 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4907 continue;
4908
b7c1a314
TG
4909 if (nla_len(attr) < sizeof(mode))
4910 return -EINVAL;
4911
a77dcb8c
AK
4912 mode = nla_get_u16(attr);
4913 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4914 return -EINVAL;
4915
4916 status = be_cmd_set_hsw_config(adapter, 0, 0,
4917 adapter->if_handle,
4918 mode == BRIDGE_MODE_VEPA ?
4919 PORT_FWD_TYPE_VEPA :
e7bcbd7b 4920 PORT_FWD_TYPE_VEB, 0);
a77dcb8c
AK
4921 if (status)
4922 goto err;
4923
4924 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4925 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4926
4927 return status;
4928 }
4929err:
4930 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4931 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4932
4933 return status;
4934}
4935
4936static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
46c264da
ND
4937 struct net_device *dev, u32 filter_mask,
4938 int nlflags)
a77dcb8c
AK
4939{
4940 struct be_adapter *adapter = netdev_priv(dev);
4941 int status = 0;
4942 u8 hsw_mode;
4943
4944 if (!sriov_enabled(adapter))
4945 return 0;
4946
4947 /* BE and Lancer chips support VEB mode only */
4948 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4949 hsw_mode = PORT_FWD_TYPE_VEB;
4950 } else {
4951 status = be_cmd_get_hsw_config(adapter, NULL, 0,
e7bcbd7b
KA
4952 adapter->if_handle, &hsw_mode,
4953 NULL);
a77dcb8c
AK
4954 if (status)
4955 return 0;
4956 }
4957
4958 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4959 hsw_mode == PORT_FWD_TYPE_VEPA ?
2c3c031c 4960 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
46c264da 4961 0, 0, nlflags);
a77dcb8c
AK
4962}
4963
c5abe7c0 4964#ifdef CONFIG_BE2NET_VXLAN
630f4b70
SB
4965/* VxLAN offload Notes:
4966 *
4967 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4968 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4969 * is expected to work across all types of IP tunnels once exported. Skyhawk
4970 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
16dde0d6
SB
4971 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4972 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4973 * those other tunnels are unexported on the fly through ndo_features_check().
630f4b70
SB
4974 *
4975 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4976 * adds more than one port, disable offloads and don't re-enable them again
4977 * until after all the tunnels are removed.
4978 */
c9c47142
SP
4979static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4980 __be16 port)
4981{
4982 struct be_adapter *adapter = netdev_priv(netdev);
4983 struct device *dev = &adapter->pdev->dev;
4984 int status;
4985
4986 if (lancer_chip(adapter) || BEx_chip(adapter))
4987 return;
4988
4989 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
c9c47142
SP
4990 dev_info(dev,
4991 "Only one UDP port supported for VxLAN offloads\n");
630f4b70
SB
4992 dev_info(dev, "Disabling VxLAN offloads\n");
4993 adapter->vxlan_port_count++;
4994 goto err;
c9c47142
SP
4995 }
4996
630f4b70
SB
4997 if (adapter->vxlan_port_count++ >= 1)
4998 return;
4999
c9c47142
SP
5000 status = be_cmd_manage_iface(adapter, adapter->if_handle,
5001 OP_CONVERT_NORMAL_TO_TUNNEL);
5002 if (status) {
5003 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
5004 goto err;
5005 }
5006
5007 status = be_cmd_set_vxlan_port(adapter, port);
5008 if (status) {
5009 dev_warn(dev, "Failed to add VxLAN port\n");
5010 goto err;
5011 }
5012 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
5013 adapter->vxlan_port = port;
5014
630f4b70
SB
5015 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5016 NETIF_F_TSO | NETIF_F_TSO6 |
5017 NETIF_F_GSO_UDP_TUNNEL;
5018 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
ac9a3d84 5019 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
630f4b70 5020
c9c47142
SP
5021 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
5022 be16_to_cpu(port));
5023 return;
5024err:
5025 be_disable_vxlan_offloads(adapter);
c9c47142
SP
5026}
5027
5028static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5029 __be16 port)
5030{
5031 struct be_adapter *adapter = netdev_priv(netdev);
5032
5033 if (lancer_chip(adapter) || BEx_chip(adapter))
5034 return;
5035
5036 if (adapter->vxlan_port != port)
630f4b70 5037 goto done;
c9c47142
SP
5038
5039 be_disable_vxlan_offloads(adapter);
5040
5041 dev_info(&adapter->pdev->dev,
5042 "Disabled VxLAN offloads for UDP port %d\n",
5043 be16_to_cpu(port));
630f4b70
SB
5044done:
5045 adapter->vxlan_port_count--;
c9c47142 5046}
725d548f 5047
5f35227e
JG
5048static netdev_features_t be_features_check(struct sk_buff *skb,
5049 struct net_device *dev,
5050 netdev_features_t features)
725d548f 5051{
16dde0d6
SB
5052 struct be_adapter *adapter = netdev_priv(dev);
5053 u8 l4_hdr = 0;
5054
5055 /* The code below restricts offload features for some tunneled packets.
5056 * Offload features for normal (non tunnel) packets are unchanged.
5057 */
5058 if (!skb->encapsulation ||
5059 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
5060 return features;
5061
5062 /* It's an encapsulated packet and VxLAN offloads are enabled. We
5063 * should disable tunnel offload features if it's not a VxLAN packet,
5064 * as tunnel offloads have been enabled only for VxLAN. This is done to
5065 * allow other tunneled traffic like GRE work fine while VxLAN
5066 * offloads are configured in Skyhawk-R.
5067 */
5068 switch (vlan_get_protocol(skb)) {
5069 case htons(ETH_P_IP):
5070 l4_hdr = ip_hdr(skb)->protocol;
5071 break;
5072 case htons(ETH_P_IPV6):
5073 l4_hdr = ipv6_hdr(skb)->nexthdr;
5074 break;
5075 default:
5076 return features;
5077 }
5078
5079 if (l4_hdr != IPPROTO_UDP ||
5080 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
5081 skb->inner_protocol != htons(ETH_P_TEB) ||
5082 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
5083 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
5084 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
5085
5086 return features;
725d548f 5087}
c5abe7c0 5088#endif
c9c47142 5089
e5686ad8 5090static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
5091 .ndo_open = be_open,
5092 .ndo_stop = be_close,
5093 .ndo_start_xmit = be_xmit,
a54769f5 5094 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
5095 .ndo_set_mac_address = be_mac_addr_set,
5096 .ndo_change_mtu = be_change_mtu,
ab1594e9 5097 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 5098 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
5099 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
5100 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 5101 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 5102 .ndo_set_vf_vlan = be_set_vf_vlan,
ed616689 5103 .ndo_set_vf_rate = be_set_vf_tx_rate,
66268739 5104 .ndo_get_vf_config = be_get_vf_config,
bdce2ad7 5105 .ndo_set_vf_link_state = be_set_vf_link_state,
e7bcbd7b 5106 .ndo_set_vf_spoofchk = be_set_vf_spoofchk,
66268739
IV
5107#ifdef CONFIG_NET_POLL_CONTROLLER
5108 .ndo_poll_controller = be_netpoll,
5109#endif
a77dcb8c
AK
5110 .ndo_bridge_setlink = be_ndo_bridge_setlink,
5111 .ndo_bridge_getlink = be_ndo_bridge_getlink,
6384a4d0 5112#ifdef CONFIG_NET_RX_BUSY_POLL
c9c47142 5113 .ndo_busy_poll = be_busy_poll,
6384a4d0 5114#endif
c5abe7c0 5115#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
5116 .ndo_add_vxlan_port = be_add_vxlan_port,
5117 .ndo_del_vxlan_port = be_del_vxlan_port,
5f35227e 5118 .ndo_features_check = be_features_check,
c5abe7c0 5119#endif
6b7c5b94
SP
5120};
5121
5122static void be_netdev_init(struct net_device *netdev)
5123{
5124 struct be_adapter *adapter = netdev_priv(netdev);
5125
6332c8d3 5126 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68 5127 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
f646968f 5128 NETIF_F_HW_VLAN_CTAG_TX;
8b8ddc68
MM
5129 if (be_multi_rxq(adapter))
5130 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
5131
5132 netdev->features |= netdev->hw_features |
f646968f 5133 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4b972914 5134
eb8a50d9 5135 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 5136 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 5137
fbc13f01
AK
5138 netdev->priv_flags |= IFF_UNICAST_FLT;
5139
6b7c5b94
SP
5140 netdev->flags |= IFF_MULTICAST;
5141
b7e5887e 5142 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 5143
10ef9ab4 5144 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94 5145
7ad24ea4 5146 netdev->ethtool_ops = &be_ethtool_ops;
6b7c5b94
SP
5147}
5148
87ac1a52
KA
5149static void be_cleanup(struct be_adapter *adapter)
5150{
5151 struct net_device *netdev = adapter->netdev;
5152
5153 rtnl_lock();
5154 netif_device_detach(netdev);
5155 if (netif_running(netdev))
5156 be_close(netdev);
5157 rtnl_unlock();
5158
5159 be_clear(adapter);
5160}
5161
484d76fd 5162static int be_resume(struct be_adapter *adapter)
78fad34e 5163{
d0e1b319 5164 struct net_device *netdev = adapter->netdev;
78fad34e
SP
5165 int status;
5166
78fad34e
SP
5167 status = be_setup(adapter);
5168 if (status)
484d76fd 5169 return status;
78fad34e 5170
d0e1b319
KA
5171 if (netif_running(netdev)) {
5172 status = be_open(netdev);
78fad34e 5173 if (status)
484d76fd 5174 return status;
78fad34e
SP
5175 }
5176
d0e1b319
KA
5177 netif_device_attach(netdev);
5178
484d76fd
KA
5179 return 0;
5180}
5181
5182static int be_err_recover(struct be_adapter *adapter)
5183{
5184 struct device *dev = &adapter->pdev->dev;
5185 int status;
5186
5187 status = be_resume(adapter);
5188 if (status)
5189 goto err;
5190
9fa465c0 5191 dev_info(dev, "Adapter recovery successful\n");
78fad34e
SP
5192 return 0;
5193err:
9fa465c0 5194 if (be_physfn(adapter))
78fad34e 5195 dev_err(dev, "Adapter recovery failed\n");
9fa465c0
SP
5196 else
5197 dev_err(dev, "Re-trying adapter recovery\n");
78fad34e
SP
5198
5199 return status;
5200}
5201
eb7dd46c 5202static void be_err_detection_task(struct work_struct *work)
78fad34e
SP
5203{
5204 struct be_adapter *adapter =
eb7dd46c
SP
5205 container_of(work, struct be_adapter,
5206 be_err_detection_work.work);
78fad34e
SP
5207 int status = 0;
5208
5209 be_detect_error(adapter);
5210
954f6825 5211 if (be_check_error(adapter, BE_ERROR_HW)) {
87ac1a52 5212 be_cleanup(adapter);
d0e1b319
KA
5213
5214 /* As of now error recovery support is in Lancer only */
5215 if (lancer_chip(adapter))
5216 status = be_err_recover(adapter);
78fad34e
SP
5217 }
5218
9fa465c0
SP
5219 /* Always attempt recovery on VFs */
5220 if (!status || be_virtfn(adapter))
eb7dd46c 5221 be_schedule_err_detection(adapter);
78fad34e
SP
5222}
5223
5224static void be_log_sfp_info(struct be_adapter *adapter)
5225{
5226 int status;
5227
5228 status = be_cmd_query_sfp_info(adapter);
5229 if (!status) {
5230 dev_err(&adapter->pdev->dev,
5231 "Unqualified SFP+ detected on %c from %s part no: %s",
5232 adapter->port_name, adapter->phy.vendor_name,
5233 adapter->phy.vendor_pn);
5234 }
5235 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
5236}
5237
5238static void be_worker(struct work_struct *work)
5239{
5240 struct be_adapter *adapter =
5241 container_of(work, struct be_adapter, work.work);
5242 struct be_rx_obj *rxo;
5243 int i;
5244
5245 /* when interrupts are not yet enabled, just reap any pending
5246 * mcc completions
5247 */
5248 if (!netif_running(adapter->netdev)) {
5249 local_bh_disable();
5250 be_process_mcc(adapter);
5251 local_bh_enable();
5252 goto reschedule;
5253 }
5254
5255 if (!adapter->stats_cmd_sent) {
5256 if (lancer_chip(adapter))
5257 lancer_cmd_get_pport_stats(adapter,
5258 &adapter->stats_cmd);
5259 else
5260 be_cmd_get_stats(adapter, &adapter->stats_cmd);
5261 }
5262
5263 if (be_physfn(adapter) &&
5264 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
5265 be_cmd_get_die_temperature(adapter);
5266
5267 for_all_rx_queues(adapter, rxo, i) {
5268 /* Replenish RX-queues starved due to memory
5269 * allocation failures.
5270 */
5271 if (rxo->rx_post_starved)
5272 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
5273 }
5274
20947770
PR
5275 /* EQ-delay update for Skyhawk is done while notifying EQ */
5276 if (!skyhawk_chip(adapter))
5277 be_eqd_update(adapter, false);
78fad34e
SP
5278
5279 if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
5280 be_log_sfp_info(adapter);
5281
5282reschedule:
5283 adapter->work_counter++;
5284 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
5285}
5286
6b7c5b94
SP
5287static void be_unmap_pci_bars(struct be_adapter *adapter)
5288{
c5b3ad4c
SP
5289 if (adapter->csr)
5290 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 5291 if (adapter->db)
ce66f781 5292 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
5293}
5294
ce66f781
SP
5295static int db_bar(struct be_adapter *adapter)
5296{
18c57c74 5297 if (lancer_chip(adapter) || be_virtfn(adapter))
ce66f781
SP
5298 return 0;
5299 else
5300 return 4;
5301}
5302
5303static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 5304{
dbf0f2a7 5305 if (skyhawk_chip(adapter)) {
ce66f781
SP
5306 adapter->roce_db.size = 4096;
5307 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5308 db_bar(adapter));
5309 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5310 db_bar(adapter));
5311 }
045508a8 5312 return 0;
6b7c5b94
SP
5313}
5314
5315static int be_map_pci_bars(struct be_adapter *adapter)
5316{
0fa74a4b 5317 struct pci_dev *pdev = adapter->pdev;
6b7c5b94 5318 u8 __iomem *addr;
78fad34e
SP
5319 u32 sli_intf;
5320
5321 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
5322 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
5323 SLI_INTF_FAMILY_SHIFT;
5324 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
fe6d2a38 5325
c5b3ad4c 5326 if (BEx_chip(adapter) && be_physfn(adapter)) {
0fa74a4b 5327 adapter->csr = pci_iomap(pdev, 2, 0);
ddf1169f 5328 if (!adapter->csr)
c5b3ad4c
SP
5329 return -ENOMEM;
5330 }
5331
25848c90 5332 addr = pci_iomap(pdev, db_bar(adapter), 0);
ddf1169f 5333 if (!addr)
6b7c5b94 5334 goto pci_map_err;
ba343c77 5335 adapter->db = addr;
ce66f781 5336
25848c90
SR
5337 if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
5338 if (be_physfn(adapter)) {
5339 /* PCICFG is the 2nd BAR in BE2 */
5340 addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
5341 if (!addr)
5342 goto pci_map_err;
5343 adapter->pcicfg = addr;
5344 } else {
5345 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
5346 }
5347 }
5348
ce66f781 5349 be_roce_map_pci_bars(adapter);
6b7c5b94 5350 return 0;
ce66f781 5351
6b7c5b94 5352pci_map_err:
25848c90 5353 dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
6b7c5b94
SP
5354 be_unmap_pci_bars(adapter);
5355 return -ENOMEM;
5356}
5357
78fad34e 5358static void be_drv_cleanup(struct be_adapter *adapter)
6b7c5b94 5359{
8788fdc2 5360 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
78fad34e 5361 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
5362
5363 if (mem->va)
78fad34e 5364 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
e7b909a6 5365
5b8821b7 5366 mem = &adapter->rx_filter;
e7b909a6 5367 if (mem->va)
78fad34e
SP
5368 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5369
5370 mem = &adapter->stats_cmd;
5371 if (mem->va)
5372 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
6b7c5b94
SP
5373}
5374
78fad34e
SP
5375/* Allocate and initialize various fields in be_adapter struct */
5376static int be_drv_init(struct be_adapter *adapter)
6b7c5b94 5377{
8788fdc2
SP
5378 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
5379 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 5380 struct be_dma_mem *rx_filter = &adapter->rx_filter;
78fad34e
SP
5381 struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
5382 struct device *dev = &adapter->pdev->dev;
5383 int status = 0;
6b7c5b94
SP
5384
5385 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
78fad34e 5386 mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
2b7bcebf
IV
5387 &mbox_mem_alloc->dma,
5388 GFP_KERNEL);
78fad34e
SP
5389 if (!mbox_mem_alloc->va)
5390 return -ENOMEM;
5391
6b7c5b94
SP
5392 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
5393 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
5394 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
5395 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 5396
5b8821b7 5397 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
78fad34e
SP
5398 rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
5399 &rx_filter->dma, GFP_KERNEL);
ddf1169f 5400 if (!rx_filter->va) {
e7b909a6
SP
5401 status = -ENOMEM;
5402 goto free_mbox;
5403 }
1f9061d2 5404
78fad34e
SP
5405 if (lancer_chip(adapter))
5406 stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
5407 else if (BE2_chip(adapter))
5408 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
5409 else if (BE3_chip(adapter))
5410 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
5411 else
5412 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
5413 stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
5414 &stats_cmd->dma, GFP_KERNEL);
5415 if (!stats_cmd->va) {
5416 status = -ENOMEM;
5417 goto free_rx_filter;
5418 }
5419
2984961c 5420 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
5421 spin_lock_init(&adapter->mcc_lock);
5422 spin_lock_init(&adapter->mcc_cq_lock);
5eeff635 5423 init_completion(&adapter->et_cmd_compl);
e7b909a6 5424
78fad34e 5425 pci_save_state(adapter->pdev);
6b7c5b94 5426
78fad34e 5427 INIT_DELAYED_WORK(&adapter->work, be_worker);
eb7dd46c
SP
5428 INIT_DELAYED_WORK(&adapter->be_err_detection_work,
5429 be_err_detection_task);
6b7c5b94 5430
78fad34e
SP
5431 adapter->rx_fc = true;
5432 adapter->tx_fc = true;
6b7c5b94 5433
78fad34e
SP
5434 /* Must be a power of 2 or else MODULO will BUG_ON */
5435 adapter->be_get_temp_freq = 64;
ca34fe38 5436
6b7c5b94 5437 return 0;
78fad34e
SP
5438
5439free_rx_filter:
5440 dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
5441free_mbox:
5442 dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
5443 mbox_mem_alloc->dma);
5444 return status;
6b7c5b94
SP
5445}
5446
3bc6b06c 5447static void be_remove(struct pci_dev *pdev)
6b7c5b94
SP
5448{
5449 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 5450
6b7c5b94
SP
5451 if (!adapter)
5452 return;
5453
045508a8 5454 be_roce_dev_remove(adapter);
8cef7a78 5455 be_intr_set(adapter, false);
045508a8 5456
eb7dd46c 5457 be_cancel_err_detection(adapter);
f67ef7ba 5458
6b7c5b94
SP
5459 unregister_netdev(adapter->netdev);
5460
5fb379ee
SP
5461 be_clear(adapter);
5462
bf99e50d
PR
5463 /* tell fw we're done with firing cmds */
5464 be_cmd_fw_clean(adapter);
5465
78fad34e
SP
5466 be_unmap_pci_bars(adapter);
5467 be_drv_cleanup(adapter);
6b7c5b94 5468
d6b6d987
SP
5469 pci_disable_pcie_error_reporting(pdev);
5470
6b7c5b94
SP
5471 pci_release_regions(pdev);
5472 pci_disable_device(pdev);
5473
5474 free_netdev(adapter->netdev);
5475}
5476
29e9122b
VD
5477ssize_t be_hwmon_show_temp(struct device *dev,
5478 struct device_attribute *dev_attr,
5479 char *buf)
5480{
5481 struct be_adapter *adapter = dev_get_drvdata(dev);
5482
5483 /* Unit: millidegree Celsius */
5484 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5485 return -EIO;
5486 else
5487 return sprintf(buf, "%u\n",
5488 adapter->hwmon_info.be_on_die_temp * 1000);
5489}
5490
5491static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
5492 be_hwmon_show_temp, NULL, 1);
5493
5494static struct attribute *be_hwmon_attrs[] = {
5495 &sensor_dev_attr_temp1_input.dev_attr.attr,
5496 NULL
5497};
5498
5499ATTRIBUTE_GROUPS(be_hwmon);
5500
d379142b
SP
5501static char *mc_name(struct be_adapter *adapter)
5502{
f93f160b
VV
5503 char *str = ""; /* default */
5504
5505 switch (adapter->mc_type) {
5506 case UMC:
5507 str = "UMC";
5508 break;
5509 case FLEX10:
5510 str = "FLEX10";
5511 break;
5512 case vNIC1:
5513 str = "vNIC-1";
5514 break;
5515 case nPAR:
5516 str = "nPAR";
5517 break;
5518 case UFP:
5519 str = "UFP";
5520 break;
5521 case vNIC2:
5522 str = "vNIC-2";
5523 break;
5524 default:
5525 str = "";
5526 }
5527
5528 return str;
d379142b
SP
5529}
5530
5531static inline char *func_name(struct be_adapter *adapter)
5532{
5533 return be_physfn(adapter) ? "PF" : "VF";
5534}
5535
f7062ee5
SP
5536static inline char *nic_name(struct pci_dev *pdev)
5537{
5538 switch (pdev->device) {
5539 case OC_DEVICE_ID1:
5540 return OC_NAME;
5541 case OC_DEVICE_ID2:
5542 return OC_NAME_BE;
5543 case OC_DEVICE_ID3:
5544 case OC_DEVICE_ID4:
5545 return OC_NAME_LANCER;
5546 case BE_DEVICE_ID2:
5547 return BE3_NAME;
5548 case OC_DEVICE_ID5:
5549 case OC_DEVICE_ID6:
5550 return OC_NAME_SH;
5551 default:
5552 return BE_NAME;
5553 }
5554}
5555
1dd06ae8 5556static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
6b7c5b94 5557{
6b7c5b94
SP
5558 struct be_adapter *adapter;
5559 struct net_device *netdev;
21252377 5560 int status = 0;
6b7c5b94 5561
acbafeb1
SP
5562 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
5563
6b7c5b94
SP
5564 status = pci_enable_device(pdev);
5565 if (status)
5566 goto do_none;
5567
5568 status = pci_request_regions(pdev, DRV_NAME);
5569 if (status)
5570 goto disable_dev;
5571 pci_set_master(pdev);
5572
7f640062 5573 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
ddf1169f 5574 if (!netdev) {
6b7c5b94
SP
5575 status = -ENOMEM;
5576 goto rel_reg;
5577 }
5578 adapter = netdev_priv(netdev);
5579 adapter->pdev = pdev;
5580 pci_set_drvdata(pdev, adapter);
5581 adapter->netdev = netdev;
2243e2e9 5582 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 5583
4c15c243 5584 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
5585 if (!status) {
5586 netdev->features |= NETIF_F_HIGHDMA;
5587 } else {
4c15c243 5588 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
5589 if (status) {
5590 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
5591 goto free_netdev;
5592 }
5593 }
5594
2f951a9a
KA
5595 status = pci_enable_pcie_error_reporting(pdev);
5596 if (!status)
5597 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
d6b6d987 5598
78fad34e 5599 status = be_map_pci_bars(adapter);
6b7c5b94 5600 if (status)
39f1d94d 5601 goto free_netdev;
6b7c5b94 5602
78fad34e
SP
5603 status = be_drv_init(adapter);
5604 if (status)
5605 goto unmap_bars;
5606
5fb379ee
SP
5607 status = be_setup(adapter);
5608 if (status)
78fad34e 5609 goto drv_cleanup;
2243e2e9 5610
3abcdeda 5611 be_netdev_init(netdev);
6b7c5b94
SP
5612 status = register_netdev(netdev);
5613 if (status != 0)
5fb379ee 5614 goto unsetup;
6b7c5b94 5615
045508a8
PP
5616 be_roce_dev_add(adapter);
5617
eb7dd46c 5618 be_schedule_err_detection(adapter);
b4e32a71 5619
29e9122b
VD
5620 /* On Die temperature not supported for VF. */
5621 if (be_physfn(adapter)) {
5622 adapter->hwmon_info.hwmon_dev =
5623 devm_hwmon_device_register_with_groups(&pdev->dev,
5624 DRV_NAME,
5625 adapter,
5626 be_hwmon_groups);
5627 adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
5628 }
5629
d379142b 5630 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
21252377 5631 func_name(adapter), mc_name(adapter), adapter->port_name);
34b1ef04 5632
6b7c5b94
SP
5633 return 0;
5634
5fb379ee
SP
5635unsetup:
5636 be_clear(adapter);
78fad34e
SP
5637drv_cleanup:
5638 be_drv_cleanup(adapter);
5639unmap_bars:
5640 be_unmap_pci_bars(adapter);
f9449ab7 5641free_netdev:
fe6d2a38 5642 free_netdev(netdev);
6b7c5b94
SP
5643rel_reg:
5644 pci_release_regions(pdev);
5645disable_dev:
5646 pci_disable_device(pdev);
5647do_none:
c4ca2374 5648 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
5649 return status;
5650}
5651
5652static int be_suspend(struct pci_dev *pdev, pm_message_t state)
5653{
5654 struct be_adapter *adapter = pci_get_drvdata(pdev);
6b7c5b94 5655
76a9e08e 5656 if (adapter->wol_en)
71d8d1b5
AK
5657 be_setup_wol(adapter, true);
5658
d4360d6f 5659 be_intr_set(adapter, false);
eb7dd46c 5660 be_cancel_err_detection(adapter);
f67ef7ba 5661
87ac1a52 5662 be_cleanup(adapter);
6b7c5b94
SP
5663
5664 pci_save_state(pdev);
5665 pci_disable_device(pdev);
5666 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5667 return 0;
5668}
5669
484d76fd 5670static int be_pci_resume(struct pci_dev *pdev)
6b7c5b94 5671{
6b7c5b94 5672 struct be_adapter *adapter = pci_get_drvdata(pdev);
484d76fd 5673 int status = 0;
6b7c5b94
SP
5674
5675 status = pci_enable_device(pdev);
5676 if (status)
5677 return status;
5678
1ca01512 5679 pci_set_power_state(pdev, PCI_D0);
6b7c5b94
SP
5680 pci_restore_state(pdev);
5681
484d76fd 5682 status = be_resume(adapter);
2243e2e9
SP
5683 if (status)
5684 return status;
5685
eb7dd46c
SP
5686 be_schedule_err_detection(adapter);
5687
76a9e08e 5688 if (adapter->wol_en)
71d8d1b5 5689 be_setup_wol(adapter, false);
a4ca055f 5690
6b7c5b94
SP
5691 return 0;
5692}
5693
82456b03
SP
5694/*
5695 * An FLR will stop BE from DMAing any data.
5696 */
5697static void be_shutdown(struct pci_dev *pdev)
5698{
5699 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 5700
2d5d4154
AK
5701 if (!adapter)
5702 return;
82456b03 5703
d114f99a 5704 be_roce_dev_shutdown(adapter);
0f4a6828 5705 cancel_delayed_work_sync(&adapter->work);
eb7dd46c 5706 be_cancel_err_detection(adapter);
a4ca055f 5707
2d5d4154 5708 netif_device_detach(adapter->netdev);
82456b03 5709
57841869
AK
5710 be_cmd_reset_function(adapter);
5711
82456b03 5712 pci_disable_device(pdev);
82456b03
SP
5713}
5714
cf588477 5715static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
748b539a 5716 pci_channel_state_t state)
cf588477
SP
5717{
5718 struct be_adapter *adapter = pci_get_drvdata(pdev);
cf588477
SP
5719
5720 dev_err(&adapter->pdev->dev, "EEH error detected\n");
5721
954f6825
VD
5722 if (!be_check_error(adapter, BE_ERROR_EEH)) {
5723 be_set_error(adapter, BE_ERROR_EEH);
cf588477 5724
eb7dd46c 5725 be_cancel_err_detection(adapter);
cf588477 5726
87ac1a52 5727 be_cleanup(adapter);
cf588477 5728 }
cf588477
SP
5729
5730 if (state == pci_channel_io_perm_failure)
5731 return PCI_ERS_RESULT_DISCONNECT;
5732
5733 pci_disable_device(pdev);
5734
eeb7fc7b
SK
5735 /* The error could cause the FW to trigger a flash debug dump.
5736 * Resetting the card while flash dump is in progress
c8a54163
PR
5737 * can cause it not to recover; wait for it to finish.
5738 * Wait only for first function as it is needed only once per
5739 * adapter.
eeb7fc7b 5740 */
c8a54163
PR
5741 if (pdev->devfn == 0)
5742 ssleep(30);
5743
cf588477
SP
5744 return PCI_ERS_RESULT_NEED_RESET;
5745}
5746
5747static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
5748{
5749 struct be_adapter *adapter = pci_get_drvdata(pdev);
5750 int status;
5751
5752 dev_info(&adapter->pdev->dev, "EEH reset\n");
cf588477
SP
5753
5754 status = pci_enable_device(pdev);
5755 if (status)
5756 return PCI_ERS_RESULT_DISCONNECT;
5757
5758 pci_set_master(pdev);
1ca01512 5759 pci_set_power_state(pdev, PCI_D0);
cf588477
SP
5760 pci_restore_state(pdev);
5761
5762 /* Check if card is ok and fw is ready */
c5b3ad4c
SP
5763 dev_info(&adapter->pdev->dev,
5764 "Waiting for FW to be ready after EEH reset\n");
bf99e50d 5765 status = be_fw_wait_ready(adapter);
cf588477
SP
5766 if (status)
5767 return PCI_ERS_RESULT_DISCONNECT;
5768
d6b6d987 5769 pci_cleanup_aer_uncorrect_error_status(pdev);
954f6825 5770 be_clear_error(adapter, BE_CLEAR_ALL);
cf588477
SP
5771 return PCI_ERS_RESULT_RECOVERED;
5772}
5773
5774static void be_eeh_resume(struct pci_dev *pdev)
5775{
5776 int status = 0;
5777 struct be_adapter *adapter = pci_get_drvdata(pdev);
cf588477
SP
5778
5779 dev_info(&adapter->pdev->dev, "EEH resume\n");
5780
5781 pci_save_state(pdev);
5782
484d76fd 5783 status = be_resume(adapter);
bf99e50d
PR
5784 if (status)
5785 goto err;
5786
eb7dd46c 5787 be_schedule_err_detection(adapter);
cf588477
SP
5788 return;
5789err:
5790 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
5791}
5792
ace40aff
VV
5793static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
5794{
5795 struct be_adapter *adapter = pci_get_drvdata(pdev);
5796 u16 num_vf_qs;
5797 int status;
5798
5799 if (!num_vfs)
5800 be_vf_clear(adapter);
5801
5802 adapter->num_vfs = num_vfs;
5803
5804 if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
5805 dev_warn(&pdev->dev,
5806 "Cannot disable VFs while they are assigned\n");
5807 return -EBUSY;
5808 }
5809
5810 /* When the HW is in SRIOV capable configuration, the PF-pool resources
5811 * are equally distributed across the max-number of VFs. The user may
5812 * request only a subset of the max-vfs to be enabled.
5813 * Based on num_vfs, redistribute the resources across num_vfs so that
5814 * each VF will have access to more number of resources.
5815 * This facility is not available in BE3 FW.
5816 * Also, this is done by FW in Lancer chip.
5817 */
5818 if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
5819 num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
5820 status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
5821 adapter->num_vfs, num_vf_qs);
5822 if (status)
5823 dev_err(&pdev->dev,
5824 "Failed to optimize SR-IOV resources\n");
5825 }
5826
5827 status = be_get_resources(adapter);
5828 if (status)
5829 return be_cmd_status(status);
5830
5831 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
5832 rtnl_lock();
5833 status = be_update_queues(adapter);
5834 rtnl_unlock();
5835 if (status)
5836 return be_cmd_status(status);
5837
5838 if (adapter->num_vfs)
5839 status = be_vf_setup(adapter);
5840
5841 if (!status)
5842 return adapter->num_vfs;
5843
5844 return 0;
5845}
5846
3646f0e5 5847static const struct pci_error_handlers be_eeh_handlers = {
cf588477
SP
5848 .error_detected = be_eeh_err_detected,
5849 .slot_reset = be_eeh_reset,
5850 .resume = be_eeh_resume,
5851};
5852
6b7c5b94
SP
5853static struct pci_driver be_driver = {
5854 .name = DRV_NAME,
5855 .id_table = be_dev_ids,
5856 .probe = be_probe,
5857 .remove = be_remove,
5858 .suspend = be_suspend,
484d76fd 5859 .resume = be_pci_resume,
82456b03 5860 .shutdown = be_shutdown,
ace40aff 5861 .sriov_configure = be_pci_sriov_configure,
cf588477 5862 .err_handler = &be_eeh_handlers
6b7c5b94
SP
5863};
5864
5865static int __init be_init_module(void)
5866{
8e95a202
JP
5867 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5868 rx_frag_size != 2048) {
6b7c5b94
SP
5869 printk(KERN_WARNING DRV_NAME
5870 " : Module param rx_frag_size must be 2048/4096/8192."
5871 " Using 2048\n");
5872 rx_frag_size = 2048;
5873 }
6b7c5b94 5874
ace40aff
VV
5875 if (num_vfs > 0) {
5876 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
5877 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
5878 }
5879
6b7c5b94
SP
5880 return pci_register_driver(&be_driver);
5881}
5882module_init(be_init_module);
5883
5884static void __exit be_exit_module(void)
5885{
5886 pci_unregister_driver(&be_driver);
5887}
5888module_exit(be_exit_module);
This page took 1.681412 seconds and 5 git commands to generate.