/*
 * Copyright (C) 2005 - 2016 Broadcom
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

/* Per-module error detection/recovery workq shared across all functions.
 * Each function schedules its own work request on this shared workq.
 */
struct workqueue_struct *be_err_recovery_workq;

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* Workqueue used by all functions for deferring cmd calls to the adapter */
struct workqueue_struct *be_wq;

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

#define BE_VF_IF_EN_FLAGS	(BE_IF_FLAGS_UNTAGGED | \
				 BE_IF_FLAGS_BROADCAST | \
				 BE_IF_FLAGS_MULTICAST | \
				 BE_IF_FLAGS_PASS_L3L4_ERRORS)

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

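/* Sets/clears the HOSTINTR bit in the MEMBAR interrupt-control register
 * (accessed through PCI config space) to enable/disable host interrupts
 * for this function; a no-op when the bit already matches the requested
 * state.
 */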
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

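/* Doorbell helpers: each builds a doorbell word and writes it to the
 * function's doorbell BAR. The wmb() in the RQ/TXQ variants orders the
 * queue-entry stores ahead of the doorbell write, so the HW never fetches
 * entries that are not yet visible in memory.
 */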
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
{
	int i;

	/* Check if mac has already been added as part of uc-list */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (ether_addr_equal((u8 *)&adapter->uc_list[i * ETH_ALEN],
				     mac)) {
			/* mac already added, skip addition */
			adapter->pmac_id[0] = adapter->pmac_id[i + 1];
			return 0;
		}
	}

	return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
			       &adapter->pmac_id[0], 0);
}

static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
{
	int i;

	/* Skip deletion if the programmed mac is
	 * being used in uc-list
	 */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (adapter->pmac_id[i + 1] == pmac_id)
			return;
	}
	be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
		return 0;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	mutex_lock(&adapter->rx_filter_lock);
	status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
	if (!status) {
		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_dev_mac_del(adapter, old_pmac_id);
	}

	mutex_unlock(&adapter->rx_filter_lock);
	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}
done:
	ether_addr_copy(adapter->dev_mac, addr->sa_data);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

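/* Folds a 16-bit HW counter that wraps at 65535 into a 32-bit SW
 * accumulator: if the new reading is below the low half of the
 * accumulator, the counter must have wrapped once since the last read.
 */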
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

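/* ndo_get_stats64() handler: totals the per-queue SW counters (read inside
 * u64_stats retry loops so the 64-bit values stay consistent even on
 * 32-bit hosts) and derives the rtnl error totals from the HW counters
 * parsed in be_parse_stats().
 */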
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
				     rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
			   drvs->rx_alignment_symbol_errors +
			   drvs->rx_in_range_errors +
			   drvs->rx_out_range_errors +
			   drvs->rx_frame_too_long +
			   drvs->rx_dropped_too_small +
			   drvs->rx_dropped_too_short +
			   drvs->rx_dropped_header_too_small +
			   drvs->rx_dropped_tcp_length +
			   drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
				  drvs->rx_out_range_errors +
				  drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);
	u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += tx_pkts;
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		stats->tx_vxlan_offload_pkts += tx_pkts;
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

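/* Returns the VLAN tag to place in the TX WRB: if the 802.1p priority
 * requested by the stack is not in the priority bitmap available to this
 * function, it is replaced with the FW-recommended priority bits.
 */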
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio_bits;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

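/* TXQ occupancy helpers: the queue counts as full once it can no longer
 * hold a maximally-fragmented skb, and may be woken when it has drained
 * to half capacity.
 */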
static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}

static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
	      (u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u32 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u32 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u32 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}

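/* Returns true for IPv6 packets whose first extension header looks
 * malformed (hdrlen == 0xff); such packets can trigger the BE3 HW
 * VLAN-tagging lockup handled in the workarounds below.
 */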
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		      VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	int err;

	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
	 * packets that are 32b or less may cause a transmit stall
	 * on that port. The workaround is to pad such packets
	 * (len <= 32 bytes) to a minimum length of 36b.
	 */
	if (skb->len <= 32) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	/* The stack can send us skbs with length greater than
	 * what the HW can handle. Trim the extra bytes.
	 */
	WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
	err = pskb_trim(skb, BE_MAX_GSO_SIZE);
	WARN_ON(err);

	return skb;
}

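/* Rings the TX doorbell for all WRBs pending on this queue. On BE chips
 * an odd number of pending WRBs must first be padded with a dummy WRB,
 * with the num_wrb count in the last header fixed up to match.
 */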
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}

/* OS2BMC related */

#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_broadcast_packet(eh, adapter)	\
	(is_multicast_ether_addr(eh->h_dest) &&	\
	 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask &	\
	 BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)

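/* Decides whether a copy of this TX packet must also be sent to the BMC
 * over the OS2BMC channel, based on the packet type and the BMC filtering
 * bits cached in adapter->bmc_filt_mask.
 */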
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to change the VLAN state if the I/F is in promiscuous */
	if (adapter->netdev->flags & IFF_PROMISC)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
		if (status)
			return status;
	}
	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	mutex_lock(&adapter->rx_filter_lock);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto done;

	if (test_bit(vid, adapter->vids))
		goto done;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
done:
	mutex_unlock(&adapter->rx_filter_lock);
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	mutex_lock(&adapter->rx_filter_lock);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto done;

	if (!test_bit(vid, adapter->vids))
		goto done;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	status = be_vid_config(adapter);
done:
	mutex_unlock(&adapter->rx_filter_lock);
	return status;
}

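/* RX-filter mode helpers: each one requests a promiscuous-mode change
 * through a FW rx-filter cmd and mirrors the result in the cached
 * adapter->if_flags.
 */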
static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_uc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
}

static void be_clear_uc_promisc(struct be_adapter *adapter)
{
	int status;

	if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
}

1579
1580/* The below 2 functions are the callback args for __dev_mc_sync/dev_uc_sync().
1581 * We use a single callback function for both sync and unsync. We really don't
1582 * add/remove addresses through this callback. But, we use it to detect changes
1583 * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
1584 */
static int be_uc_list_update(struct net_device *netdev,
                 const unsigned char *addr)
{
    struct be_adapter *adapter = netdev_priv(netdev);

    adapter->update_uc_list = true;
    return 0;
}

static int be_mc_list_update(struct net_device *netdev,
                 const unsigned char *addr)
{
    struct be_adapter *adapter = netdev_priv(netdev);

    adapter->update_mc_list = true;
    return 0;
}

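/* Snapshot the netdev mc-list under netif_addr_lock and program it to HW
 * outside the lock. Falls back to mcast-promisc when IFF_ALLMULTI is set or
 * more groups are configured than be_max_mc() allows.
 */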
static void be_set_mc_list(struct be_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    struct netdev_hw_addr *ha;
    bool mc_promisc = false;
    int status;

    netif_addr_lock_bh(netdev);
    __dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);

    if (netdev->flags & IFF_PROMISC) {
        adapter->update_mc_list = false;
    } else if (netdev->flags & IFF_ALLMULTI ||
           netdev_mc_count(netdev) > be_max_mc(adapter)) {
        /* Enable multicast promisc if num configured exceeds
         * what we support
         */
        mc_promisc = true;
        adapter->update_mc_list = false;
    } else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
        /* Update mc-list unconditionally if the iface was previously
         * in mc-promisc mode and now is out of that mode.
         */
        adapter->update_mc_list = true;
    }

    if (adapter->update_mc_list) {
        int i = 0;

        /* cache the mc-list in adapter */
        netdev_for_each_mc_addr(ha, netdev) {
            ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
            i++;
        }
        adapter->mc_count = netdev_mc_count(netdev);
    }
    netif_addr_unlock_bh(netdev);

    if (mc_promisc) {
        be_set_mc_promisc(adapter);
    } else if (adapter->update_mc_list) {
        status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
        if (!status)
            adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
        else
            be_set_mc_promisc(adapter);

        adapter->update_mc_list = false;
    }
}

static void be_clear_mc_list(struct be_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;

    __dev_mc_unsync(netdev, NULL);
    be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
    adapter->mc_count = 0;
}

static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
{
    if (ether_addr_equal((u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
                 adapter->dev_mac)) {
        adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
        return 0;
    }

    return be_cmd_pmac_add(adapter,
                   (u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
                   adapter->if_handle,
                   &adapter->pmac_id[uc_idx + 1], 0);
}

static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
{
    if (pmac_id == adapter->pmac_id[0])
        return;

    be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
}

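/* Same pattern as be_set_mc_list(): cache the uc-list under netif_addr_lock,
 * then re-program PMAC entries outside the lock. Slot 0 of uc_list/pmac_id
 * is reserved for the primary MAC, so at most be_max_uc() - 1 extra UC
 * addresses fit before the interface must fall back to uc-promisc.
 */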
static void be_set_uc_list(struct be_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    struct netdev_hw_addr *ha;
    bool uc_promisc = false;
    int curr_uc_macs = 0, i;

    netif_addr_lock_bh(netdev);
    __dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);

    if (netdev->flags & IFF_PROMISC) {
        adapter->update_uc_list = false;
    } else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
        uc_promisc = true;
        adapter->update_uc_list = false;
    } else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
        /* Update uc-list unconditionally if the iface was previously
         * in uc-promisc mode and now is out of that mode.
         */
        adapter->update_uc_list = true;
    }

    if (adapter->update_uc_list) {
        i = 1; /* First slot is claimed by the Primary MAC */

        /* cache the uc-list in adapter array */
        netdev_for_each_uc_addr(ha, netdev) {
            ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
            i++;
        }
        curr_uc_macs = netdev_uc_count(netdev);
    }
    netif_addr_unlock_bh(netdev);

    if (uc_promisc) {
        be_set_uc_promisc(adapter);
    } else if (adapter->update_uc_list) {
        be_clear_uc_promisc(adapter);

        for (i = 0; i < adapter->uc_macs; i++)
            be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);

        for (i = 0; i < curr_uc_macs; i++)
            be_uc_mac_add(adapter, i);
        adapter->uc_macs = curr_uc_macs;
        adapter->update_uc_list = false;
    }
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    int i;

    __dev_uc_unsync(netdev, NULL);
    for (i = 0; i < adapter->uc_macs; i++)
        be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);

    adapter->uc_macs = 0;
}

static void __be_set_rx_mode(struct be_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;

    mutex_lock(&adapter->rx_filter_lock);

    if (netdev->flags & IFF_PROMISC) {
        if (!be_in_all_promisc(adapter))
            be_set_all_promisc(adapter);
    } else if (be_in_all_promisc(adapter)) {
        /* We need to re-program the vlan-list or clear
         * vlan-promisc mode (if needed) when the interface
         * comes out of promisc mode.
         */
        be_vid_config(adapter);
    }

    be_set_uc_list(adapter);
    be_set_mc_list(adapter);

    mutex_unlock(&adapter->rx_filter_lock);
}

static void be_work_set_rx_mode(struct work_struct *work)
{
    struct be_cmd_work *cmd_work =
                container_of(work, struct be_cmd_work, work);

    __be_set_rx_mode(cmd_work->adapter);
    kfree(cmd_work);
}

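/* SR-IOV ndo callbacks follow. All of them bail out with -EPERM when SR-IOV
 * is disabled and with -EINVAL for an out-of-range VF index before touching
 * the HW.
 */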
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
    struct be_adapter *adapter = netdev_priv(netdev);
    struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
    int status;

    if (!sriov_enabled(adapter))
        return -EPERM;

    if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
        return -EINVAL;

    /* Proceed further only if user provided MAC is different
     * from active MAC
     */
    if (ether_addr_equal(mac, vf_cfg->mac_addr))
        return 0;

    if (BEx_chip(adapter)) {
        be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
                vf + 1);

        status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                     &vf_cfg->pmac_id, vf + 1);
    } else {
        status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
                    vf + 1);
    }

    if (status) {
        dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
            mac, vf, status);
        return be_cmd_status(status);
    }

    ether_addr_copy(vf_cfg->mac_addr, mac);

    return 0;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                struct ifla_vf_info *vi)
{
    struct be_adapter *adapter = netdev_priv(netdev);
    struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

    if (!sriov_enabled(adapter))
        return -EPERM;

    if (vf >= adapter->num_vfs)
        return -EINVAL;

    vi->vf = vf;
    vi->max_tx_rate = vf_cfg->tx_rate;
    vi->min_tx_rate = 0;
    vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
    vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
    memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
    vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
    vi->spoofchk = adapter->vf_cfg[vf].spoofchk;

    return 0;
}

static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
    struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
    u16 vids[BE_NUM_VLANS_SUPPORTED];
    int vf_if_id = vf_cfg->if_handle;
    int status;

    /* Enable Transparent VLAN Tagging */
    status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
    if (status)
        return status;

    /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
    vids[0] = 0;
    status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
    if (!status)
        dev_info(&adapter->pdev->dev,
             "Cleared guest VLANs on VF%d", vf);

    /* After TVT is enabled, disallow VFs to program VLAN filters */
    if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
        status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
                          ~BE_PRIV_FILTMGMT, vf + 1);
        if (!status)
            vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
    }
    return 0;
}

static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
    struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
    struct device *dev = &adapter->pdev->dev;
    int status;

    /* Reset Transparent VLAN Tagging. */
    status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
                       vf_cfg->if_handle, 0, 0);
    if (status)
        return status;

    /* Allow VFs to program VLAN filtering */
    if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
        status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
                          BE_PRIV_FILTMGMT, vf + 1);
        if (!status) {
            vf_cfg->privileges |= BE_PRIV_FILTMGMT;
            dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
        }
    }

    dev_info(dev,
         "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
    return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
    struct be_adapter *adapter = netdev_priv(netdev);
    struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
    int status;

    if (!sriov_enabled(adapter))
        return -EPERM;

    if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
        return -EINVAL;

    if (vlan || qos) {
        vlan |= qos << VLAN_PRIO_SHIFT;
        status = be_set_vf_tvt(adapter, vf, vlan);
    } else {
        status = be_clear_vf_tvt(adapter, vf);
    }

    if (status) {
        dev_err(&adapter->pdev->dev,
            "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
            status);
        return be_cmd_status(status);
    }

    vf_cfg->vlan_tag = vlan;
    return 0;
}

static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
                 int min_tx_rate, int max_tx_rate)
{
    struct be_adapter *adapter = netdev_priv(netdev);
    struct device *dev = &adapter->pdev->dev;
    int percent_rate, status = 0;
    u16 link_speed = 0;
    u8 link_status;

    if (!sriov_enabled(adapter))
        return -EPERM;

    if (vf >= adapter->num_vfs)
        return -EINVAL;

    if (min_tx_rate)
        return -EINVAL;

    if (!max_tx_rate)
        goto config_qos;

    status = be_cmd_link_status_query(adapter, &link_speed,
                      &link_status, 0);
    if (status)
        goto err;

    if (!link_status) {
        dev_err(dev, "TX-rate setting not allowed when link is down\n");
        status = -ENETDOWN;
        goto err;
    }

    if (max_tx_rate < 100 || max_tx_rate > link_speed) {
        dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
            link_speed);
        status = -EINVAL;
        goto err;
    }

    /* On Skyhawk the QOS setting must be done only as a % value */
    percent_rate = link_speed / 100;
    if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
        dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
            percent_rate);
        status = -EINVAL;
        goto err;
    }

config_qos:
    status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
    if (status)
        goto err;

    adapter->vf_cfg[vf].tx_rate = max_tx_rate;
    return 0;

err:
    dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
        max_tx_rate, vf);
    return be_cmd_status(status);
}

static int be_set_vf_link_state(struct net_device *netdev, int vf,
                int link_state)
{
    struct be_adapter *adapter = netdev_priv(netdev);
    int status;

    if (!sriov_enabled(adapter))
        return -EPERM;

    if (vf >= adapter->num_vfs)
        return -EINVAL;

    status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
    if (status) {
        dev_err(&adapter->pdev->dev,
            "Link state change on VF %d failed: %#x\n", vf, status);
        return be_cmd_status(status);
    }

    adapter->vf_cfg[vf].plink_tracking = link_state;

    return 0;
}

static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
{
    struct be_adapter *adapter = netdev_priv(netdev);
    struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
    u8 spoofchk;
    int status;

    if (!sriov_enabled(adapter))
        return -EPERM;

    if (vf >= adapter->num_vfs)
        return -EINVAL;

    if (BEx_chip(adapter))
        return -EOPNOTSUPP;

    if (enable == vf_cfg->spoofchk)
        return 0;

    spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;

    status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
                       0, spoofchk);
    if (status) {
        dev_err(&adapter->pdev->dev,
            "Spoofchk change on VF %d failed: %#x\n", vf, status);
        return be_cmd_status(status);
    }

    vf_cfg->spoofchk = enable;
    return 0;
}

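/* Adaptive interrupt coalescing (AIC): the EQ delay is derived from the
 * combined RX + TX packet rate sampled since the last update, i.e.
 * eqd = (pps / 15000) << 2, forced to 0 below 8 to keep latency low at
 * trivial loads, then clamped to [min_eqd, max_eqd].
 */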
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
              ulong now)
{
    aic->rx_pkts_prev = rx_pkts;
    aic->tx_reqs_prev = tx_pkts;
    aic->jiffies = now;
}

static int be_get_new_eqd(struct be_eq_obj *eqo)
{
    struct be_adapter *adapter = eqo->adapter;
    int eqd, start;
    struct be_aic_obj *aic;
    struct be_rx_obj *rxo;
    struct be_tx_obj *txo;
    u64 rx_pkts = 0, tx_pkts = 0;
    ulong now;
    u32 pps, delta;
    int i;

    aic = &adapter->aic_obj[eqo->idx];
    if (!aic->enable) {
        if (aic->jiffies)
            aic->jiffies = 0;
        eqd = aic->et_eqd;
        return eqd;
    }

    for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
        do {
            start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
            rx_pkts += rxo->stats.rx_pkts;
        } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
    }

    for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
        do {
            start = u64_stats_fetch_begin_irq(&txo->stats.sync);
            tx_pkts += txo->stats.tx_reqs;
        } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
    }

    /* Skip, if wrapped around or first calculation */
    now = jiffies;
    if (!aic->jiffies || time_before(now, aic->jiffies) ||
        rx_pkts < aic->rx_pkts_prev ||
        tx_pkts < aic->tx_reqs_prev) {
        be_aic_update(aic, rx_pkts, tx_pkts, now);
        return aic->prev_eqd;
    }

    delta = jiffies_to_msecs(now - aic->jiffies);
    if (delta == 0)
        return aic->prev_eqd;

    pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
          (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
    eqd = (pps / 15000) << 2;

    if (eqd < 8)
        eqd = 0;
    eqd = min_t(u32, eqd, aic->max_eqd);
    eqd = max_t(u32, eqd, aic->min_eqd);

    be_aic_update(aic, rx_pkts, tx_pkts, now);

    return eqd;
}

/* For Skyhawk-R only */
static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
{
    struct be_adapter *adapter = eqo->adapter;
    struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
    ulong now = jiffies;
    int eqd;
    u32 mult_enc;

    if (!aic->enable)
        return 0;

    if (jiffies_to_msecs(now - aic->jiffies) < 1)
        eqd = aic->prev_eqd;
    else
        eqd = be_get_new_eqd(eqo);

    if (eqd > 100)
        mult_enc = R2I_DLY_ENC_1;
    else if (eqd > 60)
        mult_enc = R2I_DLY_ENC_2;
    else if (eqd > 20)
        mult_enc = R2I_DLY_ENC_3;
    else
        mult_enc = R2I_DLY_ENC_0;

    aic->prev_eqd = eqd;

    return mult_enc;
}

void be_eqd_update(struct be_adapter *adapter, bool force_update)
{
    struct be_set_eqd set_eqd[MAX_EVT_QS];
    struct be_aic_obj *aic;
    struct be_eq_obj *eqo;
    int i, num = 0, eqd;

    for_all_evt_queues(adapter, eqo, i) {
        aic = &adapter->aic_obj[eqo->idx];
        eqd = be_get_new_eqd(eqo);
        if (force_update || eqd != aic->prev_eqd) {
            set_eqd[num].delay_multiplier = (eqd * 65)/100;
            set_eqd[num].eq_id = eqo->q.id;
            aic->prev_eqd = eqd;
            num++;
        }
    }

    if (num)
        be_cmd_modify_eqd(adapter, set_eqd, num);
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                   struct be_rx_compl_info *rxcp)
{
    struct be_rx_stats *stats = rx_stats(rxo);

    u64_stats_update_begin(&stats->sync);
    stats->rx_compl++;
    stats->rx_bytes += rxcp->pkt_size;
    stats->rx_pkts++;
    if (rxcp->tunneled)
        stats->rx_vxlan_offload_pkts++;
    if (rxcp->pkt_type == BE_MULTICAST_PACKET)
        stats->rx_mcast_pkts++;
    if (rxcp->err)
        stats->rx_compl_err++;
    u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
    /* L4 checksum is not reliable for non TCP/UDP packets.
     * Also ignore ipcksm for ipv6 pkts
     */
    return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
           (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
    struct be_adapter *adapter = rxo->adapter;
    struct be_rx_page_info *rx_page_info;
    struct be_queue_info *rxq = &rxo->q;
    u32 frag_idx = rxq->tail;

    rx_page_info = &rxo->page_info_tbl[frag_idx];
    BUG_ON(!rx_page_info->page);

    if (rx_page_info->last_frag) {
        dma_unmap_page(&adapter->pdev->dev,
                   dma_unmap_addr(rx_page_info, bus),
                   adapter->big_page_size, DMA_FROM_DEVICE);
        rx_page_info->last_frag = false;
    } else {
        dma_sync_single_for_cpu(&adapter->pdev->dev,
                    dma_unmap_addr(rx_page_info, bus),
                    rx_frag_size, DMA_FROM_DEVICE);
    }

    queue_tail_inc(rxq);
    atomic_dec(&rxq->used);
    return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
    struct be_rx_page_info *page_info;
    u16 i, num_rcvd = rxcp->num_rcvd;

    for (i = 0; i < num_rcvd; i++) {
        page_info = get_rx_page_info(rxo);
        put_page(page_info->page);
        memset(page_info, 0, sizeof(*page_info));
    }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                 struct be_rx_compl_info *rxcp)
{
    struct be_rx_page_info *page_info;
    u16 i, j;
    u16 hdr_len, curr_frag_len, remaining;
    u8 *start;

    page_info = get_rx_page_info(rxo);
    start = page_address(page_info->page) + page_info->page_offset;
    prefetch(start);

    /* Copy data in the first descriptor of this completion */
    curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

    skb->len = curr_frag_len;
    if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
        memcpy(skb->data, start, curr_frag_len);
        /* Complete packet has now been moved to data */
        put_page(page_info->page);
        skb->data_len = 0;
        skb->tail += curr_frag_len;
    } else {
        hdr_len = ETH_HLEN;
        memcpy(skb->data, start, hdr_len);
        skb_shinfo(skb)->nr_frags = 1;
        skb_frag_set_page(skb, 0, page_info->page);
        skb_shinfo(skb)->frags[0].page_offset =
                    page_info->page_offset + hdr_len;
        skb_frag_size_set(&skb_shinfo(skb)->frags[0],
                  curr_frag_len - hdr_len);
        skb->data_len = curr_frag_len - hdr_len;
        skb->truesize += rx_frag_size;
        skb->tail += hdr_len;
    }
    page_info->page = NULL;

    if (rxcp->pkt_size <= rx_frag_size) {
        BUG_ON(rxcp->num_rcvd != 1);
        return;
    }

    /* More frags present for this completion */
    remaining = rxcp->pkt_size - curr_frag_len;
    for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
        page_info = get_rx_page_info(rxo);
        curr_frag_len = min(remaining, rx_frag_size);

        /* Coalesce all frags from the same physical page in one slot */
        if (page_info->page_offset == 0) {
            /* Fresh page */
            j++;
            skb_frag_set_page(skb, j, page_info->page);
            skb_shinfo(skb)->frags[j].page_offset =
                            page_info->page_offset;
            skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
            skb_shinfo(skb)->nr_frags++;
        } else {
            put_page(page_info->page);
        }

        skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
        skb->len += curr_frag_len;
        skb->data_len += curr_frag_len;
        skb->truesize += rx_frag_size;
        remaining -= curr_frag_len;
        page_info->page = NULL;
    }
    BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
                struct be_rx_compl_info *rxcp)
{
    struct be_adapter *adapter = rxo->adapter;
    struct net_device *netdev = adapter->netdev;
    struct sk_buff *skb;

    skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
    if (unlikely(!skb)) {
        rx_stats(rxo)->rx_drops_no_skbs++;
        be_rx_compl_discard(rxo, rxcp);
        return;
    }

    skb_fill_rx_data(rxo, skb, rxcp);

    if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
        skb->ip_summed = CHECKSUM_UNNECESSARY;
    else
        skb_checksum_none_assert(skb);

    skb->protocol = eth_type_trans(skb, netdev);
    skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
    if (netdev->features & NETIF_F_RXHASH)
        skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

    skb->csum_level = rxcp->tunneled;
    skb_mark_napi_id(skb, napi);

    if (rxcp->vlanf)
        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

    netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
                    struct napi_struct *napi,
                    struct be_rx_compl_info *rxcp)
{
    struct be_adapter *adapter = rxo->adapter;
    struct be_rx_page_info *page_info;
    struct sk_buff *skb = NULL;
    u16 remaining, curr_frag_len;
    u16 i, j;

    skb = napi_get_frags(napi);
    if (!skb) {
        be_rx_compl_discard(rxo, rxcp);
        return;
    }

    remaining = rxcp->pkt_size;
    for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
        page_info = get_rx_page_info(rxo);

        curr_frag_len = min(remaining, rx_frag_size);

        /* Coalesce all frags from the same physical page in one slot */
        if (i == 0 || page_info->page_offset == 0) {
            /* First frag or Fresh page */
            j++;
            skb_frag_set_page(skb, j, page_info->page);
            skb_shinfo(skb)->frags[j].page_offset =
                            page_info->page_offset;
            skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
        } else {
            put_page(page_info->page);
        }
        skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
        skb->truesize += rx_frag_size;
        remaining -= curr_frag_len;
        memset(page_info, 0, sizeof(*page_info));
    }
    BUG_ON(j > MAX_SKB_FRAGS);

    skb_shinfo(skb)->nr_frags = j + 1;
    skb->len = rxcp->pkt_size;
    skb->data_len = rxcp->pkt_size;
    skb->ip_summed = CHECKSUM_UNNECESSARY;
    skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
    if (adapter->netdev->features & NETIF_F_RXHASH)
        skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

    skb->csum_level = rxcp->tunneled;

    if (rxcp->vlanf)
        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

    napi_gro_frags(napi);
}

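/* Translate a raw RX completion into the chip-agnostic be_rx_compl_info.
 * v1 is used when the adapter runs in BE3-native mode, v0 otherwise; only
 * the v0 format carries the ip_frag bit.
 */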
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
                 struct be_rx_compl_info *rxcp)
{
    rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
    rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
    rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
    rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
    rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
    rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
    rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
    rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
    rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
    rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
    rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
    if (rxcp->vlanf) {
        rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
        rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
    }
    rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
    rxcp->tunneled = GET_RX_COMPL_V1_BITS(tunneled, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
                 struct be_rx_compl_info *rxcp)
{
    rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
    rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
    rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
    rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
    rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
    rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
    rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
    rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
    rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
    rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
    rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
    if (rxcp->vlanf) {
        rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
        rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
    }
    rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
    rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
    struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
    struct be_rx_compl_info *rxcp = &rxo->rxcp;
    struct be_adapter *adapter = rxo->adapter;

    /* For checking the valid bit it is OK to use either definition as the
     * valid bit is at the same position in both v0 and v1 Rx compl */
    if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
        return NULL;

    rmb();
    be_dws_le_to_cpu(compl, sizeof(*compl));

    if (adapter->be3_native)
        be_parse_rx_compl_v1(compl, rxcp);
    else
        be_parse_rx_compl_v0(compl, rxcp);

    if (rxcp->ip_frag)
        rxcp->l4_csum = 0;

    if (rxcp->vlanf) {
        /* In QNQ modes, if qnq bit is not set, then the packet was
         * tagged only with the transparent outer vlan-tag and must
         * not be treated as a vlan packet by host
         */
        if (be_is_qnq_mode(adapter) && !rxcp->qnq)
            rxcp->vlanf = 0;

        if (!lancer_chip(adapter))
            rxcp->vlan_tag = swab16(rxcp->vlan_tag);

        if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
            !test_bit(rxcp->vlan_tag, adapter->vids))
            rxcp->vlanf = 0;
    }

    /* As the compl has been parsed, reset it; we won't touch it again */
    compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

    queue_tail_inc(&rxo->cq);
    return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
    u32 order = get_order(size);

    if (order > 0)
        gfp |= __GFP_COMP;
    return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
    struct be_adapter *adapter = rxo->adapter;
    struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
    struct be_queue_info *rxq = &rxo->q;
    struct page *pagep = NULL;
    struct device *dev = &adapter->pdev->dev;
    struct be_eth_rx_d *rxd;
    u64 page_dmaaddr = 0, frag_dmaaddr;
    u32 posted, page_offset = 0, notify = 0;

    page_info = &rxo->page_info_tbl[rxq->head];
    for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
        if (!pagep) {
            pagep = be_alloc_pages(adapter->big_page_size, gfp);
            if (unlikely(!pagep)) {
                rx_stats(rxo)->rx_post_fail++;
                break;
            }
            page_dmaaddr = dma_map_page(dev, pagep, 0,
                            adapter->big_page_size,
                            DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, page_dmaaddr)) {
                put_page(pagep);
                pagep = NULL;
                adapter->drv_stats.dma_map_errors++;
                break;
            }
            page_offset = 0;
        } else {
            get_page(pagep);
            page_offset += rx_frag_size;
        }
        page_info->page_offset = page_offset;
        page_info->page = pagep;

        rxd = queue_head_node(rxq);
        frag_dmaaddr = page_dmaaddr + page_info->page_offset;
        rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
        rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

        /* Any space left in the current big page for another frag? */
        if ((page_offset + rx_frag_size + rx_frag_size) >
                    adapter->big_page_size) {
            pagep = NULL;
            page_info->last_frag = true;
            dma_unmap_addr_set(page_info, bus, page_dmaaddr);
        } else {
            dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
        }

        prev_page_info = page_info;
        queue_head_inc(rxq);
        page_info = &rxo->page_info_tbl[rxq->head];
    }

    /* Mark the last frag of a page when we break out of the above loop
     * with no more slots available in the RXQ
     */
    if (pagep) {
        prev_page_info->last_frag = true;
        dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
    }

    if (posted) {
        atomic_add(posted, &rxq->used);
        if (rxo->rx_post_starved)
            rxo->rx_post_starved = false;
        do {
            notify = min(MAX_NUM_POST_ERX_DB, posted);
            be_rxq_notify(adapter, rxq->id, notify);
            posted -= notify;
        } while (posted);
    } else if (atomic_read(&rxq->used) == 0) {
        /* Let be_worker replenish when memory is available */
        rxo->rx_post_starved = true;
    }
}

static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
    struct be_queue_info *tx_cq = &txo->cq;
    struct be_tx_compl_info *txcp = &txo->txcp;
    struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

    if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
        return NULL;

    /* Ensure load ordering of valid bit dword and other dwords below */
    rmb();
    be_dws_le_to_cpu(compl, sizeof(*compl));

    txcp->status = GET_TX_COMPL_BITS(status, compl);
    txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

    compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
    queue_tail_inc(tx_cq);
    return txcp;
}

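/* Walk the TX ring from tail up to the completion's end_index, unmapping
 * every fragment wrb. The header wrb that starts each request carries no
 * fragment mapping of its own and is simply skipped; each skb is freed once
 * all of its wrbs have been consumed. Returns the number of wrbs processed.
 */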
static u16 be_tx_compl_process(struct be_adapter *adapter,
                   struct be_tx_obj *txo, u16 last_index)
{
    struct sk_buff **sent_skbs = txo->sent_skb_list;
    struct be_queue_info *txq = &txo->q;
    struct sk_buff *skb = NULL;
    bool unmap_skb_hdr = false;
    struct be_eth_wrb *wrb;
    u16 num_wrbs = 0;
    u32 frag_index;

    do {
        if (sent_skbs[txq->tail]) {
            /* Free skb from prev req */
            if (skb)
                dev_consume_skb_any(skb);
            skb = sent_skbs[txq->tail];
            sent_skbs[txq->tail] = NULL;
            queue_tail_inc(txq); /* skip hdr wrb */
            num_wrbs++;
            unmap_skb_hdr = true;
        }
        wrb = queue_tail_node(txq);
        frag_index = txq->tail;
        unmap_tx_frag(&adapter->pdev->dev, wrb,
                  (unmap_skb_hdr && skb_headlen(skb)));
        unmap_skb_hdr = false;
        queue_tail_inc(txq);
        num_wrbs++;
    } while (frag_index != last_index);
    dev_consume_skb_any(skb);

    return num_wrbs;
}

/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
    struct be_eq_entry *eqe;
    int num = 0;

    do {
        eqe = queue_tail_node(&eqo->q);
        if (eqe->evt == 0)
            break;

        rmb();
        eqe->evt = 0;
        num++;
        queue_tail_inc(&eqo->q);
    } while (true);

    return num;
}

/* Leaves the EQ in disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
    int num = events_get(eqo);

    be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
}

/* Free posted rx buffers that were not used */
static void be_rxq_clean(struct be_rx_obj *rxo)
{
    struct be_queue_info *rxq = &rxo->q;
    struct be_rx_page_info *page_info;

    while (atomic_read(&rxq->used) > 0) {
        page_info = get_rx_page_info(rxo);
        put_page(page_info->page);
        memset(page_info, 0, sizeof(*page_info));
    }
    BUG_ON(atomic_read(&rxq->used));
    rxq->tail = 0;
    rxq->head = 0;
}

static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
    struct be_queue_info *rx_cq = &rxo->cq;
    struct be_rx_compl_info *rxcp;
    struct be_adapter *adapter = rxo->adapter;
    int flush_wait = 0;

    /* Consume pending rx completions.
     * Wait for the flush completion (identified by zero num_rcvd)
     * to arrive. Notify CQ even when there are no more CQ entries
     * for HW to flush partially coalesced CQ entries.
     * In Lancer, there is no need to wait for flush compl.
     */
    for (;;) {
        rxcp = be_rx_compl_get(rxo);
        if (!rxcp) {
            if (lancer_chip(adapter))
                break;

            if (flush_wait++ > 50 ||
                be_check_error(adapter,
                       BE_ERROR_HW)) {
                dev_warn(&adapter->pdev->dev,
                     "did not receive flush compl\n");
                break;
            }
            be_cq_notify(adapter, rx_cq->id, true, 0);
            mdelay(1);
        } else {
            be_rx_compl_discard(rxo, rxcp);
            be_cq_notify(adapter, rx_cq->id, false, 1);
            if (rxcp->num_rcvd == 0)
                break;
        }
    }

    /* After cleanup, leave the CQ in unarmed state */
    be_cq_notify(adapter, rx_cq->id, false, 0);
}

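/* Drain TX completions on all queues during cleanup. Polling stops once HW
 * has been silent for 10ms or a HW error is detected; any requests that were
 * queued but never notified to HW are then freed with the same compl-process
 * logic, and the TXQ indices are rolled back to the last notified position.
 */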
static void be_tx_compl_clean(struct be_adapter *adapter)
{
    struct device *dev = &adapter->pdev->dev;
    u16 cmpl = 0, timeo = 0, num_wrbs = 0;
    struct be_tx_compl_info *txcp;
    struct be_queue_info *txq;
    u32 end_idx, notified_idx;
    struct be_tx_obj *txo;
    int i, pending_txqs;

    /* Stop polling for compls when HW has been silent for 10ms */
    do {
        pending_txqs = adapter->num_tx_qs;

        for_all_tx_queues(adapter, txo, i) {
            cmpl = 0;
            num_wrbs = 0;
            txq = &txo->q;
            while ((txcp = be_tx_compl_get(txo))) {
                num_wrbs +=
                    be_tx_compl_process(adapter, txo,
                                txcp->end_index);
                cmpl++;
            }
            if (cmpl) {
                be_cq_notify(adapter, txo->cq.id, false, cmpl);
                atomic_sub(num_wrbs, &txq->used);
                timeo = 0;
            }
            if (!be_is_tx_compl_pending(txo))
                pending_txqs--;
        }

        if (pending_txqs == 0 || ++timeo > 10 ||
            be_check_error(adapter, BE_ERROR_HW))
            break;

        mdelay(1);
    } while (true);

    /* Free enqueued TX that was never notified to HW */
    for_all_tx_queues(adapter, txo, i) {
        txq = &txo->q;

        if (atomic_read(&txq->used)) {
            dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
                 i, atomic_read(&txq->used));
            notified_idx = txq->tail;
            end_idx = txq->tail;
            index_adv(&end_idx, atomic_read(&txq->used) - 1,
                  txq->len);
            /* Use the tx-compl process logic to handle requests
             * that were not sent to the HW.
             */
            num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
            atomic_sub(num_wrbs, &txq->used);
            BUG_ON(atomic_read(&txq->used));
            txo->pend_wrb_cnt = 0;
            /* Since hw was never notified of these requests,
             * reset TXQ indices
             */
            txq->head = notified_idx;
            txq->tail = notified_idx;
        }
    }
}

static void be_evt_queues_destroy(struct be_adapter *adapter)
{
    struct be_eq_obj *eqo;
    int i;

    for_all_evt_queues(adapter, eqo, i) {
        if (eqo->q.created) {
            be_eq_clean(eqo);
            be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
            napi_hash_del(&eqo->napi);
            netif_napi_del(&eqo->napi);
            free_cpumask_var(eqo->affinity_mask);
        }
        be_queue_free(adapter, &eqo->q);
    }
}

static int be_evt_queues_create(struct be_adapter *adapter)
{
    struct be_queue_info *eq;
    struct be_eq_obj *eqo;
    struct be_aic_obj *aic;
    int i, rc;

    /* need enough EQs to service both RX and TX queues */
    adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
                    max(adapter->cfg_num_rx_irqs,
                    adapter->cfg_num_tx_irqs));

    for_all_evt_queues(adapter, eqo, i) {
        int numa_node = dev_to_node(&adapter->pdev->dev);

        aic = &adapter->aic_obj[i];
        eqo->adapter = adapter;
        eqo->idx = i;
        aic->max_eqd = BE_MAX_EQD;
        aic->enable = true;

        eq = &eqo->q;
        rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
                    sizeof(struct be_eq_entry));
        if (rc)
            return rc;

        rc = be_cmd_eq_create(adapter, eqo);
        if (rc)
            return rc;

        if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
            return -ENOMEM;
        cpumask_set_cpu(cpumask_local_spread(i, numa_node),
                eqo->affinity_mask);
        netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
                   BE_NAPI_WEIGHT);
    }
    return 0;
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
    struct be_queue_info *q;

    q = &adapter->mcc_obj.q;
    if (q->created)
        be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
    be_queue_free(adapter, q);

    q = &adapter->mcc_obj.cq;
    if (q->created)
        be_cmd_q_destroy(adapter, q, QTYPE_CQ);
    be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
    struct be_queue_info *q, *cq;

    cq = &adapter->mcc_obj.cq;
    if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
               sizeof(struct be_mcc_compl)))
        goto err;

    /* Use the default EQ for MCC completions */
    if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
        goto mcc_cq_free;

    q = &adapter->mcc_obj.q;
    if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
        goto mcc_cq_destroy;

    if (be_cmd_mccq_create(adapter, q, cq))
        goto mcc_q_free;

    return 0;

mcc_q_free:
    be_queue_free(adapter, q);
mcc_cq_destroy:
    be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
    be_queue_free(adapter, cq);
err:
    return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
    struct be_queue_info *q;
    struct be_tx_obj *txo;
    u8 i;

    for_all_tx_queues(adapter, txo, i) {
        q = &txo->q;
        if (q->created)
            be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
        be_queue_free(adapter, q);

        q = &txo->cq;
        if (q->created)
            be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);
    }
}

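/* Create one CQ + WRB queue per TX queue. When there are more TXQs than EQs
 * the TXQs share EQs round-robin (i % num_evt_qs), and each TXQ's XPS map is
 * tied to the affinity mask of the EQ servicing it.
 */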
static int be_tx_qs_create(struct be_adapter *adapter)
{
    struct be_queue_info *cq;
    struct be_tx_obj *txo;
    struct be_eq_obj *eqo;
    int status, i;

    adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);

    for_all_tx_queues(adapter, txo, i) {
        cq = &txo->cq;
        status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
                    sizeof(struct be_eth_tx_compl));
        if (status)
            return status;

        u64_stats_init(&txo->stats.sync);
        u64_stats_init(&txo->stats.sync_compl);

        /* If num_evt_qs is less than num_tx_qs, then more than
         * one TXQ shares an EQ
         */
        eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
        status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
        if (status)
            return status;

        status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
                    sizeof(struct be_eth_wrb));
        if (status)
            return status;

        status = be_cmd_txq_create(adapter, txo);
        if (status)
            return status;

        netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
                    eqo->idx);
    }

    dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
         adapter->num_tx_qs);
    return 0;
}

static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
    struct be_queue_info *q;
    struct be_rx_obj *rxo;
    int i;

    for_all_rx_queues(adapter, rxo, i) {
        q = &rxo->cq;
        if (q->created)
            be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);
    }
}

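/* Decide the final RX queue count: RSS rings capped by EQ/IRQ resources
 * (disabled entirely when fewer than 2 are possible), plus an optional
 * default RXQ, with a minimum of one RXQ overall. Only the completion
 * queues are created here; the RXQs themselves are set up separately at
 * interface-open time.
 */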
static int be_rx_cqs_create(struct be_adapter *adapter)
{
    struct be_queue_info *eq, *cq;
    struct be_rx_obj *rxo;
    int rc, i;

    adapter->num_rss_qs =
            min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);

    /* We'll use RSS only if at least 2 RSS rings are supported. */
    if (adapter->num_rss_qs < 2)
        adapter->num_rss_qs = 0;

    adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

    /* When the interface is not capable of RSS rings (and there is no
     * need to create a default RXQ) we'll still need one RXQ
     */
    if (adapter->num_rx_qs == 0)
        adapter->num_rx_qs = 1;

    adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
    for_all_rx_queues(adapter, rxo, i) {
        rxo->adapter = adapter;
        cq = &rxo->cq;
        rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
                    sizeof(struct be_eth_rx_compl));
        if (rc)
            return rc;

        u64_stats_init(&rxo->stats.sync);
        eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
        rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
        if (rc)
            return rc;
    }

    dev_info(&adapter->pdev->dev,
         "created %d RX queue(s)\n", adapter->num_rx_qs);
    return 0;
}

static irqreturn_t be_intx(int irq, void *dev)
{
    struct be_eq_obj *eqo = dev;
    struct be_adapter *adapter = eqo->adapter;
    int num_evts = 0;

    /* IRQ is not expected when NAPI is scheduled as the EQ
     * will not be armed.
     * But, this can happen on Lancer INTx where it takes
     * a while to de-assert INTx or in BE2 where occasionally
     * an interrupt may be raised even when EQ is unarmed.
     * If NAPI is already scheduled, then counting & notifying
     * events will orphan them.
     */
    if (napi_schedule_prep(&eqo->napi)) {
        num_evts = events_get(eqo);
        __napi_schedule(&eqo->napi);
        if (num_evts)
            eqo->spurious_intr = 0;
    }
    be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

    /* Return IRQ_HANDLED only for the first spurious intr
     * after a valid intr to stop the kernel from branding
     * this irq as a bad one!
     */
    if (num_evts || eqo->spurious_intr++ == 0)
        return IRQ_HANDLED;
    else
        return IRQ_NONE;
}

static irqreturn_t be_msix(int irq, void *dev)
{
    struct be_eq_obj *eqo = dev;

    be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
    napi_schedule(&eqo->napi);
    return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
    return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
}

static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
             int budget, int polling)
{
    struct be_adapter *adapter = rxo->adapter;
    struct be_queue_info *rx_cq = &rxo->cq;
    struct be_rx_compl_info *rxcp;
    u32 work_done;
    u32 frags_consumed = 0;

    for (work_done = 0; work_done < budget; work_done++) {
        rxcp = be_rx_compl_get(rxo);
        if (!rxcp)
            break;

        /* Is it a flush compl that has no data */
        if (unlikely(rxcp->num_rcvd == 0))
            goto loop_continue;

        /* Discard compl with partial DMA Lancer B0 */
        if (unlikely(!rxcp->pkt_size)) {
            be_rx_compl_discard(rxo, rxcp);
            goto loop_continue;
        }

        /* On BE drop pkts that arrive due to imperfect filtering in
         * promiscuous mode on some SKUs
         */
        if (unlikely(rxcp->port != adapter->port_num &&
                 !lancer_chip(adapter))) {
            be_rx_compl_discard(rxo, rxcp);
            goto loop_continue;
        }

        /* Don't do gro when we're busy_polling */
        if (do_gro(rxcp) && polling != BUSY_POLLING)
            be_rx_compl_process_gro(rxo, napi, rxcp);
        else
            be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
        frags_consumed += rxcp->num_rcvd;
        be_rx_stats_update(rxo, rxcp);
    }

    if (work_done) {
        be_cq_notify(adapter, rx_cq->id, true, work_done);

        /* When an rx-obj gets into post_starved state, just
         * let be_worker do the posting.
         */
        if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
            !rxo->rx_post_starved)
            be_post_rx_frags(rxo, GFP_ATOMIC,
                     max_t(u32, MAX_RX_POST,
                           frags_consumed));
    }

    return work_done;
}

static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
{
    switch (status) {
    case BE_TX_COMP_HDR_PARSE_ERR:
        tx_stats(txo)->tx_hdr_parse_err++;
        break;
    case BE_TX_COMP_NDMA_ERR:
        tx_stats(txo)->tx_dma_err++;
        break;
    case BE_TX_COMP_ACL_ERR:
        tx_stats(txo)->tx_spoof_check_err++;
        break;
    }
}

static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
{
    switch (status) {
    case LANCER_TX_COMP_LSO_ERR:
        tx_stats(txo)->tx_tso_err++;
        break;
    case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
    case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
        tx_stats(txo)->tx_spoof_check_err++;
        break;
    case LANCER_TX_COMP_QINQ_ERR:
        tx_stats(txo)->tx_qinq_err++;
        break;
    case LANCER_TX_COMP_PARITY_ERR:
        tx_stats(txo)->tx_internal_parity_err++;
        break;
    case LANCER_TX_COMP_DMA_ERR:
        tx_stats(txo)->tx_dma_err++;
        break;
    }
}

static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
              int idx)
{
    int num_wrbs = 0, work_done = 0;
    struct be_tx_compl_info *txcp;

    while ((txcp = be_tx_compl_get(txo))) {
        num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
        work_done++;

        if (txcp->status) {
            if (lancer_chip(adapter))
                lancer_update_tx_err(txo, txcp->status);
            else
                be_update_tx_err(txo, txcp->status);
        }
    }

    if (work_done) {
        be_cq_notify(adapter, txo->cq.id, true, work_done);
        atomic_sub(num_wrbs, &txo->q.used);

        /* As Tx wrbs have been freed up, wake up netdev queue
         * if it was stopped due to lack of tx wrbs. */
        if (__netif_subqueue_stopped(adapter->netdev, idx) &&
            be_can_txq_wake(txo)) {
            netif_wake_subqueue(adapter->netdev, idx);
        }

        u64_stats_update_begin(&tx_stats(txo)->sync_compl);
        tx_stats(txo)->tx_compl += work_done;
        u64_stats_update_end(&tx_stats(txo)->sync_compl);
    }
}

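/* NAPI vs busy-poll mutual exclusion: eqo->state tracks who owns the EQ
 * (BE_EQ_NAPI or BE_EQ_POLL) so that be_poll() and be_busy_poll() never
 * process the same queues concurrently; the *_YIELD flags record a failed
 * acquisition attempt. When CONFIG_NET_RX_BUSY_POLL is off these helpers
 * reduce to stubs and the locking disappears.
 */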
#ifdef CONFIG_NET_RX_BUSY_POLL
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
    bool status = true;

    spin_lock(&eqo->lock); /* BH is already disabled */
    if (eqo->state & BE_EQ_LOCKED) {
        WARN_ON(eqo->state & BE_EQ_NAPI);
        eqo->state |= BE_EQ_NAPI_YIELD;
        status = false;
    } else {
        eqo->state = BE_EQ_NAPI;
    }
    spin_unlock(&eqo->lock);
    return status;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
    spin_lock(&eqo->lock); /* BH is already disabled */

    WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
    eqo->state = BE_EQ_IDLE;

    spin_unlock(&eqo->lock);
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
    bool status = true;

    spin_lock_bh(&eqo->lock);
    if (eqo->state & BE_EQ_LOCKED) {
        eqo->state |= BE_EQ_POLL_YIELD;
        status = false;
    } else {
        eqo->state |= BE_EQ_POLL;
    }
    spin_unlock_bh(&eqo->lock);
    return status;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
    spin_lock_bh(&eqo->lock);

    WARN_ON(eqo->state & (BE_EQ_NAPI));
    eqo->state = BE_EQ_IDLE;

    spin_unlock_bh(&eqo->lock);
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
    spin_lock_init(&eqo->lock);
    eqo->state = BE_EQ_IDLE;
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
    local_bh_disable();

    /* It's enough to just acquire napi lock on the eqo to stop
     * be_busy_poll() from processing any queues.
     */
    while (!be_lock_napi(eqo))
        mdelay(1);

    local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
    return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
    return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

68d7bdcb 3297int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
3298{
3299 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3300 struct be_adapter *adapter = eqo->adapter;
0b545a62 3301 int max_work = 0, work, i, num_evts;
6384a4d0 3302 struct be_rx_obj *rxo;
a4906ea0 3303 struct be_tx_obj *txo;
20947770 3304 u32 mult_enc = 0;
f31e50a8 3305
0b545a62
SP
3306 num_evts = events_get(eqo);
3307
a4906ea0
SP
3308 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
3309 be_process_tx(adapter, txo, i);
f31e50a8 3310
6384a4d0
SP
3311 if (be_lock_napi(eqo)) {
3312 /* This loop will iterate twice for EQ0 in which
3313 * completions of the last RXQ (default one) are also processed.
3314 * For other EQs the loop iterates only once.
3315 */
3316 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3317 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
3318 max_work = max(work, max_work);
3319 }
3320 be_unlock_napi(eqo);
3321 } else {
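/* A busy-poller owns the RXQs right now; report the full budget as
 * consumed so that NAPI keeps polling and retries the lock.
 */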
3322 max_work = budget;
10ef9ab4 3323 }
6b7c5b94 3324
10ef9ab4
SP
3325 if (is_mcc_eqo(eqo))
3326 be_process_mcc(adapter);
93c86700 3327
10ef9ab4
SP
3328 if (max_work < budget) {
3329 napi_complete(napi);
20947770
PR
3330
3331 /* Skyhawk EQ_DB has a provision to set the rearm-to-interrupt
3332 * delay via a delay multiplier encoding value
3333 */
3334 if (skyhawk_chip(adapter))
3335 mult_enc = be_get_eq_delay_mult_enc(eqo);
3336
3337 be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
3338 mult_enc);
10ef9ab4
SP
3339 } else {
3340 /* As we'll continue in polling mode, count and clear events */
20947770 3341 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
93c86700 3342 }
10ef9ab4 3343 return max_work;
6b7c5b94
SP
3344}
3345
6384a4d0
SP
3346#ifdef CONFIG_NET_RX_BUSY_POLL
3347static int be_busy_poll(struct napi_struct *napi)
3348{
3349 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3350 struct be_adapter *adapter = eqo->adapter;
3351 struct be_rx_obj *rxo;
3352 int i, work = 0;
3353
3354 if (!be_lock_busy_poll(eqo))
3355 return LL_FLUSH_BUSY;
3356
3357 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3358 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
3359 if (work)
3360 break;
3361 }
3362
3363 be_unlock_busy_poll(eqo);
3364 return work;
3365}
3366#endif
3367
f67ef7ba 3368void be_detect_error(struct be_adapter *adapter)
7c185276 3369{
e1cfb67a
PR
3370 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3371 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 3372 u32 i;
eb0eecc1 3373 struct device *dev = &adapter->pdev->dev;
7c185276 3374
954f6825 3375 if (be_check_error(adapter, BE_ERROR_HW))
72f02485
SP
3376 return;
3377
e1cfb67a
PR
3378 if (lancer_chip(adapter)) {
3379 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3380 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
954f6825 3381 be_set_error(adapter, BE_ERROR_UE);
e1cfb67a 3382 sliport_err1 = ioread32(adapter->db +
748b539a 3383 SLIPORT_ERROR1_OFFSET);
e1cfb67a 3384 sliport_err2 = ioread32(adapter->db +
748b539a 3385 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
3386 /* Do not log error messages if it's a FW reset */
3387 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3388 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3389 dev_info(dev, "Firmware update in progress\n");
3390 } else {
eb0eecc1
SK
3391 dev_err(dev, "Error detected in the card\n");
3392 dev_err(dev, "ERR: sliport status 0x%x\n",
3393 sliport_status);
3394 dev_err(dev, "ERR: sliport error1 0x%x\n",
3395 sliport_err1);
3396 dev_err(dev, "ERR: sliport error2 0x%x\n",
3397 sliport_err2);
3398 }
e1cfb67a
PR
3399 }
3400 } else {
25848c90
SR
3401 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3402 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3403 ue_lo_mask = ioread32(adapter->pcicfg +
3404 PCICFG_UE_STATUS_LOW_MASK);
3405 ue_hi_mask = ioread32(adapter->pcicfg +
3406 PCICFG_UE_STATUS_HI_MASK);
e1cfb67a 3407
f67ef7ba
PR
3408 ue_lo = (ue_lo & ~ue_lo_mask);
3409 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 3410
eb0eecc1
SK
3411 /* On certain platforms BE hardware can indicate spurious UEs.
3412 * A real UE will anyway cause the HW to stop working completely.
3413 * Hence the hw_error flag is not set merely on UE detection.
3414 */
f67ef7ba 3415
eb0eecc1 3416 if (ue_lo || ue_hi) {
710f3e59 3417 dev_err(dev, "Error detected in the adapter\n");
eb0eecc1 3418 if (skyhawk_chip(adapter))
954f6825
VD
3419 be_set_error(adapter, BE_ERROR_UE);
3420
eb0eecc1
SK
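/* Walk the unmasked UE bits and log the name of each bit that is set */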
3421 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3422 if (ue_lo & 1)
3423 dev_err(dev, "UE: %s bit set\n",
3424 ue_status_low_desc[i]);
3425 }
3426 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3427 if (ue_hi & 1)
3428 dev_err(dev, "UE: %s bit set\n",
3429 ue_status_hi_desc[i]);
3430 }
7c185276
AK
3431 }
3432 }
7c185276
AK
3433}
3434
8d56ff11
SP
3435static void be_msix_disable(struct be_adapter *adapter)
3436{
ac6a0c4a 3437 if (msix_enabled(adapter)) {
8d56ff11 3438 pci_disable_msix(adapter->pdev);
ac6a0c4a 3439 adapter->num_msix_vec = 0;
68d7bdcb 3440 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
3441 }
3442}
3443
c2bba3df 3444static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 3445{
6fde0e63 3446 unsigned int i, max_roce_eqs;
d379142b 3447 struct device *dev = &adapter->pdev->dev;
6fde0e63 3448 int num_vec;
6b7c5b94 3449
ce7faf0a
SP
3450 /* If RoCE is supported, program the max number of vectors that
3451 * could be used for NIC and RoCE; else, just program the number
3452 * we'll use initially.
92bf14ab 3453 */
e261768e
SP
3454 if (be_roce_supported(adapter)) {
3455 max_roce_eqs =
3456 be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
3457 max_roce_eqs = min(max_roce_eqs, num_online_cpus());
3458 num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
3459 } else {
3460 num_vec = max(adapter->cfg_num_rx_irqs,
3461 adapter->cfg_num_tx_irqs);
3462 }
3abcdeda 3463
ac6a0c4a 3464 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
3465 adapter->msix_entries[i].entry = i;
3466
7dc4c064
AG
3467 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3468 MIN_MSIX_VECTORS, num_vec);
3469 if (num_vec < 0)
3470 goto fail;
92bf14ab 3471
92bf14ab
SP
3472 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3473 adapter->num_msix_roce_vec = num_vec / 2;
3474 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3475 adapter->num_msix_roce_vec);
3476 }
3477
3478 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3479
3480 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3481 adapter->num_msix_vec);
c2bba3df 3482 return 0;
7dc4c064
AG
3483
3484fail:
3485 dev_warn(dev, "MSIx enable failed\n");
3486
3487 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
18c57c74 3488 if (be_virtfn(adapter))
7dc4c064
AG
3489 return num_vec;
3490 return 0;
6b7c5b94
SP
3491}
3492
fe6d2a38 3493static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 3494 struct be_eq_obj *eqo)
b628bde2 3495{
f2f781a7 3496 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 3497}
6b7c5b94 3498
b628bde2
SP
3499static int be_msix_register(struct be_adapter *adapter)
3500{
10ef9ab4
SP
3501 struct net_device *netdev = adapter->netdev;
3502 struct be_eq_obj *eqo;
3503 int status, i, vec;
6b7c5b94 3504
10ef9ab4
SP
3505 for_all_evt_queues(adapter, eqo, i) {
3506 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3507 vec = be_msix_vec_get(adapter, eqo);
3508 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
3509 if (status)
3510 goto err_msix;
d658d98a
PR
3511
3512 irq_set_affinity_hint(vec, eqo->affinity_mask);
3abcdeda 3513 }
b628bde2 3514
6b7c5b94 3515 return 0;
3abcdeda 3516err_msix:
6e3cd5fa
VD
3517 for (i--; i >= 0; i--) {
3518 eqo = &adapter->eq_obj[i];
10ef9ab4 3519 free_irq(be_msix_vec_get(adapter, eqo), eqo);
6e3cd5fa 3520 }
10ef9ab4 3521 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 3522 status);
ac6a0c4a 3523 be_msix_disable(adapter);
6b7c5b94
SP
3524 return status;
3525}
3526
3527static int be_irq_register(struct be_adapter *adapter)
3528{
3529 struct net_device *netdev = adapter->netdev;
3530 int status;
3531
ac6a0c4a 3532 if (msix_enabled(adapter)) {
6b7c5b94
SP
3533 status = be_msix_register(adapter);
3534 if (status == 0)
3535 goto done;
ba343c77 3536 /* INTx is not supported for VF */
18c57c74 3537 if (be_virtfn(adapter))
ba343c77 3538 return status;
6b7c5b94
SP
3539 }
3540
e49cc34f 3541 /* INTx: only the first EQ is used */
6b7c5b94
SP
3542 netdev->irq = adapter->pdev->irq;
3543 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 3544 &adapter->eq_obj[0]);
6b7c5b94
SP
3545 if (status) {
3546 dev_err(&adapter->pdev->dev,
3547 "INTx request IRQ failed - err %d\n", status);
3548 return status;
3549 }
3550done:
3551 adapter->isr_registered = true;
3552 return 0;
3553}
3554
3555static void be_irq_unregister(struct be_adapter *adapter)
3556{
3557 struct net_device *netdev = adapter->netdev;
10ef9ab4 3558 struct be_eq_obj *eqo;
d658d98a 3559 int i, vec;
6b7c5b94
SP
3560
3561 if (!adapter->isr_registered)
3562 return;
3563
3564 /* INTx */
ac6a0c4a 3565 if (!msix_enabled(adapter)) {
e49cc34f 3566 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
3567 goto done;
3568 }
3569
3570 /* MSIx */
d658d98a
PR
3571 for_all_evt_queues(adapter, eqo, i) {
3572 vec = be_msix_vec_get(adapter, eqo);
3573 irq_set_affinity_hint(vec, NULL);
3574 free_irq(vec, eqo);
3575 }
3abcdeda 3576
6b7c5b94
SP
3577done:
3578 adapter->isr_registered = false;
6b7c5b94
SP
3579}
3580
10ef9ab4 3581static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79 3582{
62219066 3583 struct rss_info *rss = &adapter->rss_info;
482c9e79
SP
3584 struct be_queue_info *q;
3585 struct be_rx_obj *rxo;
3586 int i;
3587
3588 for_all_rx_queues(adapter, rxo, i) {
3589 q = &rxo->q;
3590 if (q->created) {
99b44304
KA
3591 /* If RXQs are destroyed while in an "out of buffer"
3592 * state, there is a possibility of an HW stall on
3593 * Lancer. So, post 64 buffers to each queue to relieve
3594 * the "out of buffer" condition.
3595 * Make sure there's space in the RXQ before posting.
3596 */
3597 if (lancer_chip(adapter)) {
3598 be_rx_cq_clean(rxo);
3599 if (atomic_read(&q->used) == 0)
3600 be_post_rx_frags(rxo, GFP_KERNEL,
3601 MAX_RX_POST);
3602 }
3603
482c9e79 3604 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 3605 be_rx_cq_clean(rxo);
99b44304 3606 be_rxq_clean(rxo);
482c9e79 3607 }
10ef9ab4 3608 be_queue_free(adapter, q);
482c9e79 3609 }
62219066
AK
3610
3611 if (rss->rss_flags) {
3612 rss->rss_flags = RSS_ENABLE_NONE;
3613 be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3614 128, rss->rss_hkey);
3615 }
482c9e79
SP
3616}
3617
bcc84140
KA
3618static void be_disable_if_filters(struct be_adapter *adapter)
3619{
988d44b1 3620 be_dev_mac_del(adapter, adapter->pmac_id[0]);
bcc84140 3621 be_clear_uc_list(adapter);
92fbb1df 3622 be_clear_mc_list(adapter);
bcc84140
KA
3623
3624 /* The IFACE flags are enabled in the open path and cleared
3625 * in the close path. When a VF gets detached from the host and
3626 * assigned to a VM the following happens:
3627 * - VF's IFACE flags get cleared in the detach path
3628 * - IFACE create is issued by the VF in the attach path
3629 * Due to a bug in the BE3/Skyhawk-R FW
3630 * (Lancer FW doesn't have the bug), the IFACE capability flags
3631 * specified along with the IFACE create cmd issued by a VF are not
3632 * honoured by FW. As a consequence, if a *new* driver
3633 * (that enables/disables IFACE flags in open/close)
3634 * is loaded in the host and an *old* driver is used by a VM/VF,
3635 * the IFACE gets created *without* the needed flags.
3636 * To avoid this, disable RX-filter flags only for Lancer.
3637 */
3638 if (lancer_chip(adapter)) {
3639 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3640 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3641 }
3642}
3643
889cd4b2
SP
3644static int be_close(struct net_device *netdev)
3645{
3646 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
3647 struct be_eq_obj *eqo;
3648 int i;
889cd4b2 3649
e1ad8e33
KA
3650 /* This protection is needed as be_close() may be called even when the
3651 * adapter is in cleared state (after eeh perm failure)
3652 */
3653 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3654 return 0;
3655
b7172414
SP
3656 /* Before attempting cleanup ensure all the pending cmds in the
3657 * config_wq have finished execution
3658 */
3659 flush_workqueue(be_wq);
3660
bcc84140
KA
3661 be_disable_if_filters(adapter);
3662
dff345c5
IV
3663 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3664 for_all_evt_queues(adapter, eqo, i) {
04d3d624 3665 napi_disable(&eqo->napi);
6384a4d0
SP
3666 be_disable_busy_poll(eqo);
3667 }
71237b6f 3668 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 3669 }
a323d9bf
SP
3670
3671 be_async_mcc_disable(adapter);
3672
3673 /* Wait for all pending tx completions to arrive so that
3674 * all tx skbs are freed.
3675 */
fba87559 3676 netif_tx_disable(netdev);
6e1f9975 3677 be_tx_compl_clean(adapter);
a323d9bf
SP
3678
3679 be_rx_qs_destroy(adapter);
d11a347d 3680
a323d9bf 3681 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
3682 if (msix_enabled(adapter))
3683 synchronize_irq(be_msix_vec_get(adapter, eqo));
3684 else
3685 synchronize_irq(netdev->irq);
3686 be_eq_clean(eqo);
63fcb27f
PR
3687 }
3688
889cd4b2
SP
3689 be_irq_unregister(adapter);
3690
482c9e79
SP
3691 return 0;
3692}
3693
10ef9ab4 3694static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 3695{
1dcf7b1c
ED
3696 struct rss_info *rss = &adapter->rss_info;
3697 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 3698 struct be_rx_obj *rxo;
e9008ee9 3699 int rc, i, j;
482c9e79
SP
3700
3701 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
3702 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3703 sizeof(struct be_eth_rx_d));
3704 if (rc)
3705 return rc;
3706 }
3707
71bb8bd0
VV
3708 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3709 rxo = default_rxo(adapter);
3710 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3711 rx_frag_size, adapter->if_handle,
3712 false, &rxo->rss_id);
3713 if (rc)
3714 return rc;
3715 }
10ef9ab4
SP
3716
3717 for_all_rss_queues(adapter, rxo, i) {
482c9e79 3718 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
3719 rx_frag_size, adapter->if_handle,
3720 true, &rxo->rss_id);
482c9e79
SP
3721 if (rc)
3722 return rc;
3723 }
3724
3725 if (be_multi_rxq(adapter)) {
71bb8bd0 3726 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
e9008ee9 3727 for_all_rss_queues(adapter, rxo, i) {
e2557877 3728 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 3729 break;
e2557877
VD
3730 rss->rsstable[j + i] = rxo->rss_id;
3731 rss->rss_queue[j + i] = i;
e9008ee9
PR
3732 }
3733 }
e2557877
VD
3734 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3735 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
3736
3737 if (!BEx_chip(adapter))
e2557877
VD
3738 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3739 RSS_ENABLE_UDP_IPV6;
62219066
AK
3740
3741 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3742 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3743 RSS_INDIR_TABLE_LEN, rss_key);
3744 if (rc) {
3745 rss->rss_flags = RSS_ENABLE_NONE;
3746 return rc;
3747 }
3748
3749 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
da1388d6
VV
3750 } else {
3751 /* Disable RSS, if only default RX Q is created */
e2557877 3752 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3753 }
594ad54a 3754
e2557877 3755
b02e60c8
SR
3756 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3757 * which is a queue empty condition
3758 */
10ef9ab4 3759 for_all_rx_queues(adapter, rxo, i)
b02e60c8
SR
3760 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3761
889cd4b2
SP
3762 return 0;
3763}
3764
bcc84140
KA
3765static int be_enable_if_filters(struct be_adapter *adapter)
3766{
3767 int status;
3768
c1bb0a55 3769 status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
bcc84140
KA
3770 if (status)
3771 return status;
3772
3773 /* For BE3 VFs, the PF programs the initial MAC address */
3774 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
988d44b1 3775 status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
bcc84140
KA
3776 if (status)
3777 return status;
c27ebf58 3778 ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
bcc84140
KA
3779 }
3780
3781 if (adapter->vlans_added)
3782 be_vid_config(adapter);
3783
b7172414 3784 __be_set_rx_mode(adapter);
bcc84140
KA
3785
3786 return 0;
3787}
3788
6b7c5b94
SP
3789static int be_open(struct net_device *netdev)
3790{
3791 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3792 struct be_eq_obj *eqo;
3abcdeda 3793 struct be_rx_obj *rxo;
10ef9ab4 3794 struct be_tx_obj *txo;
b236916a 3795 u8 link_status;
3abcdeda 3796 int status, i;
5fb379ee 3797
10ef9ab4 3798 status = be_rx_qs_create(adapter);
482c9e79
SP
3799 if (status)
3800 goto err;
3801
bcc84140
KA
3802 status = be_enable_if_filters(adapter);
3803 if (status)
3804 goto err;
3805
c2bba3df
SK
3806 status = be_irq_register(adapter);
3807 if (status)
3808 goto err;
5fb379ee 3809
10ef9ab4 3810 for_all_rx_queues(adapter, rxo, i)
3abcdeda 3811 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 3812
10ef9ab4
SP
3813 for_all_tx_queues(adapter, txo, i)
3814 be_cq_notify(adapter, txo->cq.id, true, 0);
3815
7a1e9b20
SP
3816 be_async_mcc_enable(adapter);
3817
10ef9ab4
SP
3818 for_all_evt_queues(adapter, eqo, i) {
3819 napi_enable(&eqo->napi);
6384a4d0 3820 be_enable_busy_poll(eqo);
20947770 3821 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
10ef9ab4 3822 }
04d3d624 3823 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 3824
323ff71e 3825 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
3826 if (!status)
3827 be_link_status_update(adapter, link_status);
3828
fba87559 3829 netif_tx_start_all_queues(netdev);
c9c47142 3830 if (skyhawk_chip(adapter))
bde6b7cd 3831 udp_tunnel_get_rx_info(netdev);
c5abe7c0 3832
889cd4b2
SP
3833 return 0;
3834err:
3835 be_close(adapter->netdev);
3836 return -EIO;
5fb379ee
SP
3837}
3838
f7062ee5
SP
3839static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3840{
3841 u32 addr;
3842
3843 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3844
3845 mac[5] = (u8)(addr & 0xFF);
3846 mac[4] = (u8)((addr >> 8) & 0xFF);
3847 mac[3] = (u8)((addr >> 16) & 0xFF);
3848 /* Use the OUI from the current MAC address */
3849 memcpy(mac, adapter->netdev->dev_addr, 3);
3850}
3851
6d87f5c3
AK
3852/*
3853 * Generate a seed MAC address from the PF MAC Address using jhash.
3854 * MAC addresses for VFs are assigned incrementally starting from the seed.
3855 * These addresses are programmed in the ASIC by the PF and the VF driver
3856 * queries for the MAC address during its probe.
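 * For example (hypothetical values): with OUI 00:00:c9 and a jhash
 * result whose low 24 bits are 0x123456, the seed is 00:00:c9:12:34:56
 * and VF0, VF1, ... are handed ...:56, ...:57 and so on (mac[5] wraps
 * around without carrying into mac[4]).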
3857 */
4c876616 3858static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3859{
f9449ab7 3860 u32 vf;
3abcdeda 3861 int status = 0;
6d87f5c3 3862 u8 mac[ETH_ALEN];
11ac75ed 3863 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3864
3865 be_vf_eth_addr_generate(adapter, mac);
3866
11ac75ed 3867 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3868 if (BEx_chip(adapter))
590c391d 3869 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3870 vf_cfg->if_handle,
3871 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3872 else
3873 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3874 vf + 1);
590c391d 3875
6d87f5c3
AK
3876 if (status)
3877 dev_err(&adapter->pdev->dev,
748b539a
SP
3878 "Mac address assignment failed for VF %d\n",
3879 vf);
6d87f5c3 3880 else
11ac75ed 3881 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3882
3883 mac[5] += 1;
3884 }
3885 return status;
3886}
3887
4c876616
SP
3888static int be_vfs_mac_query(struct be_adapter *adapter)
3889{
3890 int status, vf;
3891 u8 mac[ETH_ALEN];
3892 struct be_vf_cfg *vf_cfg;
4c876616
SP
3893
3894 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3895 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3896 mac, vf_cfg->if_handle,
3897 false, vf+1);
4c876616
SP
3898 if (status)
3899 return status;
3900 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3901 }
3902 return 0;
3903}
3904
f9449ab7 3905static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3906{
11ac75ed 3907 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3908 u32 vf;
3909
257a3feb 3910 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
3911 dev_warn(&adapter->pdev->dev,
3912 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
3913 goto done;
3914 }
3915
b4c1df93
SP
3916 pci_disable_sriov(adapter->pdev);
3917
11ac75ed 3918 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3919 if (BEx_chip(adapter))
11ac75ed
SP
3920 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3921 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3922 else
3923 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3924 vf + 1);
f9449ab7 3925
11ac75ed
SP
3926 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3927 }
884476be
SK
3928
3929 if (BE3_chip(adapter))
3930 be_cmd_set_hsw_config(adapter, 0, 0,
3931 adapter->if_handle,
3932 PORT_FWD_TYPE_PASSTHRU, 0);
39f1d94d
SP
3933done:
3934 kfree(adapter->vf_cfg);
3935 adapter->num_vfs = 0;
f174c7ec 3936 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
6d87f5c3
AK
3937}
3938
7707133c
SP
3939static void be_clear_queues(struct be_adapter *adapter)
3940{
3941 be_mcc_queues_destroy(adapter);
3942 be_rx_cqs_destroy(adapter);
3943 be_tx_queues_destroy(adapter);
3944 be_evt_queues_destroy(adapter);
3945}
3946
68d7bdcb 3947static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3948{
191eb756
SP
3949 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3950 cancel_delayed_work_sync(&adapter->work);
3951 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3952 }
68d7bdcb
SP
3953}
3954
eb7dd46c
SP
3955static void be_cancel_err_detection(struct be_adapter *adapter)
3956{
710f3e59
SB
3957 struct be_error_recovery *err_rec = &adapter->error_recovery;
3958
3959 if (!be_err_recovery_workq)
3960 return;
3961
eb7dd46c 3962 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
710f3e59 3963 cancel_delayed_work_sync(&err_rec->err_detection_work);
eb7dd46c
SP
3964 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3965 }
3966}
3967
c9c47142
SP
3968static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3969{
630f4b70
SB
3970 struct net_device *netdev = adapter->netdev;
3971
c9c47142
SP
3972 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3973 be_cmd_manage_iface(adapter, adapter->if_handle,
3974 OP_CONVERT_TUNNEL_TO_NORMAL);
3975
3976 if (adapter->vxlan_port)
3977 be_cmd_set_vxlan_port(adapter, 0);
3978
3979 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3980 adapter->vxlan_port = 0;
630f4b70
SB
3981
3982 netdev->hw_enc_features = 0;
3983 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
ac9a3d84 3984 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
c9c47142
SP
3985}
3986
b9263cbf
SR
3987static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
3988 struct be_resources *vft_res)
f2858738
VV
3989{
3990 struct be_resources res = adapter->pool_res;
b9263cbf
SR
3991 u32 vf_if_cap_flags = res.vf_if_cap_flags;
3992 struct be_resources res_mod = {0};
f2858738
VV
3993 u16 num_vf_qs = 1;
3994
de2b1e03
SK
3995 /* Distribute the queue resources among the PF and its VFs */
3996 if (num_vfs) {
3997 /* Divide the rx queues evenly among the VFs and the PF, capped
3998 * at VF-EQ-count. Any remainder queues belong to the PF.
3999 */
ee9ad280
SB
4000 num_vf_qs = min(SH_VF_MAX_NIC_EQS,
4001 res.max_rss_qs / (num_vfs + 1));
f2858738 4002
de2b1e03
SK
4003 /* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
4004 * RSS Tables per port. Provide RSS on VFs only if the number of
4005 * VFs requested is less than its PF Pool's RSS Tables limit.
f2858738 4006 */
de2b1e03 4007 if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
f2858738
VV
4008 num_vf_qs = 1;
4009 }
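/* Worked example (hypothetical numbers): with res.max_rss_qs == 16 and
 * num_vfs == 4, each VF (and the PF) is offered 16 / 5 = 3 RX queues,
 * subject to the SH_VF_MAX_NIC_EQS cap; the one leftover queue stays
 * with the PF.
 */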
b9263cbf
SR
4010
4011 /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
4012 * which are modifiable using SET_PROFILE_CONFIG cmd.
4013 */
de2b1e03
SK
4014 be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
4015 RESOURCE_MODIFIABLE, 0);
b9263cbf
SR
4016
4017 /* If RSS IFACE capability flags are modifiable for a VF, set the
4018 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
4019 * more than 1 RSSQ is available for a VF.
4020 * Otherwise, provision only 1 queue pair for VF.
4021 */
4022 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
4023 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4024 if (num_vf_qs > 1) {
4025 vf_if_cap_flags |= BE_IF_FLAGS_RSS;
4026 if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
4027 vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
4028 } else {
4029 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
4030 BE_IF_FLAGS_DEFQ_RSS);
4031 }
4032 } else {
4033 num_vf_qs = 1;
4034 }
4035
4036 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
4037 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4038 vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4039 }
4040
4041 vft_res->vf_if_cap_flags = vf_if_cap_flags;
4042 vft_res->max_rx_qs = num_vf_qs;
4043 vft_res->max_rss_qs = num_vf_qs;
4044 vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
4045 vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);
4046
4047 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
4048 * among the PF and its VFs, if the fields are changeable
4049 */
4050 if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
4051 vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);
4052
4053 if (res_mod.max_vlans == FIELD_MODIFIABLE)
4054 vft_res->max_vlans = res.max_vlans / (num_vfs + 1);
4055
4056 if (res_mod.max_iface_count == FIELD_MODIFIABLE)
4057 vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);
4058
4059 if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
4060 vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
f2858738
VV
4061}
4062
b7172414
SP
4063static void be_if_destroy(struct be_adapter *adapter)
4064{
4065 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4066
4067 kfree(adapter->pmac_id);
4068 adapter->pmac_id = NULL;
4069
4070 kfree(adapter->mc_list);
4071 adapter->mc_list = NULL;
4072
4073 kfree(adapter->uc_list);
4074 adapter->uc_list = NULL;
4075}
4076
b05004ad
SK
4077static int be_clear(struct be_adapter *adapter)
4078{
f2858738 4079 struct pci_dev *pdev = adapter->pdev;
b9263cbf 4080 struct be_resources vft_res = {0};
f2858738 4081
68d7bdcb 4082 be_cancel_worker(adapter);
191eb756 4083
b7172414
SP
4084 flush_workqueue(be_wq);
4085
11ac75ed 4086 if (sriov_enabled(adapter))
f9449ab7
SP
4087 be_vf_clear(adapter);
4088
bec84e6b
VV
4089 /* Re-configure FW to distribute resources evenly across max-supported
4090 * number of VFs, only when VFs are not already enabled.
4091 */
ace40aff
VV
4092 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
4093 !pci_vfs_assigned(pdev)) {
b9263cbf
SR
4094 be_calculate_vf_res(adapter,
4095 pci_sriov_get_totalvfs(pdev),
4096 &vft_res);
bec84e6b 4097 be_cmd_set_sriov_config(adapter, adapter->pool_res,
f2858738 4098 pci_sriov_get_totalvfs(pdev),
b9263cbf 4099 &vft_res);
f2858738 4100 }
bec84e6b 4101
c9c47142 4102 be_disable_vxlan_offloads(adapter);
fbc13f01 4103
b7172414 4104 be_if_destroy(adapter);
a54769f5 4105
7707133c 4106 be_clear_queues(adapter);
a54769f5 4107
10ef9ab4 4108 be_msix_disable(adapter);
e1ad8e33 4109 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
a54769f5
SP
4110 return 0;
4111}
4112
4c876616 4113static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 4114{
92bf14ab 4115 struct be_resources res = {0};
bcc84140 4116 u32 cap_flags, en_flags, vf;
4c876616 4117 struct be_vf_cfg *vf_cfg;
0700d816 4118 int status;
abb93951 4119
0700d816 4120 /* If a FW profile exists, then cap_flags are updated */
c1bb0a55 4121 cap_flags = BE_VF_IF_EN_FLAGS;
abb93951 4122
4c876616 4123 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab 4124 if (!BE3_chip(adapter)) {
de2b1e03
SK
4125 status = be_cmd_get_profile_config(adapter, &res, NULL,
4126 ACTIVE_PROFILE_TYPE,
f2858738 4127 RESOURCE_LIMITS,
92bf14ab 4128 vf + 1);
435452aa 4129 if (!status) {
92bf14ab 4130 cap_flags = res.if_cap_flags;
435452aa
VV
4131 /* Prevent VFs from enabling VLAN promiscuous
4132 * mode
4133 */
4134 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4135 }
92bf14ab 4136 }
4c876616 4137
c1bb0a55
VD
4138 /* PF should enable IF flags during proxy if_create call */
4139 en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
bcc84140
KA
4140 status = be_cmd_if_create(adapter, cap_flags, en_flags,
4141 &vf_cfg->if_handle, vf + 1);
4c876616 4142 if (status)
0700d816 4143 return status;
4c876616 4144 }
0700d816
KA
4145
4146 return 0;
abb93951
PR
4147}
4148
39f1d94d 4149static int be_vf_setup_init(struct be_adapter *adapter)
30128031 4150{
11ac75ed 4151 struct be_vf_cfg *vf_cfg;
30128031
SP
4152 int vf;
4153
39f1d94d
SP
4154 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
4155 GFP_KERNEL);
4156 if (!adapter->vf_cfg)
4157 return -ENOMEM;
4158
11ac75ed
SP
4159 for_all_vfs(adapter, vf_cfg, vf) {
4160 vf_cfg->if_handle = -1;
4161 vf_cfg->pmac_id = -1;
30128031 4162 }
39f1d94d 4163 return 0;
30128031
SP
4164}
4165
f9449ab7
SP
4166static int be_vf_setup(struct be_adapter *adapter)
4167{
c502224e 4168 struct device *dev = &adapter->pdev->dev;
11ac75ed 4169 struct be_vf_cfg *vf_cfg;
4c876616 4170 int status, old_vfs, vf;
e7bcbd7b 4171 bool spoofchk;
39f1d94d 4172
257a3feb 4173 old_vfs = pci_num_vf(adapter->pdev);
39f1d94d
SP
4174
4175 status = be_vf_setup_init(adapter);
4176 if (status)
4177 goto err;
30128031 4178
4c876616
SP
4179 if (old_vfs) {
4180 for_all_vfs(adapter, vf_cfg, vf) {
4181 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
4182 if (status)
4183 goto err;
4184 }
f9449ab7 4185
4c876616
SP
4186 status = be_vfs_mac_query(adapter);
4187 if (status)
4188 goto err;
4189 } else {
bec84e6b
VV
4190 status = be_vfs_if_create(adapter);
4191 if (status)
4192 goto err;
4193
39f1d94d
SP
4194 status = be_vf_eth_addr_config(adapter);
4195 if (status)
4196 goto err;
4197 }
f9449ab7 4198
11ac75ed 4199 for_all_vfs(adapter, vf_cfg, vf) {
04a06028 4200 /* Allow VFs to program MAC/VLAN filters */
435452aa
VV
4201 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
4202 vf + 1);
4203 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
04a06028 4204 status = be_cmd_set_fn_privileges(adapter,
435452aa 4205 vf_cfg->privileges |
04a06028
SP
4206 BE_PRIV_FILTMGMT,
4207 vf + 1);
435452aa
VV
4208 if (!status) {
4209 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
04a06028
SP
4210 dev_info(dev, "VF%d has FILTMGMT privilege\n",
4211 vf);
435452aa 4212 }
04a06028
SP
4213 }
4214
0f77ba73
RN
4215 /* Allow full available bandwidth */
4216 if (!old_vfs)
4217 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 4218
e7bcbd7b
KA
4219 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
4220 vf_cfg->if_handle, NULL,
4221 &spoofchk);
4222 if (!status)
4223 vf_cfg->spoofchk = spoofchk;
4224
bdce2ad7 4225 if (!old_vfs) {
0599863d 4226 be_cmd_enable_vf(adapter, vf + 1);
bdce2ad7
SR
4227 be_cmd_set_logical_link_config(adapter,
4228 IFLA_VF_LINK_STATE_AUTO,
4229 vf+1);
4230 }
f9449ab7 4231 }
b4c1df93
SP
4232
4233 if (!old_vfs) {
4234 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
4235 if (status) {
4236 dev_err(dev, "SRIOV enable failed\n");
4237 adapter->num_vfs = 0;
4238 goto err;
4239 }
4240 }
f174c7ec 4241
884476be
SK
4242 if (BE3_chip(adapter)) {
4243 /* On BE3, enable VEB only when SRIOV is enabled */
4244 status = be_cmd_set_hsw_config(adapter, 0, 0,
4245 adapter->if_handle,
4246 PORT_FWD_TYPE_VEB, 0);
4247 if (status)
4248 goto err;
4249 }
4250
f174c7ec 4251 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
f9449ab7
SP
4252 return 0;
4253err:
4c876616
SP
4254 dev_err(dev, "VF setup failed\n");
4255 be_vf_clear(adapter);
f9449ab7
SP
4256 return status;
4257}
4258
f93f160b
VV
4259/* Converting function_mode bits on BE3 to SH mc_type enums */
4260
4261static u8 be_convert_mc_type(u32 function_mode)
4262{
66064dbc 4263 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 4264 return vNIC1;
66064dbc 4265 else if (function_mode & QNQ_MODE)
f93f160b
VV
4266 return FLEX10;
4267 else if (function_mode & VNIC_MODE)
4268 return vNIC2;
4269 else if (function_mode & UMC_ENABLED)
4270 return UMC;
4271 else
4272 return MC_NONE;
4273}
4274
92bf14ab
SP
4275/* On BE2/BE3 FW does not suggest the supported limits */
4276static void BEx_get_resources(struct be_adapter *adapter,
4277 struct be_resources *res)
4278{
bec84e6b 4279 bool use_sriov = adapter->num_vfs ? 1 : 0;
92bf14ab
SP
4280
4281 if (be_physfn(adapter))
4282 res->max_uc_mac = BE_UC_PMAC_COUNT;
4283 else
4284 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4285
f93f160b
VV
4286 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4287
4288 if (be_is_mc(adapter)) {
4289 /* Assuming that there are 4 channels per port,
4290 * when multi-channel is enabled
4291 */
4292 if (be_is_qnq_mode(adapter))
4293 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4294 else
4295 /* In a non-qnq multichannel mode, the pvid
4296 * takes up one vlan entry
4297 */
4298 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4299 } else {
92bf14ab 4300 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
f93f160b
VV
4301 }
4302
92bf14ab
SP
4303 res->max_mcast_mac = BE_MAX_MC;
4304
a5243dab
VV
4305 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4306 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4307 * *only* if it is RSS-capable.
4308 */
4309 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
18c57c74
KA
4310 be_virtfn(adapter) ||
4311 (be_is_mc(adapter) &&
4312 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
92bf14ab 4313 res->max_tx_qs = 1;
a28277dc
SR
4314 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4315 struct be_resources super_nic_res = {0};
4316
4317 /* On a SuperNIC profile, the driver needs to use the
4318 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4319 */
de2b1e03
SK
4320 be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
4321 ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
4322 0);
a28277dc
SR
4323 /* Some old versions of BE3 FW don't report max_tx_qs value */
4324 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4325 } else {
92bf14ab 4326 res->max_tx_qs = BE3_MAX_TX_QS;
a28277dc 4327 }
92bf14ab
SP
4328
4329 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4330 !use_sriov && be_physfn(adapter))
4331 res->max_rss_qs = (adapter->be3_native) ?
4332 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4333 res->max_rx_qs = res->max_rss_qs + 1;
4334
e3dc867c 4335 if (be_physfn(adapter))
d3518e21 4336 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
e3dc867c
SR
4337 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4338 else
4339 res->max_evt_qs = 1;
92bf14ab
SP
4340
4341 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
71bb8bd0 4342 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
92bf14ab
SP
4343 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4344 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4345}
4346
30128031
SP
4347static void be_setup_init(struct be_adapter *adapter)
4348{
4349 adapter->vlan_prio_bmap = 0xff;
42f11cf2 4350 adapter->phy.link_speed = -1;
30128031
SP
4351 adapter->if_handle = -1;
4352 adapter->be3_native = false;
f66b7cfd 4353 adapter->if_flags = 0;
51d1f98a 4354 adapter->phy_state = BE_UNKNOWN_PHY_STATE;
f25b119c
PR
4355 if (be_physfn(adapter))
4356 adapter->cmd_privileges = MAX_PRIVILEGES;
4357 else
4358 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
4359}
4360
de2b1e03
SK
4361/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
4362 * However, this HW limitation is not exposed to the host via any SLI cmd.
4363 * As a result, in the case of SRIOV and in particular multi-partition configs
4364 * the driver needs to calculate a proportional share of RSS Tables per PF-pool
4365 * for distribution between the VFs. This self-imposed limit will determine the
4366 * number of VFs for which RSS can be enabled.
4367 */
4368void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
4369{
4370 struct be_port_resources port_res = {0};
4371 u8 rss_tables_on_port;
4372 u16 max_vfs = be_max_vfs(adapter);
4373
4374 be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
4375 RESOURCE_LIMITS, 0);
4376
4377 rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;
4378
4379 /* Each PF Pool's RSS Tables limit =
4380 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
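 * e.g. (hypothetical numbers): with 15 RSS tables left on the port, a
 * PF owning 32 of the port's 64 max VFs gets 32 * 15 / 64 = 7 tables.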
4381 */
4382 adapter->pool_res.max_rss_tables =
4383 max_vfs * rss_tables_on_port / port_res.max_vfs;
4384}
4385
bec84e6b
VV
4386static int be_get_sriov_config(struct be_adapter *adapter)
4387{
bec84e6b 4388 struct be_resources res = {0};
d3d18312 4389 int max_vfs, old_vfs;
bec84e6b 4390
de2b1e03
SK
4391 be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
4392 RESOURCE_LIMITS, 0);
d3d18312 4393
ace40aff 4394 /* Some old versions of BE3 FW don't report max_vfs value */
bec84e6b
VV
4395 if (BE3_chip(adapter) && !res.max_vfs) {
4396 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4397 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4398 }
4399
d3d18312 4400 adapter->pool_res = res;
bec84e6b 4401
ace40aff
VV
4402 /* If during previous unload of the driver, the VFs were not disabled,
4403 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4404 * Instead use the TotalVFs value stored in the pci-dev struct.
4405 */
bec84e6b
VV
4406 old_vfs = pci_num_vf(adapter->pdev);
4407 if (old_vfs) {
ace40aff
VV
4408 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4409 old_vfs);
4410
4411 adapter->pool_res.max_vfs =
4412 pci_sriov_get_totalvfs(adapter->pdev);
bec84e6b 4413 adapter->num_vfs = old_vfs;
bec84e6b
VV
4414 }
4415
de2b1e03
SK
4416 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4417 be_calculate_pf_pool_rss_tables(adapter);
4418 dev_info(&adapter->pdev->dev,
4419 "RSS can be enabled for all VFs if num_vfs <= %d\n",
4420 be_max_pf_pool_rss_tables(adapter));
4421 }
bec84e6b
VV
4422 return 0;
4423}
4424
ace40aff
VV
4425static void be_alloc_sriov_res(struct be_adapter *adapter)
4426{
4427 int old_vfs = pci_num_vf(adapter->pdev);
b9263cbf 4428 struct be_resources vft_res = {0};
ace40aff
VV
4429 int status;
4430
4431 be_get_sriov_config(adapter);
4432
4433 if (!old_vfs)
4434 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4435
4436 /* When the HW is in SRIOV capable configuration, the PF-pool
4437 * resources are given to PF during driver load, if there are no
4438 * old VFs. This facility is not available in BE3 FW.
4439 * Also, this is done by FW in Lancer chip.
4440 */
4441 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
b9263cbf 4442 be_calculate_vf_res(adapter, 0, &vft_res);
ace40aff 4443 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
b9263cbf 4444 &vft_res);
ace40aff
VV
4445 if (status)
4446 dev_err(&adapter->pdev->dev,
4447 "Failed to optimize SRIOV resources\n");
4448 }
4449}
4450
92bf14ab 4451static int be_get_resources(struct be_adapter *adapter)
abb93951 4452{
92bf14ab
SP
4453 struct device *dev = &adapter->pdev->dev;
4454 struct be_resources res = {0};
4455 int status;
abb93951 4456
92bf14ab
SP
4457 /* For Lancer, SH etc., read per-function resource limits from FW.
4458 * GET_FUNC_CONFIG returns per-function guaranteed limits.
4459 * GET_PROFILE_CONFIG returns PCI-E related (PF-pool) limits.
4460 */
ce7faf0a
SP
4461 if (BEx_chip(adapter)) {
4462 BEx_get_resources(adapter, &res);
4463 } else {
92bf14ab
SP
4464 status = be_cmd_get_func_config(adapter, &res);
4465 if (status)
4466 return status;
abb93951 4467
71bb8bd0
VV
4468 /* If a default RXQ must be created, we'll use up one RSSQ */
4469 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4470 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4471 res.max_rss_qs -= 1;
abb93951 4472 }
4c876616 4473
ce7faf0a
SP
4474 /* If RoCE is supported, stash away half the EQs for RoCE */
4475 res.max_nic_evt_qs = be_roce_supported(adapter) ?
4476 res.max_evt_qs / 2 : res.max_evt_qs;
4477 adapter->res = res;
4478
71bb8bd0
VV
4479 /* If FW supports RSS default queue, then skip creating non-RSS
4480 * queue for non-IP traffic.
4481 */
4482 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
4483 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
4484
acbafeb1
SP
4485 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4486 be_max_txqs(adapter), be_max_rxqs(adapter),
ce7faf0a 4487 be_max_rss(adapter), be_max_nic_eqs(adapter),
acbafeb1
SP
4488 be_max_vfs(adapter));
4489 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4490 be_max_uc(adapter), be_max_mc(adapter),
4491 be_max_vlans(adapter));
4492
e261768e
SP
4493 /* Ensure RX and TX queues are created in pairs at init time */
4494 adapter->cfg_num_rx_irqs =
4495 min_t(u16, netif_get_num_default_rss_queues(),
4496 be_max_qp_irqs(adapter));
4497 adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
92bf14ab 4498 return 0;
abb93951
PR
4499}
4500
39f1d94d
SP
4501static int be_get_config(struct be_adapter *adapter)
4502{
6b085ba9 4503 int status, level;
542963b7 4504 u16 profile_id;
6b085ba9 4505
980df249
SR
4506 status = be_cmd_get_cntl_attributes(adapter);
4507 if (status)
4508 return status;
4509
e97e3cda 4510 status = be_cmd_query_fw_cfg(adapter);
abb93951 4511 if (status)
92bf14ab 4512 return status;
abb93951 4513
fd7ff6f0
VD
4514 if (!lancer_chip(adapter) && be_physfn(adapter))
4515 be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);
4516
6b085ba9
SP
4517 if (BEx_chip(adapter)) {
4518 level = be_cmd_get_fw_log_level(adapter);
4519 adapter->msg_enable =
4520 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4521 }
4522
4523 be_cmd_get_acpi_wol_cap(adapter);
45f13df7
SB
4524 pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
4525 pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);
6b085ba9 4526
21252377
VV
4527 be_cmd_query_port_name(adapter);
4528
4529 if (be_physfn(adapter)) {
542963b7
VV
4530 status = be_cmd_get_active_profile(adapter, &profile_id);
4531 if (!status)
4532 dev_info(&adapter->pdev->dev,
4533 "Using profile 0x%x\n", profile_id);
962bcb75 4534 }
bec84e6b 4535
92bf14ab 4536 return 0;
39f1d94d
SP
4537}
4538
95046b92
SP
4539static int be_mac_setup(struct be_adapter *adapter)
4540{
4541 u8 mac[ETH_ALEN];
4542 int status;
4543
4544 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4545 status = be_cmd_get_perm_mac(adapter, mac);
4546 if (status)
4547 return status;
4548
4549 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4550 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
95046b92
SP
4551 }
4552
95046b92
SP
4553 return 0;
4554}
4555
68d7bdcb
SP
4556static void be_schedule_worker(struct be_adapter *adapter)
4557{
b7172414 4558 queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
68d7bdcb
SP
4559 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4560}
4561
710f3e59
SB
4562static void be_destroy_err_recovery_workq(void)
4563{
4564 if (!be_err_recovery_workq)
4565 return;
4566
4567 flush_workqueue(be_err_recovery_workq);
4568 destroy_workqueue(be_err_recovery_workq);
4569 be_err_recovery_workq = NULL;
4570}
4571
972f37b4 4572static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
eb7dd46c 4573{
710f3e59
SB
4574 struct be_error_recovery *err_rec = &adapter->error_recovery;
4575
4576 if (!be_err_recovery_workq)
4577 return;
4578
4579 queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
4580 msecs_to_jiffies(delay));
eb7dd46c
SP
4581 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4582}
4583
7707133c 4584static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 4585{
68d7bdcb 4586 struct net_device *netdev = adapter->netdev;
10ef9ab4 4587 int status;
ba343c77 4588
7707133c 4589 status = be_evt_queues_create(adapter);
abb93951
PR
4590 if (status)
4591 goto err;
73d540f2 4592
7707133c 4593 status = be_tx_qs_create(adapter);
c2bba3df
SK
4594 if (status)
4595 goto err;
10ef9ab4 4596
7707133c 4597 status = be_rx_cqs_create(adapter);
10ef9ab4 4598 if (status)
a54769f5 4599 goto err;
6b7c5b94 4600
7707133c 4601 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
4602 if (status)
4603 goto err;
4604
68d7bdcb
SP
4605 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4606 if (status)
4607 goto err;
4608
4609 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4610 if (status)
4611 goto err;
4612
7707133c
SP
4613 return 0;
4614err:
4615 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4616 return status;
4617}
4618
62219066
AK
4619static int be_if_create(struct be_adapter *adapter)
4620{
4621 u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4622 u32 cap_flags = be_if_cap_flags(adapter);
4623 int status;
4624
b7172414
SP
4625 /* alloc required memory for other filtering fields */
4626 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4627 sizeof(*adapter->pmac_id), GFP_KERNEL);
4628 if (!adapter->pmac_id)
4629 return -ENOMEM;
4630
4631 adapter->mc_list = kcalloc(be_max_mc(adapter),
4632 sizeof(*adapter->mc_list), GFP_KERNEL);
4633 if (!adapter->mc_list)
4634 return -ENOMEM;
4635
4636 adapter->uc_list = kcalloc(be_max_uc(adapter),
4637 sizeof(*adapter->uc_list), GFP_KERNEL);
4638 if (!adapter->uc_list)
4639 return -ENOMEM;
4640
e261768e 4641 if (adapter->cfg_num_rx_irqs == 1)
62219066
AK
4642 cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
4643
4644 en_flags &= cap_flags;
4645 /* will enable all the needed filter flags in be_open() */
4646 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4647 &adapter->if_handle, 0);
4648
b7172414
SP
4649 if (status)
4650 return status;
4651
4652 return 0;
62219066
AK
4653}
4654
68d7bdcb
SP
4655int be_update_queues(struct be_adapter *adapter)
4656{
4657 struct net_device *netdev = adapter->netdev;
4658 int status;
4659
4660 if (netif_running(netdev))
4661 be_close(netdev);
4662
4663 be_cancel_worker(adapter);
4664
4665 /* If any vectors have been shared with RoCE we cannot re-program
4666 * the MSIx table.
4667 */
4668 if (!adapter->num_msix_roce_vec)
4669 be_msix_disable(adapter);
4670
4671 be_clear_queues(adapter);
62219066
AK
4672 status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4673 if (status)
4674 return status;
68d7bdcb
SP
4675
4676 if (!msix_enabled(adapter)) {
4677 status = be_msix_enable(adapter);
4678 if (status)
4679 return status;
4680 }
4681
62219066
AK
4682 status = be_if_create(adapter);
4683 if (status)
4684 return status;
4685
68d7bdcb
SP
4686 status = be_setup_queues(adapter);
4687 if (status)
4688 return status;
4689
4690 be_schedule_worker(adapter);
4691
4692 if (netif_running(netdev))
4693 status = be_open(netdev);
4694
4695 return status;
4696}
4697
f7062ee5
SP
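/* e.g. fw_major_num("4.2.324.0") returns 4; a version string that does
 * not begin with an integer yields 0.
 */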
4698static inline int fw_major_num(const char *fw_ver)
4699{
4700 int fw_major = 0, i;
4701
4702 i = sscanf(fw_ver, "%d.", &fw_major);
4703 if (i != 1)
4704 return 0;
4705
4706 return fw_major;
4707}
4708
710f3e59
SB
4709/* FLR the PF if this is an error-recovery flow.
4710 * Otherwise, FLR the PF only when no VFs are already enabled.
4711 */
f962f840
SP
4712static bool be_reset_required(struct be_adapter *adapter)
4713{
710f3e59
SB
4714 if (be_error_recovering(adapter))
4715 return true;
4716 else
4717 return pci_num_vf(adapter->pdev) == 0;
f962f840
SP
4718}
4719
4720/* Wait for the FW to be ready and perform the required initialization */
4721static int be_func_init(struct be_adapter *adapter)
4722{
4723 int status;
4724
4725 status = be_fw_wait_ready(adapter);
4726 if (status)
4727 return status;
4728
710f3e59
SB
4729 /* FW is now ready; clear errors to allow cmds/doorbell */
4730 be_clear_error(adapter, BE_CLEAR_ALL);
4731
f962f840
SP
4732 if (be_reset_required(adapter)) {
4733 status = be_cmd_reset_function(adapter);
4734 if (status)
4735 return status;
4736
4737 /* Wait for interrupts to quiesce after an FLR */
4738 msleep(100);
f962f840
SP
4739 }
4740
4741 /* Tell FW we're ready to fire cmds */
4742 status = be_cmd_fw_init(adapter);
4743 if (status)
4744 return status;
4745
4746 /* Allow interrupts for other ULPs running on NIC function */
4747 be_intr_set(adapter, true);
4748
4749 return 0;
4750}
4751
7707133c
SP
4752static int be_setup(struct be_adapter *adapter)
4753{
4754 struct device *dev = &adapter->pdev->dev;
7707133c
SP
4755 int status;
4756
f962f840
SP
4757 status = be_func_init(adapter);
4758 if (status)
4759 return status;
4760
7707133c
SP
4761 be_setup_init(adapter);
4762
4763 if (!lancer_chip(adapter))
4764 be_cmd_req_native_mode(adapter);
4765
980df249
SR
4766 /* invoke this cmd first to get pf_num and vf_num which are needed
4767 * for issuing profile related cmds
4768 */
4769 if (!BEx_chip(adapter)) {
4770 status = be_cmd_get_func_config(adapter, NULL);
4771 if (status)
4772 return status;
4773 }
72ef3a88 4774
de2b1e03
SK
4775 status = be_get_config(adapter);
4776 if (status)
4777 goto err;
4778
ace40aff
VV
4779 if (!BE2_chip(adapter) && be_physfn(adapter))
4780 be_alloc_sriov_res(adapter);
4781
de2b1e03 4782 status = be_get_resources(adapter);
10ef9ab4 4783 if (status)
a54769f5 4784 goto err;
6b7c5b94 4785
7707133c 4786 status = be_msix_enable(adapter);
10ef9ab4 4787 if (status)
a54769f5 4788 goto err;
6b7c5b94 4789
bcc84140 4790 /* will enable all the needed filter flags in be_open() */
62219066 4791 status = be_if_create(adapter);
7707133c 4792 if (status)
a54769f5 4793 goto err;
6b7c5b94 4794
68d7bdcb
SP
4795 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4796 rtnl_lock();
7707133c 4797 status = be_setup_queues(adapter);
68d7bdcb 4798 rtnl_unlock();
95046b92 4799 if (status)
1578e777
PR
4800 goto err;
4801
7707133c 4802 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
7707133c
SP
4803
4804 status = be_mac_setup(adapter);
10ef9ab4
SP
4805 if (status)
4806 goto err;
4807
e97e3cda 4808 be_cmd_get_fw_ver(adapter);
acbafeb1 4809 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
5a56eb10 4810
e9e2a904 4811 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
50762667 4812 dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
e9e2a904
SK
4813 adapter->fw_ver);
4814 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4815 }
4816
00d594c3
KA
4817 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4818 adapter->rx_fc);
4819 if (status)
4820 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4821 &adapter->rx_fc);
590c391d 4822
00d594c3
KA
4823 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4824 adapter->tx_fc, adapter->rx_fc);
2dc1deb6 4825
bdce2ad7
SR
4826 if (be_physfn(adapter))
4827 be_cmd_set_logical_link_config(adapter,
4828 IFLA_VF_LINK_STATE_AUTO, 0);
4829
884476be
SK
4830 /* BE3 EVB echoes broadcast/multicast packets back to PF's vport
4831 * confusing a Linux bridge or OVS that it might be connected to.
4832 * Set the EVB to PASSTHRU mode which effectively disables the EVB
4833 * when SRIOV is not enabled.
4834 */
4835 if (BE3_chip(adapter))
4836 be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
4837 PORT_FWD_TYPE_PASSTHRU, 0);
4838
bec84e6b
VV
4839 if (adapter->num_vfs)
4840 be_vf_setup(adapter);
f9449ab7 4841
f25b119c
PR
4842 status = be_cmd_get_phy_info(adapter);
4843 if (!status && be_pause_supported(adapter))
42f11cf2
AK
4844 adapter->phy.fc_autoneg = 1;
4845
710f3e59
SB
4846 if (be_physfn(adapter) && !lancer_chip(adapter))
4847 be_cmd_set_features(adapter);
4848
68d7bdcb 4849 be_schedule_worker(adapter);
e1ad8e33 4850 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 4851 return 0;
a54769f5
SP
4852err:
4853 be_clear(adapter);
4854 return status;
4855}
6b7c5b94 4856
66268739
IV
4857#ifdef CONFIG_NET_POLL_CONTROLLER
4858static void be_netpoll(struct net_device *netdev)
4859{
4860 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 4861 struct be_eq_obj *eqo;
66268739
IV
4862 int i;
4863
e49cc34f 4864 for_all_evt_queues(adapter, eqo, i) {
20947770 4865 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
e49cc34f
SP
4866 napi_schedule(&eqo->napi);
4867 }
66268739
IV
4868}
4869#endif
4870
485bf569
SN
4871int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4872{
4873 const struct firmware *fw;
4874 int status;
4875
4876 if (!netif_running(adapter->netdev)) {
4877 dev_err(&adapter->pdev->dev,
4878 "Firmware load not allowed (interface is down)\n");
940a3fcd 4879 return -ENETDOWN;
485bf569
SN
4880 }
4881
4882 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4883 if (status)
4884 goto fw_exit;
4885
4886 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4887
4888 if (lancer_chip(adapter))
4889 status = lancer_fw_download(adapter, fw);
4890 else
4891 status = be_fw_download(adapter, fw);
4892
eeb65ced 4893 if (!status)
e97e3cda 4894 be_cmd_get_fw_ver(adapter);
eeb65ced 4895
84517482
AK
4896fw_exit:
4897 release_firmware(fw);
4898 return status;
4899}
4900
add511b3
RP
4901static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4902 u16 flags)
a77dcb8c
AK
4903{
4904 struct be_adapter *adapter = netdev_priv(dev);
4905 struct nlattr *attr, *br_spec;
4906 int rem;
4907 int status = 0;
4908 u16 mode = 0;
4909
4910 if (!sriov_enabled(adapter))
4911 return -EOPNOTSUPP;
4912
4913 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4ea85e83
TG
4914 if (!br_spec)
4915 return -EINVAL;
a77dcb8c
AK
4916
4917 nla_for_each_nested(attr, br_spec, rem) {
4918 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4919 continue;
4920
b7c1a314
TG
4921 if (nla_len(attr) < sizeof(mode))
4922 return -EINVAL;
4923
a77dcb8c 4924 mode = nla_get_u16(attr);
ac0f5fba
SR
4925 if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
4926 return -EOPNOTSUPP;
4927
a77dcb8c
AK
4928 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4929 return -EINVAL;
4930
4931 status = be_cmd_set_hsw_config(adapter, 0, 0,
4932 adapter->if_handle,
4933 mode == BRIDGE_MODE_VEPA ?
4934 PORT_FWD_TYPE_VEPA :
e7bcbd7b 4935 PORT_FWD_TYPE_VEB, 0);
a77dcb8c
AK
4936 if (status)
4937 goto err;
4938
4939 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4940 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4941
4942 return status;
4943 }
4944err:
4945 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4946 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4947
4948 return status;
4949}
4950
4951static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
46c264da
ND
4952 struct net_device *dev, u32 filter_mask,
4953 int nlflags)
a77dcb8c
AK
4954{
4955 struct be_adapter *adapter = netdev_priv(dev);
4956 int status = 0;
4957 u8 hsw_mode;
4958
a77dcb8c
AK
4959 /* BE and Lancer chips support VEB mode only */
4960 if (BEx_chip(adapter) || lancer_chip(adapter)) {
8431706b
IV
4961 /* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
4962 if (!pci_sriov_get_totalvfs(adapter->pdev))
4963 return 0;
a77dcb8c
AK
4964 hsw_mode = PORT_FWD_TYPE_VEB;
4965 } else {
4966 status = be_cmd_get_hsw_config(adapter, NULL, 0,
e7bcbd7b
KA
4967 adapter->if_handle, &hsw_mode,
4968 NULL);
a77dcb8c
AK
4969 if (status)
4970 return 0;
ff9ed19d
KP
4971
4972 if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
4973 return 0;
a77dcb8c
AK
4974 }
4975
4976 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4977 hsw_mode == PORT_FWD_TYPE_VEPA ?
2c3c031c 4978 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
7d4f8d87 4979 0, 0, nlflags, filter_mask, NULL);
a77dcb8c
AK
4980}
4981
b7172414
SP
4982static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
4983 void (*func)(struct work_struct *))
4984{
4985 struct be_cmd_work *work;
4986
4987 work = kzalloc(sizeof(*work), GFP_ATOMIC);
4988 if (!work) {
4989 dev_err(&adapter->pdev->dev,
4990 "be_work memory allocation failed\n");
4991 return NULL;
4992 }
4993
4994 INIT_WORK(&work->work, func);
4995 work->adapter = adapter;
4996 return work;
4997}
4998
630f4b70
SB
4999/* VxLAN offload Notes:
5000 *
5001 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
5002 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
5003 * is expected to work across all types of IP tunnels once exported. Skyhawk
5004 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
16dde0d6
SB
5005 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
5006 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
5007 * those other tunnels are unexported on the fly through ndo_features_check().
630f4b70
SB
5008 *
5009 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
5010 * adds more than one port, disable offloads and don't re-enable them again
5011 * until after all the tunnels are removed.
5012 */
b7172414 5013static void be_work_add_vxlan_port(struct work_struct *work)
c9c47142 5014{
b7172414
SP
5015 struct be_cmd_work *cmd_work =
5016 container_of(work, struct be_cmd_work, work);
5017 struct be_adapter *adapter = cmd_work->adapter;
5018 struct net_device *netdev = adapter->netdev;
c9c47142 5019 struct device *dev = &adapter->pdev->dev;
b7172414 5020 __be16 port = cmd_work->info.vxlan_port;
c9c47142
SP
5021 int status;
5022
1e5b311a
JB
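/* The same UDP port can be added by more than one VxLAN device; count
 * such duplicates as aliases so that offloads stay enabled until the
 * last user deletes the port.
 */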
5023 if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
5024 adapter->vxlan_port_aliases++;
b7172414 5025 goto done;
1e5b311a
JB
5026 }
5027
c9c47142 5028 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
c9c47142
SP
5029 dev_info(dev,
5030 "Only one UDP port supported for VxLAN offloads\n");
630f4b70
SB
5031 dev_info(dev, "Disabling VxLAN offloads\n");
5032 adapter->vxlan_port_count++;
5033 goto err;
c9c47142
SP
5034 }
5035
630f4b70 5036 if (adapter->vxlan_port_count++ >= 1)
b7172414 5037 goto done;
630f4b70 5038
c9c47142
SP
5039 status = be_cmd_manage_iface(adapter, adapter->if_handle,
5040 OP_CONVERT_NORMAL_TO_TUNNEL);
5041 if (status) {
5042 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
5043 goto err;
5044 }
5045
5046 status = be_cmd_set_vxlan_port(adapter, port);
5047 if (status) {
5048 dev_warn(dev, "Failed to add VxLAN port\n");
5049 goto err;
5050 }
5051 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
5052 adapter->vxlan_port = port;
5053
630f4b70
SB
5054 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5055 NETIF_F_TSO | NETIF_F_TSO6 |
5056 NETIF_F_GSO_UDP_TUNNEL;
5057 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
ac9a3d84 5058 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
630f4b70 5059
c9c47142
SP
5060 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
5061 be16_to_cpu(port));
b7172414 5062 goto done;
c9c47142
SP
5063err:
5064 be_disable_vxlan_offloads(adapter);
b7172414
SP
5065done:
5066 kfree(cmd_work);
c9c47142
SP
5067}
5068
static void be_work_del_vxlan_port(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
		container_of(work, struct be_cmd_work, work);
	struct be_adapter *adapter = cmd_work->adapter;
	__be16 port = cmd_work->info.vxlan_port;

	if (adapter->vxlan_port != port)
		goto done;

	if (adapter->vxlan_port_aliases) {
		adapter->vxlan_port_aliases--;
		goto out;
	}

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
out:
	kfree(cmd_work);
}

static void be_cfg_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti,
			      void (*func)(struct work_struct *))
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_cmd_work *cmd_work;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
		return;

	cmd_work = be_alloc_work(adapter, func);
	if (cmd_work) {
		cmd_work->info.vxlan_port = ti->port;
		queue_work(be_wq, &cmd_work->work);
	}
}

static void be_del_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port);
}

static void be_add_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port);
}

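/* Note added for clarity: the ndo_udp_tunnel_add/del paths above defer the
 * actual FW commands to be_wq instead of issuing them inline. be_alloc_work()
 * allocates with GFP_ATOMIC, which is consistent with these hooks being
 * callable from contexts where sleeping is not allowed.
 */
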
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE to work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}

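/* Encapsulation layout enforced by the check above (illustrative sketch):
 *
 *	[outer IP][outer UDP, 8B][VxLAN header, 8B][inner Ethernet (TEB)]
 *	          ^ transport header               ^ inner mac header
 *
 * i.e. skb_inner_mac_header() - skb_transport_header() must equal
 * sizeof(struct udphdr) + sizeof(struct vxlanhdr); any other tunnel shape
 * loses the checksum/GSO offload bits.
 */
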
static int be_get_phys_port_id(struct net_device *dev,
			       struct netdev_phys_item_id *ppid)
{
	int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
	struct be_adapter *adapter = netdev_priv(dev);
	u8 *id;

	if (MAX_PHYS_ITEM_ID_LEN < id_len)
		return -ENOSPC;

	ppid->id[0] = adapter->hba_port_num + 1;
	id = &ppid->id[1];
	for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
	     i--, id += CNTL_SERIAL_NUM_WORD_SZ)
		memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);

	ppid->id_len = id_len;

	return 0;
}

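/* Resulting ppid->id layout, derived from the loop above:
 *
 *	id[0]           = hba_port_num + 1
 *	id[1..id_len-1] = controller serial number, copied word by word in
 *	                  reverse order, CNTL_SERIAL_NUM_WORD_SZ bytes each
 */
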
static void be_set_rx_mode(struct net_device *dev)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct be_cmd_work *work;

	work = be_alloc_work(adapter, be_work_set_rx_mode);
	if (work)
		queue_work(be_wq, &work->work);
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
	.ndo_set_vf_spoofchk	= be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
	.ndo_udp_tunnel_add	= be_add_vxlan_port,
	.ndo_udp_tunnel_del	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
	.ndo_get_phys_port_id	= be_get_phys_port_id,
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}

static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}

static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	rtnl_lock();
	if (netif_running(netdev))
		status = be_open(netdev);
	rtnl_unlock();

	if (status)
		return status;

	netif_device_attach(netdev);

	return 0;
}

static void be_soft_reset(struct be_adapter *adapter)
{
	u32 val;

	dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
	val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
	val |= SLIPORT_SOFTRESET_SR_MASK;
	iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
}

static bool be_err_is_recoverable(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	unsigned long initial_idle_time =
		msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
	unsigned long recovery_interval =
		msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
	u16 ue_err_code;
	u32 val;

	val = be_POST_stage_get(adapter);
	if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
		return false;
	ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
	if (ue_err_code == 0)
		return false;

	dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
		ue_err_code);

	if (jiffies - err_rec->probe_time <= initial_idle_time) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from driver load\n",
			jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
		return false;
	}

	if (err_rec->last_recovery_time &&
	    (jiffies - err_rec->last_recovery_time <= recovery_interval)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from last recovery\n",
			jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
		return false;
	}

	if (ue_err_code == err_rec->last_err_code) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover from a consecutive TPE error\n");
		return false;
	}

	err_rec->last_recovery_time = jiffies;
	err_rec->last_err_code = ue_err_code;
	return true;
}

static int be_tpe_recover(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	int status = -EAGAIN;
	u32 val;

	switch (err_rec->recovery_state) {
	case ERR_RECOVERY_ST_NONE:
		err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
		err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_DETECT:
		val = be_POST_stage_get(adapter);
		if ((val & POST_STAGE_RECOVERABLE_ERR) !=
		    POST_STAGE_RECOVERABLE_ERR) {
			dev_err(&adapter->pdev->dev,
				"Unrecoverable HW error detected: 0x%x\n", val);
			status = -EINVAL;
			err_rec->resched_delay = 0;
			break;
		}

		dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");

		/* Only PF0 initiates Chip Soft Reset. But PF0 must wait UE2SR
		 * milliseconds before it checks for final error status in
		 * SLIPORT_SEMAPHORE to determine if recovery criteria is met.
		 * If it does, then PF0 initiates a Soft Reset.
		 */
		if (adapter->pf_num == 0) {
			err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
			err_rec->resched_delay = err_rec->ue_to_reset_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
			break;
		}

		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_RESET:
		if (!be_err_is_recoverable(adapter)) {
			dev_err(&adapter->pdev->dev,
				"Failed to meet recovery criteria\n");
			status = -EIO;
			err_rec->resched_delay = 0;
			break;
		}
		be_soft_reset(adapter);
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					err_rec->ue_to_reset_time;
		break;

	case ERR_RECOVERY_ST_PRE_POLL:
		err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
		err_rec->resched_delay = 0;
		status = 0;	/* done */
		break;

	default:
		status = -EINVAL;
		err_rec->resched_delay = 0;
		break;
	}

	return status;
}

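/* Summary of the TPE recovery state machine above, added for clarity:
 *
 *	NONE     -> DETECT      arm the UE detect window
 *	DETECT   -> RESET       PF0 only; wait ue_to_reset_time first
 *	DETECT   -> PRE_POLL    other PFs; wait ue_to_poll_time instead
 *	RESET    -> PRE_POLL    after issuing the chip soft reset
 *	PRE_POLL -> REINIT      returns 0: adapter is ready for re-init
 *
 * Intermediate states return -EAGAIN; the caller reschedules itself after
 * err_rec->resched_delay.
 */
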
static int be_err_recover(struct be_adapter *adapter)
{
	int status;

	if (!lancer_chip(adapter)) {
		if (!adapter->error_recovery.recovery_supported ||
		    adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
			return -EIO;
		status = be_tpe_recover(adapter);
		if (status)
			goto err;
	}

	/* Wait for adapter to reach quiescent state before
	 * destroying queues
	 */
	status = be_fw_wait_ready(adapter);
	if (status)
		goto err;

	adapter->flags |= BE_FLAGS_TRY_RECOVERY;

	be_cleanup(adapter);

	status = be_resume(adapter);
	if (status)
		goto err;

	adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;

err:
	return status;
}

static void be_err_detection_task(struct work_struct *work)
{
	struct be_error_recovery *err_rec =
		container_of(work, struct be_error_recovery,
			     err_detection_work.work);
	struct be_adapter *adapter =
		container_of(err_rec, struct be_adapter,
			     error_recovery);
	u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
	struct device *dev = &adapter->pdev->dev;
	int recovery_status;

	be_detect_error(adapter);
	if (!be_check_error(adapter, BE_ERROR_HW))
		goto reschedule_task;

	recovery_status = be_err_recover(adapter);
	if (!recovery_status) {
		err_rec->recovery_retries = 0;
		err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
		dev_info(dev, "Adapter recovery successful\n");
		goto reschedule_task;
	} else if (!lancer_chip(adapter) && err_rec->resched_delay) {
		/* BEx/SH recovery state machine */
		if (adapter->pf_num == 0 &&
		    err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
			dev_err(&adapter->pdev->dev,
				"Adapter recovery in progress\n");
		resched_delay = err_rec->resched_delay;
		goto reschedule_task;
	} else if (lancer_chip(adapter) && be_virtfn(adapter)) {
		/* For VFs, check every second whether the PF has
		 * allocated resources.
		 */
		dev_err(dev, "Re-trying adapter recovery\n");
		goto reschedule_task;
	} else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
		   ERR_RECOVERY_MAX_RETRY_COUNT) {
		/* In case of another error during recovery, it takes 30 sec
		 * for adapter to come out of error. Retry error recovery after
		 * this time interval.
		 */
		dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
		resched_delay = ERR_RECOVERY_RETRY_DELAY;
		goto reschedule_task;
	} else {
		dev_err(dev, "Adapter recovery failed\n");
		dev_err(dev, "Please reboot server to recover\n");
	}

	return;

reschedule_task:
	be_schedule_err_detection(adapter, resched_delay);
}

static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Port %c: %s Vendor: %s part no: %s",
			adapter->port_name,
			be_misconfig_evt_port_state[adapter->phy_state],
			adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
	if (adapter->pcicfg && adapter->pcicfg_mapped)
		pci_iounmap(adapter->pdev, adapter->pcicfg);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || be_virtfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
			adapter->pcicfg_mapped = true;
		} else {
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
			adapter->pcicfg_mapped = false;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

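/* BAR usage as mapped above (summary added for clarity):
 *
 *	CSR:    BAR 2, BEx physical functions only
 *	DB:     BAR 0 on Lancer and on VFs, BAR 4 otherwise (see db_bar())
 *	PCICFG: BAR 1 on BE2, BAR 0 on BE3/Skyhawk PFs; VFs reach it at
 *	        SRIOV_VF_PCICFG_OFFSET inside the doorbell BAR
 */
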
static void be_drv_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
	struct device *dev = &adapter->pdev->dev;

	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->stats_cmd;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);
}

/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	mutex_init(&adapter->mcc_lock);
	mutex_init(&adapter->rx_filter_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
	adapter->error_recovery.resched_delay = 0;
	INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	if (!pci_vfs_assigned(adapter->pdev))
		be_cmd_reset_function(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static ssize_t be_hwmon_show_temp(struct device *dev,
				  struct device_attribute *dev_attr,
				  char *buf)
{
	struct be_adapter *adapter = dev_get_drvdata(dev);

	/* Unit: millidegree Celsius */
	if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
		return -EIO;
	else
		return sprintf(buf, "%u\n",
			       adapter->hwmon_info.be_on_die_temp * 1000);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);

static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	adapter->error_recovery.probe_time = jiffies;

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	be_roce_dev_remove(adapter);

	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct be_resources vft_res = {0};
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF gets access to a larger share of resources.
	 * This facility is not available in BE3 FW.
	 * Also, on Lancer this redistribution is done by the FW itself.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		be_calculate_vf_res(adapter, adapter->num_vfs,
				    &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, &vft_res);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}

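/* Usage note, added for clarity: this hook backs the standard PCI sysfs
 * knob for SR-IOV, e.g. (the device address below is a placeholder):
 *
 *	echo 4 > /sys/bus/pci/devices/0000:04:00.0/sriov_numvfs   # enable 4 VFs
 *	echo 0 > /sys/bus/pci/devices/0000:04:00.0/sriov_numvfs   # disable VFs
 */
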
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	int status;

	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 0) {
		pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
		pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
	}

	be_wq = create_singlethread_workqueue("be_wq");
	if (!be_wq) {
		pr_warn(DRV_NAME " : workqueue creation failed\n");
		return -1;
	}

	be_err_recovery_workq =
		create_singlethread_workqueue("be_err_recover");
	if (!be_err_recovery_workq)
		pr_warn(DRV_NAME " : Could not create error recovery workqueue\n");

	status = pci_register_driver(&be_driver);
	if (status) {
		destroy_workqueue(be_wq);
		be_destroy_err_recovery_workq();
	}
	return status;
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);

	be_destroy_err_recovery_workq();

	if (be_wq)
		destroy_workqueue(be_wq);
}
module_exit(be_exit_module);