be2net: fix port-res desc query of GET_PROFILE_CONFIG FW cmd
[deliverable/linux.git] drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2015 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
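
/*
 * Added usage note: both module parameters are read-only after load
 * (S_IRUGO), so they must be given at load time, e.g.
 * "modprobe be2net rx_frag_size=4096". As the comment above says, num_vfs
 * is obsolete; VFs are enabled through the standard sysfs interface
 * (sriov_numvfs) instead.
 */
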
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
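
/*
 * Illustrative sketch (not part of the original hunk): the UE decode path
 * elsewhere in this driver walks the two tables above bit by bit and
 * prints the block name for every set bit in the low/high UE status CSRs,
 * roughly:
 *
 *	for (i = 0; ue_lo; ue_lo >>= 1, i++)
 *		if (ue_lo & 1)
 *			dev_err(dev, "Unrecoverable Error in %s\n",
 *				ue_status_low_desc[i]);
 */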

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}
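
/*
 * Illustrative usage sketch (assumed pairing, not from this hunk): ring
 * creation elsewhere in the driver allocates a queue of fixed-size
 * entries and frees it on teardown, e.g. for a TX ring:
 *
 *	if (be_queue_alloc(adapter, txq, TX_Q_LEN,
 *			   sizeof(struct be_eth_wrb)))
 *		return -ENOMEM;
 *	...
 *	be_queue_free(adapter, txq);
 *
 * (TX_Q_LEN and struct be_eth_wrb come from the driver headers.)
 */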

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
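
/*
 * Added note: every doorbell write above packs the ring id into the low
 * bits and an operation-specific count into higher bits of one 32-bit
 * value. Assuming the BEx register layout (DB_RQ_NUM_POSTED_SHIFT == 24),
 * posting one buffer to RX ring 5 would look like:
 *
 *	val = (5 & DB_RQ_RING_ID_MASK) | (1 << DB_RQ_NUM_POSTED_SHIFT);
 *	iowrite32(val, adapter->db + DB_RQ_OFFSET);
 *
 * The wmb() before the write orders the ring-memory updates ahead of the
 * doorbell so the HW never sees the notification before the descriptors.
 */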

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if the PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or because the PF didn't pre-provision it.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}
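
/*
 * Worked example (added for illustration): accumulate_16bit_val() treats
 * a new 16-bit reading that is smaller than the stored low word as a
 * wrap. With the accumulator at 0x0001FFFA (high word 1, low word 65530),
 * a fresh reading of 10 yields 0x00010000 + 10 + 65536 = 0x0002000A: the
 * high word advances by one wrap and the low word becomes the new
 * reading.
 */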

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
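
/*
 * Added note: the fetch_begin/fetch_retry loops above are the standard
 * u64_stats_sync seqcount pattern. On 32-bit hosts a writer can tear a
 * 64-bit counter, so the reader snapshots pkts/bytes and retries if the
 * sequence count changed mid-read; on 64-bit hosts the helpers compile
 * away to plain loads.
 */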

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);
	u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += tx_pkts;
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		stats->tx_vxlan_offload_pkts += tx_pkts;
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}
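
/*
 * Worked example (added for illustration): skb_wrb_cnt() counts one
 * header WRB, one WRB for the linear area if it is non-empty, and one per
 * page fragment. An skb with a non-empty linear head and three frags thus
 * needs 1 + 1 + 3 = 5 WRBs.
 */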

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio_bits;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}

static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}
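
/*
 * Added note: be_get_wrb_params_from_skb() only collects offload
 * decisions (LSO, checksum, VLAN) into the wrb_params feature bitmap;
 * wrb_fill_hdr() then transcribes each bit into the hardware header WRB.
 * Keeping the two steps separate lets the workaround paths flip bits such
 * as VLAN_SKIP_HW before the header is committed.
 */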

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u16 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u16 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);
		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}
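
/*
 * Added note: the header WRB slot is reserved first (be_tx_get_wrb_hdr())
 * but written last (be_tx_setup_wrb_hdr()), after every fragment has been
 * DMA-mapped. A mapping failure can therefore unwind cleanly through
 * be_xmit_restore(), which rewinds the producer index and unmaps only the
 * fragments that were actually mapped.
 */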

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
	 * packets that are 32b or less may cause a transmit stall
	 * on that port. The workaround is to pad such packets
	 * (len <= 32 bytes) to a minimum length of 36b.
	 */
	if (skb->len <= 32) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	return skb;
}
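
/*
 * Added note: skb_put_padto() zero-pads the frame to the requested length
 * and frees the skb itself on failure, which is why the path above can
 * simply return NULL without a separate kfree_skb().
 */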

static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}

/* OS2BMC related */

#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_broadcast_packet(eh, adapter)	\
	(is_multicast_ether_addr(eh->h_dest) &&	\
	 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)

static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}
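
/*
 * Added note: when be_send_pkt_to_bmc() returns true, be_xmit() below
 * enqueues the same skb a second time with the OS2BMC (mgmt) bit set in
 * the WRB header, so one copy goes out on the wire and one is steered to
 * the BMC; skb_get() accounts for the extra reference.
 */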

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}
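
/*
 * Worked example (added for illustration): adapter->vids is a
 * VLAN_N_VID-wide bitmap, and be_vid_config() flattens it into the
 * little-endian u16 array the firmware expects. With bits 10 and 100 set,
 * vids[] becomes { cpu_to_le16(10), cpu_to_le16(100) } and num == 2.
 */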

static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_mc_list(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
	else
		be_set_mc_promisc(adapter);
}

static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
	int i;

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1606
64600ea5 1607static int be_get_vf_config(struct net_device *netdev, int vf,
748b539a 1608 struct ifla_vf_info *vi)
64600ea5
AK
1609{
1610 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1611 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1612
11ac75ed 1613 if (!sriov_enabled(adapter))
64600ea5
AK
1614 return -EPERM;
1615
11ac75ed 1616 if (vf >= adapter->num_vfs)
64600ea5
AK
1617 return -EINVAL;
1618
1619 vi->vf = vf;
ed616689
SC
1620 vi->max_tx_rate = vf_cfg->tx_rate;
1621 vi->min_tx_rate = 0;
a60b3a13
AK
1622 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1623 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
11ac75ed 1624 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
bdce2ad7 1625 vi->linkstate = vf_cfg->plink_tracking;
e7bcbd7b 1626 vi->spoofchk = vf_cfg->spoofchk;
64600ea5
AK
1627
1628 return 0;
1629}
1630
435452aa
VV
1631static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1632{
1633 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1634 u16 vids[BE_NUM_VLANS_SUPPORTED];
1635 int vf_if_id = vf_cfg->if_handle;
1636 int status;
1637
1638 /* Enable Transparent VLAN Tagging */
e7bcbd7b 1639 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
435452aa
VV
1640 if (status)
1641 return status;
1642
1643 /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
1644 vids[0] = 0;
1645 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1646 if (!status)
1647 dev_info(&adapter->pdev->dev,
1648 "Cleared guest VLANs on VF%d", vf);
1649
1650 /* After TVT is enabled, disallow VFs to program VLAN filters */
1651 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1652 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1653 ~BE_PRIV_FILTMGMT, vf + 1);
1654 if (!status)
1655 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1656 }
1657 return 0;
1658}
1659
1660static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1661{
1662 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1663 struct device *dev = &adapter->pdev->dev;
1664 int status;
1665
1666 /* Reset Transparent VLAN Tagging. */
1667 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
e7bcbd7b 1668 vf_cfg->if_handle, 0, 0);
435452aa
VV
1669 if (status)
1670 return status;
1671
1672 /* Allow VFs to program VLAN filtering */
1673 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1674 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1675 BE_PRIV_FILTMGMT, vf + 1);
1676 if (!status) {
1677 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
 1678 dev_info(dev, "VF%d: FILTMGMT priv enabled\n", vf);
1679 }
1680 }
1681
1682 dev_info(dev,
1683 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1684 return 0;
1685}
1686
748b539a 1687static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1da87b7f
AK
1688{
1689 struct be_adapter *adapter = netdev_priv(netdev);
b9fc0e53 1690 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
435452aa 1691 int status;
1da87b7f 1692
11ac75ed 1693 if (!sriov_enabled(adapter))
1da87b7f
AK
1694 return -EPERM;
1695
b9fc0e53 1696 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1da87b7f
AK
1697 return -EINVAL;
1698
b9fc0e53
AK
1699 if (vlan || qos) {
1700 vlan |= qos << VLAN_PRIO_SHIFT;
435452aa 1701 status = be_set_vf_tvt(adapter, vf, vlan);
1da87b7f 1702 } else {
435452aa 1703 status = be_clear_vf_tvt(adapter, vf);
1da87b7f
AK
1704 }
1705
abccf23e
KA
1706 if (status) {
1707 dev_err(&adapter->pdev->dev,
435452aa
VV
1708 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1709 status);
abccf23e
KA
1710 return be_cmd_status(status);
1711 }
1712
1713 vf_cfg->vlan_tag = vlan;
abccf23e 1714 return 0;
1da87b7f
AK
1715}
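/* Worked example with hypothetical values (vlan=100, qos=3), showing how
 * the tag handed to be_set_vf_tvt() is packed:
 *
 *	vlan |= qos << VLAN_PRIO_SHIFT;	-> 100 | (3 << 13) = 0x6064
 *
 * i.e. the 3 PCP bits sit above the 12-bit VLAN ID, matching the 802.1Q
 * TCI layout.
 */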
1716
ed616689
SC
1717static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1718 int min_tx_rate, int max_tx_rate)
e1d18735
AK
1719{
1720 struct be_adapter *adapter = netdev_priv(netdev);
0f77ba73
RN
1721 struct device *dev = &adapter->pdev->dev;
1722 int percent_rate, status = 0;
1723 u16 link_speed = 0;
1724 u8 link_status;
e1d18735 1725
11ac75ed 1726 if (!sriov_enabled(adapter))
e1d18735
AK
1727 return -EPERM;
1728
94f434c2 1729 if (vf >= adapter->num_vfs)
e1d18735
AK
1730 return -EINVAL;
1731
ed616689
SC
1732 if (min_tx_rate)
1733 return -EINVAL;
1734
0f77ba73
RN
1735 if (!max_tx_rate)
1736 goto config_qos;
1737
1738 status = be_cmd_link_status_query(adapter, &link_speed,
1739 &link_status, 0);
1740 if (status)
1741 goto err;
1742
1743 if (!link_status) {
1744 dev_err(dev, "TX-rate setting not allowed when link is down\n");
940a3fcd 1745 status = -ENETDOWN;
0f77ba73
RN
1746 goto err;
1747 }
1748
1749 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1750 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1751 link_speed);
1752 status = -EINVAL;
1753 goto err;
1754 }
1755
1756 /* On Skyhawk the QOS setting must be done only as a % value */
1757 percent_rate = link_speed / 100;
1758 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1759 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1760 percent_rate);
1761 status = -EINVAL;
1762 goto err;
94f434c2 1763 }
e1d18735 1764
0f77ba73
RN
1765config_qos:
1766 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
e1d18735 1767 if (status)
0f77ba73
RN
1768 goto err;
1769
1770 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1771 return 0;
1772
1773err:
1774 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
1775 max_tx_rate, vf);
abccf23e 1776 return be_cmd_status(status);
e1d18735 1777}
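/* Worked example (illustrative, assuming a 10Gbps link): link_speed=10000
 * gives percent_rate = 10000 / 100 = 100, so on Skyhawk max_tx_rate must
 * be a multiple of 100 Mbps within [100, 10000]; e.g. 2500 passes while
 * 2550 trips the (max_tx_rate % percent_rate) check.
 */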
e2fb1afa 1778
bdce2ad7
SR
1779static int be_set_vf_link_state(struct net_device *netdev, int vf,
1780 int link_state)
1781{
1782 struct be_adapter *adapter = netdev_priv(netdev);
1783 int status;
1784
1785 if (!sriov_enabled(adapter))
1786 return -EPERM;
1787
1788 if (vf >= adapter->num_vfs)
1789 return -EINVAL;
1790
 1791 status = be_cmd_set_logical_link_config(adapter, link_state, vf + 1);
abccf23e
KA
1792 if (status) {
1793 dev_err(&adapter->pdev->dev,
1794 "Link state change on VF %d failed: %#x\n", vf, status);
1795 return be_cmd_status(status);
1796 }
bdce2ad7 1797
abccf23e
KA
1798 adapter->vf_cfg[vf].plink_tracking = link_state;
1799
1800 return 0;
bdce2ad7 1801}
e1d18735 1802
e7bcbd7b
KA
1803static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
1804{
1805 struct be_adapter *adapter = netdev_priv(netdev);
1806 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1807 u8 spoofchk;
1808 int status;
1809
1810 if (!sriov_enabled(adapter))
1811 return -EPERM;
1812
1813 if (vf >= adapter->num_vfs)
1814 return -EINVAL;
1815
1816 if (BEx_chip(adapter))
1817 return -EOPNOTSUPP;
1818
1819 if (enable == vf_cfg->spoofchk)
1820 return 0;
1821
1822 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
1823
1824 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
1825 0, spoofchk);
1826 if (status) {
1827 dev_err(&adapter->pdev->dev,
1828 "Spoofchk change on VF %d failed: %#x\n", vf, status);
1829 return be_cmd_status(status);
1830 }
1831
1832 vf_cfg->spoofchk = enable;
1833 return 0;
1834}
1835
2632bafd
SP
1836static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1837 ulong now)
6b7c5b94 1838{
2632bafd
SP
1839 aic->rx_pkts_prev = rx_pkts;
1840 aic->tx_reqs_prev = tx_pkts;
1841 aic->jiffies = now;
1842}
ac124ff9 1843
20947770 1844static int be_get_new_eqd(struct be_eq_obj *eqo)
2632bafd 1845{
20947770
PR
1846 struct be_adapter *adapter = eqo->adapter;
1847 int eqd, start;
2632bafd 1848 struct be_aic_obj *aic;
2632bafd
SP
1849 struct be_rx_obj *rxo;
1850 struct be_tx_obj *txo;
20947770 1851 u64 rx_pkts = 0, tx_pkts = 0;
2632bafd
SP
1852 ulong now;
1853 u32 pps, delta;
20947770 1854 int i;
10ef9ab4 1855
20947770
PR
1856 aic = &adapter->aic_obj[eqo->idx];
1857 if (!aic->enable) {
1858 if (aic->jiffies)
1859 aic->jiffies = 0;
1860 eqd = aic->et_eqd;
1861 return eqd;
1862 }
6b7c5b94 1863
20947770 1864 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2632bafd 1865 do {
57a7744e 1866 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
20947770 1867 rx_pkts += rxo->stats.rx_pkts;
57a7744e 1868 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
20947770 1869 }
10ef9ab4 1870
20947770 1871 for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
2632bafd 1872 do {
57a7744e 1873 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
20947770 1874 tx_pkts += txo->stats.tx_reqs;
57a7744e 1875 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
20947770 1876 }
6b7c5b94 1877
20947770
PR
 1878 /* Skip if we wrapped around or this is the first calculation */
1879 now = jiffies;
1880 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1881 rx_pkts < aic->rx_pkts_prev ||
1882 tx_pkts < aic->tx_reqs_prev) {
1883 be_aic_update(aic, rx_pkts, tx_pkts, now);
1884 return aic->prev_eqd;
1885 }
2632bafd 1886
20947770
PR
1887 delta = jiffies_to_msecs(now - aic->jiffies);
1888 if (delta == 0)
1889 return aic->prev_eqd;
10ef9ab4 1890
20947770
PR
1891 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1892 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1893 eqd = (pps / 15000) << 2;
2632bafd 1894
20947770
PR
1895 if (eqd < 8)
1896 eqd = 0;
1897 eqd = min_t(u32, eqd, aic->max_eqd);
1898 eqd = max_t(u32, eqd, aic->min_eqd);
1899
1900 be_aic_update(aic, rx_pkts, tx_pkts, now);
1901
1902 return eqd;
1903}
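/* Worked example (illustrative traffic numbers): with 400k rx pkts/s and
 * 50k tx reqs/s over the sampling window, pps = 450000 and
 *
 *	eqd = (450000 / 15000) << 2 = 120
 *
 * which is then clamped to [aic->min_eqd, aic->max_eqd]. Rates below
 * 30k pps yield eqd < 8 and disable interrupt delay altogether (eqd = 0).
 */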
1904
1905/* For Skyhawk-R only */
1906static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
1907{
1908 struct be_adapter *adapter = eqo->adapter;
1909 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
1910 ulong now = jiffies;
1911 int eqd;
1912 u32 mult_enc;
1913
1914 if (!aic->enable)
1915 return 0;
1916
1917 if (time_before_eq(now, aic->jiffies) ||
1918 jiffies_to_msecs(now - aic->jiffies) < 1)
1919 eqd = aic->prev_eqd;
1920 else
1921 eqd = be_get_new_eqd(eqo);
1922
1923 if (eqd > 100)
1924 mult_enc = R2I_DLY_ENC_1;
1925 else if (eqd > 60)
1926 mult_enc = R2I_DLY_ENC_2;
1927 else if (eqd > 20)
1928 mult_enc = R2I_DLY_ENC_3;
1929 else
1930 mult_enc = R2I_DLY_ENC_0;
1931
1932 aic->prev_eqd = eqd;
1933
1934 return mult_enc;
1935}
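/* Illustrative mapping of the thresholds above: a computed eqd of 75 lies
 * in (60, 100] and selects R2I_DLY_ENC_2, while 15 selects R2I_DLY_ENC_0
 * (no extra delay). On Skyhawk it is this encoding, not the raw eqd, that
 * be_poll() hands to be_eq_notify().
 */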
1936
1937void be_eqd_update(struct be_adapter *adapter, bool force_update)
1938{
1939 struct be_set_eqd set_eqd[MAX_EVT_QS];
1940 struct be_aic_obj *aic;
1941 struct be_eq_obj *eqo;
1942 int i, num = 0, eqd;
1943
1944 for_all_evt_queues(adapter, eqo, i) {
1945 aic = &adapter->aic_obj[eqo->idx];
1946 eqd = be_get_new_eqd(eqo);
1947 if (force_update || eqd != aic->prev_eqd) {
2632bafd
SP
1948 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1949 set_eqd[num].eq_id = eqo->q.id;
1950 aic->prev_eqd = eqd;
1951 num++;
1952 }
ac124ff9 1953 }
2632bafd
SP
1954
1955 if (num)
1956 be_cmd_modify_eqd(adapter, set_eqd, num);
6b7c5b94
SP
1957}
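/* Illustrative conversion (straight from the expression above): the FW
 * multiplier is (eqd * 65) / 100, so an eqd of 80 usecs is programmed as
 * 52. With force_update set, every EQ is reprogrammed even when the new
 * eqd equals aic->prev_eqd.
 */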
1958
3abcdeda 1959static void be_rx_stats_update(struct be_rx_obj *rxo,
748b539a 1960 struct be_rx_compl_info *rxcp)
4097f663 1961{
ac124ff9 1962 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1963
ab1594e9 1964 u64_stats_update_begin(&stats->sync);
3abcdeda 1965 stats->rx_compl++;
2e588f84 1966 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1967 stats->rx_pkts++;
8670f2a5
SB
1968 if (rxcp->tunneled)
1969 stats->rx_vxlan_offload_pkts++;
2e588f84 1970 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1971 stats->rx_mcast_pkts++;
2e588f84 1972 if (rxcp->err)
ac124ff9 1973 stats->rx_compl_err++;
ab1594e9 1974 u64_stats_update_end(&stats->sync);
4097f663
SP
1975}
1976
2e588f84 1977static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1978{
19fad86f 1979 /* L4 checksum is not reliable for non-TCP/UDP packets.
c9c47142
SP
1980 * Also ignore ipcksm for ipv6 pkts
1981 */
2e588f84 1982 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 1983 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
728a9972
AK
1984}
1985
0b0ef1d0 1986static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
6b7c5b94 1987{
10ef9ab4 1988 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1989 struct be_rx_page_info *rx_page_info;
3abcdeda 1990 struct be_queue_info *rxq = &rxo->q;
0b0ef1d0 1991 u16 frag_idx = rxq->tail;
6b7c5b94 1992
3abcdeda 1993 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
1994 BUG_ON(!rx_page_info->page);
1995
e50287be 1996 if (rx_page_info->last_frag) {
2b7bcebf
IV
1997 dma_unmap_page(&adapter->pdev->dev,
1998 dma_unmap_addr(rx_page_info, bus),
1999 adapter->big_page_size, DMA_FROM_DEVICE);
e50287be
SP
2000 rx_page_info->last_frag = false;
2001 } else {
2002 dma_sync_single_for_cpu(&adapter->pdev->dev,
2003 dma_unmap_addr(rx_page_info, bus),
2004 rx_frag_size, DMA_FROM_DEVICE);
205859a2 2005 }
6b7c5b94 2006
0b0ef1d0 2007 queue_tail_inc(rxq);
6b7c5b94
SP
2008 atomic_dec(&rxq->used);
2009 return rx_page_info;
2010}
2011
 2012/* Throw away the data in the Rx completion */
10ef9ab4
SP
2013static void be_rx_compl_discard(struct be_rx_obj *rxo,
2014 struct be_rx_compl_info *rxcp)
6b7c5b94 2015{
6b7c5b94 2016 struct be_rx_page_info *page_info;
2e588f84 2017 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 2018
e80d9da6 2019 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 2020 page_info = get_rx_page_info(rxo);
e80d9da6
PR
2021 put_page(page_info->page);
2022 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
2023 }
2024}
2025
2026/*
2027 * skb_fill_rx_data forms a complete skb for an ether frame
2028 * indicated by rxcp.
2029 */
10ef9ab4
SP
2030static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
2031 struct be_rx_compl_info *rxcp)
6b7c5b94 2032{
6b7c5b94 2033 struct be_rx_page_info *page_info;
2e588f84
SP
2034 u16 i, j;
2035 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 2036 u8 *start;
6b7c5b94 2037
0b0ef1d0 2038 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2039 start = page_address(page_info->page) + page_info->page_offset;
2040 prefetch(start);
2041
2042 /* Copy data in the first descriptor of this completion */
2e588f84 2043 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 2044
6b7c5b94
SP
2045 skb->len = curr_frag_len;
2046 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 2047 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
2048 /* Complete packet has now been moved to data */
2049 put_page(page_info->page);
2050 skb->data_len = 0;
2051 skb->tail += curr_frag_len;
2052 } else {
ac1ae5f3
ED
2053 hdr_len = ETH_HLEN;
2054 memcpy(skb->data, start, hdr_len);
6b7c5b94 2055 skb_shinfo(skb)->nr_frags = 1;
b061b39e 2056 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
2057 skb_shinfo(skb)->frags[0].page_offset =
2058 page_info->page_offset + hdr_len;
748b539a
SP
2059 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
2060 curr_frag_len - hdr_len);
6b7c5b94 2061 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 2062 skb->truesize += rx_frag_size;
6b7c5b94
SP
2063 skb->tail += hdr_len;
2064 }
205859a2 2065 page_info->page = NULL;
6b7c5b94 2066
2e588f84
SP
2067 if (rxcp->pkt_size <= rx_frag_size) {
2068 BUG_ON(rxcp->num_rcvd != 1);
2069 return;
6b7c5b94
SP
2070 }
2071
2072 /* More frags present for this completion */
2e588f84
SP
2073 remaining = rxcp->pkt_size - curr_frag_len;
2074 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 2075 page_info = get_rx_page_info(rxo);
2e588f84 2076 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 2077
bd46cb6c
AK
2078 /* Coalesce all frags from the same physical page in one slot */
2079 if (page_info->page_offset == 0) {
2080 /* Fresh page */
2081 j++;
b061b39e 2082 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
2083 skb_shinfo(skb)->frags[j].page_offset =
2084 page_info->page_offset;
9e903e08 2085 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
2086 skb_shinfo(skb)->nr_frags++;
2087 } else {
2088 put_page(page_info->page);
2089 }
2090
9e903e08 2091 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
2092 skb->len += curr_frag_len;
2093 skb->data_len += curr_frag_len;
bdb28a97 2094 skb->truesize += rx_frag_size;
2e588f84 2095 remaining -= curr_frag_len;
205859a2 2096 page_info->page = NULL;
6b7c5b94 2097 }
bd46cb6c 2098 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
2099}
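/* Worked example (illustrative, default rx_frag_size of 2048): a 3000-byte
 * completion arrives as num_rcvd=2 frags. ETH_HLEN bytes are copied into
 * skb->data, the rest of the first 2048-byte frag becomes frag[0], and the
 * remaining 952 bytes land in the next slot; frags from the same physical
 * page are coalesced into a single skb frag slot.
 */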
2100
5be93b9a 2101/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 2102static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 2103 struct be_rx_compl_info *rxcp)
6b7c5b94 2104{
10ef9ab4 2105 struct be_adapter *adapter = rxo->adapter;
6332c8d3 2106 struct net_device *netdev = adapter->netdev;
6b7c5b94 2107 struct sk_buff *skb;
89420424 2108
bb349bb4 2109 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 2110 if (unlikely(!skb)) {
ac124ff9 2111 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 2112 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
2113 return;
2114 }
2115
10ef9ab4 2116 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 2117
6332c8d3 2118 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 2119 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
2120 else
2121 skb_checksum_none_assert(skb);
6b7c5b94 2122
6332c8d3 2123 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 2124 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 2125 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 2126 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 2127
b6c0e89d 2128 skb->csum_level = rxcp->tunneled;
6384a4d0 2129 skb_mark_napi_id(skb, napi);
6b7c5b94 2130
343e43c0 2131 if (rxcp->vlanf)
86a9bad3 2132 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
2133
2134 netif_receive_skb(skb);
6b7c5b94
SP
2135}
2136
5be93b9a 2137/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
2138static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
2139 struct napi_struct *napi,
2140 struct be_rx_compl_info *rxcp)
6b7c5b94 2141{
10ef9ab4 2142 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2143 struct be_rx_page_info *page_info;
5be93b9a 2144 struct sk_buff *skb = NULL;
2e588f84
SP
2145 u16 remaining, curr_frag_len;
2146 u16 i, j;
3968fa1e 2147
10ef9ab4 2148 skb = napi_get_frags(napi);
5be93b9a 2149 if (!skb) {
10ef9ab4 2150 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
2151 return;
2152 }
2153
2e588f84
SP
2154 remaining = rxcp->pkt_size;
2155 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 2156 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2157
2158 curr_frag_len = min(remaining, rx_frag_size);
2159
bd46cb6c
AK
2160 /* Coalesce all frags from the same physical page in one slot */
2161 if (i == 0 || page_info->page_offset == 0) {
2162 /* First frag or Fresh page */
2163 j++;
b061b39e 2164 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
2165 skb_shinfo(skb)->frags[j].page_offset =
2166 page_info->page_offset;
9e903e08 2167 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
2168 } else {
2169 put_page(page_info->page);
2170 }
9e903e08 2171 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 2172 skb->truesize += rx_frag_size;
bd46cb6c 2173 remaining -= curr_frag_len;
6b7c5b94
SP
2174 memset(page_info, 0, sizeof(*page_info));
2175 }
bd46cb6c 2176 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 2177
5be93b9a 2178 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
2179 skb->len = rxcp->pkt_size;
2180 skb->data_len = rxcp->pkt_size;
5be93b9a 2181 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 2182 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 2183 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 2184 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 2185
b6c0e89d 2186 skb->csum_level = rxcp->tunneled;
5be93b9a 2187
343e43c0 2188 if (rxcp->vlanf)
86a9bad3 2189 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 2190
10ef9ab4 2191 napi_gro_frags(napi);
2e588f84
SP
2192}
2193
10ef9ab4
SP
2194static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2195 struct be_rx_compl_info *rxcp)
2e588f84 2196{
c3c18bc1
SP
2197 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2198 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2199 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2200 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2201 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2202 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2203 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2204 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2205 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2206 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2207 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 2208 if (rxcp->vlanf) {
c3c18bc1
SP
2209 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2210 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 2211 }
c3c18bc1 2212 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 2213 rxcp->tunneled =
c3c18bc1 2214 GET_RX_COMPL_V1_BITS(tunneled, compl);
2e588f84
SP
2215}
2216
10ef9ab4
SP
2217static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2218 struct be_rx_compl_info *rxcp)
2e588f84 2219{
c3c18bc1
SP
2220 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2221 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2222 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2223 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2224 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2225 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2226 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2227 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2228 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2229 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2230 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 2231 if (rxcp->vlanf) {
c3c18bc1
SP
2232 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2233 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 2234 }
c3c18bc1
SP
2235 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2236 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2e588f84
SP
2237}
2238
2239static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
2240{
2241 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2242 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2243 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2244
2e588f84
SP
 2245 /* For checking the valid bit it is OK to use either definition as the
2246 * valid bit is at the same position in both v0 and v1 Rx compl */
2247 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
2248 return NULL;
6b7c5b94 2249
2e588f84
SP
2250 rmb();
2251 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2252
2e588f84 2253 if (adapter->be3_native)
10ef9ab4 2254 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 2255 else
10ef9ab4 2256 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 2257
e38b1706
SK
2258 if (rxcp->ip_frag)
2259 rxcp->l4_csum = 0;
2260
15d72184 2261 if (rxcp->vlanf) {
f93f160b
VV
2262 /* In QNQ modes, if qnq bit is not set, then the packet was
2263 * tagged only with the transparent outer vlan-tag and must
2264 * not be treated as a vlan packet by host
2265 */
2266 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 2267 rxcp->vlanf = 0;
6b7c5b94 2268
15d72184 2269 if (!lancer_chip(adapter))
3c709f8f 2270 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 2271
939cf306 2272 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 2273 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
2274 rxcp->vlanf = 0;
2275 }
2e588f84
SP
2276
 2277 /* As the compl has been parsed, reset it; we won't touch it again */
2278 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 2279
3abcdeda 2280 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
2281 return rxcp;
2282}
2283
1829b086 2284static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 2285{
6b7c5b94 2286 u32 order = get_order(size);
1829b086 2287
6b7c5b94 2288 if (order > 0)
1829b086
ED
2289 gfp |= __GFP_COMP;
2290 return alloc_pages(gfp, order);
6b7c5b94
SP
2291}
2292
2293/*
 2294 * Allocate a page, split it into fragments of size rx_frag_size and post as
2295 * receive buffers to BE
2296 */
c30d7266 2297static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
6b7c5b94 2298{
3abcdeda 2299 struct be_adapter *adapter = rxo->adapter;
26d92f92 2300 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 2301 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 2302 struct page *pagep = NULL;
ba42fad0 2303 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
2304 struct be_eth_rx_d *rxd;
2305 u64 page_dmaaddr = 0, frag_dmaaddr;
c30d7266 2306 u32 posted, page_offset = 0, notify = 0;
6b7c5b94 2307
3abcdeda 2308 page_info = &rxo->page_info_tbl[rxq->head];
c30d7266 2309 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
6b7c5b94 2310 if (!pagep) {
1829b086 2311 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 2312 if (unlikely(!pagep)) {
ac124ff9 2313 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
2314 break;
2315 }
ba42fad0
IV
2316 page_dmaaddr = dma_map_page(dev, pagep, 0,
2317 adapter->big_page_size,
2b7bcebf 2318 DMA_FROM_DEVICE);
ba42fad0
IV
2319 if (dma_mapping_error(dev, page_dmaaddr)) {
2320 put_page(pagep);
2321 pagep = NULL;
d3de1540 2322 adapter->drv_stats.dma_map_errors++;
ba42fad0
IV
2323 break;
2324 }
e50287be 2325 page_offset = 0;
6b7c5b94
SP
2326 } else {
2327 get_page(pagep);
e50287be 2328 page_offset += rx_frag_size;
6b7c5b94 2329 }
e50287be 2330 page_info->page_offset = page_offset;
6b7c5b94 2331 page_info->page = pagep;
6b7c5b94
SP
2332
2333 rxd = queue_head_node(rxq);
e50287be 2334 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
2335 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2336 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
2337
2338 /* Any space left in the current big page for another frag? */
2339 if ((page_offset + rx_frag_size + rx_frag_size) >
2340 adapter->big_page_size) {
2341 pagep = NULL;
e50287be
SP
2342 page_info->last_frag = true;
2343 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2344 } else {
2345 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 2346 }
26d92f92
SP
2347
2348 prev_page_info = page_info;
2349 queue_head_inc(rxq);
10ef9ab4 2350 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 2351 }
e50287be
SP
2352
2353 /* Mark the last frag of a page when we break out of the above loop
2354 * with no more slots available in the RXQ
2355 */
2356 if (pagep) {
2357 prev_page_info->last_frag = true;
2358 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2359 }
6b7c5b94
SP
2360
2361 if (posted) {
6b7c5b94 2362 atomic_add(posted, &rxq->used);
6384a4d0
SP
2363 if (rxo->rx_post_starved)
2364 rxo->rx_post_starved = false;
c30d7266 2365 do {
69304cc9 2366 notify = min(MAX_NUM_POST_ERX_DB, posted);
c30d7266
AK
2367 be_rxq_notify(adapter, rxq->id, notify);
2368 posted -= notify;
2369 } while (posted);
ea1dae11
SP
2370 } else if (atomic_read(&rxq->used) == 0) {
2371 /* Let be_worker replenish when memory is available */
3abcdeda 2372 rxo->rx_post_starved = true;
6b7c5b94 2373 }
6b7c5b94
SP
2374}
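/* Illustrative sizing (assuming 4K pages and the default rx_frag_size of
 * 2048): big_page_size = (1 << get_order(2048)) * PAGE_SIZE = 4096, so
 * each page is carved into two frags. Only the frag marked last_frag
 * carries the page-wide DMA mapping, so the page is unmapped exactly once,
 * when its final frag is consumed.
 */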
2375
152ffe5b 2376static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
6b7c5b94 2377{
152ffe5b
SB
2378 struct be_queue_info *tx_cq = &txo->cq;
2379 struct be_tx_compl_info *txcp = &txo->txcp;
2380 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
6b7c5b94 2381
152ffe5b 2382 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
6b7c5b94
SP
2383 return NULL;
2384
152ffe5b 2385 /* Ensure load ordering of valid bit dword and other dwords below */
f3eb62d2 2386 rmb();
152ffe5b 2387 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2388
152ffe5b
SB
2389 txcp->status = GET_TX_COMPL_BITS(status, compl);
2390 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
6b7c5b94 2391
152ffe5b 2392 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
6b7c5b94
SP
2393 queue_tail_inc(tx_cq);
2394 return txcp;
2395}
2396
3c8def97 2397static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 2398 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 2399{
5f07b3c5 2400 struct sk_buff **sent_skbs = txo->sent_skb_list;
3c8def97 2401 struct be_queue_info *txq = &txo->q;
5f07b3c5
SP
2402 u16 frag_index, num_wrbs = 0;
2403 struct sk_buff *skb = NULL;
2404 bool unmap_skb_hdr = false;
a73b796e 2405 struct be_eth_wrb *wrb;
6b7c5b94 2406
ec43b1a6 2407 do {
5f07b3c5
SP
2408 if (sent_skbs[txq->tail]) {
2409 /* Free skb from prev req */
2410 if (skb)
2411 dev_consume_skb_any(skb);
2412 skb = sent_skbs[txq->tail];
2413 sent_skbs[txq->tail] = NULL;
2414 queue_tail_inc(txq); /* skip hdr wrb */
2415 num_wrbs++;
2416 unmap_skb_hdr = true;
2417 }
a73b796e 2418 wrb = queue_tail_node(txq);
5f07b3c5 2419 frag_index = txq->tail;
2b7bcebf 2420 unmap_tx_frag(&adapter->pdev->dev, wrb,
5f07b3c5 2421 (unmap_skb_hdr && skb_headlen(skb)));
ec43b1a6 2422 unmap_skb_hdr = false;
6b7c5b94 2423 queue_tail_inc(txq);
5f07b3c5
SP
2424 num_wrbs++;
2425 } while (frag_index != last_index);
2426 dev_consume_skb_any(skb);
6b7c5b94 2427
4d586b82 2428 return num_wrbs;
6b7c5b94
SP
2429}
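/* Worked example (illustrative): an skb posted as 1 hdr wrb + 2 frag wrbs
 * completes with last_index at the second frag wrb. The walk above consumes
 * the hdr wrb (found via sent_skbs[txq->tail]), then both frag wrbs, and
 * returns num_wrbs = 3, which the caller subtracts from txq->used.
 */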
2430
10ef9ab4
SP
2431/* Return the number of events in the event queue */
2432static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 2433{
10ef9ab4
SP
2434 struct be_eq_entry *eqe;
2435 int num = 0;
859b1e4e 2436
10ef9ab4
SP
2437 do {
2438 eqe = queue_tail_node(&eqo->q);
2439 if (eqe->evt == 0)
2440 break;
859b1e4e 2441
10ef9ab4
SP
2442 rmb();
2443 eqe->evt = 0;
2444 num++;
2445 queue_tail_inc(&eqo->q);
2446 } while (true);
2447
2448 return num;
859b1e4e
SP
2449}
2450
10ef9ab4
SP
 2451 /* Leaves the EQ in disarmed state */
2452static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2453{
10ef9ab4 2454 int num = events_get(eqo);
859b1e4e 2455
20947770 2456 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
859b1e4e
SP
2457}
2458
99b44304
KA
2459/* Free posted rx buffers that were not used */
2460static void be_rxq_clean(struct be_rx_obj *rxo)
6b7c5b94 2461{
3abcdeda 2462 struct be_queue_info *rxq = &rxo->q;
99b44304
KA
2463 struct be_rx_page_info *page_info;
2464
2465 while (atomic_read(&rxq->used) > 0) {
2466 page_info = get_rx_page_info(rxo);
2467 put_page(page_info->page);
2468 memset(page_info, 0, sizeof(*page_info));
2469 }
2470 BUG_ON(atomic_read(&rxq->used));
2471 rxq->tail = 0;
2472 rxq->head = 0;
2473}
2474
2475static void be_rx_cq_clean(struct be_rx_obj *rxo)
2476{
3abcdeda 2477 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2478 struct be_rx_compl_info *rxcp;
d23e946c
SP
2479 struct be_adapter *adapter = rxo->adapter;
2480 int flush_wait = 0;
6b7c5b94 2481
d23e946c
SP
2482 /* Consume pending rx completions.
2483 * Wait for the flush completion (identified by zero num_rcvd)
2484 * to arrive. Notify CQ even when there are no more CQ entries
2485 * for HW to flush partially coalesced CQ entries.
2486 * In Lancer, there is no need to wait for flush compl.
2487 */
2488 for (;;) {
2489 rxcp = be_rx_compl_get(rxo);
ddf1169f 2490 if (!rxcp) {
d23e946c
SP
2491 if (lancer_chip(adapter))
2492 break;
2493
954f6825
VD
2494 if (flush_wait++ > 50 ||
2495 be_check_error(adapter,
2496 BE_ERROR_HW)) {
d23e946c
SP
2497 dev_warn(&adapter->pdev->dev,
2498 "did not receive flush compl\n");
2499 break;
2500 }
2501 be_cq_notify(adapter, rx_cq->id, true, 0);
2502 mdelay(1);
2503 } else {
2504 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2505 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2506 if (rxcp->num_rcvd == 0)
2507 break;
2508 }
6b7c5b94
SP
2509 }
2510
d23e946c
SP
2511 /* After cleanup, leave the CQ in unarmed state */
2512 be_cq_notify(adapter, rx_cq->id, false, 0);
6b7c5b94
SP
2513}
2514
0ae57bb3 2515static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2516{
5f07b3c5
SP
2517 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2518 struct device *dev = &adapter->pdev->dev;
152ffe5b 2519 struct be_tx_compl_info *txcp;
0ae57bb3 2520 struct be_queue_info *txq;
152ffe5b 2521 struct be_tx_obj *txo;
0ae57bb3 2522 int i, pending_txqs;
a8e9179a 2523
1a3d0717 2524 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2525 do {
0ae57bb3
SP
2526 pending_txqs = adapter->num_tx_qs;
2527
2528 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2529 cmpl = 0;
2530 num_wrbs = 0;
0ae57bb3 2531 txq = &txo->q;
152ffe5b
SB
2532 while ((txcp = be_tx_compl_get(txo))) {
2533 num_wrbs +=
2534 be_tx_compl_process(adapter, txo,
2535 txcp->end_index);
0ae57bb3
SP
2536 cmpl++;
2537 }
2538 if (cmpl) {
2539 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2540 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2541 timeo = 0;
0ae57bb3 2542 }
cf5671e6 2543 if (!be_is_tx_compl_pending(txo))
0ae57bb3 2544 pending_txqs--;
a8e9179a
SP
2545 }
2546
954f6825
VD
2547 if (pending_txqs == 0 || ++timeo > 10 ||
2548 be_check_error(adapter, BE_ERROR_HW))
a8e9179a
SP
2549 break;
2550
2551 mdelay(1);
2552 } while (true);
2553
5f07b3c5 2554 /* Free enqueued TX that was never notified to HW */
0ae57bb3
SP
2555 for_all_tx_queues(adapter, txo, i) {
2556 txq = &txo->q;
0ae57bb3 2557
5f07b3c5
SP
2558 if (atomic_read(&txq->used)) {
2559 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2560 i, atomic_read(&txq->used));
2561 notified_idx = txq->tail;
0ae57bb3 2562 end_idx = txq->tail;
5f07b3c5
SP
2563 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2564 txq->len);
2565 /* Use the tx-compl process logic to handle requests
2566 * that were not sent to the HW.
2567 */
0ae57bb3
SP
2568 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2569 atomic_sub(num_wrbs, &txq->used);
5f07b3c5
SP
2570 BUG_ON(atomic_read(&txq->used));
2571 txo->pend_wrb_cnt = 0;
2572 /* Since hw was never notified of these requests,
2573 * reset TXQ indices
2574 */
2575 txq->head = notified_idx;
2576 txq->tail = notified_idx;
0ae57bb3 2577 }
b03388d6 2578 }
6b7c5b94
SP
2579}
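/* Illustrative cleanup math: if polling gives up with txq->used == 5 and
 * txq->tail at notified_idx, end_idx is advanced used-1 = 4 slots (modulo
 * txq->len) so be_tx_compl_process() frees exactly those 5 un-notified
 * wrbs before head and tail are rewound to notified_idx.
 */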
2580
10ef9ab4
SP
2581static void be_evt_queues_destroy(struct be_adapter *adapter)
2582{
2583 struct be_eq_obj *eqo;
2584 int i;
2585
2586 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2587 if (eqo->q.created) {
2588 be_eq_clean(eqo);
10ef9ab4 2589 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2590 napi_hash_del(&eqo->napi);
68d7bdcb 2591 netif_napi_del(&eqo->napi);
649886a3 2592 free_cpumask_var(eqo->affinity_mask);
19d59aa7 2593 }
10ef9ab4
SP
2594 be_queue_free(adapter, &eqo->q);
2595 }
2596}
2597
2598static int be_evt_queues_create(struct be_adapter *adapter)
2599{
2600 struct be_queue_info *eq;
2601 struct be_eq_obj *eqo;
2632bafd 2602 struct be_aic_obj *aic;
10ef9ab4
SP
2603 int i, rc;
2604
92bf14ab
SP
2605 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2606 adapter->cfg_num_qs);
10ef9ab4
SP
2607
2608 for_all_evt_queues(adapter, eqo, i) {
f36963c9 2609 int numa_node = dev_to_node(&adapter->pdev->dev);
649886a3 2610
2632bafd 2611 aic = &adapter->aic_obj[i];
10ef9ab4 2612 eqo->adapter = adapter;
10ef9ab4 2613 eqo->idx = i;
2632bafd
SP
2614 aic->max_eqd = BE_MAX_EQD;
2615 aic->enable = true;
10ef9ab4
SP
2616
2617 eq = &eqo->q;
2618 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2619 sizeof(struct be_eq_entry));
10ef9ab4
SP
2620 if (rc)
2621 return rc;
2622
f2f781a7 2623 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2624 if (rc)
2625 return rc;
649886a3
KA
2626
2627 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2628 return -ENOMEM;
2629 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2630 eqo->affinity_mask);
2631 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2632 BE_NAPI_WEIGHT);
10ef9ab4 2633 }
1cfafab9 2634 return 0;
10ef9ab4
SP
2635}
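/* Illustrative affinity layout (assuming 4 EQs and 4 CPUs on the device's
 * NUMA node): cpumask_local_spread(i, numa_node) hands out node-local CPUs
 * first, so all four EQ affinity hints land on CPUs local to the adapter
 * before any remote CPU would be chosen.
 */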
2636
5fb379ee
SP
2637static void be_mcc_queues_destroy(struct be_adapter *adapter)
2638{
2639 struct be_queue_info *q;
5fb379ee 2640
8788fdc2 2641 q = &adapter->mcc_obj.q;
5fb379ee 2642 if (q->created)
8788fdc2 2643 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2644 be_queue_free(adapter, q);
2645
8788fdc2 2646 q = &adapter->mcc_obj.cq;
5fb379ee 2647 if (q->created)
8788fdc2 2648 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2649 be_queue_free(adapter, q);
2650}
2651
2652/* Must be called only after TX qs are created as MCC shares TX EQ */
2653static int be_mcc_queues_create(struct be_adapter *adapter)
2654{
2655 struct be_queue_info *q, *cq;
5fb379ee 2656
8788fdc2 2657 cq = &adapter->mcc_obj.cq;
5fb379ee 2658 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2659 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2660 goto err;
2661
10ef9ab4
SP
2662 /* Use the default EQ for MCC completions */
2663 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2664 goto mcc_cq_free;
2665
8788fdc2 2666 q = &adapter->mcc_obj.q;
5fb379ee
SP
2667 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2668 goto mcc_cq_destroy;
2669
8788fdc2 2670 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2671 goto mcc_q_free;
2672
2673 return 0;
2674
2675mcc_q_free:
2676 be_queue_free(adapter, q);
2677mcc_cq_destroy:
8788fdc2 2678 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2679mcc_cq_free:
2680 be_queue_free(adapter, cq);
2681err:
2682 return -1;
2683}
2684
6b7c5b94
SP
2685static void be_tx_queues_destroy(struct be_adapter *adapter)
2686{
2687 struct be_queue_info *q;
3c8def97
SP
2688 struct be_tx_obj *txo;
2689 u8 i;
6b7c5b94 2690
3c8def97
SP
2691 for_all_tx_queues(adapter, txo, i) {
2692 q = &txo->q;
2693 if (q->created)
2694 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2695 be_queue_free(adapter, q);
6b7c5b94 2696
3c8def97
SP
2697 q = &txo->cq;
2698 if (q->created)
2699 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2700 be_queue_free(adapter, q);
2701 }
6b7c5b94
SP
2702}
2703
7707133c 2704static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2705{
73f394e6 2706 struct be_queue_info *cq;
3c8def97 2707 struct be_tx_obj *txo;
73f394e6 2708 struct be_eq_obj *eqo;
92bf14ab 2709 int status, i;
6b7c5b94 2710
92bf14ab 2711 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2712
10ef9ab4
SP
2713 for_all_tx_queues(adapter, txo, i) {
2714 cq = &txo->cq;
2715 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2716 sizeof(struct be_eth_tx_compl));
2717 if (status)
2718 return status;
3c8def97 2719
827da44c
JS
2720 u64_stats_init(&txo->stats.sync);
2721 u64_stats_init(&txo->stats.sync_compl);
2722
10ef9ab4
SP
2723 /* If num_evt_qs is less than num_tx_qs, then more than
2724 * one txq share an eq
2725 */
73f394e6
SP
2726 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2727 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
10ef9ab4
SP
2728 if (status)
2729 return status;
6b7c5b94 2730
10ef9ab4
SP
2731 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2732 sizeof(struct be_eth_wrb));
2733 if (status)
2734 return status;
6b7c5b94 2735
94d73aaa 2736 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2737 if (status)
2738 return status;
73f394e6
SP
2739
2740 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
2741 eqo->idx);
3c8def97 2742 }
6b7c5b94 2743
d379142b
SP
2744 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2745 adapter->num_tx_qs);
10ef9ab4 2746 return 0;
6b7c5b94
SP
2747}
2748
10ef9ab4 2749static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2750{
2751 struct be_queue_info *q;
3abcdeda
SP
2752 struct be_rx_obj *rxo;
2753 int i;
2754
2755 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2756 q = &rxo->cq;
2757 if (q->created)
2758 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2759 be_queue_free(adapter, q);
ac6a0c4a
SP
2760 }
2761}
2762
10ef9ab4 2763static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2764{
10ef9ab4 2765 struct be_queue_info *eq, *cq;
3abcdeda
SP
2766 struct be_rx_obj *rxo;
2767 int rc, i;
6b7c5b94 2768
92bf14ab 2769 /* We can create as many RSS rings as there are EQs. */
71bb8bd0 2770 adapter->num_rss_qs = adapter->num_evt_qs;
92bf14ab 2771
71bb8bd0
VV
 2772 /* We'll use RSS only if at least 2 RSS rings are supported. */
2773 if (adapter->num_rss_qs <= 1)
2774 adapter->num_rss_qs = 0;
2775
2776 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
2777
2778 /* When the interface is not capable of RSS rings (and there is no
2779 * need to create a default RXQ) we'll still need one RXQ
10ef9ab4 2780 */
71bb8bd0
VV
2781 if (adapter->num_rx_qs == 0)
2782 adapter->num_rx_qs = 1;
92bf14ab 2783
6b7c5b94 2784 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2785 for_all_rx_queues(adapter, rxo, i) {
2786 rxo->adapter = adapter;
3abcdeda
SP
2787 cq = &rxo->cq;
2788 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2789 sizeof(struct be_eth_rx_compl));
3abcdeda 2790 if (rc)
10ef9ab4 2791 return rc;
3abcdeda 2792
827da44c 2793 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2794 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2795 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2796 if (rc)
10ef9ab4 2797 return rc;
3abcdeda 2798 }
6b7c5b94 2799
d379142b 2800 dev_info(&adapter->pdev->dev,
71bb8bd0 2801 "created %d RX queue(s)\n", adapter->num_rx_qs);
10ef9ab4 2802 return 0;
b628bde2
SP
2803}
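/* Illustrative queue count (assuming 4 EQs and need_def_rxq set): four RSS
 * rings plus one default RXQ gives num_rx_qs = 5. With a single EQ, RSS is
 * disabled (num_rss_qs = 0) and the fallback above leaves exactly one RXQ.
 */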
2804
6b7c5b94
SP
2805static irqreturn_t be_intx(int irq, void *dev)
2806{
e49cc34f
SP
2807 struct be_eq_obj *eqo = dev;
2808 struct be_adapter *adapter = eqo->adapter;
2809 int num_evts = 0;
6b7c5b94 2810
d0b9cec3
SP
2811 /* IRQ is not expected when NAPI is scheduled as the EQ
2812 * will not be armed.
2813 * But, this can happen on Lancer INTx where it takes
 2814 * a while to de-assert INTx or in BE2 where occasionally
2815 * an interrupt may be raised even when EQ is unarmed.
2816 * If NAPI is already scheduled, then counting & notifying
2817 * events will orphan them.
e49cc34f 2818 */
d0b9cec3 2819 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2820 num_evts = events_get(eqo);
d0b9cec3
SP
2821 __napi_schedule(&eqo->napi);
2822 if (num_evts)
2823 eqo->spurious_intr = 0;
2824 }
20947770 2825 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
e49cc34f 2826
d0b9cec3
SP
 2827 /* Return IRQ_HANDLED only for the first spurious intr
2828 * after a valid intr to stop the kernel from branding
2829 * this irq as a bad one!
e49cc34f 2830 */
d0b9cec3
SP
2831 if (num_evts || eqo->spurious_intr++ == 0)
2832 return IRQ_HANDLED;
2833 else
2834 return IRQ_NONE;
6b7c5b94
SP
2835}
2836
10ef9ab4 2837static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2838{
10ef9ab4 2839 struct be_eq_obj *eqo = dev;
6b7c5b94 2840
20947770 2841 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
0b545a62 2842 napi_schedule(&eqo->napi);
6b7c5b94
SP
2843 return IRQ_HANDLED;
2844}
2845
2e588f84 2846static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2847{
e38b1706 2848 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2849}
2850
10ef9ab4 2851static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2852 int budget, int polling)
6b7c5b94 2853{
3abcdeda
SP
2854 struct be_adapter *adapter = rxo->adapter;
2855 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2856 struct be_rx_compl_info *rxcp;
6b7c5b94 2857 u32 work_done;
c30d7266 2858 u32 frags_consumed = 0;
6b7c5b94
SP
2859
2860 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2861 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2862 if (!rxcp)
2863 break;
2864
12004ae9
SP
2865 /* Is it a flush compl that has no data */
2866 if (unlikely(rxcp->num_rcvd == 0))
2867 goto loop_continue;
2868
2869 /* Discard compl with partial DMA Lancer B0 */
2870 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2871 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2872 goto loop_continue;
2873 }
2874
2875 /* On BE drop pkts that arrive due to imperfect filtering in
 2876 * promiscuous mode on some SKUs
2877 */
2878 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2879 !lancer_chip(adapter))) {
10ef9ab4 2880 be_rx_compl_discard(rxo, rxcp);
12004ae9 2881 goto loop_continue;
64642811 2882 }
009dd872 2883
6384a4d0
SP
 2884 /* Don't do GRO when we're busy-polling */
2885 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2886 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2887 else
6384a4d0
SP
2888 be_rx_compl_process(rxo, napi, rxcp);
2889
12004ae9 2890loop_continue:
c30d7266 2891 frags_consumed += rxcp->num_rcvd;
2e588f84 2892 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2893 }
2894
10ef9ab4
SP
2895 if (work_done) {
2896 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2897
6384a4d0
SP
2898 /* When an rx-obj gets into post_starved state, just
2899 * let be_worker do the posting.
2900 */
2901 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2902 !rxo->rx_post_starved)
c30d7266
AK
2903 be_post_rx_frags(rxo, GFP_ATOMIC,
2904 max_t(u32, MAX_RX_POST,
2905 frags_consumed));
6b7c5b94 2906 }
10ef9ab4 2907
6b7c5b94
SP
2908 return work_done;
2909}
2910
152ffe5b 2911static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
512bb8a2
KA
2912{
2913 switch (status) {
2914 case BE_TX_COMP_HDR_PARSE_ERR:
2915 tx_stats(txo)->tx_hdr_parse_err++;
2916 break;
2917 case BE_TX_COMP_NDMA_ERR:
2918 tx_stats(txo)->tx_dma_err++;
2919 break;
2920 case BE_TX_COMP_ACL_ERR:
2921 tx_stats(txo)->tx_spoof_check_err++;
2922 break;
2923 }
2924}
2925
152ffe5b 2926static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
512bb8a2
KA
2927{
2928 switch (status) {
2929 case LANCER_TX_COMP_LSO_ERR:
2930 tx_stats(txo)->tx_tso_err++;
2931 break;
2932 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2933 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2934 tx_stats(txo)->tx_spoof_check_err++;
2935 break;
2936 case LANCER_TX_COMP_QINQ_ERR:
2937 tx_stats(txo)->tx_qinq_err++;
2938 break;
2939 case LANCER_TX_COMP_PARITY_ERR:
2940 tx_stats(txo)->tx_internal_parity_err++;
2941 break;
2942 case LANCER_TX_COMP_DMA_ERR:
2943 tx_stats(txo)->tx_dma_err++;
2944 break;
2945 }
2946}
2947
c8f64615
SP
2948static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2949 int idx)
6b7c5b94 2950{
c8f64615 2951 int num_wrbs = 0, work_done = 0;
152ffe5b 2952 struct be_tx_compl_info *txcp;
c8f64615 2953
152ffe5b
SB
2954 while ((txcp = be_tx_compl_get(txo))) {
2955 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
c8f64615 2956 work_done++;
3c8def97 2957
152ffe5b 2958 if (txcp->status) {
512bb8a2 2959 if (lancer_chip(adapter))
152ffe5b 2960 lancer_update_tx_err(txo, txcp->status);
512bb8a2 2961 else
152ffe5b 2962 be_update_tx_err(txo, txcp->status);
512bb8a2 2963 }
10ef9ab4 2964 }
6b7c5b94 2965
10ef9ab4
SP
2966 if (work_done) {
2967 be_cq_notify(adapter, txo->cq.id, true, work_done);
2968 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2969
10ef9ab4
SP
2970 /* As Tx wrbs have been freed up, wake up netdev queue
2971 * if it was stopped due to lack of tx wrbs. */
2972 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
cf5671e6 2973 be_can_txq_wake(txo)) {
10ef9ab4 2974 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2975 }
10ef9ab4
SP
2976
2977 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2978 tx_stats(txo)->tx_compl += work_done;
2979 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2980 }
10ef9ab4 2981}
6b7c5b94 2982
f7062ee5
SP
2983#ifdef CONFIG_NET_RX_BUSY_POLL
2984static inline bool be_lock_napi(struct be_eq_obj *eqo)
2985{
2986 bool status = true;
2987
2988 spin_lock(&eqo->lock); /* BH is already disabled */
2989 if (eqo->state & BE_EQ_LOCKED) {
2990 WARN_ON(eqo->state & BE_EQ_NAPI);
2991 eqo->state |= BE_EQ_NAPI_YIELD;
2992 status = false;
2993 } else {
2994 eqo->state = BE_EQ_NAPI;
2995 }
2996 spin_unlock(&eqo->lock);
2997 return status;
2998}
2999
3000static inline void be_unlock_napi(struct be_eq_obj *eqo)
3001{
3002 spin_lock(&eqo->lock); /* BH is already disabled */
3003
3004 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
3005 eqo->state = BE_EQ_IDLE;
3006
3007 spin_unlock(&eqo->lock);
3008}
3009
3010static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3011{
3012 bool status = true;
3013
3014 spin_lock_bh(&eqo->lock);
3015 if (eqo->state & BE_EQ_LOCKED) {
3016 eqo->state |= BE_EQ_POLL_YIELD;
3017 status = false;
3018 } else {
3019 eqo->state |= BE_EQ_POLL;
3020 }
3021 spin_unlock_bh(&eqo->lock);
3022 return status;
3023}
3024
3025static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3026{
3027 spin_lock_bh(&eqo->lock);
3028
3029 WARN_ON(eqo->state & (BE_EQ_NAPI));
3030 eqo->state = BE_EQ_IDLE;
3031
3032 spin_unlock_bh(&eqo->lock);
3033}
3034
3035static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3036{
3037 spin_lock_init(&eqo->lock);
3038 eqo->state = BE_EQ_IDLE;
3039}
3040
3041static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3042{
3043 local_bh_disable();
3044
3045 /* It's enough to just acquire napi lock on the eqo to stop
 3046 * be_busy_poll() from processing any queues.
3047 */
3048 while (!be_lock_napi(eqo))
3049 mdelay(1);
3050
3051 local_bh_enable();
3052}
3053
3054#else /* CONFIG_NET_RX_BUSY_POLL */
3055
3056static inline bool be_lock_napi(struct be_eq_obj *eqo)
3057{
3058 return true;
3059}
3060
3061static inline void be_unlock_napi(struct be_eq_obj *eqo)
3062{
3063}
3064
3065static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3066{
3067 return false;
3068}
3069
3070static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3071{
3072}
3073
3074static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3075{
3076}
3077
3078static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3079{
3080}
3081#endif /* CONFIG_NET_RX_BUSY_POLL */
3082
68d7bdcb 3083int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
3084{
3085 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3086 struct be_adapter *adapter = eqo->adapter;
0b545a62 3087 int max_work = 0, work, i, num_evts;
6384a4d0 3088 struct be_rx_obj *rxo;
a4906ea0 3089 struct be_tx_obj *txo;
20947770 3090 u32 mult_enc = 0;
f31e50a8 3091
0b545a62
SP
3092 num_evts = events_get(eqo);
3093
a4906ea0
SP
3094 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
3095 be_process_tx(adapter, txo, i);
f31e50a8 3096
6384a4d0
SP
3097 if (be_lock_napi(eqo)) {
3098 /* This loop will iterate twice for EQ0 in which
3099 * completions of the last RXQ (default one) are also processed
3100 * For other EQs the loop iterates only once
3101 */
3102 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3103 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
3104 max_work = max(work, max_work);
3105 }
3106 be_unlock_napi(eqo);
3107 } else {
3108 max_work = budget;
10ef9ab4 3109 }
6b7c5b94 3110
10ef9ab4
SP
3111 if (is_mcc_eqo(eqo))
3112 be_process_mcc(adapter);
93c86700 3113
10ef9ab4
SP
3114 if (max_work < budget) {
3115 napi_complete(napi);
20947770
PR
3116
3117 /* Skyhawk EQ_DB has a provision to set the rearm to interrupt
3118 * delay via a delay multiplier encoding value
3119 */
3120 if (skyhawk_chip(adapter))
3121 mult_enc = be_get_eq_delay_mult_enc(eqo);
3122
3123 be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
3124 mult_enc);
10ef9ab4
SP
3125 } else {
3126 /* As we'll continue in polling mode, count and clear events */
20947770 3127 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
93c86700 3128 }
10ef9ab4 3129 return max_work;
6b7c5b94
SP
3130}
3131
6384a4d0
SP
3132#ifdef CONFIG_NET_RX_BUSY_POLL
3133static int be_busy_poll(struct napi_struct *napi)
3134{
3135 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3136 struct be_adapter *adapter = eqo->adapter;
3137 struct be_rx_obj *rxo;
3138 int i, work = 0;
3139
3140 if (!be_lock_busy_poll(eqo))
3141 return LL_FLUSH_BUSY;
3142
3143 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3144 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
3145 if (work)
3146 break;
3147 }
3148
3149 be_unlock_busy_poll(eqo);
3150 return work;
3151}
3152#endif
3153
f67ef7ba 3154void be_detect_error(struct be_adapter *adapter)
7c185276 3155{
e1cfb67a
PR
3156 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3157 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 3158 u32 i;
eb0eecc1 3159 struct device *dev = &adapter->pdev->dev;
7c185276 3160
954f6825 3161 if (be_check_error(adapter, BE_ERROR_HW))
72f02485
SP
3162 return;
3163
e1cfb67a
PR
3164 if (lancer_chip(adapter)) {
3165 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3166 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
954f6825 3167 be_set_error(adapter, BE_ERROR_UE);
e1cfb67a 3168 sliport_err1 = ioread32(adapter->db +
748b539a 3169 SLIPORT_ERROR1_OFFSET);
e1cfb67a 3170 sliport_err2 = ioread32(adapter->db +
748b539a 3171 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
3172 /* Do not log error messages if its a FW reset */
3173 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3174 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3175 dev_info(dev, "Firmware update in progress\n");
3176 } else {
eb0eecc1
SK
3177 dev_err(dev, "Error detected in the card\n");
3178 dev_err(dev, "ERR: sliport status 0x%x\n",
3179 sliport_status);
3180 dev_err(dev, "ERR: sliport error1 0x%x\n",
3181 sliport_err1);
3182 dev_err(dev, "ERR: sliport error2 0x%x\n",
3183 sliport_err2);
3184 }
e1cfb67a
PR
3185 }
3186 } else {
25848c90
SR
3187 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3188 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3189 ue_lo_mask = ioread32(adapter->pcicfg +
3190 PCICFG_UE_STATUS_LOW_MASK);
3191 ue_hi_mask = ioread32(adapter->pcicfg +
3192 PCICFG_UE_STATUS_HI_MASK);
e1cfb67a 3193
f67ef7ba
PR
3194 ue_lo = (ue_lo & ~ue_lo_mask);
3195 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 3196
eb0eecc1
SK
3197 /* On certain platforms BE hardware can indicate spurious UEs.
3198 * Allow HW to stop working completely in case of a real UE.
3199 * Hence not setting the hw_error for UE detection.
3200 */
f67ef7ba 3201
eb0eecc1 3202 if (ue_lo || ue_hi) {
eb0eecc1
SK
3203 dev_err(dev,
3204 "Unrecoverable Error detected in the adapter");
3205 dev_err(dev, "Please reboot server to recover");
3206 if (skyhawk_chip(adapter))
954f6825
VD
3207 be_set_error(adapter, BE_ERROR_UE);
3208
eb0eecc1
SK
3209 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3210 if (ue_lo & 1)
3211 dev_err(dev, "UE: %s bit set\n",
3212 ue_status_low_desc[i]);
3213 }
3214 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3215 if (ue_hi & 1)
3216 dev_err(dev, "UE: %s bit set\n",
3217 ue_status_hi_desc[i]);
3218 }
7c185276
AK
3219 }
3220 }
7c185276
AK
3221}
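/* Illustrative decode (hypothetical register values): if the masked ue_lo
 * reads 0x9, bits 0 and 3 are set and the loop above logs the strings at
 * indexes 0 and 3 of ue_status_low_desc[]; ue_hi is walked the same way
 * against ue_status_hi_desc[].
 */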
3222
8d56ff11
SP
3223static void be_msix_disable(struct be_adapter *adapter)
3224{
ac6a0c4a 3225 if (msix_enabled(adapter)) {
8d56ff11 3226 pci_disable_msix(adapter->pdev);
ac6a0c4a 3227 adapter->num_msix_vec = 0;
68d7bdcb 3228 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
3229 }
3230}
3231
c2bba3df 3232static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 3233{
7dc4c064 3234 int i, num_vec;
d379142b 3235 struct device *dev = &adapter->pdev->dev;
6b7c5b94 3236
92bf14ab
SP
3237 /* If RoCE is supported, program the max number of NIC vectors that
3238 * may be configured via set-channels, along with vectors needed for
3239 * RoCe. Else, just program the number we'll use initially.
3240 */
3241 if (be_roce_supported(adapter))
3242 num_vec = min_t(int, 2 * be_max_eqs(adapter),
3243 2 * num_online_cpus());
3244 else
3245 num_vec = adapter->cfg_num_qs;
3abcdeda 3246
ac6a0c4a 3247 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
3248 adapter->msix_entries[i].entry = i;
3249
7dc4c064
AG
3250 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3251 MIN_MSIX_VECTORS, num_vec);
3252 if (num_vec < 0)
3253 goto fail;
92bf14ab 3254
92bf14ab
SP
3255 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3256 adapter->num_msix_roce_vec = num_vec / 2;
3257 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3258 adapter->num_msix_roce_vec);
3259 }
3260
3261 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3262
3263 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3264 adapter->num_msix_vec);
c2bba3df 3265 return 0;
7dc4c064
AG
3266
3267fail:
3268 dev_warn(dev, "MSIx enable failed\n");
3269
3270 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
18c57c74 3271 if (be_virtfn(adapter))
7dc4c064
AG
3272 return num_vec;
3273 return 0;
6b7c5b94
SP
3274}
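/* Worked example (illustrative, RoCE-capable adapter with 8 online CPUs):
 * num_vec is requested as min(2 * be_max_eqs(adapter), 16); if the PCI
 * core grants all 16 vectors, half (num_vec / 2 = 8) are reserved for
 * RoCE and the remaining 8 become the NIC's MSI-x vectors.
 */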
3275
fe6d2a38 3276static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 3277 struct be_eq_obj *eqo)
b628bde2 3278{
f2f781a7 3279 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 3280}
6b7c5b94 3281
b628bde2
SP
3282static int be_msix_register(struct be_adapter *adapter)
3283{
10ef9ab4
SP
3284 struct net_device *netdev = adapter->netdev;
3285 struct be_eq_obj *eqo;
3286 int status, i, vec;
6b7c5b94 3287
10ef9ab4
SP
3288 for_all_evt_queues(adapter, eqo, i) {
3289 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3290 vec = be_msix_vec_get(adapter, eqo);
3291 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
3292 if (status)
3293 goto err_msix;
d658d98a
PR
3294
3295 irq_set_affinity_hint(vec, eqo->affinity_mask);
3abcdeda 3296 }
b628bde2 3297
6b7c5b94 3298 return 0;
3abcdeda 3299err_msix:
10ef9ab4
SP
3300 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
3301 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3302 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 3303 status);
ac6a0c4a 3304 be_msix_disable(adapter);
6b7c5b94
SP
3305 return status;
3306}
3307
3308static int be_irq_register(struct be_adapter *adapter)
3309{
3310 struct net_device *netdev = adapter->netdev;
3311 int status;
3312
ac6a0c4a 3313 if (msix_enabled(adapter)) {
6b7c5b94
SP
3314 status = be_msix_register(adapter);
3315 if (status == 0)
3316 goto done;
ba343c77 3317 /* INTx is not supported for VF */
18c57c74 3318 if (be_virtfn(adapter))
ba343c77 3319 return status;
6b7c5b94
SP
3320 }
3321
e49cc34f 3322 /* INTx: only the first EQ is used */
6b7c5b94
SP
3323 netdev->irq = adapter->pdev->irq;
3324 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 3325 &adapter->eq_obj[0]);
6b7c5b94
SP
3326 if (status) {
3327 dev_err(&adapter->pdev->dev,
3328 "INTx request IRQ failed - err %d\n", status);
3329 return status;
3330 }
3331done:
3332 adapter->isr_registered = true;
3333 return 0;
3334}
3335
3336static void be_irq_unregister(struct be_adapter *adapter)
3337{
3338 struct net_device *netdev = adapter->netdev;
10ef9ab4 3339 struct be_eq_obj *eqo;
d658d98a 3340 int i, vec;
6b7c5b94
SP
3341
3342 if (!adapter->isr_registered)
3343 return;
3344
3345 /* INTx */
ac6a0c4a 3346 if (!msix_enabled(adapter)) {
e49cc34f 3347 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
3348 goto done;
3349 }
3350
3351 /* MSIx */
d658d98a
PR
3352 for_all_evt_queues(adapter, eqo, i) {
3353 vec = be_msix_vec_get(adapter, eqo);
3354 irq_set_affinity_hint(vec, NULL);
3355 free_irq(vec, eqo);
3356 }
3abcdeda 3357
6b7c5b94
SP
3358done:
3359 adapter->isr_registered = false;
6b7c5b94
SP
3360}
3361
10ef9ab4 3362static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
3363{
3364 struct be_queue_info *q;
3365 struct be_rx_obj *rxo;
3366 int i;
3367
3368 for_all_rx_queues(adapter, rxo, i) {
3369 q = &rxo->q;
3370 if (q->created) {
99b44304
KA
3371 /* If RXQs are destroyed while in an "out of buffer"
3372 * state, there is a possibility of an HW stall on
3373 * Lancer. So, post 64 buffers to each queue to relieve
3374 * the "out of buffer" condition.
3375 * Make sure there's space in the RXQ before posting.
3376 */
3377 if (lancer_chip(adapter)) {
3378 be_rx_cq_clean(rxo);
3379 if (atomic_read(&q->used) == 0)
3380 be_post_rx_frags(rxo, GFP_KERNEL,
3381 MAX_RX_POST);
3382 }
3383
482c9e79 3384 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 3385 be_rx_cq_clean(rxo);
99b44304 3386 be_rxq_clean(rxo);
482c9e79 3387 }
10ef9ab4 3388 be_queue_free(adapter, q);
482c9e79
SP
3389 }
3390}
3391
bcc84140
KA
3392static void be_disable_if_filters(struct be_adapter *adapter)
3393{
3394 be_cmd_pmac_del(adapter, adapter->if_handle,
3395 adapter->pmac_id[0], 0);
3396
3397 be_clear_uc_list(adapter);
3398
3399 /* The IFACE flags are enabled in the open path and cleared
3400 * in the close path. When a VF gets detached from the host and
3401 * assigned to a VM the following happens:
3402 * - VF's IFACE flags get cleared in the detach path
3403 * - IFACE create is issued by the VF in the attach path
3404 * Due to a bug in the BE3/Skyhawk-R FW
3405 * (Lancer FW doesn't have the bug), the IFACE capability flags
3406 * specified along with the IFACE create cmd issued by a VF are not
3407 * honoured by FW. As a consequence, if a *new* driver
3408 * (that enables/disables IFACE flags in open/close)
3409 * is loaded in the host and an *old* driver is used by a VM/VF,
3410 * the IFACE gets created *without* the needed flags.
3411 * To avoid this, disable RX-filter flags only for Lancer.
3412 */
3413 if (lancer_chip(adapter)) {
3414 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3415 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3416 }
3417}
3418
889cd4b2
SP
3419static int be_close(struct net_device *netdev)
3420{
3421 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
3422 struct be_eq_obj *eqo;
3423 int i;
889cd4b2 3424
e1ad8e33
KA
3425 /* This protection is needed as be_close() may be called even when the
3426 * adapter is in cleared state (after eeh perm failure)
3427 */
3428 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3429 return 0;
3430
bcc84140
KA
3431 be_disable_if_filters(adapter);
3432
045508a8
PP
3433 be_roce_dev_close(adapter);
3434
dff345c5
IV
3435 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3436 for_all_evt_queues(adapter, eqo, i) {
04d3d624 3437 napi_disable(&eqo->napi);
6384a4d0
SP
3438 be_disable_busy_poll(eqo);
3439 }
71237b6f 3440 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 3441 }
a323d9bf
SP
3442
3443 be_async_mcc_disable(adapter);
3444
3445 /* Wait for all pending tx completions to arrive so that
3446 * all tx skbs are freed.
3447 */
fba87559 3448 netif_tx_disable(netdev);
6e1f9975 3449 be_tx_compl_clean(adapter);
a323d9bf
SP
3450
3451 be_rx_qs_destroy(adapter);
d11a347d 3452
a323d9bf 3453 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
3454 if (msix_enabled(adapter))
3455 synchronize_irq(be_msix_vec_get(adapter, eqo));
3456 else
3457 synchronize_irq(netdev->irq);
3458 be_eq_clean(eqo);
63fcb27f
PR
3459 }
3460
889cd4b2
SP
3461 be_irq_unregister(adapter);
3462
482c9e79
SP
3463 return 0;
3464}
3465
10ef9ab4 3466static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 3467{
1dcf7b1c
ED
3468 struct rss_info *rss = &adapter->rss_info;
3469 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 3470 struct be_rx_obj *rxo;
e9008ee9 3471 int rc, i, j;
482c9e79
SP
3472
3473 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
3474 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3475 sizeof(struct be_eth_rx_d));
3476 if (rc)
3477 return rc;
3478 }
3479
71bb8bd0
VV
3480 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3481 rxo = default_rxo(adapter);
3482 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3483 rx_frag_size, adapter->if_handle,
3484 false, &rxo->rss_id);
3485 if (rc)
3486 return rc;
3487 }
10ef9ab4
SP
3488
3489 for_all_rss_queues(adapter, rxo, i) {
482c9e79 3490 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
3491 rx_frag_size, adapter->if_handle,
3492 true, &rxo->rss_id);
482c9e79
SP
3493 if (rc)
3494 return rc;
3495 }
3496
3497 if (be_multi_rxq(adapter)) {
71bb8bd0 3498 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
e9008ee9 3499 for_all_rss_queues(adapter, rxo, i) {
e2557877 3500 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 3501 break;
e2557877
VD
3502 rss->rsstable[j + i] = rxo->rss_id;
3503 rss->rss_queue[j + i] = i;
e9008ee9
PR
3504 }
3505 }
e2557877
VD
3506 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3507 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
3508
3509 if (!BEx_chip(adapter))
e2557877
VD
3510 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3511 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
3512 } else {
3513 /* Disable RSS, if only default RX Q is created */
e2557877 3514 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3515 }
594ad54a 3516
1dcf7b1c 3517 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
748b539a 3518 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
d5d30981 3519 RSS_INDIR_TABLE_LEN, rss_key);
da1388d6 3520 if (rc) {
e2557877 3521 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3522 return rc;
482c9e79
SP
3523 }
3524
1dcf7b1c 3525 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
e2557877 3526
b02e60c8
SR
3527 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3528 * which is a queue empty condition
3529 */
10ef9ab4 3530 for_all_rx_queues(adapter, rxo, i)
b02e60c8
SR
3531 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3532
889cd4b2
SP
3533 return 0;
3534}
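/* Illustration (stand-alone sketch): the nested loops above tile the
 * RSS queue ids round-robin across the RSS_INDIR_TABLE_LEN-entry
 * indirection table. Ignoring the rss_id lookup, the rss_queue[] fill
 * pattern is equivalent to:
 */
static void rss_table_fill_sketch(u8 *table, int table_len, int num_rss_qs)
{
	int i;

	for (i = 0; i < table_len; i++)
		table[i] = i % num_rss_qs;	/* 0 1 2 0 1 2 ... for 3 queues */
}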
3535
bcc84140
KA
3536static int be_enable_if_filters(struct be_adapter *adapter)
3537{
3538 int status;
3539
3540 status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
3541 if (status)
3542 return status;
3543
3544 /* For BE3 VFs, the PF programs the initial MAC address */
3545 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
3546 status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
3547 adapter->if_handle,
3548 &adapter->pmac_id[0], 0);
3549 if (status)
3550 return status;
3551 }
3552
3553 if (adapter->vlans_added)
3554 be_vid_config(adapter);
3555
3556 be_set_rx_mode(adapter->netdev);
3557
3558 return 0;
3559}
3560
6b7c5b94
SP
3561static int be_open(struct net_device *netdev)
3562{
3563 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3564 struct be_eq_obj *eqo;
3abcdeda 3565 struct be_rx_obj *rxo;
10ef9ab4 3566 struct be_tx_obj *txo;
b236916a 3567 u8 link_status;
3abcdeda 3568 int status, i;
5fb379ee 3569
10ef9ab4 3570 status = be_rx_qs_create(adapter);
482c9e79
SP
3571 if (status)
3572 goto err;
3573
bcc84140
KA
3574 status = be_enable_if_filters(adapter);
3575 if (status)
3576 goto err;
3577
c2bba3df
SK
3578 status = be_irq_register(adapter);
3579 if (status)
3580 goto err;
5fb379ee 3581
10ef9ab4 3582 for_all_rx_queues(adapter, rxo, i)
3abcdeda 3583 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 3584
10ef9ab4
SP
3585 for_all_tx_queues(adapter, txo, i)
3586 be_cq_notify(adapter, txo->cq.id, true, 0);
3587
7a1e9b20
SP
3588 be_async_mcc_enable(adapter);
3589
10ef9ab4
SP
3590 for_all_evt_queues(adapter, eqo, i) {
3591 napi_enable(&eqo->napi);
6384a4d0 3592 be_enable_busy_poll(eqo);
20947770 3593 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
10ef9ab4 3594 }
04d3d624 3595 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 3596
323ff71e 3597 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
3598 if (!status)
3599 be_link_status_update(adapter, link_status);
3600
fba87559 3601 netif_tx_start_all_queues(netdev);
045508a8 3602 be_roce_dev_open(adapter);
c9c47142 3603
c5abe7c0 3604#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3605 if (skyhawk_chip(adapter))
3606 vxlan_get_rx_port(netdev);
c5abe7c0
SP
3607#endif
3608
889cd4b2
SP
3609 return 0;
3610err:
3611 be_close(adapter->netdev);
3612 return -EIO;
5fb379ee
SP
3613}
3614
71d8d1b5
AK
3615static int be_setup_wol(struct be_adapter *adapter, bool enable)
3616{
145155e7 3617 struct device *dev = &adapter->pdev->dev;
71d8d1b5 3618 struct be_dma_mem cmd;
71d8d1b5 3619 u8 mac[ETH_ALEN];
145155e7 3620 int status;
71d8d1b5 3621
c7bf7169 3622 eth_zero_addr(mac);
71d8d1b5
AK
3623
3624 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
145155e7 3625 cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
ddf1169f 3626 if (!cmd.va)
6b568689 3627 return -ENOMEM;
71d8d1b5
AK
3628
3629 if (enable) {
3630 status = pci_write_config_dword(adapter->pdev,
748b539a
SP
3631 PCICFG_PM_CONTROL_OFFSET,
3632 PCICFG_PM_CONTROL_MASK);
71d8d1b5 3633 if (status) {
145155e7
KP
3634 dev_err(dev, "Could not enable Wake-on-lan\n");
3635 goto err;
71d8d1b5 3636 }
71d8d1b5 3637 } else {
145155e7 3638 ether_addr_copy(mac, adapter->netdev->dev_addr);
71d8d1b5
AK
3639 }
3640
145155e7
KP
3641 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3642 pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
3643 pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
3644err:
3645 dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
3646 return status;
3647}
3648
f7062ee5
SP
3649static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3650{
3651 u32 addr;
3652
3653 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3654
3655 mac[5] = (u8)(addr & 0xFF);
3656 mac[4] = (u8)((addr >> 8) & 0xFF);
3657 mac[3] = (u8)((addr >> 16) & 0xFF);
3658 /* Use the OUI from the current MAC address */
3659 memcpy(mac, adapter->netdev->dev_addr, 3);
3660}
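/* Worked example (hash value assumed for illustration): with jhash
 * returning 0x00ab12cd, the seed MAC keeps the PF's OUI in bytes 0-2
 * and packs the low 24 bits of the hash into bytes 3-5:
 *
 *	mac = { oui[0], oui[1], oui[2], 0xab, 0x12, 0xcd }
 *
 * be_vf_eth_addr_config() below hands this out per VF, bumping mac[5]
 * by one for each successive VF.
 */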
3661
6d87f5c3
AK
3662/*
3663 * Generate a seed MAC address from the PF MAC Address using jhash.
3664 * MAC addresses for VFs are assigned incrementally starting from the seed.
3665 * These addresses are programmed in the ASIC by the PF and the VF driver
3666 * queries for the MAC address during its probe.
3667 */
4c876616 3668static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3669{
f9449ab7 3670 u32 vf;
3abcdeda 3671 int status = 0;
6d87f5c3 3672 u8 mac[ETH_ALEN];
11ac75ed 3673 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3674
3675 be_vf_eth_addr_generate(adapter, mac);
3676
11ac75ed 3677 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3678 if (BEx_chip(adapter))
590c391d 3679 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3680 vf_cfg->if_handle,
3681 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3682 else
3683 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3684 vf + 1);
590c391d 3685
6d87f5c3
AK
3686 if (status)
3687 dev_err(&adapter->pdev->dev,
748b539a
SP
3688 "Mac address assignment failed for VF %d\n",
3689 vf);
6d87f5c3 3690 else
11ac75ed 3691 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3692
3693 mac[5] += 1;
3694 }
3695 return status;
3696}
3697
4c876616
SP
3698static int be_vfs_mac_query(struct be_adapter *adapter)
3699{
3700 int status, vf;
3701 u8 mac[ETH_ALEN];
3702 struct be_vf_cfg *vf_cfg;
4c876616
SP
3703
3704 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3705 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3706 mac, vf_cfg->if_handle,
3707 false, vf+1);
4c876616
SP
3708 if (status)
3709 return status;
3710 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3711 }
3712 return 0;
3713}
3714
f9449ab7 3715static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3716{
11ac75ed 3717 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3718 u32 vf;
3719
257a3feb 3720 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
3721 dev_warn(&adapter->pdev->dev,
3722 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
3723 goto done;
3724 }
3725
b4c1df93
SP
3726 pci_disable_sriov(adapter->pdev);
3727
11ac75ed 3728 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3729 if (BEx_chip(adapter))
11ac75ed
SP
3730 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3731 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3732 else
3733 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3734 vf + 1);
f9449ab7 3735
11ac75ed
SP
3736 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3737 }
39f1d94d
SP
3738done:
3739 kfree(adapter->vf_cfg);
3740 adapter->num_vfs = 0;
f174c7ec 3741 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
6d87f5c3
AK
3742}
3743
7707133c
SP
3744static void be_clear_queues(struct be_adapter *adapter)
3745{
3746 be_mcc_queues_destroy(adapter);
3747 be_rx_cqs_destroy(adapter);
3748 be_tx_queues_destroy(adapter);
3749 be_evt_queues_destroy(adapter);
3750}
3751
68d7bdcb 3752static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3753{
191eb756
SP
3754 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3755 cancel_delayed_work_sync(&adapter->work);
3756 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3757 }
68d7bdcb
SP
3758}
3759
eb7dd46c
SP
3760static void be_cancel_err_detection(struct be_adapter *adapter)
3761{
3762 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3763 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3764 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3765 }
3766}
3767
c5abe7c0 3768#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3769static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3770{
630f4b70
SB
3771 struct net_device *netdev = adapter->netdev;
3772
c9c47142
SP
3773 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3774 be_cmd_manage_iface(adapter, adapter->if_handle,
3775 OP_CONVERT_TUNNEL_TO_NORMAL);
3776
3777 if (adapter->vxlan_port)
3778 be_cmd_set_vxlan_port(adapter, 0);
3779
3780 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3781 adapter->vxlan_port = 0;
630f4b70
SB
3782
3783 netdev->hw_enc_features = 0;
3784 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
ac9a3d84 3785 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
c9c47142 3786}
c5abe7c0 3787#endif
c9c47142 3788
f2858738
VV
3789static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3790{
3791 struct be_resources res = adapter->pool_res;
3792 u16 num_vf_qs = 1;
3793
3794 /* Distribute the queue resources equally among the PF and its VFs.
3795 * Do not distribute queue resources in a multi-channel configuration.
3796 */
3797 if (num_vfs && !be_is_mc(adapter)) {
3798 /* If the number of VFs requested is at least 8 less than the max
3799 * supported, assign 8 queue pairs to the PF and divide the
3800 * remaining resources evenly among the VFs
3801 */
3802 if (num_vfs < (be_max_vfs(adapter) - 8))
3803 num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
3804 else
3805 num_vf_qs = res.max_rss_qs / num_vfs;
3806
3807 /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
3808 * interfaces per port. Provide RSS on VFs, only if number
3809 * of VFs requested is less than MAX_RSS_IFACES limit.
3810 */
3811 if (num_vfs >= MAX_RSS_IFACES)
3812 num_vf_qs = 1;
3813 }
3814 return num_vf_qs;
3815}
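/* Worked example (numbers assumed for illustration): with
 * res.max_rss_qs = 32, be_max_vfs() = 30 and num_vfs = 10, the first
 * branch applies (10 < 30 - 8), so 8 queue pairs stay with the PF and
 * each VF gets (32 - 8) / 10 = 2. With num_vfs = 25 the second branch
 * gives 32 / 25 = 1. A request of MAX_RSS_IFACES or more VFs always
 * falls back to one queue pair per VF.
 */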
3816
b05004ad
SK
3817static int be_clear(struct be_adapter *adapter)
3818{
f2858738
VV
3819 struct pci_dev *pdev = adapter->pdev;
3820 u16 num_vf_qs;
3821
68d7bdcb 3822 be_cancel_worker(adapter);
191eb756 3823
11ac75ed 3824 if (sriov_enabled(adapter))
f9449ab7
SP
3825 be_vf_clear(adapter);
3826
bec84e6b
VV
3827 /* Re-configure FW to distribute resources evenly across max-supported
3828 * number of VFs, only when VFs are not already enabled.
3829 */
ace40aff
VV
3830 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
3831 !pci_vfs_assigned(pdev)) {
f2858738
VV
3832 num_vf_qs = be_calculate_vf_qs(adapter,
3833 pci_sriov_get_totalvfs(pdev));
bec84e6b 3834 be_cmd_set_sriov_config(adapter, adapter->pool_res,
f2858738
VV
3835 pci_sriov_get_totalvfs(pdev),
3836 num_vf_qs);
3837 }
bec84e6b 3838
c5abe7c0 3839#ifdef CONFIG_BE2NET_VXLAN
c9c47142 3840 be_disable_vxlan_offloads(adapter);
c5abe7c0 3841#endif
bcc84140
KA
3842 kfree(adapter->pmac_id);
3843 adapter->pmac_id = NULL;
fbc13f01 3844
f9449ab7 3845 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 3846
7707133c 3847 be_clear_queues(adapter);
a54769f5 3848
10ef9ab4 3849 be_msix_disable(adapter);
e1ad8e33 3850 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
a54769f5
SP
3851 return 0;
3852}
3853
4c876616 3854static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3855{
92bf14ab 3856 struct be_resources res = {0};
bcc84140 3857 u32 cap_flags, en_flags, vf;
4c876616 3858 struct be_vf_cfg *vf_cfg;
0700d816 3859 int status;
abb93951 3860
0700d816 3861 /* If a FW profile exists, then cap_flags are updated */
4c876616 3862 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
0ed7d749 3863 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
abb93951 3864
4c876616 3865 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
3866 if (!BE3_chip(adapter)) {
3867 status = be_cmd_get_profile_config(adapter, &res,
f2858738 3868 RESOURCE_LIMITS,
92bf14ab 3869 vf + 1);
435452aa 3870 if (!status) {
92bf14ab 3871 cap_flags = res.if_cap_flags;
435452aa
VV
3872 /* Prevent VFs from enabling VLAN promiscuous
3873 * mode
3874 */
3875 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3876 }
92bf14ab 3877 }
4c876616 3878
bcc84140
KA
3879 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3880 BE_IF_FLAGS_BROADCAST |
3881 BE_IF_FLAGS_MULTICAST |
3882 BE_IF_FLAGS_PASS_L3L4_ERRORS);
3883 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3884 &vf_cfg->if_handle, vf + 1);
4c876616 3885 if (status)
0700d816 3886 return status;
4c876616 3887 }
0700d816
KA
3888
3889 return 0;
abb93951
PR
3890}
3891
39f1d94d 3892static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3893{
11ac75ed 3894 struct be_vf_cfg *vf_cfg;
30128031
SP
3895 int vf;
3896
39f1d94d
SP
3897 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3898 GFP_KERNEL);
3899 if (!adapter->vf_cfg)
3900 return -ENOMEM;
3901
11ac75ed
SP
3902 for_all_vfs(adapter, vf_cfg, vf) {
3903 vf_cfg->if_handle = -1;
3904 vf_cfg->pmac_id = -1;
30128031 3905 }
39f1d94d 3906 return 0;
30128031
SP
3907}
3908
f9449ab7
SP
3909static int be_vf_setup(struct be_adapter *adapter)
3910{
c502224e 3911 struct device *dev = &adapter->pdev->dev;
11ac75ed 3912 struct be_vf_cfg *vf_cfg;
4c876616 3913 int status, old_vfs, vf;
e7bcbd7b 3914 bool spoofchk;
39f1d94d 3915
257a3feb 3916 old_vfs = pci_num_vf(adapter->pdev);
39f1d94d
SP
3917
3918 status = be_vf_setup_init(adapter);
3919 if (status)
3920 goto err;
30128031 3921
4c876616
SP
3922 if (old_vfs) {
3923 for_all_vfs(adapter, vf_cfg, vf) {
3924 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3925 if (status)
3926 goto err;
3927 }
f9449ab7 3928
4c876616
SP
3929 status = be_vfs_mac_query(adapter);
3930 if (status)
3931 goto err;
3932 } else {
bec84e6b
VV
3933 status = be_vfs_if_create(adapter);
3934 if (status)
3935 goto err;
3936
39f1d94d
SP
3937 status = be_vf_eth_addr_config(adapter);
3938 if (status)
3939 goto err;
3940 }
f9449ab7 3941
11ac75ed 3942 for_all_vfs(adapter, vf_cfg, vf) {
04a06028 3943 /* Allow VFs to program MAC/VLAN filters */
435452aa
VV
3944 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
3945 vf + 1);
3946 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
04a06028 3947 status = be_cmd_set_fn_privileges(adapter,
435452aa 3948 vf_cfg->privileges |
04a06028
SP
3949 BE_PRIV_FILTMGMT,
3950 vf + 1);
435452aa
VV
3951 if (!status) {
3952 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
04a06028
SP
3953 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3954 vf);
435452aa 3955 }
04a06028
SP
3956 }
3957
0f77ba73
RN
3958 /* Allow full available bandwidth */
3959 if (!old_vfs)
3960 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 3961
e7bcbd7b
KA
3962 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
3963 vf_cfg->if_handle, NULL,
3964 &spoofchk);
3965 if (!status)
3966 vf_cfg->spoofchk = spoofchk;
3967
bdce2ad7 3968 if (!old_vfs) {
0599863d 3969 be_cmd_enable_vf(adapter, vf + 1);
bdce2ad7
SR
3970 be_cmd_set_logical_link_config(adapter,
3971 IFLA_VF_LINK_STATE_AUTO,
3972 vf+1);
3973 }
f9449ab7 3974 }
b4c1df93
SP
3975
3976 if (!old_vfs) {
3977 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3978 if (status) {
3979 dev_err(dev, "SRIOV enable failed\n");
3980 adapter->num_vfs = 0;
3981 goto err;
3982 }
3983 }
f174c7ec
VV
3984
3985 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
f9449ab7
SP
3986 return 0;
3987err:
4c876616
SP
3988 dev_err(dev, "VF setup failed\n");
3989 be_vf_clear(adapter);
f9449ab7
SP
3990 return status;
3991}
3992
f93f160b
VV
3993/* Converting function_mode bits on BE3 to SH mc_type enums */
3994
3995static u8 be_convert_mc_type(u32 function_mode)
3996{
66064dbc 3997 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 3998 return vNIC1;
66064dbc 3999 else if (function_mode & QNQ_MODE)
f93f160b
VV
4000 return FLEX10;
4001 else if (function_mode & VNIC_MODE)
4002 return vNIC2;
4003 else if (function_mode & UMC_ENABLED)
4004 return UMC;
4005 else
4006 return MC_NONE;
4007}
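/* The tests above are ordered so that VNIC_MODE together with QNQ_MODE
 * decodes as vNIC1 before either bit alone is considered:
 *
 *	function_mode bits		mc_type
 *	VNIC_MODE | QNQ_MODE		vNIC1
 *	QNQ_MODE			FLEX10
 *	VNIC_MODE			vNIC2
 *	UMC_ENABLED			UMC
 *	(none)				MC_NONE
 */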
4008
92bf14ab
SP
4009/* On BE2/BE3 FW does not suggest the supported limits */
4010static void BEx_get_resources(struct be_adapter *adapter,
4011 struct be_resources *res)
4012{
bec84e6b 4013 bool use_sriov = adapter->num_vfs ? 1 : 0;
92bf14ab
SP
4014
4015 if (be_physfn(adapter))
4016 res->max_uc_mac = BE_UC_PMAC_COUNT;
4017 else
4018 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4019
f93f160b
VV
4020 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4021
4022 if (be_is_mc(adapter)) {
4023 /* Assuming that there are 4 channels per port,
4024 * when multi-channel is enabled
4025 */
4026 if (be_is_qnq_mode(adapter))
4027 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4028 else
4029 /* In a non-qnq multichannel mode, the pvid
4030 * takes up one vlan entry
4031 */
4032 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4033 } else {
92bf14ab 4034 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
f93f160b
VV
4035 }
4036
92bf14ab
SP
4037 res->max_mcast_mac = BE_MAX_MC;
4038
a5243dab
VV
4039 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4040 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4041 * *only* if it is RSS-capable.
4042 */
4043 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
18c57c74
KA
4044 be_virtfn(adapter) ||
4045 (be_is_mc(adapter) &&
4046 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
92bf14ab 4047 res->max_tx_qs = 1;
a28277dc
SR
4048 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4049 struct be_resources super_nic_res = {0};
4050
4051 /* On a SuperNIC profile, the driver needs to use the
4052 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4053 */
f2858738
VV
4054 be_cmd_get_profile_config(adapter, &super_nic_res,
4055 RESOURCE_LIMITS, 0);
a28277dc
SR
4056 /* Some old versions of BE3 FW don't report max_tx_qs value */
4057 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4058 } else {
92bf14ab 4059 res->max_tx_qs = BE3_MAX_TX_QS;
a28277dc 4060 }
92bf14ab
SP
4061
4062 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4063 !use_sriov && be_physfn(adapter))
4064 res->max_rss_qs = (adapter->be3_native) ?
4065 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4066 res->max_rx_qs = res->max_rss_qs + 1;
4067
e3dc867c 4068 if (be_physfn(adapter))
d3518e21 4069 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
e3dc867c
SR
4070 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4071 else
4072 res->max_evt_qs = 1;
92bf14ab
SP
4073
4074 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
71bb8bd0 4075 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
92bf14ab
SP
4076 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4077 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4078}
4079
30128031
SP
4080static void be_setup_init(struct be_adapter *adapter)
4081{
4082 adapter->vlan_prio_bmap = 0xff;
42f11cf2 4083 adapter->phy.link_speed = -1;
30128031
SP
4084 adapter->if_handle = -1;
4085 adapter->be3_native = false;
f66b7cfd 4086 adapter->if_flags = 0;
f25b119c
PR
4087 if (be_physfn(adapter))
4088 adapter->cmd_privileges = MAX_PRIVILEGES;
4089 else
4090 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
4091}
4092
bec84e6b
VV
4093static int be_get_sriov_config(struct be_adapter *adapter)
4094{
bec84e6b 4095 struct be_resources res = {0};
d3d18312 4096 int max_vfs, old_vfs;
bec84e6b 4097
f2858738 4098 be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
d3d18312 4099
ace40aff 4100 /* Some old versions of BE3 FW don't report max_vfs value */
bec84e6b
VV
4101 if (BE3_chip(adapter) && !res.max_vfs) {
4102 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4103 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4104 }
4105
d3d18312 4106 adapter->pool_res = res;
bec84e6b 4107
ace40aff
VV
4108 /* If during previous unload of the driver, the VFs were not disabled,
4109 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4110 * Instead use the TotalVFs value stored in the pci-dev struct.
4111 */
bec84e6b
VV
4112 old_vfs = pci_num_vf(adapter->pdev);
4113 if (old_vfs) {
ace40aff
VV
4114 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4115 old_vfs);
4116
4117 adapter->pool_res.max_vfs =
4118 pci_sriov_get_totalvfs(adapter->pdev);
bec84e6b 4119 adapter->num_vfs = old_vfs;
bec84e6b
VV
4120 }
4121
4122 return 0;
4123}
4124
ace40aff
VV
4125static void be_alloc_sriov_res(struct be_adapter *adapter)
4126{
4127 int old_vfs = pci_num_vf(adapter->pdev);
4128 u16 num_vf_qs;
4129 int status;
4130
4131 be_get_sriov_config(adapter);
4132
4133 if (!old_vfs)
4134 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4135
4136 /* When the HW is in SRIOV capable configuration, the PF-pool
4137 * resources are given to the PF during driver load, if there are no
4138 * old VFs. This facility is not available in BE3 FW.
4139 * Also, this is done by FW in Lancer chip.
4140 */
4141 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4142 num_vf_qs = be_calculate_vf_qs(adapter, 0);
4143 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
4144 num_vf_qs);
4145 if (status)
4146 dev_err(&adapter->pdev->dev,
4147 "Failed to optimize SRIOV resources\n");
4148 }
4149}
4150
92bf14ab 4151static int be_get_resources(struct be_adapter *adapter)
abb93951 4152{
92bf14ab
SP
4153 struct device *dev = &adapter->pdev->dev;
4154 struct be_resources res = {0};
4155 int status;
abb93951 4156
92bf14ab
SP
4157 if (BEx_chip(adapter)) {
4158 BEx_get_resources(adapter, &res);
4159 adapter->res = res;
abb93951
PR
4160 }
4161
92bf14ab
SP
4162 /* For Lancer, SH etc., read per-function resource limits from FW.
4163 * GET_FUNC_CONFIG returns per-function guaranteed limits.
4164 * GET_PROFILE_CONFIG returns PCI-E related limits and PF-pool limits.
4165 */
4166 if (!BEx_chip(adapter)) {
4167 status = be_cmd_get_func_config(adapter, &res);
4168 if (status)
4169 return status;
abb93951 4170
71bb8bd0
VV
4171 /* If a default RXQ must be created, we'll use up one RSS queue */
4172 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4173 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4174 res.max_rss_qs -= 1;
4175
92bf14ab
SP
4176 /* If RoCE may be enabled stash away half the EQs for RoCE */
4177 if (be_roce_supported(adapter))
4178 res.max_evt_qs /= 2;
4179 adapter->res = res;
abb93951 4180 }
4c876616 4181
71bb8bd0
VV
4182 /* If FW supports RSS default queue, then skip creating non-RSS
4183 * queue for non-IP traffic.
4184 */
4185 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
4186 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
4187
acbafeb1
SP
4188 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4189 be_max_txqs(adapter), be_max_rxqs(adapter),
4190 be_max_rss(adapter), be_max_eqs(adapter),
4191 be_max_vfs(adapter));
4192 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4193 be_max_uc(adapter), be_max_mc(adapter),
4194 be_max_vlans(adapter));
4195
ace40aff
VV
4196 /* Sanitize cfg_num_qs based on HW and platform limits */
4197 adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
4198 be_max_qs(adapter));
92bf14ab 4199 return 0;
abb93951
PR
4200}
4201
39f1d94d
SP
4202static int be_get_config(struct be_adapter *adapter)
4203{
6b085ba9 4204 int status, level;
542963b7 4205 u16 profile_id;
6b085ba9 4206
980df249
SR
4207 status = be_cmd_get_cntl_attributes(adapter);
4208 if (status)
4209 return status;
4210
e97e3cda 4211 status = be_cmd_query_fw_cfg(adapter);
abb93951 4212 if (status)
92bf14ab 4213 return status;
abb93951 4214
6b085ba9
SP
4215 if (BEx_chip(adapter)) {
4216 level = be_cmd_get_fw_log_level(adapter);
4217 adapter->msg_enable =
4218 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4219 }
4220
4221 be_cmd_get_acpi_wol_cap(adapter);
4222
21252377
VV
4223 be_cmd_query_port_name(adapter);
4224
4225 if (be_physfn(adapter)) {
542963b7
VV
4226 status = be_cmd_get_active_profile(adapter, &profile_id);
4227 if (!status)
4228 dev_info(&adapter->pdev->dev,
4229 "Using profile 0x%x\n", profile_id);
962bcb75 4230 }
bec84e6b 4231
92bf14ab
SP
4232 status = be_get_resources(adapter);
4233 if (status)
4234 return status;
abb93951 4235
46ee9c14
RN
4236 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4237 sizeof(*adapter->pmac_id), GFP_KERNEL);
92bf14ab
SP
4238 if (!adapter->pmac_id)
4239 return -ENOMEM;
abb93951 4240
92bf14ab 4241 return 0;
39f1d94d
SP
4242}
4243
95046b92
SP
4244static int be_mac_setup(struct be_adapter *adapter)
4245{
4246 u8 mac[ETH_ALEN];
4247 int status;
4248
4249 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4250 status = be_cmd_get_perm_mac(adapter, mac);
4251 if (status)
4252 return status;
4253
4254 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4255 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
95046b92
SP
4256 }
4257
95046b92
SP
4258 return 0;
4259}
4260
68d7bdcb
SP
4261static void be_schedule_worker(struct be_adapter *adapter)
4262{
4263 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4264 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4265}
4266
eb7dd46c
SP
4267static void be_schedule_err_detection(struct be_adapter *adapter)
4268{
4269 schedule_delayed_work(&adapter->be_err_detection_work,
4270 msecs_to_jiffies(1000));
4271 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4272}
4273
7707133c 4274static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 4275{
68d7bdcb 4276 struct net_device *netdev = adapter->netdev;
10ef9ab4 4277 int status;
ba343c77 4278
7707133c 4279 status = be_evt_queues_create(adapter);
abb93951
PR
4280 if (status)
4281 goto err;
73d540f2 4282
7707133c 4283 status = be_tx_qs_create(adapter);
c2bba3df
SK
4284 if (status)
4285 goto err;
10ef9ab4 4286
7707133c 4287 status = be_rx_cqs_create(adapter);
10ef9ab4 4288 if (status)
a54769f5 4289 goto err;
6b7c5b94 4290
7707133c 4291 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
4292 if (status)
4293 goto err;
4294
68d7bdcb
SP
4295 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4296 if (status)
4297 goto err;
4298
4299 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4300 if (status)
4301 goto err;
4302
7707133c
SP
4303 return 0;
4304err:
4305 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4306 return status;
4307}
4308
68d7bdcb
SP
4309int be_update_queues(struct be_adapter *adapter)
4310{
4311 struct net_device *netdev = adapter->netdev;
4312 int status;
4313
4314 if (netif_running(netdev))
4315 be_close(netdev);
4316
4317 be_cancel_worker(adapter);
4318
4319 /* If any vectors have been shared with RoCE we cannot re-program
4320 * the MSIx table.
4321 */
4322 if (!adapter->num_msix_roce_vec)
4323 be_msix_disable(adapter);
4324
4325 be_clear_queues(adapter);
4326
4327 if (!msix_enabled(adapter)) {
4328 status = be_msix_enable(adapter);
4329 if (status)
4330 return status;
4331 }
4332
4333 status = be_setup_queues(adapter);
4334 if (status)
4335 return status;
4336
4337 be_schedule_worker(adapter);
4338
4339 if (netif_running(netdev))
4340 status = be_open(netdev);
4341
4342 return status;
4343}
4344
f7062ee5
SP
4345static inline int fw_major_num(const char *fw_ver)
4346{
4347 int fw_major = 0, i;
4348
4349 i = sscanf(fw_ver, "%d.", &fw_major);
4350 if (i != 1)
4351 return 0;
4352
4353 return fw_major;
4354}
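/* Usage sketch: fw_major_num("4.9.311.0") parses the leading integer
 * and returns 4; a version string without a leading number returns 0.
 * The BE2 firmware check in be_setup() relies on this when warning
 * about pre-4.0 firmware.
 */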
4355
f962f840
SP
4356/* If any VFs are already enabled don't FLR the PF */
4357static bool be_reset_required(struct be_adapter *adapter)
4358{
4359 return !pci_num_vf(adapter->pdev);
4360}
4361
4362/* Wait for the FW to be ready and perform the required initialization */
4363static int be_func_init(struct be_adapter *adapter)
4364{
4365 int status;
4366
4367 status = be_fw_wait_ready(adapter);
4368 if (status)
4369 return status;
4370
4371 if (be_reset_required(adapter)) {
4372 status = be_cmd_reset_function(adapter);
4373 if (status)
4374 return status;
4375
4376 /* Wait for interrupts to quiesce after an FLR */
4377 msleep(100);
4378
4379 /* We can clear all errors when function reset succeeds */
954f6825 4380 be_clear_error(adapter, BE_CLEAR_ALL);
f962f840
SP
4381 }
4382
4383 /* Tell FW we're ready to fire cmds */
4384 status = be_cmd_fw_init(adapter);
4385 if (status)
4386 return status;
4387
4388 /* Allow interrupts for other ULPs running on NIC function */
4389 be_intr_set(adapter, true);
4390
4391 return 0;
4392}
4393
7707133c
SP
4394static int be_setup(struct be_adapter *adapter)
4395{
4396 struct device *dev = &adapter->pdev->dev;
bcc84140 4397 u32 en_flags;
7707133c
SP
4398 int status;
4399
f962f840
SP
4400 status = be_func_init(adapter);
4401 if (status)
4402 return status;
4403
7707133c
SP
4404 be_setup_init(adapter);
4405
4406 if (!lancer_chip(adapter))
4407 be_cmd_req_native_mode(adapter);
4408
980df249
SR
4409 /* invoke this cmd first to get pf_num and vf_num which are needed
4410 * for issuing profile related cmds
4411 */
4412 if (!BEx_chip(adapter)) {
4413 status = be_cmd_get_func_config(adapter, NULL);
4414 if (status)
4415 return status;
4416 }
72ef3a88 4417
ace40aff
VV
4418 if (!BE2_chip(adapter) && be_physfn(adapter))
4419 be_alloc_sriov_res(adapter);
4420
7707133c 4421 status = be_get_config(adapter);
10ef9ab4 4422 if (status)
a54769f5 4423 goto err;
6b7c5b94 4424
7707133c 4425 status = be_msix_enable(adapter);
10ef9ab4 4426 if (status)
a54769f5 4427 goto err;
6b7c5b94 4428
bcc84140
KA
4429 /* will enable all the needed filter flags in be_open() */
4430 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4431 en_flags = en_flags & be_if_cap_flags(adapter);
4432 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4433 &adapter->if_handle, 0);
7707133c 4434 if (status)
a54769f5 4435 goto err;
6b7c5b94 4436
68d7bdcb
SP
4437 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4438 rtnl_lock();
7707133c 4439 status = be_setup_queues(adapter);
68d7bdcb 4440 rtnl_unlock();
95046b92 4441 if (status)
1578e777
PR
4442 goto err;
4443
7707133c 4444 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
7707133c
SP
4445
4446 status = be_mac_setup(adapter);
10ef9ab4
SP
4447 if (status)
4448 goto err;
4449
e97e3cda 4450 be_cmd_get_fw_ver(adapter);
acbafeb1 4451 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
5a56eb10 4452
e9e2a904 4453 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
50762667 4454 dev_err(dev, "Firmware on card is old (%s), IRQs may not work",
e9e2a904
SK
4455 adapter->fw_ver);
4456 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4457 }
4458
00d594c3
KA
4459 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4460 adapter->rx_fc);
4461 if (status)
4462 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4463 &adapter->rx_fc);
590c391d 4464
00d594c3
KA
4465 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4466 adapter->tx_fc, adapter->rx_fc);
2dc1deb6 4467
bdce2ad7
SR
4468 if (be_physfn(adapter))
4469 be_cmd_set_logical_link_config(adapter,
4470 IFLA_VF_LINK_STATE_AUTO, 0);
4471
bec84e6b
VV
4472 if (adapter->num_vfs)
4473 be_vf_setup(adapter);
f9449ab7 4474
f25b119c
PR
4475 status = be_cmd_get_phy_info(adapter);
4476 if (!status && be_pause_supported(adapter))
42f11cf2
AK
4477 adapter->phy.fc_autoneg = 1;
4478
68d7bdcb 4479 be_schedule_worker(adapter);
e1ad8e33 4480 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 4481 return 0;
a54769f5
SP
4482err:
4483 be_clear(adapter);
4484 return status;
4485}
6b7c5b94 4486
66268739
IV
4487#ifdef CONFIG_NET_POLL_CONTROLLER
4488static void be_netpoll(struct net_device *netdev)
4489{
4490 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 4491 struct be_eq_obj *eqo;
66268739
IV
4492 int i;
4493
e49cc34f 4494 for_all_evt_queues(adapter, eqo, i) {
20947770 4495 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
e49cc34f
SP
4496 napi_schedule(&eqo->napi);
4497 }
66268739
IV
4498}
4499#endif
4500
485bf569
SN
4501int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4502{
4503 const struct firmware *fw;
4504 int status;
4505
4506 if (!netif_running(adapter->netdev)) {
4507 dev_err(&adapter->pdev->dev,
4508 "Firmware load not allowed (interface is down)\n");
940a3fcd 4509 return -ENETDOWN;
485bf569
SN
4510 }
4511
4512 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4513 if (status)
4514 goto fw_exit;
4515
4516 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4517
4518 if (lancer_chip(adapter))
4519 status = lancer_fw_download(adapter, fw);
4520 else
4521 status = be_fw_download(adapter, fw);
4522
eeb65ced 4523 if (!status)
e97e3cda 4524 be_cmd_get_fw_ver(adapter);
eeb65ced 4525
84517482
AK
4526fw_exit:
4527 release_firmware(fw);
4528 return status;
4529}
4530
add511b3
RP
4531static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4532 u16 flags)
a77dcb8c
AK
4533{
4534 struct be_adapter *adapter = netdev_priv(dev);
4535 struct nlattr *attr, *br_spec;
4536 int rem;
4537 int status = 0;
4538 u16 mode = 0;
4539
4540 if (!sriov_enabled(adapter))
4541 return -EOPNOTSUPP;
4542
4543 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4ea85e83
TG
4544 if (!br_spec)
4545 return -EINVAL;
a77dcb8c
AK
4546
4547 nla_for_each_nested(attr, br_spec, rem) {
4548 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4549 continue;
4550
b7c1a314
TG
4551 if (nla_len(attr) < sizeof(mode))
4552 return -EINVAL;
4553
a77dcb8c 4554 mode = nla_get_u16(attr);
ac0f5fba
SR
4555 if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
4556 return -EOPNOTSUPP;
4557
a77dcb8c
AK
4558 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4559 return -EINVAL;
4560
4561 status = be_cmd_set_hsw_config(adapter, 0, 0,
4562 adapter->if_handle,
4563 mode == BRIDGE_MODE_VEPA ?
4564 PORT_FWD_TYPE_VEPA :
e7bcbd7b 4565 PORT_FWD_TYPE_VEB, 0);
a77dcb8c
AK
4566 if (status)
4567 goto err;
4568
4569 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4570 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4571
4572 return status;
4573 }
4574err:
4575 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4576 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4577
4578 return status;
4579}
4580
4581static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
46c264da
ND
4582 struct net_device *dev, u32 filter_mask,
4583 int nlflags)
a77dcb8c
AK
4584{
4585 struct be_adapter *adapter = netdev_priv(dev);
4586 int status = 0;
4587 u8 hsw_mode;
4588
a77dcb8c
AK
4589 /* BE and Lancer chips support VEB mode only */
4590 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4591 hsw_mode = PORT_FWD_TYPE_VEB;
4592 } else {
4593 status = be_cmd_get_hsw_config(adapter, NULL, 0,
e7bcbd7b
KA
4594 adapter->if_handle, &hsw_mode,
4595 NULL);
a77dcb8c
AK
4596 if (status)
4597 return 0;
ff9ed19d
KP
4598
4599 if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
4600 return 0;
a77dcb8c
AK
4601 }
4602
4603 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4604 hsw_mode == PORT_FWD_TYPE_VEPA ?
2c3c031c 4605 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
7d4f8d87 4606 0, 0, nlflags, filter_mask, NULL);
a77dcb8c
AK
4607}
4608
c5abe7c0 4609#ifdef CONFIG_BE2NET_VXLAN
630f4b70
SB
4610/* VxLAN offload Notes:
4611 *
4612 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4613 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4614 * is expected to work across all types of IP tunnels once exported. Skyhawk
4615 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
16dde0d6
SB
4616 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4617 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4618 * those other tunnels are unexported on the fly through ndo_features_check().
630f4b70
SB
4619 *
4620 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4621 * adds more than one port, disable offloads and don't re-enable them again
4622 * until after all the tunnels are removed.
4623 */
c9c47142
SP
4624static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4625 __be16 port)
4626{
4627 struct be_adapter *adapter = netdev_priv(netdev);
4628 struct device *dev = &adapter->pdev->dev;
4629 int status;
4630
af19e686 4631 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
c9c47142
SP
4632 return;
4633
1e5b311a
JB
4634 if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
4635 adapter->vxlan_port_aliases++;
4636 return;
4637 }
4638
c9c47142 4639 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
c9c47142
SP
4640 dev_info(dev,
4641 "Only one UDP port supported for VxLAN offloads\n");
630f4b70
SB
4642 dev_info(dev, "Disabling VxLAN offloads\n");
4643 adapter->vxlan_port_count++;
4644 goto err;
c9c47142
SP
4645 }
4646
630f4b70
SB
4647 if (adapter->vxlan_port_count++ >= 1)
4648 return;
4649
c9c47142
SP
4650 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4651 OP_CONVERT_NORMAL_TO_TUNNEL);
4652 if (status) {
4653 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4654 goto err;
4655 }
4656
4657 status = be_cmd_set_vxlan_port(adapter, port);
4658 if (status) {
4659 dev_warn(dev, "Failed to add VxLAN port\n");
4660 goto err;
4661 }
4662 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4663 adapter->vxlan_port = port;
4664
630f4b70
SB
4665 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4666 NETIF_F_TSO | NETIF_F_TSO6 |
4667 NETIF_F_GSO_UDP_TUNNEL;
4668 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
ac9a3d84 4669 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
630f4b70 4670
c9c47142
SP
4671 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4672 be16_to_cpu(port));
4673 return;
4674err:
4675 be_disable_vxlan_offloads(adapter);
c9c47142
SP
4676}
4677
4678static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4679 __be16 port)
4680{
4681 struct be_adapter *adapter = netdev_priv(netdev);
4682
af19e686 4683 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
c9c47142
SP
4684 return;
4685
4686 if (adapter->vxlan_port != port)
630f4b70 4687 goto done;
c9c47142 4688
1e5b311a
JB
4689 if (adapter->vxlan_port_aliases) {
4690 adapter->vxlan_port_aliases--;
4691 return;
4692 }
4693
c9c47142
SP
4694 be_disable_vxlan_offloads(adapter);
4695
4696 dev_info(&adapter->pdev->dev,
4697 "Disabled VxLAN offloads for UDP port %d\n",
4698 be16_to_cpu(port));
630f4b70
SB
4699done:
4700 adapter->vxlan_port_count--;
c9c47142 4701}
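/* Illustration of the port bookkeeping (hypothetical sequence): adding
 * port A sets vxlan_port_count = 1 and enables offloads; adding A again
 * only bumps vxlan_port_aliases; adding a different port B bumps
 * vxlan_port_count to 2 and disables offloads, which are not re-enabled
 * until all the extra tunnels are removed, per the notes above
 * be_add_vxlan_port(). Deletions reverse the counts in the same way.
 */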
725d548f 4702
5f35227e
JG
4703static netdev_features_t be_features_check(struct sk_buff *skb,
4704 struct net_device *dev,
4705 netdev_features_t features)
725d548f 4706{
16dde0d6
SB
4707 struct be_adapter *adapter = netdev_priv(dev);
4708 u8 l4_hdr = 0;
4709
4710 /* The code below restricts offload features for some tunneled packets.
4711 * Offload features for normal (non tunnel) packets are unchanged.
4712 */
4713 if (!skb->encapsulation ||
4714 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
4715 return features;
4716
4717 /* It's an encapsulated packet and VxLAN offloads are enabled. We
4718 * should disable tunnel offload features if it's not a VxLAN packet,
4719 * as tunnel offloads have been enabled only for VxLAN. This is done to
4720 * allow other tunneled traffic like GRE work fine while VxLAN
4721 * offloads are configured in Skyhawk-R.
4722 */
4723 switch (vlan_get_protocol(skb)) {
4724 case htons(ETH_P_IP):
4725 l4_hdr = ip_hdr(skb)->protocol;
4726 break;
4727 case htons(ETH_P_IPV6):
4728 l4_hdr = ipv6_hdr(skb)->nexthdr;
4729 break;
4730 default:
4731 return features;
4732 }
4733
4734 if (l4_hdr != IPPROTO_UDP ||
4735 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
4736 skb->inner_protocol != htons(ETH_P_TEB) ||
4737 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
4738 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
a188222b 4739 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
16dde0d6
SB
4740
4741 return features;
725d548f 4742}
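/* The inner-header test above amounts to: the packet is UDP, the inner
 * protocol is an Ethernet frame (ETH_P_TEB), and the gap between the
 * transport header and the inner MAC header is exactly one UDP header
 * plus one VxLAN header (8 + 8 bytes). A stand-alone restatement
 * (sketch, not used by the driver):
 */
static bool looks_like_vxlan_sketch(u8 l4_proto, __be16 inner_proto,
				    int tunnel_hdr_len)
{
	return l4_proto == IPPROTO_UDP &&
	       inner_proto == htons(ETH_P_TEB) &&
	       tunnel_hdr_len == sizeof(struct udphdr) +
				 sizeof(struct vxlanhdr);
}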
c5abe7c0 4743#endif
c9c47142 4744
a155a5db
SB
4745static int be_get_phys_port_id(struct net_device *dev,
4746 struct netdev_phys_item_id *ppid)
4747{
4748 int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
4749 struct be_adapter *adapter = netdev_priv(dev);
4750 u8 *id;
4751
4752 if (MAX_PHYS_ITEM_ID_LEN < id_len)
4753 return -ENOSPC;
4754
4755 ppid->id[0] = adapter->hba_port_num + 1;
4756 id = &ppid->id[1];
4757 for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
4758 i--, id += CNTL_SERIAL_NUM_WORD_SZ)
4759 memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
4760
4761 ppid->id_len = id_len;
4762
4763 return 0;
4764}
4765
e5686ad8 4766static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
4767 .ndo_open = be_open,
4768 .ndo_stop = be_close,
4769 .ndo_start_xmit = be_xmit,
a54769f5 4770 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
4771 .ndo_set_mac_address = be_mac_addr_set,
4772 .ndo_change_mtu = be_change_mtu,
ab1594e9 4773 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 4774 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
4775 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4776 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 4777 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 4778 .ndo_set_vf_vlan = be_set_vf_vlan,
ed616689 4779 .ndo_set_vf_rate = be_set_vf_tx_rate,
66268739 4780 .ndo_get_vf_config = be_get_vf_config,
bdce2ad7 4781 .ndo_set_vf_link_state = be_set_vf_link_state,
e7bcbd7b 4782 .ndo_set_vf_spoofchk = be_set_vf_spoofchk,
66268739
IV
4783#ifdef CONFIG_NET_POLL_CONTROLLER
4784 .ndo_poll_controller = be_netpoll,
4785#endif
a77dcb8c
AK
4786 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4787 .ndo_bridge_getlink = be_ndo_bridge_getlink,
6384a4d0 4788#ifdef CONFIG_NET_RX_BUSY_POLL
c9c47142 4789 .ndo_busy_poll = be_busy_poll,
6384a4d0 4790#endif
c5abe7c0 4791#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
4792 .ndo_add_vxlan_port = be_add_vxlan_port,
4793 .ndo_del_vxlan_port = be_del_vxlan_port,
5f35227e 4794 .ndo_features_check = be_features_check,
c5abe7c0 4795#endif
a155a5db 4796 .ndo_get_phys_port_id = be_get_phys_port_id,
6b7c5b94
SP
4797};
4798
4799static void be_netdev_init(struct net_device *netdev)
4800{
4801 struct be_adapter *adapter = netdev_priv(netdev);
4802
6332c8d3 4803 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68 4804 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
f646968f 4805 NETIF_F_HW_VLAN_CTAG_TX;
8b8ddc68
MM
4806 if (be_multi_rxq(adapter))
4807 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
4808
4809 netdev->features |= netdev->hw_features |
f646968f 4810 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4b972914 4811
eb8a50d9 4812 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 4813 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 4814
fbc13f01
AK
4815 netdev->priv_flags |= IFF_UNICAST_FLT;
4816
6b7c5b94
SP
4817 netdev->flags |= IFF_MULTICAST;
4818
b7e5887e 4819 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 4820
10ef9ab4 4821 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94 4822
7ad24ea4 4823 netdev->ethtool_ops = &be_ethtool_ops;
6b7c5b94
SP
4824}
4825
87ac1a52
KA
4826static void be_cleanup(struct be_adapter *adapter)
4827{
4828 struct net_device *netdev = adapter->netdev;
4829
4830 rtnl_lock();
4831 netif_device_detach(netdev);
4832 if (netif_running(netdev))
4833 be_close(netdev);
4834 rtnl_unlock();
4835
4836 be_clear(adapter);
4837}
4838
484d76fd 4839static int be_resume(struct be_adapter *adapter)
78fad34e 4840{
d0e1b319 4841 struct net_device *netdev = adapter->netdev;
78fad34e
SP
4842 int status;
4843
78fad34e
SP
4844 status = be_setup(adapter);
4845 if (status)
484d76fd 4846 return status;
78fad34e 4847
d0e1b319
KA
4848 if (netif_running(netdev)) {
4849 status = be_open(netdev);
78fad34e 4850 if (status)
484d76fd 4851 return status;
78fad34e
SP
4852 }
4853
d0e1b319
KA
4854 netif_device_attach(netdev);
4855
484d76fd
KA
4856 return 0;
4857}
4858
4859static int be_err_recover(struct be_adapter *adapter)
4860{
4861 struct device *dev = &adapter->pdev->dev;
4862 int status;
4863
4864 status = be_resume(adapter);
4865 if (status)
4866 goto err;
4867
9fa465c0 4868 dev_info(dev, "Adapter recovery successful\n");
78fad34e
SP
4869 return 0;
4870err:
9fa465c0 4871 if (be_physfn(adapter))
78fad34e 4872 dev_err(dev, "Adapter recovery failed\n");
9fa465c0
SP
4873 else
4874 dev_err(dev, "Re-trying adapter recovery\n");
78fad34e
SP
4875
4876 return status;
4877}
4878
eb7dd46c 4879static void be_err_detection_task(struct work_struct *work)
78fad34e
SP
4880{
4881 struct be_adapter *adapter =
eb7dd46c
SP
4882 container_of(work, struct be_adapter,
4883 be_err_detection_work.work);
78fad34e
SP
4884 int status = 0;
4885
4886 be_detect_error(adapter);
4887
954f6825 4888 if (be_check_error(adapter, BE_ERROR_HW)) {
87ac1a52 4889 be_cleanup(adapter);
d0e1b319
KA
4890
4891 /* As of now error recovery support is in Lancer only */
4892 if (lancer_chip(adapter))
4893 status = be_err_recover(adapter);
78fad34e
SP
4894 }
4895
9fa465c0
SP
4896 /* Always attempt recovery on VFs */
4897 if (!status || be_virtfn(adapter))
eb7dd46c 4898 be_schedule_err_detection(adapter);
78fad34e
SP
4899}
4900
4901static void be_log_sfp_info(struct be_adapter *adapter)
4902{
4903 int status;
4904
4905 status = be_cmd_query_sfp_info(adapter);
4906 if (!status) {
4907 dev_err(&adapter->pdev->dev,
4908 "Unqualified SFP+ detected on %c from %s part no: %s",
4909 adapter->port_name, adapter->phy.vendor_name,
4910 adapter->phy.vendor_pn);
4911 }
4912 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
4913}
4914
4915static void be_worker(struct work_struct *work)
4916{
4917 struct be_adapter *adapter =
4918 container_of(work, struct be_adapter, work.work);
4919 struct be_rx_obj *rxo;
4920 int i;
4921
4922 /* when interrupts are not yet enabled, just reap any pending
4923 * mcc completions
4924 */
4925 if (!netif_running(adapter->netdev)) {
4926 local_bh_disable();
4927 be_process_mcc(adapter);
4928 local_bh_enable();
4929 goto reschedule;
4930 }
4931
4932 if (!adapter->stats_cmd_sent) {
4933 if (lancer_chip(adapter))
4934 lancer_cmd_get_pport_stats(adapter,
4935 &adapter->stats_cmd);
4936 else
4937 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4938 }
4939
4940 if (be_physfn(adapter) &&
4941 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4942 be_cmd_get_die_temperature(adapter);
4943
4944 for_all_rx_queues(adapter, rxo, i) {
4945 /* Replenish RX-queues starved due to memory
4946 * allocation failures.
4947 */
4948 if (rxo->rx_post_starved)
4949 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
4950 }
4951
20947770
PR
4952 /* EQ-delay update for Skyhawk is done while notifying EQ */
4953 if (!skyhawk_chip(adapter))
4954 be_eqd_update(adapter, false);
78fad34e
SP
4955
4956 if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
4957 be_log_sfp_info(adapter);
4958
4959reschedule:
4960 adapter->work_counter++;
4961 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4962}
4963
6b7c5b94
SP
4964static void be_unmap_pci_bars(struct be_adapter *adapter)
4965{
c5b3ad4c
SP
4966 if (adapter->csr)
4967 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 4968 if (adapter->db)
ce66f781 4969 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
4970}
4971
ce66f781
SP
4972static int db_bar(struct be_adapter *adapter)
4973{
18c57c74 4974 if (lancer_chip(adapter) || be_virtfn(adapter))
ce66f781
SP
4975 return 0;
4976 else
4977 return 4;
4978}
4979
4980static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 4981{
dbf0f2a7 4982 if (skyhawk_chip(adapter)) {
ce66f781
SP
4983 adapter->roce_db.size = 4096;
4984 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4985 db_bar(adapter));
4986 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4987 db_bar(adapter));
4988 }
045508a8 4989 return 0;
6b7c5b94
SP
4990}
4991
4992static int be_map_pci_bars(struct be_adapter *adapter)
4993{
0fa74a4b 4994 struct pci_dev *pdev = adapter->pdev;
6b7c5b94 4995 u8 __iomem *addr;
78fad34e
SP
4996 u32 sli_intf;
4997
4998 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4999 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
5000 SLI_INTF_FAMILY_SHIFT;
5001 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
fe6d2a38 5002
c5b3ad4c 5003 if (BEx_chip(adapter) && be_physfn(adapter)) {
0fa74a4b 5004 adapter->csr = pci_iomap(pdev, 2, 0);
ddf1169f 5005 if (!adapter->csr)
c5b3ad4c
SP
5006 return -ENOMEM;
5007 }
5008
25848c90 5009 addr = pci_iomap(pdev, db_bar(adapter), 0);
ddf1169f 5010 if (!addr)
6b7c5b94 5011 goto pci_map_err;
ba343c77 5012 adapter->db = addr;
ce66f781 5013
25848c90
SR
5014 if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
5015 if (be_physfn(adapter)) {
5016 /* PCICFG is the 2nd BAR in BE2 */
5017 addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
5018 if (!addr)
5019 goto pci_map_err;
5020 adapter->pcicfg = addr;
5021 } else {
5022 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
5023 }
5024 }
5025
ce66f781 5026 be_roce_map_pci_bars(adapter);
6b7c5b94 5027 return 0;
ce66f781 5028
6b7c5b94 5029pci_map_err:
25848c90 5030 dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
6b7c5b94
SP
5031 be_unmap_pci_bars(adapter);
5032 return -ENOMEM;
5033}
5034
78fad34e 5035static void be_drv_cleanup(struct be_adapter *adapter)
6b7c5b94 5036{
8788fdc2 5037 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
78fad34e 5038 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
5039
5040 if (mem->va)
78fad34e 5041 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
e7b909a6 5042
5b8821b7 5043 mem = &adapter->rx_filter;
e7b909a6 5044 if (mem->va)
78fad34e
SP
5045 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5046
5047 mem = &adapter->stats_cmd;
5048 if (mem->va)
5049 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
6b7c5b94
SP
5050}
5051
/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

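	/* The FW mailbox is used at a 16-byte-aligned address: the extra
	 * 16 bytes allocated above let PTR_ALIGN() round both the VA and
	 * the DMA address up to the next 16-byte boundary.
	 */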
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}

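/* Tear down in roughly the reverse order of be_probe(): detach RoCE,
 * disable interrupts, stop the error-detection worker, unregister the
 * netdev, then release FW, BARs, driver memory and PCI resources.
 */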
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

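/* hwmon sysfs read callback for the adapter's on-die temperature
 * sensor (exposed as temp1_input).
 */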
static ssize_t be_hwmon_show_temp(struct device *dev,
				  struct device_attribute *dev_attr,
				  char *buf)
{
	struct be_adapter *adapter = dev_get_drvdata(dev);

	/* Unit: millidegree Celsius */
	if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
		return -EIO;
	else
		return sprintf(buf, "%u\n",
			       adapter->hwmon_info.be_on_die_temp * 1000);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);

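/* Human-readable name for the function's multi-channel mode, used only
 * in the probe banner; an empty string means no recognized mode.
 */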
static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

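/* PCI probe: enable the device, map BARs, allocate the netdev and
 * driver state, bring the adapter up and register it with the stack.
 */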
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter);

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

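/* PM suspend: arm wake-on-LAN if enabled, quiesce the adapter and put
 * the device into the requested low-power state.
 */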
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

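/* EEH (PCI error recovery) entry points: detect the error and quiesce,
 * reset the slot and wait for FW readiness, then resume traffic.
 */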
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_schedule_err_detection(adapter);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

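/* Entry point for "echo N > /sys/bus/pci/devices/.../sriov_numvfs":
 * redistributes PF-pool resources across the requested number of VFs
 * (Skyhawk only) before enabling them.
 */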
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	u16 num_vf_qs;
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are equally distributed across the max-number of VFs.
	 * The user may request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so
	 * that each VF will have access to a larger share of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, num_vf_qs);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};

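/* Module init: validate the rx_frag_size parameter (must be 2048, 4096
 * or 8192; anything else falls back to 2048) and register the driver.
 */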
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
		       " : Module param rx_frag_size must be 2048/4096/8192."
		       " Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 0) {
		pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
		pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);