/* drivers/net/ethernet/emulex/benet/be_main.c */
/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
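/* Note: MODULE_DEVICE_TABLE() exports be_dev_ids into the module's alias
 * information, so udev/modprobe can autoload this driver when a PCI
 * device matching one of the vendor/device ID pairs above is enumerated.
 */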
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

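/* Interrupts are normally toggled with a FW mailbox command; the
 * config-space helper above is kept as a fallback for when that command
 * fails, and is skipped entirely on Lancer, where this register does
 * not control interrupts.
 */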
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

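/* Doorbell helpers: each composes a 32-bit doorbell value and writes it
 * to the mapped doorbell BAR. The wmb() orders the queue entries
 * written to host memory before the MMIO doorbell write, so the device
 * never sees a doorbell for descriptors it could still read stale.
 */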
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

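/* Some BEx HW counters are only 16 bits wide and wrap at 65535. The
 * helper below folds each new 16-bit reading into a 32-bit software
 * accumulator: if the new reading is smaller than the low half of the
 * accumulator, the HW counter must have wrapped, so an extra 65536 is
 * added. E.g. acc = 0x0001FFF0 and val = 0x0005 implies one wrap since
 * the last read, yielding acc = 0x00020005.
 */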
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

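/* be_get_stats64() below aggregates the per-queue SW counters. Each
 * read is done under a u64_stats fetch/retry loop so that a 64-bit
 * counter being updated concurrently (e.g. on a 32-bit CPU) is never
 * observed half-written.
 */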
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr, 1);

	if (skb_is_gso(skb)) {
		SET_TX_WRB_HDR_BITS(lso, hdr, 1);
		SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
	}

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
	SET_TX_WRB_HDR_BITS(len, hdr, len);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0
	 * When this hack is not needed, the evt bit is set while ringing DB
	 */
	if (skip_hw_vlan)
		SET_TX_WRB_HDR_BITS(event, hdr, 1);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

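/* be_xmit_enqueue() below maps the skb (linear part plus page frags)
 * for DMA and posts one WRB per mapped fragment, preceded by a header
 * WRB. On a mapping failure it rewinds the queue head and unmaps
 * everything posted so far, leaving the TX queue exactly as it found
 * it.
 */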
/* Returns the number of WRBs used up by the skb */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb, bool skip_hw_vlan)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	struct be_eth_wrb *wrb;
	dma_addr_t busaddr;
	u16 head = txq->head;

	hdr = queue_head_node(txq);
	wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	queue_head_inc(txq);

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(wrb_cnt, &txq->used);
	txo->last_req_wrb_cnt = wrb_cnt;
	txo->pend_wrb_cnt += wrb_cnt;

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	/* Bring the queue back to the state it was in before this
	 * routine was invoked.
	 */
	txq->head = head;
	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		adapter->drv_stats.dma_map_errors++;
		queue_head_inc(txq);
	}
	txq->head = head;
	return 0;
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

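/* be_lancer_xmit_workarounds() below works around several HW errata on
 * BEx/Lancer: bad tot_len/checksum on short padded packets, spurious
 * checksum calculation on VLAN packets, and a potential TX stall when
 * HW inserts a VLAN tag into certain IPv6 packets with extension
 * headers.
 */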
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
		if (!skb)
			return NULL;
	}

	return skb;
}

static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}

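/* .ndo_start_xmit handler. Doorbell writes are deferred while the stack
 * indicates more packets are coming (skb->xmit_more); the queued WRBs
 * are flushed in one doorbell ring when the batch ends or the queue
 * must be stopped.
 */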
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	bool skip_hw_vlan = false, flush = !skb->xmit_more;
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_queue_info *txq = &txo->q;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (unlikely(!skb))
		goto drop;

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_mc_list(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
	else
		be_set_mc_promisc(adapter);
}

static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
	int i;

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;
}

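/* .ndo_set_rx_mode handler: resolves the requested filtering mode in
 * order of precedence - full promiscuous, multicast promiscuous (when
 * the configured groups exceed what the interface supports), then the
 * exact unicast/multicast lists.
 */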
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan,
			vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;

	return 0;
}

static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}

static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}

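/* Adaptive interrupt coalescing: be_eqd_update() samples the per-queue
 * RX/TX packet counters, derives a packets-per-second rate over the
 * elapsed jiffies, and maps it to an event-queue delay (EQD) clamped to
 * the aic min/max. Only EQs whose computed delay changed are sent to
 * the FW, batched in a single be_cmd_modify_eqd() call.
 */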
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}

static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
}

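/* RX buffers are carved out of larger pages (adapter->big_page_size),
 * so several rx_frag_size fragments can share one DMA mapping. The
 * helper below therefore only dma_unmap_page()s when it consumes the
 * last fragment of a page, and otherwise just syncs the fragment for
 * CPU access.
 */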
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}

 1629/* Throw away the data in the Rx completion */
10ef9ab4
SP
1630static void be_rx_compl_discard(struct be_rx_obj *rxo,
1631 struct be_rx_compl_info *rxcp)
6b7c5b94 1632{
6b7c5b94 1633 struct be_rx_page_info *page_info;
2e588f84 1634 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1635
e80d9da6 1636 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 1637 page_info = get_rx_page_info(rxo);
e80d9da6
PR
1638 put_page(page_info->page);
1639 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
1640 }
1641}
1642
1643/*
1644 * skb_fill_rx_data forms a complete skb for an ether frame
1645 * indicated by rxcp.
1646 */
10ef9ab4
SP
1647static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1648 struct be_rx_compl_info *rxcp)
6b7c5b94 1649{
6b7c5b94 1650 struct be_rx_page_info *page_info;
2e588f84
SP
1651 u16 i, j;
1652 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1653 u8 *start;
6b7c5b94 1654
0b0ef1d0 1655 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1656 start = page_address(page_info->page) + page_info->page_offset;
1657 prefetch(start);
1658
1659 /* Copy data in the first descriptor of this completion */
2e588f84 1660 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 1661
6b7c5b94
SP
1662 skb->len = curr_frag_len;
1663 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 1664 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
1665 /* Complete packet has now been moved to data */
1666 put_page(page_info->page);
1667 skb->data_len = 0;
1668 skb->tail += curr_frag_len;
1669 } else {
ac1ae5f3
ED
1670 hdr_len = ETH_HLEN;
1671 memcpy(skb->data, start, hdr_len);
6b7c5b94 1672 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1673 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1674 skb_shinfo(skb)->frags[0].page_offset =
1675 page_info->page_offset + hdr_len;
748b539a
SP
1676 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1677 curr_frag_len - hdr_len);
6b7c5b94 1678 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1679 skb->truesize += rx_frag_size;
6b7c5b94
SP
1680 skb->tail += hdr_len;
1681 }
205859a2 1682 page_info->page = NULL;
6b7c5b94 1683
2e588f84
SP
1684 if (rxcp->pkt_size <= rx_frag_size) {
1685 BUG_ON(rxcp->num_rcvd != 1);
1686 return;
6b7c5b94
SP
1687 }
1688
1689 /* More frags present for this completion */
2e588f84
SP
1690 remaining = rxcp->pkt_size - curr_frag_len;
1691 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1692 page_info = get_rx_page_info(rxo);
2e588f84 1693 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1694
bd46cb6c
AK
1695 /* Coalesce all frags from the same physical page in one slot */
1696 if (page_info->page_offset == 0) {
1697 /* Fresh page */
1698 j++;
b061b39e 1699 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1700 skb_shinfo(skb)->frags[j].page_offset =
1701 page_info->page_offset;
9e903e08 1702 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1703 skb_shinfo(skb)->nr_frags++;
1704 } else {
1705 put_page(page_info->page);
1706 }
1707
9e903e08 1708 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1709 skb->len += curr_frag_len;
1710 skb->data_len += curr_frag_len;
bdb28a97 1711 skb->truesize += rx_frag_size;
2e588f84 1712 remaining -= curr_frag_len;
205859a2 1713 page_info->page = NULL;
6b7c5b94 1714 }
bd46cb6c 1715 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1716}
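/* Illustrative frag layout for the coalescing above (assumed values:
 * rx_frag_size = 2048, two frags per 4096-byte page): a 3000-byte
 * completion spans two frags of the same page. Frag 0 donates ETH_HLEN
 * bytes to skb->data and fills frags[0]; frag 1 has a non-zero
 * page_offset (2048), so its bytes are merged into frags[0] with
 * skb_frag_size_add() and its extra page reference is dropped with
 * put_page(). Only a fresh page (page_offset == 0) opens a new frags[]
 * slot, which keeps nr_frags bounded for large completions.
 */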
1717
5be93b9a 1718/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 1719static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 1720 struct be_rx_compl_info *rxcp)
6b7c5b94 1721{
10ef9ab4 1722 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1723 struct net_device *netdev = adapter->netdev;
6b7c5b94 1724 struct sk_buff *skb;
89420424 1725
bb349bb4 1726 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1727 if (unlikely(!skb)) {
ac124ff9 1728 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1729 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1730 return;
1731 }
1732
10ef9ab4 1733 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1734
6332c8d3 1735 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1736 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1737 else
1738 skb_checksum_none_assert(skb);
6b7c5b94 1739
6332c8d3 1740 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1741 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1742 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 1743 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1744
b6c0e89d 1745 skb->csum_level = rxcp->tunneled;
6384a4d0 1746 skb_mark_napi_id(skb, napi);
6b7c5b94 1747
343e43c0 1748 if (rxcp->vlanf)
86a9bad3 1749 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
1750
1751 netif_receive_skb(skb);
6b7c5b94
SP
1752}
1753
5be93b9a 1754/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1755static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1756 struct napi_struct *napi,
1757 struct be_rx_compl_info *rxcp)
6b7c5b94 1758{
10ef9ab4 1759 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1760 struct be_rx_page_info *page_info;
5be93b9a 1761 struct sk_buff *skb = NULL;
2e588f84
SP
1762 u16 remaining, curr_frag_len;
1763 u16 i, j;
3968fa1e 1764
10ef9ab4 1765 skb = napi_get_frags(napi);
5be93b9a 1766 if (!skb) {
10ef9ab4 1767 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1768 return;
1769 }
1770
2e588f84
SP
1771 remaining = rxcp->pkt_size;
1772 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1773 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1774
1775 curr_frag_len = min(remaining, rx_frag_size);
1776
bd46cb6c
AK
1777 /* Coalesce all frags from the same physical page in one slot */
1778 if (i == 0 || page_info->page_offset == 0) {
1779 /* First frag or Fresh page */
1780 j++;
b061b39e 1781 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1782 skb_shinfo(skb)->frags[j].page_offset =
1783 page_info->page_offset;
9e903e08 1784 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1785 } else {
1786 put_page(page_info->page);
1787 }
9e903e08 1788 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1789 skb->truesize += rx_frag_size;
bd46cb6c 1790 remaining -= curr_frag_len;
6b7c5b94
SP
1791 memset(page_info, 0, sizeof(*page_info));
1792 }
bd46cb6c 1793 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1794
5be93b9a 1795 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1796 skb->len = rxcp->pkt_size;
1797 skb->data_len = rxcp->pkt_size;
5be93b9a 1798 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1799 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 1800 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 1801 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1802
b6c0e89d 1803 skb->csum_level = rxcp->tunneled;
6384a4d0 1804 skb_mark_napi_id(skb, napi);
5be93b9a 1805
343e43c0 1806 if (rxcp->vlanf)
86a9bad3 1807 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1808
10ef9ab4 1809 napi_gro_frags(napi);
2e588f84
SP
1810}
1811
10ef9ab4
SP
1812static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1813 struct be_rx_compl_info *rxcp)
2e588f84 1814{
c3c18bc1
SP
1815 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
1816 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
1817 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
1818 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
1819 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
1820 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
1821 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
1822 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
1823 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
1824 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
1825 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 1826 if (rxcp->vlanf) {
c3c18bc1
SP
1827 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
1828 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 1829 }
c3c18bc1 1830 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 1831 rxcp->tunneled =
c3c18bc1 1832 GET_RX_COMPL_V1_BITS(tunneled, compl);
2e588f84
SP
1833}
1834
10ef9ab4
SP
1835static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1836 struct be_rx_compl_info *rxcp)
2e588f84 1837{
c3c18bc1
SP
1838 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
1839 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
1840 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
1841 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
1842 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
1843 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
1844 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
1845 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
1846 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
1847 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
1848 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 1849 if (rxcp->vlanf) {
c3c18bc1
SP
1850 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
1851 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 1852 }
c3c18bc1
SP
1853 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
1854 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2e588f84
SP
1855}
1856
1857static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1858{
1859 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1860 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1861 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1862
2e588f84
SP
 1863	/* For checking the valid bit it is OK to use either definition as the
1864 * valid bit is at the same position in both v0 and v1 Rx compl */
1865 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1866 return NULL;
6b7c5b94 1867
2e588f84
SP
1868 rmb();
1869 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1870
2e588f84 1871 if (adapter->be3_native)
10ef9ab4 1872 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1873 else
10ef9ab4 1874 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1875
e38b1706
SK
1876 if (rxcp->ip_frag)
1877 rxcp->l4_csum = 0;
1878
15d72184 1879 if (rxcp->vlanf) {
f93f160b
VV
1880 /* In QNQ modes, if qnq bit is not set, then the packet was
1881 * tagged only with the transparent outer vlan-tag and must
1882 * not be treated as a vlan packet by host
1883 */
1884 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 1885 rxcp->vlanf = 0;
6b7c5b94 1886
15d72184 1887 if (!lancer_chip(adapter))
3c709f8f 1888 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1889
939cf306 1890 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 1891 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
1892 rxcp->vlanf = 0;
1893 }
2e588f84
SP
1894
 1895	/* As the compl has been parsed, reset it; we won't touch it again */
1896 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1897
3abcdeda 1898 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1899 return rxcp;
1900}
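/* Typical consumer pattern (sketch of how be_process_rx() below uses
 * this helper):
 *
 *	while ((rxcp = be_rx_compl_get(rxo)) != NULL)
 *		...process or discard rxcp...
 *
 * The valid bit doubles as the producer/consumer handshake: HW sets it
 * when it DMAs a new entry and the driver clears it after parsing, so
 * a wrapped-around CQ position is never mistaken for a fresh compl.
 * The rmb() orders the valid-bit test before reads of the entry body.
 */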
1901
1829b086 1902static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1903{
6b7c5b94 1904 u32 order = get_order(size);
1829b086 1905
6b7c5b94 1906 if (order > 0)
1829b086
ED
1907 gfp |= __GFP_COMP;
1908 return alloc_pages(gfp, order);
6b7c5b94
SP
1909}
1910
1911/*
1912 * Allocate a page, split it to fragments of size rx_frag_size and post as
1913 * receive buffers to BE
1914 */
c30d7266 1915static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
6b7c5b94 1916{
3abcdeda 1917 struct be_adapter *adapter = rxo->adapter;
26d92f92 1918 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1919 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1920 struct page *pagep = NULL;
ba42fad0 1921 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
1922 struct be_eth_rx_d *rxd;
1923 u64 page_dmaaddr = 0, frag_dmaaddr;
c30d7266 1924 u32 posted, page_offset = 0, notify = 0;
6b7c5b94 1925
3abcdeda 1926 page_info = &rxo->page_info_tbl[rxq->head];
c30d7266 1927 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
6b7c5b94 1928 if (!pagep) {
1829b086 1929 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1930 if (unlikely(!pagep)) {
ac124ff9 1931 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1932 break;
1933 }
ba42fad0
IV
1934 page_dmaaddr = dma_map_page(dev, pagep, 0,
1935 adapter->big_page_size,
2b7bcebf 1936 DMA_FROM_DEVICE);
ba42fad0
IV
1937 if (dma_mapping_error(dev, page_dmaaddr)) {
1938 put_page(pagep);
1939 pagep = NULL;
d3de1540 1940 adapter->drv_stats.dma_map_errors++;
ba42fad0
IV
1941 break;
1942 }
e50287be 1943 page_offset = 0;
6b7c5b94
SP
1944 } else {
1945 get_page(pagep);
e50287be 1946 page_offset += rx_frag_size;
6b7c5b94 1947 }
e50287be 1948 page_info->page_offset = page_offset;
6b7c5b94 1949 page_info->page = pagep;
6b7c5b94
SP
1950
1951 rxd = queue_head_node(rxq);
e50287be 1952 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
1953 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1954 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1955
1956 /* Any space left in the current big page for another frag? */
1957 if ((page_offset + rx_frag_size + rx_frag_size) >
1958 adapter->big_page_size) {
1959 pagep = NULL;
e50287be
SP
1960 page_info->last_frag = true;
1961 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1962 } else {
1963 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 1964 }
26d92f92
SP
1965
1966 prev_page_info = page_info;
1967 queue_head_inc(rxq);
10ef9ab4 1968 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 1969 }
e50287be
SP
1970
1971 /* Mark the last frag of a page when we break out of the above loop
1972 * with no more slots available in the RXQ
1973 */
1974 if (pagep) {
1975 prev_page_info->last_frag = true;
1976 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
1977 }
6b7c5b94
SP
1978
1979 if (posted) {
6b7c5b94 1980 atomic_add(posted, &rxq->used);
6384a4d0
SP
1981 if (rxo->rx_post_starved)
1982 rxo->rx_post_starved = false;
c30d7266
AK
1983 do {
1984 notify = min(256u, posted);
1985 be_rxq_notify(adapter, rxq->id, notify);
1986 posted -= notify;
1987 } while (posted);
ea1dae11
SP
1988 } else if (atomic_read(&rxq->used) == 0) {
1989 /* Let be_worker replenish when memory is available */
3abcdeda 1990 rxo->rx_post_starved = true;
6b7c5b94 1991 }
6b7c5b94
SP
1992}
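/* Illustrative sizing for the posting loop above (assumed 4K PAGE_SIZE
 * and the default rx_frag_size of 2048): big_page_size =
 * (1 << get_order(2048)) * 4096 = 4096, i.e. two frags per page. The
 * "page_offset + rx_frag_size + rx_frag_size > big_page_size" test
 * marks the last frag of a page: for the frag at offset 2048,
 * 2048 + 2048 + 2048 > 4096, so last_frag is set and the full-page DMA
 * address is saved for the eventual dma_unmap_page(). The notify loop
 * then rings the RXQ doorbell in chunks of at most 256 buffers.
 */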
1993
5fb379ee 1994static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1995{
6b7c5b94
SP
1996 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1997
1998 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1999 return NULL;
2000
f3eb62d2 2001 rmb();
6b7c5b94
SP
2002 be_dws_le_to_cpu(txcp, sizeof(*txcp));
2003
2004 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
2005
2006 queue_tail_inc(tx_cq);
2007 return txcp;
2008}
2009
3c8def97 2010static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 2011 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 2012{
5f07b3c5 2013 struct sk_buff **sent_skbs = txo->sent_skb_list;
3c8def97 2014 struct be_queue_info *txq = &txo->q;
5f07b3c5
SP
2015 u16 frag_index, num_wrbs = 0;
2016 struct sk_buff *skb = NULL;
2017 bool unmap_skb_hdr = false;
a73b796e 2018 struct be_eth_wrb *wrb;
6b7c5b94 2019
ec43b1a6 2020 do {
5f07b3c5
SP
2021 if (sent_skbs[txq->tail]) {
2022 /* Free skb from prev req */
2023 if (skb)
2024 dev_consume_skb_any(skb);
2025 skb = sent_skbs[txq->tail];
2026 sent_skbs[txq->tail] = NULL;
2027 queue_tail_inc(txq); /* skip hdr wrb */
2028 num_wrbs++;
2029 unmap_skb_hdr = true;
2030 }
a73b796e 2031 wrb = queue_tail_node(txq);
5f07b3c5 2032 frag_index = txq->tail;
2b7bcebf 2033 unmap_tx_frag(&adapter->pdev->dev, wrb,
5f07b3c5 2034 (unmap_skb_hdr && skb_headlen(skb)));
ec43b1a6 2035 unmap_skb_hdr = false;
6b7c5b94 2036 queue_tail_inc(txq);
5f07b3c5
SP
2037 num_wrbs++;
2038 } while (frag_index != last_index);
2039 dev_consume_skb_any(skb);
6b7c5b94 2040
4d586b82 2041 return num_wrbs;
6b7c5b94
SP
2042}
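/* Layout assumed by the walk above: each TX request occupies one header
 * WRB followed by one WRB per fragment, and only the header slot has a
 * non-NULL sent_skbs[] entry. Walking tail..last_index therefore frees
 * exactly one skb per request while unmapping every data WRB; the
 * returned num_wrbs tells the caller how many entries to subtract from
 * txq->used.
 */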
2043
10ef9ab4
SP
2044/* Return the number of events in the event queue */
2045static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 2046{
10ef9ab4
SP
2047 struct be_eq_entry *eqe;
2048 int num = 0;
859b1e4e 2049
10ef9ab4
SP
2050 do {
2051 eqe = queue_tail_node(&eqo->q);
2052 if (eqe->evt == 0)
2053 break;
859b1e4e 2054
10ef9ab4
SP
2055 rmb();
2056 eqe->evt = 0;
2057 num++;
2058 queue_tail_inc(&eqo->q);
2059 } while (true);
2060
2061 return num;
859b1e4e
SP
2062}
2063
10ef9ab4
SP
 2064/* Leaves the EQ in disarmed state */
2065static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2066{
10ef9ab4 2067 int num = events_get(eqo);
859b1e4e 2068
10ef9ab4 2069 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
2070}
2071
10ef9ab4 2072static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
2073{
2074 struct be_rx_page_info *page_info;
3abcdeda
SP
2075 struct be_queue_info *rxq = &rxo->q;
2076 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2077 struct be_rx_compl_info *rxcp;
d23e946c
SP
2078 struct be_adapter *adapter = rxo->adapter;
2079 int flush_wait = 0;
6b7c5b94 2080
d23e946c
SP
2081 /* Consume pending rx completions.
2082 * Wait for the flush completion (identified by zero num_rcvd)
2083 * to arrive. Notify CQ even when there are no more CQ entries
2084 * for HW to flush partially coalesced CQ entries.
2085 * In Lancer, there is no need to wait for flush compl.
2086 */
2087 for (;;) {
2088 rxcp = be_rx_compl_get(rxo);
ddf1169f 2089 if (!rxcp) {
d23e946c
SP
2090 if (lancer_chip(adapter))
2091 break;
2092
2093 if (flush_wait++ > 10 || be_hw_error(adapter)) {
2094 dev_warn(&adapter->pdev->dev,
2095 "did not receive flush compl\n");
2096 break;
2097 }
2098 be_cq_notify(adapter, rx_cq->id, true, 0);
2099 mdelay(1);
2100 } else {
2101 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2102 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2103 if (rxcp->num_rcvd == 0)
2104 break;
2105 }
6b7c5b94
SP
2106 }
2107
d23e946c
SP
2108 /* After cleanup, leave the CQ in unarmed state */
2109 be_cq_notify(adapter, rx_cq->id, false, 0);
2110
2111 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
2112 while (atomic_read(&rxq->used) > 0) {
2113 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2114 put_page(page_info->page);
2115 memset(page_info, 0, sizeof(*page_info));
2116 }
2117 BUG_ON(atomic_read(&rxq->used));
5f820b6c
KA
2118 rxq->tail = 0;
2119 rxq->head = 0;
6b7c5b94
SP
2120}
2121
0ae57bb3 2122static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2123{
5f07b3c5
SP
2124 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2125 struct device *dev = &adapter->pdev->dev;
0ae57bb3
SP
2126 struct be_tx_obj *txo;
2127 struct be_queue_info *txq;
a8e9179a 2128 struct be_eth_tx_compl *txcp;
0ae57bb3 2129 int i, pending_txqs;
a8e9179a 2130
1a3d0717 2131 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2132 do {
0ae57bb3
SP
2133 pending_txqs = adapter->num_tx_qs;
2134
2135 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2136 cmpl = 0;
2137 num_wrbs = 0;
0ae57bb3
SP
2138 txq = &txo->q;
2139 while ((txcp = be_tx_compl_get(&txo->cq))) {
c3c18bc1 2140 end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
0ae57bb3
SP
2141 num_wrbs += be_tx_compl_process(adapter, txo,
2142 end_idx);
2143 cmpl++;
2144 }
2145 if (cmpl) {
2146 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2147 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2148 timeo = 0;
0ae57bb3 2149 }
5f07b3c5 2150 if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
0ae57bb3 2151 pending_txqs--;
a8e9179a
SP
2152 }
2153
1a3d0717 2154 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
a8e9179a
SP
2155 break;
2156
2157 mdelay(1);
2158 } while (true);
2159
5f07b3c5 2160 /* Free enqueued TX that was never notified to HW */
0ae57bb3
SP
2161 for_all_tx_queues(adapter, txo, i) {
2162 txq = &txo->q;
0ae57bb3 2163
5f07b3c5
SP
2164 if (atomic_read(&txq->used)) {
2165 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2166 i, atomic_read(&txq->used));
2167 notified_idx = txq->tail;
0ae57bb3 2168 end_idx = txq->tail;
5f07b3c5
SP
2169 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2170 txq->len);
2171 /* Use the tx-compl process logic to handle requests
2172 * that were not sent to the HW.
2173 */
0ae57bb3
SP
2174 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2175 atomic_sub(num_wrbs, &txq->used);
5f07b3c5
SP
2176 BUG_ON(atomic_read(&txq->used));
2177 txo->pend_wrb_cnt = 0;
2178 /* Since hw was never notified of these requests,
2179 * reset TXQ indices
2180 */
2181 txq->head = notified_idx;
2182 txq->tail = notified_idx;
0ae57bb3 2183 }
b03388d6 2184 }
6b7c5b94
SP
2185}
2186
10ef9ab4
SP
2187static void be_evt_queues_destroy(struct be_adapter *adapter)
2188{
2189 struct be_eq_obj *eqo;
2190 int i;
2191
2192 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2193 if (eqo->q.created) {
2194 be_eq_clean(eqo);
10ef9ab4 2195 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2196 napi_hash_del(&eqo->napi);
68d7bdcb 2197 netif_napi_del(&eqo->napi);
19d59aa7 2198 }
10ef9ab4
SP
2199 be_queue_free(adapter, &eqo->q);
2200 }
2201}
2202
2203static int be_evt_queues_create(struct be_adapter *adapter)
2204{
2205 struct be_queue_info *eq;
2206 struct be_eq_obj *eqo;
2632bafd 2207 struct be_aic_obj *aic;
10ef9ab4
SP
2208 int i, rc;
2209
92bf14ab
SP
2210 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2211 adapter->cfg_num_qs);
10ef9ab4
SP
2212
2213 for_all_evt_queues(adapter, eqo, i) {
68d7bdcb
SP
2214 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2215 BE_NAPI_WEIGHT);
6384a4d0 2216 napi_hash_add(&eqo->napi);
2632bafd 2217 aic = &adapter->aic_obj[i];
10ef9ab4 2218 eqo->adapter = adapter;
10ef9ab4 2219 eqo->idx = i;
2632bafd
SP
2220 aic->max_eqd = BE_MAX_EQD;
2221 aic->enable = true;
10ef9ab4
SP
2222
2223 eq = &eqo->q;
2224 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2225 sizeof(struct be_eq_entry));
10ef9ab4
SP
2226 if (rc)
2227 return rc;
2228
f2f781a7 2229 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2230 if (rc)
2231 return rc;
2232 }
1cfafab9 2233 return 0;
10ef9ab4
SP
2234}
2235
5fb379ee
SP
2236static void be_mcc_queues_destroy(struct be_adapter *adapter)
2237{
2238 struct be_queue_info *q;
5fb379ee 2239
8788fdc2 2240 q = &adapter->mcc_obj.q;
5fb379ee 2241 if (q->created)
8788fdc2 2242 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2243 be_queue_free(adapter, q);
2244
8788fdc2 2245 q = &adapter->mcc_obj.cq;
5fb379ee 2246 if (q->created)
8788fdc2 2247 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2248 be_queue_free(adapter, q);
2249}
2250
2251/* Must be called only after TX qs are created as MCC shares TX EQ */
2252static int be_mcc_queues_create(struct be_adapter *adapter)
2253{
2254 struct be_queue_info *q, *cq;
5fb379ee 2255
8788fdc2 2256 cq = &adapter->mcc_obj.cq;
5fb379ee 2257 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2258 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2259 goto err;
2260
10ef9ab4
SP
2261 /* Use the default EQ for MCC completions */
2262 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2263 goto mcc_cq_free;
2264
8788fdc2 2265 q = &adapter->mcc_obj.q;
5fb379ee
SP
2266 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2267 goto mcc_cq_destroy;
2268
8788fdc2 2269 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2270 goto mcc_q_free;
2271
2272 return 0;
2273
2274mcc_q_free:
2275 be_queue_free(adapter, q);
2276mcc_cq_destroy:
8788fdc2 2277 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2278mcc_cq_free:
2279 be_queue_free(adapter, cq);
2280err:
2281 return -1;
2282}
2283
6b7c5b94
SP
2284static void be_tx_queues_destroy(struct be_adapter *adapter)
2285{
2286 struct be_queue_info *q;
3c8def97
SP
2287 struct be_tx_obj *txo;
2288 u8 i;
6b7c5b94 2289
3c8def97
SP
2290 for_all_tx_queues(adapter, txo, i) {
2291 q = &txo->q;
2292 if (q->created)
2293 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2294 be_queue_free(adapter, q);
6b7c5b94 2295
3c8def97
SP
2296 q = &txo->cq;
2297 if (q->created)
2298 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2299 be_queue_free(adapter, q);
2300 }
6b7c5b94
SP
2301}
2302
7707133c 2303static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2304{
10ef9ab4 2305 struct be_queue_info *cq, *eq;
3c8def97 2306 struct be_tx_obj *txo;
92bf14ab 2307 int status, i;
6b7c5b94 2308
92bf14ab 2309 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2310
10ef9ab4
SP
2311 for_all_tx_queues(adapter, txo, i) {
2312 cq = &txo->cq;
2313 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2314 sizeof(struct be_eth_tx_compl));
2315 if (status)
2316 return status;
3c8def97 2317
827da44c
JS
2318 u64_stats_init(&txo->stats.sync);
2319 u64_stats_init(&txo->stats.sync_compl);
2320
10ef9ab4
SP
2321 /* If num_evt_qs is less than num_tx_qs, then more than
 2322		 * one txq shares an eq
2323 */
2324 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2325 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2326 if (status)
2327 return status;
6b7c5b94 2328
10ef9ab4
SP
2329 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2330 sizeof(struct be_eth_wrb));
2331 if (status)
2332 return status;
6b7c5b94 2333
94d73aaa 2334 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2335 if (status)
2336 return status;
3c8def97 2337 }
6b7c5b94 2338
d379142b
SP
2339 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2340 adapter->num_tx_qs);
10ef9ab4 2341 return 0;
6b7c5b94
SP
2342}
2343
10ef9ab4 2344static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2345{
2346 struct be_queue_info *q;
3abcdeda
SP
2347 struct be_rx_obj *rxo;
2348 int i;
2349
2350 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2351 q = &rxo->cq;
2352 if (q->created)
2353 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2354 be_queue_free(adapter, q);
ac6a0c4a
SP
2355 }
2356}
2357
10ef9ab4 2358static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2359{
10ef9ab4 2360 struct be_queue_info *eq, *cq;
3abcdeda
SP
2361 struct be_rx_obj *rxo;
2362 int rc, i;
6b7c5b94 2363
92bf14ab
SP
2364 /* We can create as many RSS rings as there are EQs. */
2365 adapter->num_rx_qs = adapter->num_evt_qs;
2366
 2367	/* We'll use RSS only if at least 2 RSS rings are supported.
2368 * When RSS is used, we'll need a default RXQ for non-IP traffic.
10ef9ab4 2369 */
92bf14ab
SP
2370 if (adapter->num_rx_qs > 1)
2371 adapter->num_rx_qs++;
2372
6b7c5b94 2373 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2374 for_all_rx_queues(adapter, rxo, i) {
2375 rxo->adapter = adapter;
3abcdeda
SP
2376 cq = &rxo->cq;
2377 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2378 sizeof(struct be_eth_rx_compl));
3abcdeda 2379 if (rc)
10ef9ab4 2380 return rc;
3abcdeda 2381
827da44c 2382 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2383 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2384 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2385 if (rc)
10ef9ab4 2386 return rc;
3abcdeda 2387 }
6b7c5b94 2388
d379142b
SP
2389 dev_info(&adapter->pdev->dev,
2390 "created %d RSS queue(s) and 1 default RX queue\n",
2391 adapter->num_rx_qs - 1);
10ef9ab4 2392 return 0;
b628bde2
SP
2393}
2394
6b7c5b94
SP
2395static irqreturn_t be_intx(int irq, void *dev)
2396{
e49cc34f
SP
2397 struct be_eq_obj *eqo = dev;
2398 struct be_adapter *adapter = eqo->adapter;
2399 int num_evts = 0;
6b7c5b94 2400
d0b9cec3
SP
2401 /* IRQ is not expected when NAPI is scheduled as the EQ
2402 * will not be armed.
2403 * But, this can happen on Lancer INTx where it takes
 2404	 * a while to de-assert INTx or in BE2 where occasionally
2405 * an interrupt may be raised even when EQ is unarmed.
2406 * If NAPI is already scheduled, then counting & notifying
2407 * events will orphan them.
e49cc34f 2408 */
d0b9cec3 2409 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2410 num_evts = events_get(eqo);
d0b9cec3
SP
2411 __napi_schedule(&eqo->napi);
2412 if (num_evts)
2413 eqo->spurious_intr = 0;
2414 }
2415 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2416
d0b9cec3
SP
 2417	/* Return IRQ_HANDLED only for the first spurious intr
2418 * after a valid intr to stop the kernel from branding
2419 * this irq as a bad one!
e49cc34f 2420 */
d0b9cec3
SP
2421 if (num_evts || eqo->spurious_intr++ == 0)
2422 return IRQ_HANDLED;
2423 else
2424 return IRQ_NONE;
6b7c5b94
SP
2425}
2426
10ef9ab4 2427static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2428{
10ef9ab4 2429 struct be_eq_obj *eqo = dev;
6b7c5b94 2430
0b545a62
SP
2431 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2432 napi_schedule(&eqo->napi);
6b7c5b94
SP
2433 return IRQ_HANDLED;
2434}
2435
2e588f84 2436static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2437{
e38b1706 2438 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2439}
2440
10ef9ab4 2441static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2442 int budget, int polling)
6b7c5b94 2443{
3abcdeda
SP
2444 struct be_adapter *adapter = rxo->adapter;
2445 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2446 struct be_rx_compl_info *rxcp;
6b7c5b94 2447 u32 work_done;
c30d7266 2448 u32 frags_consumed = 0;
6b7c5b94
SP
2449
2450 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2451 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2452 if (!rxcp)
2453 break;
2454
12004ae9
SP
2455 /* Is it a flush compl that has no data */
2456 if (unlikely(rxcp->num_rcvd == 0))
2457 goto loop_continue;
2458
2459 /* Discard compl with partial DMA Lancer B0 */
2460 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2461 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2462 goto loop_continue;
2463 }
2464
2465 /* On BE drop pkts that arrive due to imperfect filtering in
2466 * promiscuous mode on some skews
2467 */
2468 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2469 !lancer_chip(adapter))) {
10ef9ab4 2470 be_rx_compl_discard(rxo, rxcp);
12004ae9 2471 goto loop_continue;
64642811 2472 }
009dd872 2473
6384a4d0
SP
2474 /* Don't do gro when we're busy_polling */
2475 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2476 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2477 else
6384a4d0
SP
2478 be_rx_compl_process(rxo, napi, rxcp);
2479
12004ae9 2480loop_continue:
c30d7266 2481 frags_consumed += rxcp->num_rcvd;
2e588f84 2482 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2483 }
2484
10ef9ab4
SP
2485 if (work_done) {
2486 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2487
6384a4d0
SP
2488 /* When an rx-obj gets into post_starved state, just
2489 * let be_worker do the posting.
2490 */
2491 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2492 !rxo->rx_post_starved)
c30d7266
AK
2493 be_post_rx_frags(rxo, GFP_ATOMIC,
2494 max_t(u32, MAX_RX_POST,
2495 frags_consumed));
6b7c5b94 2496 }
10ef9ab4 2497
6b7c5b94
SP
2498 return work_done;
2499}
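/* Refill policy above: frags consumed during this poll are tallied in
 * frags_consumed and, once the RXQ drops below RX_FRAGS_REFILL_WM, the
 * same amount (at least MAX_RX_POST) is re-posted with GFP_ATOMIC. A
 * queue already marked rx_post_starved is instead left for be_worker,
 * which can afford a GFP_KERNEL allocation.
 */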
2500
512bb8a2
KA
2501static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2502{
2503 switch (status) {
2504 case BE_TX_COMP_HDR_PARSE_ERR:
2505 tx_stats(txo)->tx_hdr_parse_err++;
2506 break;
2507 case BE_TX_COMP_NDMA_ERR:
2508 tx_stats(txo)->tx_dma_err++;
2509 break;
2510 case BE_TX_COMP_ACL_ERR:
2511 tx_stats(txo)->tx_spoof_check_err++;
2512 break;
2513 }
2514}
2515
2516static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2517{
2518 switch (status) {
2519 case LANCER_TX_COMP_LSO_ERR:
2520 tx_stats(txo)->tx_tso_err++;
2521 break;
2522 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2523 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2524 tx_stats(txo)->tx_spoof_check_err++;
2525 break;
2526 case LANCER_TX_COMP_QINQ_ERR:
2527 tx_stats(txo)->tx_qinq_err++;
2528 break;
2529 case LANCER_TX_COMP_PARITY_ERR:
2530 tx_stats(txo)->tx_internal_parity_err++;
2531 break;
2532 case LANCER_TX_COMP_DMA_ERR:
2533 tx_stats(txo)->tx_dma_err++;
2534 break;
2535 }
2536}
2537
c8f64615
SP
2538static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2539 int idx)
6b7c5b94 2540{
6b7c5b94 2541 struct be_eth_tx_compl *txcp;
c8f64615 2542 int num_wrbs = 0, work_done = 0;
512bb8a2 2543 u32 compl_status;
c8f64615
SP
2544 u16 last_idx;
2545
2546 while ((txcp = be_tx_compl_get(&txo->cq))) {
2547 last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
2548 num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
2549 work_done++;
3c8def97 2550
512bb8a2
KA
2551 compl_status = GET_TX_COMPL_BITS(status, txcp);
2552 if (compl_status) {
2553 if (lancer_chip(adapter))
2554 lancer_update_tx_err(txo, compl_status);
2555 else
2556 be_update_tx_err(txo, compl_status);
2557 }
10ef9ab4 2558 }
6b7c5b94 2559
10ef9ab4
SP
2560 if (work_done) {
2561 be_cq_notify(adapter, txo->cq.id, true, work_done);
2562 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2563
10ef9ab4
SP
2564 /* As Tx wrbs have been freed up, wake up netdev queue
2565 * if it was stopped due to lack of tx wrbs. */
2566 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
748b539a 2567 atomic_read(&txo->q.used) < txo->q.len / 2) {
10ef9ab4 2568 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2569 }
10ef9ab4
SP
2570
2571 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2572 tx_stats(txo)->tx_compl += work_done;
2573 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2574 }
10ef9ab4 2575}
6b7c5b94 2576
f7062ee5
SP
2577#ifdef CONFIG_NET_RX_BUSY_POLL
2578static inline bool be_lock_napi(struct be_eq_obj *eqo)
2579{
2580 bool status = true;
2581
2582 spin_lock(&eqo->lock); /* BH is already disabled */
2583 if (eqo->state & BE_EQ_LOCKED) {
2584 WARN_ON(eqo->state & BE_EQ_NAPI);
2585 eqo->state |= BE_EQ_NAPI_YIELD;
2586 status = false;
2587 } else {
2588 eqo->state = BE_EQ_NAPI;
2589 }
2590 spin_unlock(&eqo->lock);
2591 return status;
2592}
2593
2594static inline void be_unlock_napi(struct be_eq_obj *eqo)
2595{
2596 spin_lock(&eqo->lock); /* BH is already disabled */
2597
2598 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
2599 eqo->state = BE_EQ_IDLE;
2600
2601 spin_unlock(&eqo->lock);
2602}
2603
2604static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2605{
2606 bool status = true;
2607
2608 spin_lock_bh(&eqo->lock);
2609 if (eqo->state & BE_EQ_LOCKED) {
2610 eqo->state |= BE_EQ_POLL_YIELD;
2611 status = false;
2612 } else {
2613 eqo->state |= BE_EQ_POLL;
2614 }
2615 spin_unlock_bh(&eqo->lock);
2616 return status;
2617}
2618
2619static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2620{
2621 spin_lock_bh(&eqo->lock);
2622
2623 WARN_ON(eqo->state & (BE_EQ_NAPI));
2624 eqo->state = BE_EQ_IDLE;
2625
2626 spin_unlock_bh(&eqo->lock);
2627}
2628
2629static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2630{
2631 spin_lock_init(&eqo->lock);
2632 eqo->state = BE_EQ_IDLE;
2633}
2634
2635static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2636{
2637 local_bh_disable();
2638
2639 /* It's enough to just acquire napi lock on the eqo to stop
 2640	 * be_busy_poll() from processing any queues.
2641 */
2642 while (!be_lock_napi(eqo))
2643 mdelay(1);
2644
2645 local_bh_enable();
2646}
2647
2648#else /* CONFIG_NET_RX_BUSY_POLL */
2649
2650static inline bool be_lock_napi(struct be_eq_obj *eqo)
2651{
2652 return true;
2653}
2654
2655static inline void be_unlock_napi(struct be_eq_obj *eqo)
2656{
2657}
2658
2659static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2660{
2661 return false;
2662}
2663
2664static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2665{
2666}
2667
2668static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2669{
2670}
2671
2672static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2673{
2674}
2675#endif /* CONFIG_NET_RX_BUSY_POLL */
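/* Locking scheme implemented by the helpers above (sketch): eqo->state
 * moves between BE_EQ_IDLE, BE_EQ_NAPI and BE_EQ_POLL under eqo->lock;
 * a contender that finds the EQ locked records a *_YIELD flag and backs
 * off instead of spinning, so NAPI and busy-poll never process the same
 * RX queues concurrently. The !CONFIG_NET_RX_BUSY_POLL stubs make
 * be_lock_napi() always succeed and be_lock_busy_poll() always fail,
 * compiling the busy-poll path away.
 */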
2676
68d7bdcb 2677int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2678{
2679 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2680 struct be_adapter *adapter = eqo->adapter;
0b545a62 2681 int max_work = 0, work, i, num_evts;
6384a4d0 2682 struct be_rx_obj *rxo;
a4906ea0 2683 struct be_tx_obj *txo;
f31e50a8 2684
0b545a62
SP
2685 num_evts = events_get(eqo);
2686
a4906ea0
SP
2687 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
2688 be_process_tx(adapter, txo, i);
f31e50a8 2689
6384a4d0
SP
2690 if (be_lock_napi(eqo)) {
2691 /* This loop will iterate twice for EQ0 in which
 2692		 * completions of the last RXQ (default one) are also processed.
 2693		 * For other EQs the loop iterates only once.
2694 */
2695 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2696 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2697 max_work = max(work, max_work);
2698 }
2699 be_unlock_napi(eqo);
2700 } else {
2701 max_work = budget;
10ef9ab4 2702 }
6b7c5b94 2703
10ef9ab4
SP
2704 if (is_mcc_eqo(eqo))
2705 be_process_mcc(adapter);
93c86700 2706
10ef9ab4
SP
2707 if (max_work < budget) {
2708 napi_complete(napi);
0b545a62 2709 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2710 } else {
2711 /* As we'll continue in polling mode, count and clear events */
0b545a62 2712 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2713 }
10ef9ab4 2714 return max_work;
6b7c5b94
SP
2715}
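/* NAPI contract as applied above: when the RX work fits in the budget,
 * napi_complete() is called and the EQ is re-armed via be_eq_notify()
 * so the next event raises an interrupt; otherwise the pending events
 * are only counted and cleared, without re-arming, and the kernel polls
 * again with a fresh budget.
 */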
2716
6384a4d0
SP
2717#ifdef CONFIG_NET_RX_BUSY_POLL
2718static int be_busy_poll(struct napi_struct *napi)
2719{
2720 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2721 struct be_adapter *adapter = eqo->adapter;
2722 struct be_rx_obj *rxo;
2723 int i, work = 0;
2724
2725 if (!be_lock_busy_poll(eqo))
2726 return LL_FLUSH_BUSY;
2727
2728 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2729 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2730 if (work)
2731 break;
2732 }
2733
2734 be_unlock_busy_poll(eqo);
2735 return work;
2736}
2737#endif
2738
f67ef7ba 2739void be_detect_error(struct be_adapter *adapter)
7c185276 2740{
e1cfb67a
PR
2741 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2742 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 2743 u32 i;
eb0eecc1
SK
2744 bool error_detected = false;
2745 struct device *dev = &adapter->pdev->dev;
2746 struct net_device *netdev = adapter->netdev;
7c185276 2747
d23e946c 2748 if (be_hw_error(adapter))
72f02485
SP
2749 return;
2750
e1cfb67a
PR
2751 if (lancer_chip(adapter)) {
2752 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2753 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2754 sliport_err1 = ioread32(adapter->db +
748b539a 2755 SLIPORT_ERROR1_OFFSET);
e1cfb67a 2756 sliport_err2 = ioread32(adapter->db +
748b539a 2757 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
2758 adapter->hw_error = true;
 2759			/* Do not log error messages if it's a FW reset */
2760 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2761 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2762 dev_info(dev, "Firmware update in progress\n");
2763 } else {
2764 error_detected = true;
2765 dev_err(dev, "Error detected in the card\n");
2766 dev_err(dev, "ERR: sliport status 0x%x\n",
2767 sliport_status);
2768 dev_err(dev, "ERR: sliport error1 0x%x\n",
2769 sliport_err1);
2770 dev_err(dev, "ERR: sliport error2 0x%x\n",
2771 sliport_err2);
2772 }
e1cfb67a
PR
2773 }
2774 } else {
2775 pci_read_config_dword(adapter->pdev,
748b539a 2776 PCICFG_UE_STATUS_LOW, &ue_lo);
e1cfb67a 2777 pci_read_config_dword(adapter->pdev,
748b539a 2778 PCICFG_UE_STATUS_HIGH, &ue_hi);
e1cfb67a 2779 pci_read_config_dword(adapter->pdev,
748b539a 2780 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
e1cfb67a 2781 pci_read_config_dword(adapter->pdev,
748b539a 2782 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
e1cfb67a 2783
f67ef7ba
PR
2784 ue_lo = (ue_lo & ~ue_lo_mask);
2785 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 2786
eb0eecc1
SK
2787 /* On certain platforms BE hardware can indicate spurious UEs.
2788 * Allow HW to stop working completely in case of a real UE.
2789 * Hence not setting the hw_error for UE detection.
2790 */
f67ef7ba 2791
eb0eecc1
SK
2792 if (ue_lo || ue_hi) {
2793 error_detected = true;
2794 dev_err(dev,
2795 "Unrecoverable Error detected in the adapter");
2796 dev_err(dev, "Please reboot server to recover");
2797 if (skyhawk_chip(adapter))
2798 adapter->hw_error = true;
2799 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2800 if (ue_lo & 1)
2801 dev_err(dev, "UE: %s bit set\n",
2802 ue_status_low_desc[i]);
2803 }
2804 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2805 if (ue_hi & 1)
2806 dev_err(dev, "UE: %s bit set\n",
2807 ue_status_hi_desc[i]);
2808 }
7c185276
AK
2809 }
2810 }
eb0eecc1
SK
2811 if (error_detected)
2812 netif_carrier_off(netdev);
7c185276
AK
2813}
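/* Example UE decode (hypothetical register values): ue_lo = 0x00000012
 * with ue_lo_mask = 0x00000010 leaves 0x00000002 after masking, i.e.
 * only bit 1 is set and "UE: CTX bit set" is logged from
 * ue_status_low_desc[]. Bits covered by the mask are ones FW flagged as
 * expected on this platform and are ignored.
 */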
2814
8d56ff11
SP
2815static void be_msix_disable(struct be_adapter *adapter)
2816{
ac6a0c4a 2817 if (msix_enabled(adapter)) {
8d56ff11 2818 pci_disable_msix(adapter->pdev);
ac6a0c4a 2819 adapter->num_msix_vec = 0;
68d7bdcb 2820 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2821 }
2822}
2823
c2bba3df 2824static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2825{
7dc4c064 2826 int i, num_vec;
d379142b 2827 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2828
92bf14ab
SP
2829 /* If RoCE is supported, program the max number of NIC vectors that
2830 * may be configured via set-channels, along with vectors needed for
 2831	 * RoCE. Else, just program the number we'll use initially.
2832 */
2833 if (be_roce_supported(adapter))
2834 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2835 2 * num_online_cpus());
2836 else
2837 num_vec = adapter->cfg_num_qs;
3abcdeda 2838
ac6a0c4a 2839 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2840 adapter->msix_entries[i].entry = i;
2841
7dc4c064
AG
2842 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2843 MIN_MSIX_VECTORS, num_vec);
2844 if (num_vec < 0)
2845 goto fail;
92bf14ab 2846
92bf14ab
SP
2847 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2848 adapter->num_msix_roce_vec = num_vec / 2;
2849 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2850 adapter->num_msix_roce_vec);
2851 }
2852
2853 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2854
2855 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2856 adapter->num_msix_vec);
c2bba3df 2857 return 0;
7dc4c064
AG
2858
2859fail:
2860 dev_warn(dev, "MSIx enable failed\n");
2861
2862 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2863 if (!be_physfn(adapter))
2864 return num_vec;
2865 return 0;
6b7c5b94
SP
2866}
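/* Illustrative vector budget (assumed 8 online CPUs and
 * be_max_eqs() == 16 on a RoCE-capable adapter): num_vec =
 * min(2 * 16, 2 * 8) = 16. If all 16 are granted, half are reserved for
 * RoCE and num_msix_vec becomes 8 for NIC traffic.
 * pci_enable_msix_range() may legitimately grant fewer, down to
 * MIN_MSIX_VECTORS, and the same halving then applies to whatever was
 * obtained.
 */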
2867
fe6d2a38 2868static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 2869 struct be_eq_obj *eqo)
b628bde2 2870{
f2f781a7 2871 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2872}
6b7c5b94 2873
b628bde2
SP
2874static int be_msix_register(struct be_adapter *adapter)
2875{
10ef9ab4
SP
2876 struct net_device *netdev = adapter->netdev;
2877 struct be_eq_obj *eqo;
2878 int status, i, vec;
6b7c5b94 2879
10ef9ab4
SP
2880 for_all_evt_queues(adapter, eqo, i) {
2881 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2882 vec = be_msix_vec_get(adapter, eqo);
2883 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2884 if (status)
2885 goto err_msix;
2886 }
b628bde2 2887
6b7c5b94 2888 return 0;
3abcdeda 2889err_msix:
10ef9ab4
SP
2890 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2891 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2892 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 2893 status);
ac6a0c4a 2894 be_msix_disable(adapter);
6b7c5b94
SP
2895 return status;
2896}
2897
2898static int be_irq_register(struct be_adapter *adapter)
2899{
2900 struct net_device *netdev = adapter->netdev;
2901 int status;
2902
ac6a0c4a 2903 if (msix_enabled(adapter)) {
6b7c5b94
SP
2904 status = be_msix_register(adapter);
2905 if (status == 0)
2906 goto done;
ba343c77
SB
2907 /* INTx is not supported for VF */
2908 if (!be_physfn(adapter))
2909 return status;
6b7c5b94
SP
2910 }
2911
e49cc34f 2912 /* INTx: only the first EQ is used */
6b7c5b94
SP
2913 netdev->irq = adapter->pdev->irq;
2914 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2915 &adapter->eq_obj[0]);
6b7c5b94
SP
2916 if (status) {
2917 dev_err(&adapter->pdev->dev,
2918 "INTx request IRQ failed - err %d\n", status);
2919 return status;
2920 }
2921done:
2922 adapter->isr_registered = true;
2923 return 0;
2924}
2925
2926static void be_irq_unregister(struct be_adapter *adapter)
2927{
2928 struct net_device *netdev = adapter->netdev;
10ef9ab4 2929 struct be_eq_obj *eqo;
3abcdeda 2930 int i;
6b7c5b94
SP
2931
2932 if (!adapter->isr_registered)
2933 return;
2934
2935 /* INTx */
ac6a0c4a 2936 if (!msix_enabled(adapter)) {
e49cc34f 2937 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2938 goto done;
2939 }
2940
2941 /* MSIx */
10ef9ab4
SP
2942 for_all_evt_queues(adapter, eqo, i)
2943 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2944
6b7c5b94
SP
2945done:
2946 adapter->isr_registered = false;
6b7c5b94
SP
2947}
2948
10ef9ab4 2949static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2950{
2951 struct be_queue_info *q;
2952 struct be_rx_obj *rxo;
2953 int i;
2954
2955 for_all_rx_queues(adapter, rxo, i) {
2956 q = &rxo->q;
2957 if (q->created) {
2958 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2959 be_rx_cq_clean(rxo);
482c9e79 2960 }
10ef9ab4 2961 be_queue_free(adapter, q);
482c9e79
SP
2962 }
2963}
2964
889cd4b2
SP
2965static int be_close(struct net_device *netdev)
2966{
2967 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2968 struct be_eq_obj *eqo;
2969 int i;
889cd4b2 2970
e1ad8e33
KA
2971 /* This protection is needed as be_close() may be called even when the
2972 * adapter is in cleared state (after eeh perm failure)
2973 */
2974 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
2975 return 0;
2976
045508a8
PP
2977 be_roce_dev_close(adapter);
2978
dff345c5
IV
2979 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2980 for_all_evt_queues(adapter, eqo, i) {
04d3d624 2981 napi_disable(&eqo->napi);
6384a4d0
SP
2982 be_disable_busy_poll(eqo);
2983 }
71237b6f 2984 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 2985 }
a323d9bf
SP
2986
2987 be_async_mcc_disable(adapter);
2988
2989 /* Wait for all pending tx completions to arrive so that
2990 * all tx skbs are freed.
2991 */
fba87559 2992 netif_tx_disable(netdev);
6e1f9975 2993 be_tx_compl_clean(adapter);
a323d9bf
SP
2994
2995 be_rx_qs_destroy(adapter);
f66b7cfd 2996 be_clear_uc_list(adapter);
d11a347d 2997
a323d9bf 2998 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2999 if (msix_enabled(adapter))
3000 synchronize_irq(be_msix_vec_get(adapter, eqo));
3001 else
3002 synchronize_irq(netdev->irq);
3003 be_eq_clean(eqo);
63fcb27f
PR
3004 }
3005
889cd4b2
SP
3006 be_irq_unregister(adapter);
3007
482c9e79
SP
3008 return 0;
3009}
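/* Teardown ordering in be_close() above matters: NAPI and busy-poll are
 * quiesced first so no new completions get processed, TX is drained
 * before the RX queues are destroyed, and IRQs are synchronized and
 * unregistered only after the EQs are cleaned, so no handler can run
 * against a dead queue.
 */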
3010
10ef9ab4 3011static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 3012{
1dcf7b1c
ED
3013 struct rss_info *rss = &adapter->rss_info;
3014 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 3015 struct be_rx_obj *rxo;
e9008ee9 3016 int rc, i, j;
482c9e79
SP
3017
3018 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
3019 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3020 sizeof(struct be_eth_rx_d));
3021 if (rc)
3022 return rc;
3023 }
3024
3025 /* The FW would like the default RXQ to be created first */
3026 rxo = default_rxo(adapter);
3027 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
3028 adapter->if_handle, false, &rxo->rss_id);
3029 if (rc)
3030 return rc;
3031
3032 for_all_rss_queues(adapter, rxo, i) {
482c9e79 3033 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
3034 rx_frag_size, adapter->if_handle,
3035 true, &rxo->rss_id);
482c9e79
SP
3036 if (rc)
3037 return rc;
3038 }
3039
3040 if (be_multi_rxq(adapter)) {
e2557877
VD
3041 for (j = 0; j < RSS_INDIR_TABLE_LEN;
3042 j += adapter->num_rx_qs - 1) {
e9008ee9 3043 for_all_rss_queues(adapter, rxo, i) {
e2557877 3044 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 3045 break;
e2557877
VD
3046 rss->rsstable[j + i] = rxo->rss_id;
3047 rss->rss_queue[j + i] = i;
e9008ee9
PR
3048 }
3049 }
e2557877
VD
3050 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3051 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
3052
3053 if (!BEx_chip(adapter))
e2557877
VD
3054 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3055 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
3056 } else {
3057 /* Disable RSS, if only default RX Q is created */
e2557877 3058 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3059 }
594ad54a 3060
1dcf7b1c 3061 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
748b539a 3062 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
1dcf7b1c 3063 128, rss_key);
da1388d6 3064 if (rc) {
e2557877 3065 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3066 return rc;
482c9e79
SP
3067 }
3068
1dcf7b1c 3069 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
e2557877 3070
482c9e79 3071 /* First time posting */
10ef9ab4 3072 for_all_rx_queues(adapter, rxo, i)
c30d7266 3073 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
889cd4b2
SP
3074 return 0;
3075}
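/* Illustrative RSS indirection fill (assumed 4 RSS rings plus the
 * default RXQ, so num_rx_qs == 5, with RSS_INDIR_TABLE_LEN == 128):
 * the outer loop strides j by 4 while the inner loop writes one entry
 * per RSS ring, so rsstable[] repeats ring0..ring3 thirty-two times.
 * The "(j + i) >= RSS_INDIR_TABLE_LEN" check covers ring counts that
 * don't divide the table length evenly.
 */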
3076
6b7c5b94
SP
3077static int be_open(struct net_device *netdev)
3078{
3079 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3080 struct be_eq_obj *eqo;
3abcdeda 3081 struct be_rx_obj *rxo;
10ef9ab4 3082 struct be_tx_obj *txo;
b236916a 3083 u8 link_status;
3abcdeda 3084 int status, i;
5fb379ee 3085
10ef9ab4 3086 status = be_rx_qs_create(adapter);
482c9e79
SP
3087 if (status)
3088 goto err;
3089
c2bba3df
SK
3090 status = be_irq_register(adapter);
3091 if (status)
3092 goto err;
5fb379ee 3093
10ef9ab4 3094 for_all_rx_queues(adapter, rxo, i)
3abcdeda 3095 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 3096
10ef9ab4
SP
3097 for_all_tx_queues(adapter, txo, i)
3098 be_cq_notify(adapter, txo->cq.id, true, 0);
3099
7a1e9b20
SP
3100 be_async_mcc_enable(adapter);
3101
10ef9ab4
SP
3102 for_all_evt_queues(adapter, eqo, i) {
3103 napi_enable(&eqo->napi);
6384a4d0 3104 be_enable_busy_poll(eqo);
4cad9f3b 3105 be_eq_notify(adapter, eqo->q.id, true, true, 0);
10ef9ab4 3106 }
04d3d624 3107 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 3108
323ff71e 3109 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
3110 if (!status)
3111 be_link_status_update(adapter, link_status);
3112
fba87559 3113 netif_tx_start_all_queues(netdev);
045508a8 3114 be_roce_dev_open(adapter);
c9c47142 3115
c5abe7c0 3116#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3117 if (skyhawk_chip(adapter))
3118 vxlan_get_rx_port(netdev);
c5abe7c0
SP
3119#endif
3120
889cd4b2
SP
3121 return 0;
3122err:
3123 be_close(adapter->netdev);
3124 return -EIO;
5fb379ee
SP
3125}
3126
71d8d1b5
AK
3127static int be_setup_wol(struct be_adapter *adapter, bool enable)
3128{
3129 struct be_dma_mem cmd;
3130 int status = 0;
3131 u8 mac[ETH_ALEN];
3132
3133 memset(mac, 0, ETH_ALEN);
3134
3135 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
3136 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3137 GFP_KERNEL);
ddf1169f 3138 if (!cmd.va)
6b568689 3139 return -ENOMEM;
71d8d1b5
AK
3140
3141 if (enable) {
3142 status = pci_write_config_dword(adapter->pdev,
748b539a
SP
3143 PCICFG_PM_CONTROL_OFFSET,
3144 PCICFG_PM_CONTROL_MASK);
71d8d1b5
AK
3145 if (status) {
3146 dev_err(&adapter->pdev->dev,
2381a55c 3147 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
3148 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3149 cmd.dma);
71d8d1b5
AK
3150 return status;
3151 }
3152 status = be_cmd_enable_magic_wol(adapter,
748b539a
SP
3153 adapter->netdev->dev_addr,
3154 &cmd);
71d8d1b5
AK
3155 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3156 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3157 } else {
3158 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3159 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3160 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3161 }
3162
2b7bcebf 3163 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
3164 return status;
3165}
3166
f7062ee5
SP
3167static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3168{
3169 u32 addr;
3170
3171 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3172
3173 mac[5] = (u8)(addr & 0xFF);
3174 mac[4] = (u8)((addr >> 8) & 0xFF);
3175 mac[3] = (u8)((addr >> 16) & 0xFF);
3176 /* Use the OUI from the current MAC address */
3177 memcpy(mac, adapter->netdev->dev_addr, 3);
3178}
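/* Example (hypothetical values): with a PF MAC of 00:00:c9:aa:bb:cc and
 * jhash() returning 0x00123456, the seed becomes 00:00:c9:12:34:56 -
 * OUI bytes 0-2 copied from the PF, hash bytes filling 3-5.
 * be_vf_eth_addr_config() then hands VF n the seed plus n via its
 * mac[5] increment.
 */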
3179
6d87f5c3
AK
3180/*
3181 * Generate a seed MAC address from the PF MAC Address using jhash.
 3182 * MAC addresses for VFs are assigned incrementally starting from the seed.
3183 * These addresses are programmed in the ASIC by the PF and the VF driver
3184 * queries for the MAC address during its probe.
3185 */
4c876616 3186static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3187{
f9449ab7 3188 u32 vf;
3abcdeda 3189 int status = 0;
6d87f5c3 3190 u8 mac[ETH_ALEN];
11ac75ed 3191 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3192
3193 be_vf_eth_addr_generate(adapter, mac);
3194
11ac75ed 3195 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3196 if (BEx_chip(adapter))
590c391d 3197 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3198 vf_cfg->if_handle,
3199 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3200 else
3201 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3202 vf + 1);
590c391d 3203
6d87f5c3
AK
3204 if (status)
3205 dev_err(&adapter->pdev->dev,
748b539a
SP
3206 "Mac address assignment failed for VF %d\n",
3207 vf);
6d87f5c3 3208 else
11ac75ed 3209 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3210
3211 mac[5] += 1;
3212 }
3213 return status;
3214}
3215
4c876616
SP
3216static int be_vfs_mac_query(struct be_adapter *adapter)
3217{
3218 int status, vf;
3219 u8 mac[ETH_ALEN];
3220 struct be_vf_cfg *vf_cfg;
4c876616
SP
3221
3222 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3223 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3224 mac, vf_cfg->if_handle,
3225 false, vf+1);
4c876616
SP
3226 if (status)
3227 return status;
3228 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3229 }
3230 return 0;
3231}
3232
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}

static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}

static void be_cancel_worker(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}
}

static void be_mac_clear(struct be_adapter *adapter)
{
	if (adapter->pmac_id) {
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[0], 0);
		kfree(adapter->pmac_id);
		adapter->pmac_id = NULL;
	}
}

#ifdef CONFIG_BE2NET_VXLAN
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif

static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
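
/* Create a FW interface object enabling only those flags from the
 * wanted set that the function actually supports (cap_flags).
 */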
static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
			u32 cap_flags, u32 vf)
{
	u32 en_flags;
	int status;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
		   BE_IF_FLAGS_RSS;

	en_flags &= cap_flags;

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
				  if_handle, vf);

	return status;
}

static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, vf;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		status = be_if_create(adapter, &vf_cfg->if_handle,
				      cap_flags, vf + 1);
		if (status)
			return status;
	}

	return 0;
}

static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
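
/* Provision all VFs: create (or, on a reload, re-discover) their
 * interfaces and MAC addresses, grant filtering privilege and QoS,
 * and finally enable SR-IOV on the PCI function.
 */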
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf + 1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}

/* Converting function_mode bits on BE3 to SH mc_type enums */

static u8 be_convert_mc_type(u32 function_mode)
{
	if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
		return vNIC1;
	else if (function_mode & QNQ_MODE)
		return FLEX10;
	else if (function_mode & VNIC_MODE)
		return vNIC2;
	else if (function_mode & UMC_ENABLED)
		return UMC;
	else
		return MC_NONE;
}

/* On BE2/BE3 FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 *    *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
				   BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->if_flags = 0;
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}
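
/* Read the SR-IOV PF-pool limits from the FW profile and validate the
 * num_vfs module parameter against them.
 */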
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	/* Some old versions of BE3 FW don't report max_vfs value */
	be_cmd_get_profile_config(adapter, &res, 0);

	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
		adapter->num_vfs = 0;
		return 0;
	}

	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* validate num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev, "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}

static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	return 0;
}

static void be_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_get_sriov_config(adapter);
	if (status) {
		dev_err(dev, "Failed to query SR-IOV configuration\n");
		dev_err(dev, "SR-IOV cannot be enabled\n");
		return;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are equally distributed across the max-number of
	 * VFs. The user may request only a subset of the max-vfs to be
	 * enabled. Based on num_vfs, redistribute the resources across
	 * num_vfs so that each VF will have access to more resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
		status = be_cmd_set_sriov_config(adapter,
						 adapter->pool_res,
						 adapter->num_vfs);
		if (status)
			dev_err(dev, "Failed to optimize SR-IOV resources\n");
	}
}

static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
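
/* Program the primary MAC: use the FW's permanent address on first
 * setup, or re-program the current dev_addr after a HW reset.
 */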
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}

static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}

static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
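
/* Tear down and re-create all queues (and MSI-X vectors, when they are
 * not shared with RoCE) to apply a new ring configuration; the netdev
 * is closed and re-opened around the update if it was running.
 */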
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
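
/* Parse the major number from a FW version string; for example,
 * fw_major_num("4.2.220.0") returns 4. Returns 0 on a parse failure.
 */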
static inline int fw_major_num(const char *fw_ver)
{
	int fw_major = 0, i;

	i = sscanf(fw_ver, "%d.", &fw_major);
	if (i != 1)
		return 0;

	return fw_major;
}

static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	status = be_if_create(adapter, &adapter->if_handle,
			      be_if_cap_flags(adapter), 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old (%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif

static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};

static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}

static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (BE2_chip(adapter))
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}

static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
						int header_size,
						const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}

static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
			      u32 img_offset, u32 img_size, int hdr_size,
			      u16 img_optype, bool *crc_match)
{
	u32 crc_offset;
	int status;
	u8 crc[4];

	status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
				      img_size - 4);
	if (status)
		return status;

	crc_offset = hdr_size + img_offset + img_size - 4;

	/* Skip flashing, if crc of flashed region matches */
	if (!memcmp(crc, p + crc_offset, 4))
		*crc_match = true;
	else
		*crc_match = false;

	return status;
}
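
/* Write one flash component to the adapter in 32KB chunks: every chunk
 * but the last is staged with a SAVE op; the final chunk issues the
 * actual FLASH op.
 */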
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size,
		    u32 img_offset)
{
	u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	int status;

	while (total_bytes) {
		num_bytes = min_t(u32, 32 * 1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, img_offset +
					       bytes_sent, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;

		bytes_sent += num_bytes;
	}
	return 0;
}

/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
		img_hdrs_size = 0;
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size, 0);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
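
/* Older UFI images carry 0xFFFF in the optype field; in that case
 * derive the flash optype from the image type instead.
 */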
static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
{
	u32 img_type = le32_to_cpu(fsec_entry.type);
	u16 img_optype = le16_to_cpu(fsec_entry.optype);

	if (img_optype != 0xFFFF)
		return img_optype;

	switch (img_type) {
	case IMAGE_FIRMWARE_iSCSI:
		img_optype = OPTYPE_ISCSI_ACTIVE;
		break;
	case IMAGE_BOOT_CODE:
		img_optype = OPTYPE_REDBOOT;
		break;
	case IMAGE_OPTION_ROM_ISCSI:
		img_optype = OPTYPE_BIOS;
		break;
	case IMAGE_OPTION_ROM_PXE:
		img_optype = OPTYPE_PXE_BIOS;
		break;
	case IMAGE_OPTION_ROM_FCoE:
		img_optype = OPTYPE_FCOE_BIOS;
		break;
	case IMAGE_FIRMWARE_BACKUP_iSCSI:
		img_optype = OPTYPE_ISCSI_BACKUP;
		break;
	case IMAGE_NCSI:
		img_optype = OPTYPE_NCSI_FW;
		break;
	case IMAGE_FLASHISM_JUMPVECTOR:
		img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
		break;
	case IMAGE_FIRMWARE_PHY:
		img_optype = OPTYPE_SH_PHY_FW;
		break;
	case IMAGE_REDBOOT_DIR:
		img_optype = OPTYPE_REDBOOT_DIR;
		break;
	case IMAGE_REDBOOT_CONFIG:
		img_optype = OPTYPE_REDBOOT_CONFIG;
		break;
	case IMAGE_UFI_DIR:
		img_optype = OPTYPE_UFI_DIR;
		break;
	default:
		break;
	}

	return img_optype;
}

static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	bool crc_match, old_fw_img, flash_offset_support = true;
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	u16 img_optype, flash_optype;
	int status, i, filehdr_size;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

retry_flash:
	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		if (img_optype == 0xFFFF)
			continue;

		if (flash_offset_support)
			flash_optype = OPTYPE_OFFSET_SPECIFIED;
		else
			flash_optype = img_optype;

		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, flash_optype,
					    &crc_match);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			/* The current FW image on the card does not support
			 * OFFSET based flashing. Retry using older mechanism
			 * of OPTYPE based flashing
			 */
			if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
				flash_offset_support = false;
				goto retry_flash;
			}

			/* The current FW image on the card does not recognize
			 * the new FLASH op_type. The FW download is partially
			 * complete. Reboot the server now to enable FW image
			 * to recognize the new FLASH op_type. To complete the
			 * remaining process, download the same FW again after
			 * the reboot.
			 */
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, flash_optype,
				  img_size, img_offset);

		/* The current FW image on the card does not support OFFSET
		 * based flashing. Retry using older mechanism of OPTYPE based
		 * flashing
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
		    flash_optype == OPTYPE_OFFSET_SPECIFIED) {
			flash_offset_support = false;
			goto retry_flash;
		}

		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}
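
/* Download a FW image to a Lancer adapter one LANCER_FW_DOWNLOAD_CHUNK
 * at a time via the write-object command, commit it, and reset the
 * adapter if the FW indicates a reset is needed to activate the image.
 */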
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			 + LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}

#define BE2_UFI		2
#define BE3_UFI		3
#define BE3R_UFI	10
#define SH_UFI		4
#define SH_P2_UFI	11

static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (!fhdr) {
		dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
		return -1;
	}

	/* First letter of the build version is used to identify
	 * which chip this image file is meant for.
	 */
	switch (fhdr->build[0]) {
	case BLD_STR_UFI_TYPE_SH:
		return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
							      SH_UFI;
	case BLD_STR_UFI_TYPE_BE3:
		return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
							      BE3_UFI;
	case BLD_STR_UFI_TYPE_BE2:
		return BE2_UFI;
	default:
		return -1;
	}
}

/* Check if the flash image file is compatible with the adapter that
 * is being flashed.
 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
 */
static bool be_check_ufi_compatibility(struct be_adapter *adapter,
				       struct flash_file_hdr_g3 *fhdr)
{
	int ufi_type = be_get_ufi_type(adapter, fhdr);

	switch (ufi_type) {
	case SH_P2_UFI:
		return skyhawk_chip(adapter);
	case SH_UFI:
		return (skyhawk_chip(adapter) &&
			adapter->asic_rev < ASIC_REV_P2);
	case BE3R_UFI:
		return BE3_chip(adapter);
	case BE3_UFI:
		return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
	case BE2_UFI:
		return BE2_chip(adapter);
	default:
		return false;
	}
}
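
/* Flash a UFI image on BE2/BE3/Skyhawk adapters, after verifying that
 * the image matches the chip and ASIC revision being flashed.
 */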
static int be_fw_download(struct be_adapter *adapter,
			  const struct firmware *fw)
{
	struct device *dev = &adapter->pdev->dev;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr;
	int status = 0, i, num_imgs;
	struct be_dma_mem flash_cmd;

	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	if (!be_check_ufi_compatibility(adapter, fhdr3)) {
		dev_err(dev, "Flash image is not compatible with adapter\n");
		return -EINVAL;
	}

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
					  GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (!BE2_chip(adapter) &&
		    le32_to_cpu(img_hdr_ptr->imageid) != 1)
			continue;

		if (skyhawk_chip(adapter))
			status = be_flash_skyhawk(adapter, fw, &flash_cmd,
						  num_imgs);
		else
			status = be_flash_BEx(adapter, fw, &flash_cmd,
					      num_imgs);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (!status)
		dev_info(dev, "Firmware flashed successfully\n");

	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	release_firmware(fw);
	return status;
}

static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}

static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0);
}

#ifdef CONFIG_BE2NET_VXLAN
/* VxLAN offload Notes:
 *
 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
 * is expected to work across all types of IP tunnels once exported. Skyhawk
 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
 * those other tunnels are unexported on the fly through ndo_features_check().
 *
 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
 * adds more than one port, disable offloads and don't re-enable them again
 * until after all the tunnels are removed.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}

static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->vxlan_port != port)
		goto done;

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
}
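
/* Implements the behaviour described in the VxLAN offload notes above:
 * strip checksum/GSO offload features from tunneled skbs that are not
 * well-formed VxLAN.
 */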
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);

	return features;
}
#endif

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}
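
/* Map the PCI BARs and set up the mailbox and rx-filter DMA memory and
 * the locks used for issuing FW commands.
 */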
6b7c5b94
SP
4861static int be_ctrl_init(struct be_adapter *adapter)
4862{
8788fdc2
SP
4863 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4864 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 4865 struct be_dma_mem *rx_filter = &adapter->rx_filter;
ce66f781 4866 u32 sli_intf;
6b7c5b94 4867 int status;
6b7c5b94 4868
ce66f781
SP
4869 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4870 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4871 SLI_INTF_FAMILY_SHIFT;
4872 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4873
6b7c5b94
SP
4874 status = be_map_pci_bars(adapter);
4875 if (status)
e7b909a6 4876 goto done;
6b7c5b94
SP
4877
4878 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
4879 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4880 mbox_mem_alloc->size,
4881 &mbox_mem_alloc->dma,
4882 GFP_KERNEL);
6b7c5b94 4883 if (!mbox_mem_alloc->va) {
e7b909a6
SP
4884 status = -ENOMEM;
4885 goto unmap_pci_bars;
6b7c5b94
SP
4886 }
4887 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4888 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4889 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4890 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 4891
5b8821b7 4892 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
ede23fa8
JP
4893 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4894 rx_filter->size, &rx_filter->dma,
4895 GFP_KERNEL);
ddf1169f 4896 if (!rx_filter->va) {
e7b909a6
SP
4897 status = -ENOMEM;
4898 goto free_mbox;
4899 }
1f9061d2 4900
2984961c 4901 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
4902 spin_lock_init(&adapter->mcc_lock);
4903 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 4904
5eeff635 4905 init_completion(&adapter->et_cmd_compl);
cf588477 4906 pci_save_state(adapter->pdev);
6b7c5b94 4907 return 0;
e7b909a6
SP
4908
4909free_mbox:
2b7bcebf
IV
4910 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4911 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
4912
4913unmap_pci_bars:
4914 be_unmap_pci_bars(adapter);
4915
4916done:
4917 return status;
6b7c5b94
SP
4918}
4919
4920static void be_stats_cleanup(struct be_adapter *adapter)
4921{
3abcdeda 4922 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
4923
4924 if (cmd->va)
2b7bcebf
IV
4925 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4926 cmd->va, cmd->dma);
6b7c5b94
SP
4927}
4928
4929static int be_stats_init(struct be_adapter *adapter)
4930{
3abcdeda 4931 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 4932
ca34fe38
SP
4933 if (lancer_chip(adapter))
4934 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4935 else if (BE2_chip(adapter))
89a88ab8 4936 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
61000861 4937 else if (BE3_chip(adapter))
ca34fe38 4938 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
61000861
AK
4939 else
4940 /* ALL non-BE ASICs */
4941 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
ca34fe38 4942
ede23fa8
JP
4943 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4944 GFP_KERNEL);
ddf1169f 4945 if (!cmd->va)
6b568689 4946 return -ENOMEM;
6b7c5b94
SP
4947 return 0;
4948}
4949
3bc6b06c 4950static void be_remove(struct pci_dev *pdev)
6b7c5b94
SP
4951{
4952 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 4953
6b7c5b94
SP
4954 if (!adapter)
4955 return;
4956
045508a8 4957 be_roce_dev_remove(adapter);
8cef7a78 4958 be_intr_set(adapter, false);
045508a8 4959
f67ef7ba
PR
4960 cancel_delayed_work_sync(&adapter->func_recovery_work);
4961
6b7c5b94
SP
4962 unregister_netdev(adapter->netdev);
4963
5fb379ee
SP
4964 be_clear(adapter);
4965
bf99e50d
PR
4966 /* tell fw we're done with firing cmds */
4967 be_cmd_fw_clean(adapter);
4968
6b7c5b94
SP
4969 be_stats_cleanup(adapter);
4970
4971 be_ctrl_cleanup(adapter);
4972
d6b6d987
SP
4973 pci_disable_pcie_error_reporting(pdev);
4974
6b7c5b94
SP
4975 pci_release_regions(pdev);
4976 pci_disable_device(pdev);
4977
4978 free_netdev(adapter->netdev);
4979}
4980
39f1d94d 4981static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 4982{
baaa08d1 4983 int status, level;
6b7c5b94 4984
9e1453c5
AK
4985 status = be_cmd_get_cntl_attributes(adapter);
4986 if (status)
4987 return status;
4988
7aeb2156
PR
4989 /* Must be a power of 2 or else MODULO will BUG_ON */
4990 adapter->be_get_temp_freq = 64;
4991
baaa08d1
VV
4992 if (BEx_chip(adapter)) {
4993 level = be_cmd_get_fw_log_level(adapter);
4994 adapter->msg_enable =
4995 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4996 }
941a77d5 4997
92bf14ab 4998 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
2243e2e9 4999 return 0;
6b7c5b94
SP
5000}
5001
f67ef7ba 5002static int lancer_recover_func(struct be_adapter *adapter)
d8110f62 5003{
01e5b2c4 5004 struct device *dev = &adapter->pdev->dev;
d8110f62 5005 int status;
d8110f62 5006
f67ef7ba
PR
5007 status = lancer_test_and_set_rdy_state(adapter);
5008 if (status)
5009 goto err;
d8110f62 5010
f67ef7ba
PR
5011 if (netif_running(adapter->netdev))
5012 be_close(adapter->netdev);
d8110f62 5013
f67ef7ba
PR
5014 be_clear(adapter);
5015
01e5b2c4 5016 be_clear_all_error(adapter);
f67ef7ba
PR
5017
5018 status = be_setup(adapter);
5019 if (status)
5020 goto err;
d8110f62 5021
f67ef7ba
PR
5022 if (netif_running(adapter->netdev)) {
5023 status = be_open(adapter->netdev);
d8110f62
PR
5024 if (status)
5025 goto err;
f67ef7ba 5026 }
d8110f62 5027
4bebb56a 5028 dev_err(dev, "Adapter recovery successful\n");
f67ef7ba
PR
5029 return 0;
5030err:
01e5b2c4
SK
5031 if (status == -EAGAIN)
5032 dev_err(dev, "Waiting for resource provisioning\n");
5033 else
4bebb56a 5034 dev_err(dev, "Adapter recovery failed\n");
d8110f62 5035
f67ef7ba
PR
5036 return status;
5037}
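
lancer_recover_func() above follows a fixed quiesce/teardown/rebuild/reopen ordering. A condensed skeleton of that ordering, with empty demo_* placeholders standing in for be_close()/be_clear()/be_setup()/be_open():

#include <linux/netdevice.h>

static void demo_close(struct net_device *nd) { }          /* ~be_close() */
static void demo_teardown(struct net_device *nd) { }       /* ~be_clear() */
static int demo_setup(struct net_device *nd) { return 0; } /* ~be_setup() */
static int demo_open(struct net_device *nd) { return 0; }  /* ~be_open()  */

static int demo_recover(struct net_device *netdev)
{
        int status;

        if (netif_running(netdev))
                demo_close(netdev);     /* stop traffic before teardown */

        demo_teardown(netdev);          /* release queues and interrupts */

        status = demo_setup(netdev);    /* re-create hardware resources */
        if (status)
                return status;

        if (netif_running(netdev))
                status = demo_open(netdev); /* restart traffic last */
        return status;
}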
5038
5039static void be_func_recovery_task(struct work_struct *work)
5040{
5041 struct be_adapter *adapter =
5042 container_of(work, struct be_adapter, func_recovery_work.work);
01e5b2c4 5043 int status = 0;
d8110f62 5044
f67ef7ba 5045 be_detect_error(adapter);
d8110f62 5046
f67ef7ba 5047 if (adapter->hw_error && lancer_chip(adapter)) {
f67ef7ba
PR
5048 rtnl_lock();
5049 netif_device_detach(adapter->netdev);
5050 rtnl_unlock();
d8110f62 5051
f67ef7ba 5052 status = lancer_recover_func(adapter);
f67ef7ba
PR
5053 if (!status)
5054 netif_device_attach(adapter->netdev);
d8110f62 5055 }
f67ef7ba 5056
01e5b2c4
SK
5057 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
5058 * no need to attempt further recovery.
5059 */
5060 if (!status || status == -EAGAIN)
5061 schedule_delayed_work(&adapter->func_recovery_work,
5062 msecs_to_jiffies(1000));
d8110f62
PR
5063}
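
be_func_recovery_task() re-arms itself every second with schedule_delayed_work(). A self-contained sketch of that self-rescheduling delayed-work pattern as a hypothetical standalone module (not part of the driver):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct delayed_work demo_work;

static void demo_work_fn(struct work_struct *work)
{
        pr_info("demo: periodic check\n");
        /* re-arm ourselves, as the recovery task above does */
        schedule_delayed_work(&demo_work, msecs_to_jiffies(1000));
}

static int __init demo_init(void)
{
        INIT_DELAYED_WORK(&demo_work, demo_work_fn);
        schedule_delayed_work(&demo_work, msecs_to_jiffies(1000));
        return 0;
}

static void __exit demo_exit(void)
{
        /* handles the self-requeue race: work is idle on return */
        cancel_delayed_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");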
5064
21252377
VV
5065static void be_log_sfp_info(struct be_adapter *adapter)
5066{
5067 int status;
5068
5069 status = be_cmd_query_sfp_info(adapter);
5070 if (!status) {
5071 dev_err(&adapter->pdev->dev,
5072 "Unqualified SFP+ detected on %c from %s part no: %s",
5073 adapter->port_name, adapter->phy.vendor_name,
5074 adapter->phy.vendor_pn);
5075 }
5076 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
5077}
5078
d8110f62
PR
5079static void be_worker(struct work_struct *work)
5080{
5081 struct be_adapter *adapter =
5082 container_of(work, struct be_adapter, work.work);
5083 struct be_rx_obj *rxo;
5084 int i;
5085
d8110f62
PR
 5086 /* When interrupts are not yet enabled, just reap any pending
 5087 * MCC completions */
5088 if (!netif_running(adapter->netdev)) {
072a9c48 5089 local_bh_disable();
10ef9ab4 5090 be_process_mcc(adapter);
072a9c48 5091 local_bh_enable();
d8110f62
PR
5092 goto reschedule;
5093 }
5094
5095 if (!adapter->stats_cmd_sent) {
5096 if (lancer_chip(adapter))
5097 lancer_cmd_get_pport_stats(adapter,
cd3307aa 5098 &adapter->stats_cmd);
d8110f62
PR
5099 else
5100 be_cmd_get_stats(adapter, &adapter->stats_cmd);
5101 }
5102
d696b5e2
VV
5103 if (be_physfn(adapter) &&
5104 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
7aeb2156
PR
5105 be_cmd_get_die_temperature(adapter);
5106
d8110f62 5107 for_all_rx_queues(adapter, rxo, i) {
6384a4d0
SP
5108 /* Replenish RX-queues starved due to memory
5109 * allocation failures.
5110 */
5111 if (rxo->rx_post_starved)
c30d7266 5112 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
d8110f62
PR
5113 }
5114
2632bafd 5115 be_eqd_update(adapter);
10ef9ab4 5116
21252377
VV
5117 if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
5118 be_log_sfp_info(adapter);
5119
d8110f62
PR
5120reschedule:
5121 adapter->work_counter++;
5122 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
5123}
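
The rx_post_starved handling in be_worker() is a deferred-retry pattern: the receive hot path cannot block, so on an atomic allocation failure it only sets a flag, and the 1-second worker replenishes later with a blocking GFP_KERNEL allocation. A hypothetical plain-C sketch of that flag handshake (toy code, not the driver's structures):

#include <stdbool.h>
#include <stdio.h>

struct demo_rxq {
        bool post_starved;      /* set by hot path on alloc failure */
};

/* hot path: never blocks, records starvation instead of looping */
static void demo_rx_refill_atomic(struct demo_rxq *q, bool alloc_failed)
{
        if (alloc_failed)
                q->post_starved = true;
}

/* periodic worker: allowed to block, clears the flag after replenish */
static void demo_worker_tick(struct demo_rxq *q)
{
        if (q->post_starved) {
                printf("replenishing starved RX queue\n");
                q->post_starved = false;
        }
}

int main(void)
{
        struct demo_rxq q = { .post_starved = false };

        demo_rx_refill_atomic(&q, true);  /* simulate atomic alloc failure */
        demo_worker_tick(&q);             /* later, worker replenishes */
        return 0;
}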
5124
257a3feb 5125/* If any VFs are already enabled, don't FLR the PF */
39f1d94d
SP
5126static bool be_reset_required(struct be_adapter *adapter)
5127{
257a3feb 5128 return pci_num_vf(adapter->pdev) ? false : true;
39f1d94d
SP
5129}
5130
d379142b
SP
5131static char *mc_name(struct be_adapter *adapter)
5132{
f93f160b
VV
5133 char *str = ""; /* default */
5134
5135 switch (adapter->mc_type) {
5136 case UMC:
5137 str = "UMC";
5138 break;
5139 case FLEX10:
5140 str = "FLEX10";
5141 break;
5142 case vNIC1:
5143 str = "vNIC-1";
5144 break;
5145 case nPAR:
5146 str = "nPAR";
5147 break;
5148 case UFP:
5149 str = "UFP";
5150 break;
5151 case vNIC2:
5152 str = "vNIC-2";
5153 break;
5154 default:
5155 str = "";
5156 }
5157
5158 return str;
d379142b
SP
5159}
5160
5161static inline char *func_name(struct be_adapter *adapter)
5162{
5163 return be_physfn(adapter) ? "PF" : "VF";
5164}
5165
f7062ee5
SP
5166static inline char *nic_name(struct pci_dev *pdev)
5167{
5168 switch (pdev->device) {
5169 case OC_DEVICE_ID1:
5170 return OC_NAME;
5171 case OC_DEVICE_ID2:
5172 return OC_NAME_BE;
5173 case OC_DEVICE_ID3:
5174 case OC_DEVICE_ID4:
5175 return OC_NAME_LANCER;
5176 case BE_DEVICE_ID2:
5177 return BE3_NAME;
5178 case OC_DEVICE_ID5:
5179 case OC_DEVICE_ID6:
5180 return OC_NAME_SH;
5181 default:
5182 return BE_NAME;
5183 }
5184}
5185
1dd06ae8 5186static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
6b7c5b94 5187{
6b7c5b94
SP
5188 struct be_adapter *adapter;
5189 struct net_device *netdev;
21252377 5190 int status = 0;
6b7c5b94 5191
acbafeb1
SP
5192 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
5193
6b7c5b94
SP
5194 status = pci_enable_device(pdev);
5195 if (status)
5196 goto do_none;
5197
5198 status = pci_request_regions(pdev, DRV_NAME);
5199 if (status)
5200 goto disable_dev;
5201 pci_set_master(pdev);
5202
7f640062 5203 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
ddf1169f 5204 if (!netdev) {
6b7c5b94
SP
5205 status = -ENOMEM;
5206 goto rel_reg;
5207 }
5208 adapter = netdev_priv(netdev);
5209 adapter->pdev = pdev;
5210 pci_set_drvdata(pdev, adapter);
5211 adapter->netdev = netdev;
2243e2e9 5212 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 5213
4c15c243 5214 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
5215 if (!status) {
5216 netdev->features |= NETIF_F_HIGHDMA;
5217 } else {
4c15c243 5218 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
5219 if (status) {
5220 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
5221 goto free_netdev;
5222 }
5223 }
5224
2f951a9a
KA
5225 status = pci_enable_pcie_error_reporting(pdev);
5226 if (!status)
5227 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
d6b6d987 5228
6b7c5b94
SP
5229 status = be_ctrl_init(adapter);
5230 if (status)
39f1d94d 5231 goto free_netdev;
6b7c5b94 5232
2243e2e9 5233 /* sync up with fw's ready state */
ba343c77 5234 if (be_physfn(adapter)) {
bf99e50d 5235 status = be_fw_wait_ready(adapter);
ba343c77
SB
5236 if (status)
5237 goto ctrl_clean;
ba343c77 5238 }
6b7c5b94 5239
39f1d94d
SP
5240 if (be_reset_required(adapter)) {
5241 status = be_cmd_reset_function(adapter);
5242 if (status)
5243 goto ctrl_clean;
556ae191 5244
2d177be8
KA
5245 /* Wait for interrupts to quiesce after an FLR */
5246 msleep(100);
5247 }
8cef7a78
SK
5248
5249 /* Allow interrupts for other ULPs running on NIC function */
5250 be_intr_set(adapter, true);
10ef9ab4 5251
2d177be8
KA
5252 /* tell fw we're ready to fire cmds */
5253 status = be_cmd_fw_init(adapter);
5254 if (status)
5255 goto ctrl_clean;
5256
2243e2e9
SP
5257 status = be_stats_init(adapter);
5258 if (status)
5259 goto ctrl_clean;
5260
39f1d94d 5261 status = be_get_initial_config(adapter);
6b7c5b94
SP
5262 if (status)
5263 goto stats_clean;
6b7c5b94
SP
5264
5265 INIT_DELAYED_WORK(&adapter->work, be_worker);
f67ef7ba 5266 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
5f820b6c
KA
5267 adapter->rx_fc = true;
5268 adapter->tx_fc = true;
6b7c5b94 5269
5fb379ee
SP
5270 status = be_setup(adapter);
5271 if (status)
55f5c3c5 5272 goto stats_clean;
2243e2e9 5273
3abcdeda 5274 be_netdev_init(netdev);
6b7c5b94
SP
5275 status = register_netdev(netdev);
5276 if (status != 0)
5fb379ee 5277 goto unsetup;
6b7c5b94 5278
045508a8
PP
5279 be_roce_dev_add(adapter);
5280
f67ef7ba
PR
5281 schedule_delayed_work(&adapter->func_recovery_work,
5282 msecs_to_jiffies(1000));
b4e32a71 5283
d379142b 5284 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
21252377 5285 func_name(adapter), mc_name(adapter), adapter->port_name);
34b1ef04 5286
6b7c5b94
SP
5287 return 0;
5288
5fb379ee
SP
5289unsetup:
5290 be_clear(adapter);
6b7c5b94
SP
5291stats_clean:
5292 be_stats_cleanup(adapter);
5293ctrl_clean:
5294 be_ctrl_cleanup(adapter);
f9449ab7 5295free_netdev:
fe6d2a38 5296 free_netdev(netdev);
6b7c5b94
SP
5297rel_reg:
5298 pci_release_regions(pdev);
5299disable_dev:
5300 pci_disable_device(pdev);
5301do_none:
c4ca2374 5302 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
5303 return status;
5304}
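
be_probe() above uses the kernel's goto-based unwind idiom: each failure label releases exactly what was acquired before it, in reverse order, so every resource has a single release path. A condensed, hypothetical sketch of that shape, including the 64-bit-then-32-bit DMA-mask fallback; demo_hw_init() is a placeholder for the ctrl/stats setup, not a driver function:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int demo_hw_init(struct pci_dev *pdev) { return 0; } /* placeholder */

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        int status;

        status = pci_enable_device(pdev);
        if (status)
                return status;

        status = pci_request_regions(pdev, "demo");
        if (status)
                goto disable_dev;
        pci_set_master(pdev);

        /* prefer 64-bit DMA, fall back to 32-bit, as be_probe() does */
        status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (status)
                status = dma_set_mask_and_coherent(&pdev->dev,
                                                   DMA_BIT_MASK(32));
        if (status)
                goto rel_reg;

        status = demo_hw_init(pdev);
        if (status)
                goto rel_reg;
        return 0;

rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
        return status;
}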
5305
5306static int be_suspend(struct pci_dev *pdev, pm_message_t state)
5307{
5308 struct be_adapter *adapter = pci_get_drvdata(pdev);
5309 struct net_device *netdev = adapter->netdev;
5310
76a9e08e 5311 if (adapter->wol_en)
71d8d1b5
AK
5312 be_setup_wol(adapter, true);
5313
d4360d6f 5314 be_intr_set(adapter, false);
f67ef7ba
PR
5315 cancel_delayed_work_sync(&adapter->func_recovery_work);
5316
6b7c5b94
SP
5317 netif_device_detach(netdev);
5318 if (netif_running(netdev)) {
5319 rtnl_lock();
5320 be_close(netdev);
5321 rtnl_unlock();
5322 }
9b0365f1 5323 be_clear(adapter);
6b7c5b94
SP
5324
5325 pci_save_state(pdev);
5326 pci_disable_device(pdev);
5327 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5328 return 0;
5329}
5330
5331static int be_resume(struct pci_dev *pdev)
5332{
5333 int status = 0;
5334 struct be_adapter *adapter = pci_get_drvdata(pdev);
5335 struct net_device *netdev = adapter->netdev;
5336
5337 netif_device_detach(netdev);
5338
5339 status = pci_enable_device(pdev);
5340 if (status)
5341 return status;
5342
1ca01512 5343 pci_set_power_state(pdev, PCI_D0);
6b7c5b94
SP
5344 pci_restore_state(pdev);
5345
dd5746bf
SB
5346 status = be_fw_wait_ready(adapter);
5347 if (status)
5348 return status;
5349
9a6d73d9
KA
5350 status = be_cmd_reset_function(adapter);
5351 if (status)
5352 return status;
5353
d4360d6f 5354 be_intr_set(adapter, true);
2243e2e9
SP
5355 /* tell fw we're ready to fire cmds */
5356 status = be_cmd_fw_init(adapter);
5357 if (status)
5358 return status;
5359
9b0365f1 5360 be_setup(adapter);
6b7c5b94
SP
5361 if (netif_running(netdev)) {
5362 rtnl_lock();
5363 be_open(netdev);
5364 rtnl_unlock();
5365 }
f67ef7ba
PR
5366
5367 schedule_delayed_work(&adapter->func_recovery_work,
5368 msecs_to_jiffies(1000));
6b7c5b94 5369 netif_device_attach(netdev);
71d8d1b5 5370
76a9e08e 5371 if (adapter->wol_en)
71d8d1b5 5372 be_setup_wol(adapter, false);
a4ca055f 5373
6b7c5b94
SP
5374 return 0;
5375}
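
Note that be_suspend()/be_resume() call the driver's internal be_close()/be_open() rather than dev_close()/dev_open(), so netif_running() stays set across suspend and resume knows whether to reopen. A condensed sketch of that mirrored pairing, assuming drvdata holds the net_device (the driver itself stores its adapter) and using hypothetical demo_drv_* hooks:

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int demo_drv_open(struct net_device *nd) { return 0; }
static void demo_drv_close(struct net_device *nd) { }

static int demo_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);

        netif_device_detach(netdev);    /* block traffic before closing */
        if (netif_running(netdev)) {
                rtnl_lock();
                demo_drv_close(netdev); /* keeps the running state set */
                rtnl_unlock();
        }
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

static int demo_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        int status;

        status = pci_enable_device(pdev);
        if (status)
                return status;
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        if (netif_running(netdev)) {    /* still true: never dev_close()d */
                rtnl_lock();
                status = demo_drv_open(netdev);
                rtnl_unlock();
        }
        netif_device_attach(netdev);
        return status;
}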
5376
82456b03
SP
5377/*
5378 * An FLR will stop BE from DMAing any data.
5379 */
5380static void be_shutdown(struct pci_dev *pdev)
5381{
5382 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 5383
2d5d4154
AK
5384 if (!adapter)
5385 return;
82456b03 5386
d114f99a 5387 be_roce_dev_shutdown(adapter);
0f4a6828 5388 cancel_delayed_work_sync(&adapter->work);
f67ef7ba 5389 cancel_delayed_work_sync(&adapter->func_recovery_work);
a4ca055f 5390
2d5d4154 5391 netif_device_detach(adapter->netdev);
82456b03 5392
57841869
AK
5393 be_cmd_reset_function(adapter);
5394
82456b03 5395 pci_disable_device(pdev);
82456b03
SP
5396}
5397
cf588477 5398static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
748b539a 5399 pci_channel_state_t state)
cf588477
SP
5400{
5401 struct be_adapter *adapter = pci_get_drvdata(pdev);
5402 struct net_device *netdev = adapter->netdev;
5403
5404 dev_err(&adapter->pdev->dev, "EEH error detected\n");
5405
01e5b2c4
SK
5406 if (!adapter->eeh_error) {
5407 adapter->eeh_error = true;
cf588477 5408
01e5b2c4 5409 cancel_delayed_work_sync(&adapter->func_recovery_work);
cf588477 5410
cf588477 5411 rtnl_lock();
01e5b2c4
SK
5412 netif_device_detach(netdev);
5413 if (netif_running(netdev))
5414 be_close(netdev);
cf588477 5415 rtnl_unlock();
01e5b2c4
SK
5416
5417 be_clear(adapter);
cf588477 5418 }
cf588477
SP
5419
5420 if (state == pci_channel_io_perm_failure)
5421 return PCI_ERS_RESULT_DISCONNECT;
5422
5423 pci_disable_device(pdev);
5424
eeb7fc7b
SK
5425 /* The error could cause the FW to trigger a flash debug dump.
5426 * Resetting the card while flash dump is in progress
c8a54163
PR
5427 * can cause it not to recover; wait for it to finish.
5428 * Wait only for first function as it is needed only once per
5429 * adapter.
eeb7fc7b 5430 */
c8a54163
PR
5431 if (pdev->devfn == 0)
5432 ssleep(30);
5433
cf588477
SP
5434 return PCI_ERS_RESULT_NEED_RESET;
5435}
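
The EEH/AER contract implemented above runs in three stages: error_detected() quiesces the device and tells the PCI core whether a slot reset could help, slot_reset() re-enables and verifies firmware, and resume() rebuilds. A skeleton of just the first callback, again assuming drvdata holds the net_device:

#include <linux/pci.h>
#include <linux/netdevice.h>

static pci_ers_result_t demo_error_detected(struct pci_dev *pdev,
                                            pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);

        netif_device_detach(netdev);    /* stop traffic immediately */

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;       /* link is gone */

        pci_disable_device(pdev);
        /* a subsequent slot_reset() callback re-enables and re-inits */
        return PCI_ERS_RESULT_NEED_RESET;
}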
5436
5437static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
5438{
5439 struct be_adapter *adapter = pci_get_drvdata(pdev);
5440 int status;
5441
5442 dev_info(&adapter->pdev->dev, "EEH reset\n");
cf588477
SP
5443
5444 status = pci_enable_device(pdev);
5445 if (status)
5446 return PCI_ERS_RESULT_DISCONNECT;
5447
5448 pci_set_master(pdev);
1ca01512 5449 pci_set_power_state(pdev, PCI_D0);
cf588477
SP
5450 pci_restore_state(pdev);
5451
5452 /* Check if card is ok and fw is ready */
c5b3ad4c
SP
5453 dev_info(&adapter->pdev->dev,
5454 "Waiting for FW to be ready after EEH reset\n");
bf99e50d 5455 status = be_fw_wait_ready(adapter);
cf588477
SP
5456 if (status)
5457 return PCI_ERS_RESULT_DISCONNECT;
5458
d6b6d987 5459 pci_cleanup_aer_uncorrect_error_status(pdev);
01e5b2c4 5460 be_clear_all_error(adapter);
cf588477
SP
5461 return PCI_ERS_RESULT_RECOVERED;
5462}
5463
5464static void be_eeh_resume(struct pci_dev *pdev)
5465{
5466 int status = 0;
5467 struct be_adapter *adapter = pci_get_drvdata(pdev);
5468 struct net_device *netdev = adapter->netdev;
5469
5470 dev_info(&adapter->pdev->dev, "EEH resume\n");
5471
5472 pci_save_state(pdev);
5473
2d177be8 5474 status = be_cmd_reset_function(adapter);
cf588477
SP
5475 if (status)
5476 goto err;
5477
03a58baa
KA
5478 /* On some BE3 FW versions, after a HW reset,
5479 * interrupts will remain disabled for each function.
5480 * So, explicitly enable interrupts
5481 */
5482 be_intr_set(adapter, true);
5483
2d177be8
KA
5484 /* tell fw we're ready to fire cmds */
5485 status = be_cmd_fw_init(adapter);
bf99e50d
PR
5486 if (status)
5487 goto err;
5488
cf588477
SP
5489 status = be_setup(adapter);
5490 if (status)
5491 goto err;
5492
5493 if (netif_running(netdev)) {
5494 status = be_open(netdev);
5495 if (status)
5496 goto err;
5497 }
f67ef7ba
PR
5498
5499 schedule_delayed_work(&adapter->func_recovery_work,
5500 msecs_to_jiffies(1000));
cf588477
SP
5501 netif_device_attach(netdev);
5502 return;
5503err:
5504 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
5505}
5506
3646f0e5 5507static const struct pci_error_handlers be_eeh_handlers = {
cf588477
SP
5508 .error_detected = be_eeh_err_detected,
5509 .slot_reset = be_eeh_reset,
5510 .resume = be_eeh_resume,
5511};
5512
6b7c5b94
SP
5513static struct pci_driver be_driver = {
5514 .name = DRV_NAME,
5515 .id_table = be_dev_ids,
5516 .probe = be_probe,
5517 .remove = be_remove,
5518 .suspend = be_suspend,
cf588477 5519 .resume = be_resume,
82456b03 5520 .shutdown = be_shutdown,
cf588477 5521 .err_handler = &be_eeh_handlers
6b7c5b94
SP
5522};
5523
5524static int __init be_init_module(void)
5525{
8e95a202
JP
5526 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5527 rx_frag_size != 2048) {
6b7c5b94
SP
5528 printk(KERN_WARNING DRV_NAME
5529 " : Module param rx_frag_size must be 2048/4096/8192."
5530 " Using 2048\n");
5531 rx_frag_size = 2048;
5532 }
6b7c5b94
SP
5533
5534 return pci_register_driver(&be_driver);
5535}
5536module_init(be_init_module);
5537
5538static void __exit be_exit_module(void)
5539{
5540 pci_unregister_driver(&be_driver);
5541}
5542module_exit(be_exit_module);
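
For reference, the rx_frag_size validation in be_init_module() accepts only 2048, 4096, and 8192 and coerces any other value to 2048 with a warning. Assuming the module name follows DRV_NAME (be2net in mainline), a non-default size would be requested as "modprobe be2net rx_frag_size=4096".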