/* drivers/net/ethernet/emulex/benet/be_main.c */
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

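/* Toggle host interrupt reception by flipping the HOSTINTR bit of the
 * MEMBAR control register directly in PCI config space. The early return
 * when the bit already matches 'enable' avoids a redundant config write.
 */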
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

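/* Ring the RQ doorbell: the queue id and the number of newly posted
 * buffers are packed into one 32-bit doorbell write. The wmb() ensures
 * the buffer descriptors are visible to the device before the doorbell.
 */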
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
		DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if the PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or because the PF didn't pre-provision it.
	 */
	if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
				pport_stats->rx_address_filtered +
				pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

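/* Fold a 16-bit HW counter into a 32-bit accumulator, detecting wrap.
 * Worked example: if *acc is 0x0001FFF0 and the HW now reports 0x0010,
 * val (0x0010) is less than the low half (0xFFF0), so the counter wrapped;
 * the new value is 0x00010000 + 0x0010 + 65536 = 0x00020010.
 */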
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo,
			       u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* the erx HW counter below can actually wrap around after
		 * 65535. The driver accumulates a 32-bit value.
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else
			/* for BE3 and Skyhawk */
			populate_be_v1_stats(adapter);

		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

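/* ndo_get_stats64 handler: per-queue packet/byte counters are sampled
 * under a u64_stats fetch/retry loop so that 64-bit values read
 * consistently on 32-bit hosts; the error totals below are summed from
 * the driver stats filled in by be_parse_stats().
 */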
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			       u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

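/* Fill one WRB with the DMA address and length of a single fragment;
 * the 64-bit bus address is split into the hi/lo dwords the HW expects.
 */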
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

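/* If the 802.1p priority in the skb's VLAN tag is not set in the
 * adapter's priority bitmap, substitute the FW-recommended priority.
 */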
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

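/* Map the skb head and frags and fill one WRB per mapping; the header
 * WRB (reserved first) is written last with the totals. On a DMA mapping
 * error the queue head is rewound and all mappings made so far are undone.
 * Returns the number of bytes mapped, or 0 on failure.
 */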
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *) (skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
				struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto tx_drop;
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies the tot_len field in the IP
	 * header incorrectly when a VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes an incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}

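/* ndo_start_xmit handler: apply the chip-specific workarounds above,
 * build the WRBs, and stop the subqueue *before* ringing the doorbell
 * when it cannot hold another max-fragmented request, so the queue is
 * restarted only by completions of this or an earlier transmit.
 */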
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb)
		return NETDEV_TX_OK;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
	    new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			 "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU,
			 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
				dev_info(&adapter->pdev->dev,
					 "Re-Enabling HW VLAN filtering\n");
			}
		}
	}

	return status;

set_vlan_promisc:
	dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= be_max_vlans(adapter))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			  int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			vf_cfg->vlan_tag = vlan;
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		vf_cfg->vlan_tag = 0;
		vlan = vf_cfg->def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					       vf_cfg->if_handle, 0);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			     int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	if (lancer_chip(adapter))
		status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
	else
		status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
			"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

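/* Adaptive interrupt coalescing: once a second, derive the RX pkt rate
 * and map it to a new EQ delay (eqd = (pps / 110000) << 3, clamped to
 * [min_eqd, max_eqd]); the FW is told only when the value changes.
 */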
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
	       (rxcp->ip_csum || rxcp->ipv6);
}

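/* Look up the page_info for a completed RX frag. The DMA mapping spans a
 * whole (possibly compound) page that backs several frags, so it is
 * unmapped only when the frag marked last_page_user is consumed.
 */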
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

10ef9ab4
SP
1588static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1589 struct be_rx_compl_info *rxcp)
2e588f84
SP
1590{
1591 rxcp->pkt_size =
1592 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1593 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1594 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1595 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
9ecb42fd 1596 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
2e588f84
SP
1597 rxcp->ip_csum =
1598 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1599 rxcp->l4_csum =
1600 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1601 rxcp->ipv6 =
1602 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1603 rxcp->rxq_idx =
1604 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1605 rxcp->num_rcvd =
1606 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1607 rxcp->pkt_type =
1608 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
4b972914 1609 rxcp->rss_hash =
c297977e 1610 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
15d72184
SP
1611 if (rxcp->vlanf) {
1612 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
3c709f8f
DM
1613 compl);
1614 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1615 compl);
15d72184 1616 }
12004ae9 1617 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
e38b1706
SK
1618 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1619 ip_frag, compl);
2e588f84
SP
1620}
1621
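/* Both parsers above use AMAP_GET_BITS (be_hw.h) to pull named bit-fields
 * out of the completion descriptor. As an illustration only (amap_bits_get
 * is a hypothetical helper, not the driver's macro), extracting a field
 * 'len' bits wide that starts at bit 'off' amounts to:
 */
static inline u32 amap_bits_get(const u32 *dws, u32 off, u32 len)
{
	/* Sketch: assumes the field fits within one dword (len < 32) and
	 * does not straddle a dword boundary
	 */
	return (dws[off / 32] >> (off % 32)) & ((1u << len) - 1);
}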
1622static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1623{
1624 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1625 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1626 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1627
2e588f84
SP
 1628 /* For checking the valid bit it is OK to use either definition as the
1629 * valid bit is at the same position in both v0 and v1 Rx compl */
1630 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1631 return NULL;
6b7c5b94 1632
2e588f84
SP
1633 rmb();
1634 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1635
2e588f84 1636 if (adapter->be3_native)
10ef9ab4 1637 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1638 else
10ef9ab4 1639 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1640
e38b1706
SK
1641 if (rxcp->ip_frag)
1642 rxcp->l4_csum = 0;
1643
15d72184
SP
1644 if (rxcp->vlanf) {
1645 /* vlanf could be wrongly set in some cards.
 1646 * Ignore it if vtm is not set. */
752961a1 1647 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
15d72184 1648 rxcp->vlanf = 0;
6b7c5b94 1649
15d72184 1650 if (!lancer_chip(adapter))
3c709f8f 1651 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1652
939cf306 1653 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
3c709f8f 1654 !adapter->vlan_tag[rxcp->vlan_tag])
15d72184
SP
1655 rxcp->vlanf = 0;
1656 }
2e588f84
SP
1657
 1658 /* As the compl has been parsed, reset it; we won't touch it again */
1659 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1660
3abcdeda 1661 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1662 return rxcp;
1663}
1664
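/* The ring-index helpers used above (index_inc(), queue_tail_inc() etc.
 * from be.h) wrap an index around the ring length. Assuming queue lengths
 * are powers of two, as they are in this driver, the shape is roughly:
 *
 *	static inline void index_inc(u16 *index, u16 limit)
 *	{
 *		*index = (*index + 1) & (limit - 1);
 *	}
 */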
1829b086 1665static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1666{
6b7c5b94 1667 u32 order = get_order(size);
1829b086 1668
6b7c5b94 1669 if (order > 0)
1829b086
ED
1670 gfp |= __GFP_COMP;
1671 return alloc_pages(gfp, order);
6b7c5b94
SP
1672}
1673
1674/*
 1675 * Allocate a page, split it into fragments of size rx_frag_size, and post
 1676 * them as receive buffers to BE
1677 */
1829b086 1678static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1679{
3abcdeda 1680 struct be_adapter *adapter = rxo->adapter;
26d92f92 1681 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1682 struct be_queue_info *rxq = &rxo->q;
6b7c5b94
SP
1683 struct page *pagep = NULL;
1684 struct be_eth_rx_d *rxd;
1685 u64 page_dmaaddr = 0, frag_dmaaddr;
1686 u32 posted, page_offset = 0;
1687
3abcdeda 1688 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1689 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1690 if (!pagep) {
1829b086 1691 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1692 if (unlikely(!pagep)) {
ac124ff9 1693 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1694 break;
1695 }
2b7bcebf
IV
1696 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1697 0, adapter->big_page_size,
1698 DMA_FROM_DEVICE);
6b7c5b94
SP
1699 page_info->page_offset = 0;
1700 } else {
1701 get_page(pagep);
1702 page_info->page_offset = page_offset + rx_frag_size;
1703 }
1704 page_offset = page_info->page_offset;
1705 page_info->page = pagep;
fac6da5b 1706 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
6b7c5b94
SP
1707 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1708
1709 rxd = queue_head_node(rxq);
1710 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1711 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1712
1713 /* Any space left in the current big page for another frag? */
1714 if ((page_offset + rx_frag_size + rx_frag_size) >
1715 adapter->big_page_size) {
1716 pagep = NULL;
1717 page_info->last_page_user = true;
1718 }
26d92f92
SP
1719
1720 prev_page_info = page_info;
1721 queue_head_inc(rxq);
10ef9ab4 1722 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1723 }
1724 if (pagep)
26d92f92 1725 prev_page_info->last_page_user = true;
6b7c5b94
SP
1726
1727 if (posted) {
6b7c5b94 1728 atomic_add(posted, &rxq->used);
8788fdc2 1729 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1730 } else if (atomic_read(&rxq->used) == 0) {
1731 /* Let be_worker replenish when memory is available */
3abcdeda 1732 rxo->rx_post_starved = true;
6b7c5b94 1733 }
6b7c5b94
SP
1734}
1735
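/* Worked example for be_post_rx_frags() above, assuming 4K pages and the
 * default rx_frag_size of 2048: big_page_size works out to 4096, so each
 * page yields two frags. The first frag dma-maps the page and is posted at
 * offset 0; the second takes a get_page() reference and is posted at
 * offset 2048. No room remains for a third frag, so that page_info is
 * marked last_page_user and the DMA mapping is released exactly once when
 * the page's final frag is consumed.
 */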
5fb379ee 1736static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1737{
6b7c5b94
SP
1738 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1739
1740 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1741 return NULL;
1742
f3eb62d2 1743 rmb();
6b7c5b94
SP
1744 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1745
1746 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1747
1748 queue_tail_inc(tx_cq);
1749 return txcp;
1750}
1751
3c8def97
SP
1752static u16 be_tx_compl_process(struct be_adapter *adapter,
1753 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1754{
3c8def97 1755 struct be_queue_info *txq = &txo->q;
a73b796e 1756 struct be_eth_wrb *wrb;
3c8def97 1757 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1758 struct sk_buff *sent_skb;
ec43b1a6
SP
1759 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1760 bool unmap_skb_hdr = true;
6b7c5b94 1761
ec43b1a6 1762 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1763 BUG_ON(!sent_skb);
ec43b1a6
SP
1764 sent_skbs[txq->tail] = NULL;
1765
1766 /* skip header wrb */
a73b796e 1767 queue_tail_inc(txq);
6b7c5b94 1768
ec43b1a6 1769 do {
6b7c5b94 1770 cur_index = txq->tail;
a73b796e 1771 wrb = queue_tail_node(txq);
2b7bcebf
IV
1772 unmap_tx_frag(&adapter->pdev->dev, wrb,
1773 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1774 unmap_skb_hdr = false;
1775
6b7c5b94
SP
1776 num_wrbs++;
1777 queue_tail_inc(txq);
ec43b1a6 1778 } while (cur_index != last_index);
6b7c5b94 1779
6b7c5b94 1780 kfree_skb(sent_skb);
4d586b82 1781 return num_wrbs;
6b7c5b94
SP
1782}
1783
10ef9ab4
SP
1784/* Return the number of events in the event queue */
1785static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1786{
10ef9ab4
SP
1787 struct be_eq_entry *eqe;
1788 int num = 0;
859b1e4e 1789
10ef9ab4
SP
1790 do {
1791 eqe = queue_tail_node(&eqo->q);
1792 if (eqe->evt == 0)
1793 break;
859b1e4e 1794
10ef9ab4
SP
1795 rmb();
1796 eqe->evt = 0;
1797 num++;
1798 queue_tail_inc(&eqo->q);
1799 } while (true);
1800
1801 return num;
859b1e4e
SP
1802}
1803
10ef9ab4
SP
 1804 /* Leaves the EQ in a disarmed state */
1805static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1806{
10ef9ab4 1807 int num = events_get(eqo);
859b1e4e 1808
10ef9ab4 1809 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1810}
1811
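/* Note on the call above: as used throughout this file,
 * be_eq_notify(adapter, qid, arm, clear_int, num_popped) rings the EQ
 * doorbell, crediting back num_popped consumed entries and re-arming the
 * EQ only when arm is true. Passing arm == false here is what leaves the
 * EQ disarmed after cleanup.
 */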
10ef9ab4 1812static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
1813{
1814 struct be_rx_page_info *page_info;
3abcdeda
SP
1815 struct be_queue_info *rxq = &rxo->q;
1816 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1817 struct be_rx_compl_info *rxcp;
d23e946c
SP
1818 struct be_adapter *adapter = rxo->adapter;
1819 int flush_wait = 0;
6b7c5b94
SP
1820 u16 tail;
1821
d23e946c
SP
1822 /* Consume pending rx completions.
1823 * Wait for the flush completion (identified by zero num_rcvd)
1824 * to arrive. Notify CQ even when there are no more CQ entries
1825 * for HW to flush partially coalesced CQ entries.
1826 * In Lancer, there is no need to wait for flush compl.
1827 */
1828 for (;;) {
1829 rxcp = be_rx_compl_get(rxo);
1830 if (rxcp == NULL) {
1831 if (lancer_chip(adapter))
1832 break;
1833
1834 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1835 dev_warn(&adapter->pdev->dev,
1836 "did not receive flush compl\n");
1837 break;
1838 }
1839 be_cq_notify(adapter, rx_cq->id, true, 0);
1840 mdelay(1);
1841 } else {
1842 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 1843 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
1844 if (rxcp->num_rcvd == 0)
1845 break;
1846 }
6b7c5b94
SP
1847 }
1848
d23e946c
SP
1849 /* After cleanup, leave the CQ in unarmed state */
1850 be_cq_notify(adapter, rx_cq->id, false, 0);
1851
1852 /* Then free posted rx buffers that were not used */
6b7c5b94 1853 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
cdab23b7 1854 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
10ef9ab4 1855 page_info = get_rx_page_info(rxo, tail);
6b7c5b94
SP
1856 put_page(page_info->page);
1857 memset(page_info, 0, sizeof(*page_info));
1858 }
1859 BUG_ON(atomic_read(&rxq->used));
482c9e79 1860 rxq->tail = rxq->head = 0;
6b7c5b94
SP
1861}
1862
0ae57bb3 1863static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 1864{
0ae57bb3
SP
1865 struct be_tx_obj *txo;
1866 struct be_queue_info *txq;
a8e9179a 1867 struct be_eth_tx_compl *txcp;
4d586b82 1868 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
1869 struct sk_buff *sent_skb;
1870 bool dummy_wrb;
0ae57bb3 1871 int i, pending_txqs;
a8e9179a
SP
1872
1873 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1874 do {
0ae57bb3
SP
1875 pending_txqs = adapter->num_tx_qs;
1876
1877 for_all_tx_queues(adapter, txo, i) {
1878 txq = &txo->q;
1879 while ((txcp = be_tx_compl_get(&txo->cq))) {
1880 end_idx =
1881 AMAP_GET_BITS(struct amap_eth_tx_compl,
1882 wrb_index, txcp);
1883 num_wrbs += be_tx_compl_process(adapter, txo,
1884 end_idx);
1885 cmpl++;
1886 }
1887 if (cmpl) {
1888 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1889 atomic_sub(num_wrbs, &txq->used);
1890 cmpl = 0;
1891 num_wrbs = 0;
1892 }
1893 if (atomic_read(&txq->used) == 0)
1894 pending_txqs--;
a8e9179a
SP
1895 }
1896
0ae57bb3 1897 if (pending_txqs == 0 || ++timeo > 200)
a8e9179a
SP
1898 break;
1899
1900 mdelay(1);
1901 } while (true);
1902
0ae57bb3
SP
1903 for_all_tx_queues(adapter, txo, i) {
1904 txq = &txo->q;
1905 if (atomic_read(&txq->used))
1906 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1907 atomic_read(&txq->used));
1908
1909 /* free posted tx for which compls will never arrive */
1910 while (atomic_read(&txq->used)) {
1911 sent_skb = txo->sent_skb_list[txq->tail];
1912 end_idx = txq->tail;
1913 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1914 &dummy_wrb);
1915 index_adv(&end_idx, num_wrbs - 1, txq->len);
1916 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1917 atomic_sub(num_wrbs, &txq->used);
1918 }
b03388d6 1919 }
6b7c5b94
SP
1920}
1921
10ef9ab4
SP
1922static void be_evt_queues_destroy(struct be_adapter *adapter)
1923{
1924 struct be_eq_obj *eqo;
1925 int i;
1926
1927 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
1928 if (eqo->q.created) {
1929 be_eq_clean(eqo);
10ef9ab4 1930 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
68d7bdcb 1931 netif_napi_del(&eqo->napi);
19d59aa7 1932 }
10ef9ab4
SP
1933 be_queue_free(adapter, &eqo->q);
1934 }
1935}
1936
1937static int be_evt_queues_create(struct be_adapter *adapter)
1938{
1939 struct be_queue_info *eq;
1940 struct be_eq_obj *eqo;
1941 int i, rc;
1942
92bf14ab
SP
1943 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
1944 adapter->cfg_num_qs);
10ef9ab4
SP
1945
1946 for_all_evt_queues(adapter, eqo, i) {
68d7bdcb
SP
1947 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
1948 BE_NAPI_WEIGHT);
10ef9ab4
SP
1949 eqo->adapter = adapter;
1950 eqo->tx_budget = BE_TX_BUDGET;
1951 eqo->idx = i;
1952 eqo->max_eqd = BE_MAX_EQD;
1953 eqo->enable_aic = true;
1954
1955 eq = &eqo->q;
1956 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1957 sizeof(struct be_eq_entry));
1958 if (rc)
1959 return rc;
1960
f2f781a7 1961 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
1962 if (rc)
1963 return rc;
1964 }
1cfafab9 1965 return 0;
10ef9ab4
SP
1966}
1967
5fb379ee
SP
1968static void be_mcc_queues_destroy(struct be_adapter *adapter)
1969{
1970 struct be_queue_info *q;
5fb379ee 1971
8788fdc2 1972 q = &adapter->mcc_obj.q;
5fb379ee 1973 if (q->created)
8788fdc2 1974 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1975 be_queue_free(adapter, q);
1976
8788fdc2 1977 q = &adapter->mcc_obj.cq;
5fb379ee 1978 if (q->created)
8788fdc2 1979 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1980 be_queue_free(adapter, q);
1981}
1982
1983/* Must be called only after TX qs are created as MCC shares TX EQ */
1984static int be_mcc_queues_create(struct be_adapter *adapter)
1985{
1986 struct be_queue_info *q, *cq;
5fb379ee 1987
8788fdc2 1988 cq = &adapter->mcc_obj.cq;
5fb379ee 1989 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1990 sizeof(struct be_mcc_compl)))
5fb379ee
SP
1991 goto err;
1992
10ef9ab4
SP
1993 /* Use the default EQ for MCC completions */
1994 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
1995 goto mcc_cq_free;
1996
8788fdc2 1997 q = &adapter->mcc_obj.q;
5fb379ee
SP
1998 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1999 goto mcc_cq_destroy;
2000
8788fdc2 2001 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2002 goto mcc_q_free;
2003
2004 return 0;
2005
2006mcc_q_free:
2007 be_queue_free(adapter, q);
2008mcc_cq_destroy:
8788fdc2 2009 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2010mcc_cq_free:
2011 be_queue_free(adapter, cq);
2012err:
2013 return -1;
2014}
2015
6b7c5b94
SP
2016static void be_tx_queues_destroy(struct be_adapter *adapter)
2017{
2018 struct be_queue_info *q;
3c8def97
SP
2019 struct be_tx_obj *txo;
2020 u8 i;
6b7c5b94 2021
3c8def97
SP
2022 for_all_tx_queues(adapter, txo, i) {
2023 q = &txo->q;
2024 if (q->created)
2025 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2026 be_queue_free(adapter, q);
6b7c5b94 2027
3c8def97
SP
2028 q = &txo->cq;
2029 if (q->created)
2030 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2031 be_queue_free(adapter, q);
2032 }
6b7c5b94
SP
2033}
2034
7707133c 2035static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2036{
10ef9ab4 2037 struct be_queue_info *cq, *eq;
3c8def97 2038 struct be_tx_obj *txo;
92bf14ab 2039 int status, i;
6b7c5b94 2040
92bf14ab 2041 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2042
10ef9ab4
SP
2043 for_all_tx_queues(adapter, txo, i) {
2044 cq = &txo->cq;
2045 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2046 sizeof(struct be_eth_tx_compl));
2047 if (status)
2048 return status;
3c8def97 2049
10ef9ab4
SP
2050 /* If num_evt_qs is less than num_tx_qs, then more than
2051 * one txq share an eq
2052 */
2053 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2054 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2055 if (status)
2056 return status;
6b7c5b94 2057
10ef9ab4
SP
2058 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2059 sizeof(struct be_eth_wrb));
2060 if (status)
2061 return status;
6b7c5b94 2062
94d73aaa 2063 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2064 if (status)
2065 return status;
3c8def97 2066 }
6b7c5b94 2067
d379142b
SP
2068 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2069 adapter->num_tx_qs);
10ef9ab4 2070 return 0;
6b7c5b94
SP
2071}
2072
10ef9ab4 2073static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2074{
2075 struct be_queue_info *q;
3abcdeda
SP
2076 struct be_rx_obj *rxo;
2077 int i;
2078
2079 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2080 q = &rxo->cq;
2081 if (q->created)
2082 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2083 be_queue_free(adapter, q);
ac6a0c4a
SP
2084 }
2085}
2086
10ef9ab4 2087static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2088{
10ef9ab4 2089 struct be_queue_info *eq, *cq;
3abcdeda
SP
2090 struct be_rx_obj *rxo;
2091 int rc, i;
6b7c5b94 2092
92bf14ab
SP
2093 /* We can create as many RSS rings as there are EQs. */
2094 adapter->num_rx_qs = adapter->num_evt_qs;
2095
 2096 /* We'll use RSS only if at least 2 RSS rings are supported.
2097 * When RSS is used, we'll need a default RXQ for non-IP traffic.
10ef9ab4 2098 */
92bf14ab
SP
2099 if (adapter->num_rx_qs > 1)
2100 adapter->num_rx_qs++;
2101
6b7c5b94 2102 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2103 for_all_rx_queues(adapter, rxo, i) {
2104 rxo->adapter = adapter;
3abcdeda
SP
2105 cq = &rxo->cq;
2106 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2107 sizeof(struct be_eth_rx_compl));
2108 if (rc)
10ef9ab4 2109 return rc;
3abcdeda 2110
10ef9ab4
SP
2111 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2112 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2113 if (rc)
10ef9ab4 2114 return rc;
3abcdeda 2115 }
6b7c5b94 2116
d379142b
SP
2117 dev_info(&adapter->pdev->dev,
2118 "created %d RSS queue(s) and 1 default RX queue\n",
2119 adapter->num_rx_qs - 1);
10ef9ab4 2120 return 0;
b628bde2
SP
2121}
2122
6b7c5b94
SP
2123static irqreturn_t be_intx(int irq, void *dev)
2124{
e49cc34f
SP
2125 struct be_eq_obj *eqo = dev;
2126 struct be_adapter *adapter = eqo->adapter;
2127 int num_evts = 0;
6b7c5b94 2128
d0b9cec3
SP
2129 /* IRQ is not expected when NAPI is scheduled as the EQ
2130 * will not be armed.
2131 * But, this can happen on Lancer INTx where it takes
 2132 * a while to de-assert INTx or in BE2 where occasionally
2133 * an interrupt may be raised even when EQ is unarmed.
2134 * If NAPI is already scheduled, then counting & notifying
2135 * events will orphan them.
e49cc34f 2136 */
d0b9cec3 2137 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2138 num_evts = events_get(eqo);
d0b9cec3
SP
2139 __napi_schedule(&eqo->napi);
2140 if (num_evts)
2141 eqo->spurious_intr = 0;
2142 }
2143 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2144
d0b9cec3
SP
 2145 /* Return IRQ_HANDLED only for the first spurious intr
2146 * after a valid intr to stop the kernel from branding
2147 * this irq as a bad one!
e49cc34f 2148 */
d0b9cec3
SP
2149 if (num_evts || eqo->spurious_intr++ == 0)
2150 return IRQ_HANDLED;
2151 else
2152 return IRQ_NONE;
6b7c5b94
SP
2153}
2154
10ef9ab4 2155static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2156{
10ef9ab4 2157 struct be_eq_obj *eqo = dev;
6b7c5b94 2158
0b545a62
SP
2159 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2160 napi_schedule(&eqo->napi);
6b7c5b94
SP
2161 return IRQ_HANDLED;
2162}
2163
2e588f84 2164static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2165{
e38b1706 2166 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2167}
2168
10ef9ab4
SP
2169static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2170 int budget)
6b7c5b94 2171{
3abcdeda
SP
2172 struct be_adapter *adapter = rxo->adapter;
2173 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2174 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
2175 u32 work_done;
2176
2177 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2178 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2179 if (!rxcp)
2180 break;
2181
12004ae9
SP
 2182 /* Is it a flush compl that has no data? */
2183 if (unlikely(rxcp->num_rcvd == 0))
2184 goto loop_continue;
2185
2186 /* Discard compl with partial DMA Lancer B0 */
2187 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2188 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2189 goto loop_continue;
2190 }
2191
2192 /* On BE drop pkts that arrive due to imperfect filtering in
 2193 * promiscuous mode on some SKUs
2194 */
2195 if (unlikely(rxcp->port != adapter->port_num &&
2196 !lancer_chip(adapter))) {
10ef9ab4 2197 be_rx_compl_discard(rxo, rxcp);
12004ae9 2198 goto loop_continue;
64642811 2199 }
009dd872 2200
12004ae9 2201 if (do_gro(rxcp))
10ef9ab4 2202 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2203 else
10ef9ab4 2204 be_rx_compl_process(rxo, rxcp);
12004ae9 2205loop_continue:
2e588f84 2206 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2207 }
2208
10ef9ab4
SP
2209 if (work_done) {
2210 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2211
10ef9ab4
SP
2212 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2213 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 2214 }
10ef9ab4 2215
6b7c5b94
SP
2216 return work_done;
2217}
2218
10ef9ab4
SP
2219static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2220 int budget, int idx)
6b7c5b94 2221{
6b7c5b94 2222 struct be_eth_tx_compl *txcp;
10ef9ab4 2223 int num_wrbs = 0, work_done;
3c8def97 2224
10ef9ab4
SP
2225 for (work_done = 0; work_done < budget; work_done++) {
2226 txcp = be_tx_compl_get(&txo->cq);
2227 if (!txcp)
2228 break;
2229 num_wrbs += be_tx_compl_process(adapter, txo,
3c8def97
SP
2230 AMAP_GET_BITS(struct amap_eth_tx_compl,
2231 wrb_index, txcp));
10ef9ab4 2232 }
6b7c5b94 2233
10ef9ab4
SP
2234 if (work_done) {
2235 be_cq_notify(adapter, txo->cq.id, true, work_done);
2236 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2237
10ef9ab4
SP
2238 /* As Tx wrbs have been freed up, wake up netdev queue
2239 * if it was stopped due to lack of tx wrbs. */
2240 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2241 atomic_read(&txo->q.used) < txo->q.len / 2) {
2242 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2243 }
10ef9ab4
SP
2244
2245 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2246 tx_stats(txo)->tx_compl += work_done;
2247 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2248 }
10ef9ab4
SP
2249 return (work_done < budget); /* Done */
2250}
6b7c5b94 2251
68d7bdcb 2252int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2253{
2254 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2255 struct be_adapter *adapter = eqo->adapter;
0b545a62 2256 int max_work = 0, work, i, num_evts;
10ef9ab4 2257 bool tx_done;
f31e50a8 2258
0b545a62
SP
2259 num_evts = events_get(eqo);
2260
10ef9ab4
SP
2261 /* Process all TXQs serviced by this EQ */
2262 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2263 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2264 eqo->tx_budget, i);
2265 if (!tx_done)
2266 max_work = budget;
f31e50a8
SP
2267 }
2268
10ef9ab4
SP
 2269 /* This loop iterates twice for EQ0, in which
 2270 * completions of the last RXQ (the default one) are also processed.
 2271 * For other EQs the loop iterates only once.
2272 */
2273 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2274 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2275 max_work = max(work, max_work);
2276 }
6b7c5b94 2277
10ef9ab4
SP
2278 if (is_mcc_eqo(eqo))
2279 be_process_mcc(adapter);
93c86700 2280
10ef9ab4
SP
2281 if (max_work < budget) {
2282 napi_complete(napi);
0b545a62 2283 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2284 } else {
2285 /* As we'll continue in polling mode, count and clear events */
0b545a62 2286 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2287 }
10ef9ab4 2288 return max_work;
6b7c5b94
SP
2289}
2290
f67ef7ba 2291void be_detect_error(struct be_adapter *adapter)
7c185276 2292{
e1cfb67a
PR
2293 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2294 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276
AK
2295 u32 i;
2296
d23e946c 2297 if (be_hw_error(adapter))
72f02485
SP
2298 return;
2299
e1cfb67a
PR
2300 if (lancer_chip(adapter)) {
2301 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2302 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2303 sliport_err1 = ioread32(adapter->db +
2304 SLIPORT_ERROR1_OFFSET);
2305 sliport_err2 = ioread32(adapter->db +
2306 SLIPORT_ERROR2_OFFSET);
2307 }
2308 } else {
2309 pci_read_config_dword(adapter->pdev,
2310 PCICFG_UE_STATUS_LOW, &ue_lo);
2311 pci_read_config_dword(adapter->pdev,
2312 PCICFG_UE_STATUS_HIGH, &ue_hi);
2313 pci_read_config_dword(adapter->pdev,
2314 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2315 pci_read_config_dword(adapter->pdev,
2316 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2317
f67ef7ba
PR
2318 ue_lo = (ue_lo & ~ue_lo_mask);
2319 ue_hi = (ue_hi & ~ue_hi_mask);
e1cfb67a 2320 }
7c185276 2321
1451ae6e
AK
2322 /* On certain platforms BE hardware can indicate spurious UEs.
2323 * Allow the h/w to stop working completely in case of a real UE.
2324 * Hence not setting the hw_error for UE detection.
2325 */
2326 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
f67ef7ba 2327 adapter->hw_error = true;
434b3648 2328 dev_err(&adapter->pdev->dev,
f67ef7ba
PR
2329 "Error detected in the card\n");
2330 }
2331
2332 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2333 dev_err(&adapter->pdev->dev,
2334 "ERR: sliport status 0x%x\n", sliport_status);
2335 dev_err(&adapter->pdev->dev,
2336 "ERR: sliport error1 0x%x\n", sliport_err1);
2337 dev_err(&adapter->pdev->dev,
2338 "ERR: sliport error2 0x%x\n", sliport_err2);
d053de91
AK
2339 }
2340
e1cfb67a
PR
2341 if (ue_lo) {
2342 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2343 if (ue_lo & 1)
7c185276
AK
2344 dev_err(&adapter->pdev->dev,
2345 "UE: %s bit set\n", ue_status_low_desc[i]);
2346 }
2347 }
f67ef7ba 2348
e1cfb67a
PR
2349 if (ue_hi) {
2350 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2351 if (ue_hi & 1)
7c185276
AK
2352 dev_err(&adapter->pdev->dev,
2353 "UE: %s bit set\n", ue_status_hi_desc[i]);
2354 }
2355 }
2356
2357}
2358
8d56ff11
SP
2359static void be_msix_disable(struct be_adapter *adapter)
2360{
ac6a0c4a 2361 if (msix_enabled(adapter)) {
8d56ff11 2362 pci_disable_msix(adapter->pdev);
ac6a0c4a 2363 adapter->num_msix_vec = 0;
68d7bdcb 2364 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2365 }
2366}
2367
c2bba3df 2368static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2369{
92bf14ab 2370 int i, status, num_vec;
d379142b 2371 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2372
92bf14ab
SP
2373 /* If RoCE is supported, program the max number of NIC vectors that
2374 * may be configured via set-channels, along with vectors needed for
 2375 * RoCE. Else, just program the number we'll use initially.
2376 */
2377 if (be_roce_supported(adapter))
2378 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2379 2 * num_online_cpus());
2380 else
2381 num_vec = adapter->cfg_num_qs;
3abcdeda 2382
ac6a0c4a 2383 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2384 adapter->msix_entries[i].entry = i;
2385
ac6a0c4a 2386 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
3abcdeda
SP
2387 if (status == 0) {
2388 goto done;
92bf14ab 2389 } else if (status >= MIN_MSIX_VECTORS) {
ac6a0c4a 2390 num_vec = status;
c2bba3df
SK
2391 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2392 num_vec);
2393 if (!status)
3abcdeda 2394 goto done;
3abcdeda 2395 }
d379142b
SP
2396
2397 dev_warn(dev, "MSIx enable failed\n");
92bf14ab 2398
c2bba3df
SK
2399 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2400 if (!be_physfn(adapter))
2401 return status;
2402 return 0;
3abcdeda 2403done:
92bf14ab
SP
2404 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2405 adapter->num_msix_roce_vec = num_vec / 2;
2406 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2407 adapter->num_msix_roce_vec);
2408 }
2409
2410 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2411
2412 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2413 adapter->num_msix_vec);
c2bba3df 2414 return 0;
6b7c5b94
SP
2415}
2416
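/* be_msix_enable() above relies on the legacy pci_enable_msix() contract
 * of this kernel era: 0 on success, a negative errno on failure, or a
 * positive count of the vectors that could have been allocated, which the
 * driver uses to retry with the smaller request.
 */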
fe6d2a38 2417static inline int be_msix_vec_get(struct be_adapter *adapter,
10ef9ab4 2418 struct be_eq_obj *eqo)
b628bde2 2419{
f2f781a7 2420 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2421}
6b7c5b94 2422
b628bde2
SP
2423static int be_msix_register(struct be_adapter *adapter)
2424{
10ef9ab4
SP
2425 struct net_device *netdev = adapter->netdev;
2426 struct be_eq_obj *eqo;
2427 int status, i, vec;
6b7c5b94 2428
10ef9ab4
SP
2429 for_all_evt_queues(adapter, eqo, i) {
2430 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2431 vec = be_msix_vec_get(adapter, eqo);
2432 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2433 if (status)
2434 goto err_msix;
2435 }
b628bde2 2436
6b7c5b94 2437 return 0;
3abcdeda 2438err_msix:
10ef9ab4
SP
2439 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2440 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2441 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2442 status);
ac6a0c4a 2443 be_msix_disable(adapter);
6b7c5b94
SP
2444 return status;
2445}
2446
2447static int be_irq_register(struct be_adapter *adapter)
2448{
2449 struct net_device *netdev = adapter->netdev;
2450 int status;
2451
ac6a0c4a 2452 if (msix_enabled(adapter)) {
6b7c5b94
SP
2453 status = be_msix_register(adapter);
2454 if (status == 0)
2455 goto done;
ba343c77
SB
2456 /* INTx is not supported for VF */
2457 if (!be_physfn(adapter))
2458 return status;
6b7c5b94
SP
2459 }
2460
e49cc34f 2461 /* INTx: only the first EQ is used */
6b7c5b94
SP
2462 netdev->irq = adapter->pdev->irq;
2463 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2464 &adapter->eq_obj[0]);
6b7c5b94
SP
2465 if (status) {
2466 dev_err(&adapter->pdev->dev,
2467 "INTx request IRQ failed - err %d\n", status);
2468 return status;
2469 }
2470done:
2471 adapter->isr_registered = true;
2472 return 0;
2473}
2474
2475static void be_irq_unregister(struct be_adapter *adapter)
2476{
2477 struct net_device *netdev = adapter->netdev;
10ef9ab4 2478 struct be_eq_obj *eqo;
3abcdeda 2479 int i;
6b7c5b94
SP
2480
2481 if (!adapter->isr_registered)
2482 return;
2483
2484 /* INTx */
ac6a0c4a 2485 if (!msix_enabled(adapter)) {
e49cc34f 2486 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2487 goto done;
2488 }
2489
2490 /* MSIx */
10ef9ab4
SP
2491 for_all_evt_queues(adapter, eqo, i)
2492 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2493
6b7c5b94
SP
2494done:
2495 adapter->isr_registered = false;
6b7c5b94
SP
2496}
2497
10ef9ab4 2498static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2499{
2500 struct be_queue_info *q;
2501 struct be_rx_obj *rxo;
2502 int i;
2503
2504 for_all_rx_queues(adapter, rxo, i) {
2505 q = &rxo->q;
2506 if (q->created) {
2507 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2508 be_rx_cq_clean(rxo);
482c9e79 2509 }
10ef9ab4 2510 be_queue_free(adapter, q);
482c9e79
SP
2511 }
2512}
2513
889cd4b2
SP
2514static int be_close(struct net_device *netdev)
2515{
2516 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2517 struct be_eq_obj *eqo;
2518 int i;
889cd4b2 2519
045508a8
PP
2520 be_roce_dev_close(adapter);
2521
04d3d624
SK
2522 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2523 for_all_evt_queues(adapter, eqo, i)
2524 napi_disable(&eqo->napi);
2525 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2526 }
a323d9bf
SP
2527
2528 be_async_mcc_disable(adapter);
2529
2530 /* Wait for all pending tx completions to arrive so that
2531 * all tx skbs are freed.
2532 */
fba87559 2533 netif_tx_disable(netdev);
6e1f9975 2534 be_tx_compl_clean(adapter);
a323d9bf
SP
2535
2536 be_rx_qs_destroy(adapter);
2537
2538 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2539 if (msix_enabled(adapter))
2540 synchronize_irq(be_msix_vec_get(adapter, eqo));
2541 else
2542 synchronize_irq(netdev->irq);
2543 be_eq_clean(eqo);
63fcb27f
PR
2544 }
2545
889cd4b2
SP
2546 be_irq_unregister(adapter);
2547
482c9e79
SP
2548 return 0;
2549}
2550
10ef9ab4 2551static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2552{
2553 struct be_rx_obj *rxo;
e9008ee9
PR
2554 int rc, i, j;
2555 u8 rsstable[128];
482c9e79
SP
2556
2557 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2558 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2559 sizeof(struct be_eth_rx_d));
2560 if (rc)
2561 return rc;
2562 }
2563
2564 /* The FW would like the default RXQ to be created first */
2565 rxo = default_rxo(adapter);
2566 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2567 adapter->if_handle, false, &rxo->rss_id);
2568 if (rc)
2569 return rc;
2570
2571 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2572 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2573 rx_frag_size, adapter->if_handle,
2574 true, &rxo->rss_id);
482c9e79
SP
2575 if (rc)
2576 return rc;
2577 }
2578
2579 if (be_multi_rxq(adapter)) {
e9008ee9
PR
2580 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2581 for_all_rss_queues(adapter, rxo, i) {
2582 if ((j + i) >= 128)
2583 break;
2584 rsstable[j + i] = rxo->rss_id;
2585 }
2586 }
594ad54a
SR
2587 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2588 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2589
2590 if (!BEx_chip(adapter))
2591 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2592 RSS_ENABLE_UDP_IPV6;
2593
2594 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2595 128);
2596 if (rc) {
2597 adapter->rss_flags = 0;
482c9e79 2598 return rc;
594ad54a 2599 }
482c9e79
SP
2600 }
2601
2602 /* First time posting */
10ef9ab4 2603 for_all_rx_queues(adapter, rxo, i)
482c9e79 2604 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2605 return 0;
2606}
2607
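/* Worked example for the RSS table setup above: with num_rx_qs == 4
 * (three RSS rings plus the default ring), j advances in steps of 3 and
 * the 128-entry indirection table becomes [id0, id1, id2, id0, id1, id2,
 * ...], i.e. the three rss_ids repeated round-robin, with the last partial
 * round cut off at entry 127 by the (j + i) >= 128 check.
 */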
6b7c5b94
SP
2608static int be_open(struct net_device *netdev)
2609{
2610 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2611 struct be_eq_obj *eqo;
3abcdeda 2612 struct be_rx_obj *rxo;
10ef9ab4 2613 struct be_tx_obj *txo;
b236916a 2614 u8 link_status;
3abcdeda 2615 int status, i;
5fb379ee 2616
10ef9ab4 2617 status = be_rx_qs_create(adapter);
482c9e79
SP
2618 if (status)
2619 goto err;
2620
c2bba3df
SK
2621 status = be_irq_register(adapter);
2622 if (status)
2623 goto err;
5fb379ee 2624
10ef9ab4 2625 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2626 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2627
10ef9ab4
SP
2628 for_all_tx_queues(adapter, txo, i)
2629 be_cq_notify(adapter, txo->cq.id, true, 0);
2630
7a1e9b20
SP
2631 be_async_mcc_enable(adapter);
2632
10ef9ab4
SP
2633 for_all_evt_queues(adapter, eqo, i) {
2634 napi_enable(&eqo->napi);
2635 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2636 }
04d3d624 2637 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 2638
323ff71e 2639 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
2640 if (!status)
2641 be_link_status_update(adapter, link_status);
2642
fba87559 2643 netif_tx_start_all_queues(netdev);
045508a8 2644 be_roce_dev_open(adapter);
889cd4b2
SP
2645 return 0;
2646err:
2647 be_close(adapter->netdev);
2648 return -EIO;
5fb379ee
SP
2649}
2650
71d8d1b5
AK
2651static int be_setup_wol(struct be_adapter *adapter, bool enable)
2652{
2653 struct be_dma_mem cmd;
2654 int status = 0;
2655 u8 mac[ETH_ALEN];
2656
2657 memset(mac, 0, ETH_ALEN);
2658
2659 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
2660 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2661 GFP_KERNEL);
71d8d1b5
AK
2662 if (cmd.va == NULL)
2663 return -1;
71d8d1b5
AK
2664
2665 if (enable) {
2666 status = pci_write_config_dword(adapter->pdev,
2667 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2668 if (status) {
2669 dev_err(&adapter->pdev->dev,
2381a55c 2670 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2671 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2672 cmd.dma);
71d8d1b5
AK
2673 return status;
2674 }
2675 status = be_cmd_enable_magic_wol(adapter,
2676 adapter->netdev->dev_addr, &cmd);
2677 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2678 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2679 } else {
2680 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2681 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2682 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2683 }
2684
2b7bcebf 2685 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2686 return status;
2687}
2688
6d87f5c3
AK
2689/*
2690 * Generate a seed MAC address from the PF MAC Address using jhash.
 2691 * MAC addresses for VFs are assigned incrementally starting from the seed.
2692 * These addresses are programmed in the ASIC by the PF and the VF driver
2693 * queries for the MAC address during its probe.
2694 */
4c876616 2695static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 2696{
f9449ab7 2697 u32 vf;
3abcdeda 2698 int status = 0;
6d87f5c3 2699 u8 mac[ETH_ALEN];
11ac75ed 2700 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2701
2702 be_vf_eth_addr_generate(adapter, mac);
2703
11ac75ed 2704 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 2705 if (BEx_chip(adapter))
590c391d 2706 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
2707 vf_cfg->if_handle,
2708 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
2709 else
2710 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2711 vf + 1);
590c391d 2712
6d87f5c3
AK
2713 if (status)
2714 dev_err(&adapter->pdev->dev,
590c391d 2715 "Mac address assignment failed for VF %d\n", vf);
6d87f5c3 2716 else
11ac75ed 2717 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
2718
2719 mac[5] += 1;
2720 }
2721 return status;
2722}
2723
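/* A minimal sketch of the seed generation described above, assuming
 * be_vf_eth_addr_generate() (defined earlier in this file) keeps the PF's
 * OUI and derives the low three octets from a jhash of the PF MAC:
 *
 *	u32 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
 *
 *	mac[5] = (u8)(addr & 0xFF);
 *	mac[4] = (u8)((addr >> 8) & 0xFF);
 *	mac[3] = (u8)((addr >> 16) & 0xFF);
 *	memcpy(mac, adapter->netdev->dev_addr, 3);	(keeps the OUI)
 *
 * be_vf_eth_addr_config() then bumps mac[5] once per VF.
 */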
4c876616
SP
2724static int be_vfs_mac_query(struct be_adapter *adapter)
2725{
2726 int status, vf;
2727 u8 mac[ETH_ALEN];
2728 struct be_vf_cfg *vf_cfg;
95046b92 2729 bool active = false;
4c876616
SP
2730
2731 for_all_vfs(adapter, vf_cfg, vf) {
2732 be_cmd_get_mac_from_list(adapter, mac, &active,
2733 &vf_cfg->pmac_id, 0);
2734
2735 status = be_cmd_mac_addr_query(adapter, mac, false,
2736 vf_cfg->if_handle, 0);
2737 if (status)
2738 return status;
2739 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2740 }
2741 return 0;
2742}
2743
f9449ab7 2744static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 2745{
11ac75ed 2746 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2747 u32 vf;
2748
257a3feb 2749 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
2750 dev_warn(&adapter->pdev->dev,
2751 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
2752 goto done;
2753 }
2754
b4c1df93
SP
2755 pci_disable_sriov(adapter->pdev);
2756
11ac75ed 2757 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 2758 if (BEx_chip(adapter))
11ac75ed
SP
2759 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2760 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
2761 else
2762 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2763 vf + 1);
f9449ab7 2764
11ac75ed
SP
2765 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2766 }
39f1d94d
SP
2767done:
2768 kfree(adapter->vf_cfg);
2769 adapter->num_vfs = 0;
6d87f5c3
AK
2770}
2771
7707133c
SP
2772static void be_clear_queues(struct be_adapter *adapter)
2773{
2774 be_mcc_queues_destroy(adapter);
2775 be_rx_cqs_destroy(adapter);
2776 be_tx_queues_destroy(adapter);
2777 be_evt_queues_destroy(adapter);
2778}
2779
68d7bdcb 2780static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 2781{
191eb756
SP
2782 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2783 cancel_delayed_work_sync(&adapter->work);
2784 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2785 }
68d7bdcb
SP
2786}
2787
2788static int be_clear(struct be_adapter *adapter)
2789{
2790 int i;
2791
2792 be_cancel_worker(adapter);
191eb756 2793
11ac75ed 2794 if (sriov_enabled(adapter))
f9449ab7
SP
2795 be_vf_clear(adapter);
2796
2d17f403
SP
 2797 /* Delete the primary MAC along with the uc-mac list */
2798 for (i = 0; i < (adapter->uc_macs + 1); i++)
fbc13f01 2799 be_cmd_pmac_del(adapter, adapter->if_handle,
2d17f403
SP
2800 adapter->pmac_id[i], 0);
2801 adapter->uc_macs = 0;
fbc13f01 2802
f9449ab7 2803 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 2804
7707133c 2805 be_clear_queues(adapter);
a54769f5 2806
abb93951
PR
2807 kfree(adapter->pmac_id);
2808 adapter->pmac_id = NULL;
2809
10ef9ab4 2810 be_msix_disable(adapter);
a54769f5
SP
2811 return 0;
2812}
2813
4c876616 2814static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 2815{
92bf14ab 2816 struct be_resources res = {0};
4c876616
SP
2817 struct be_vf_cfg *vf_cfg;
2818 u32 cap_flags, en_flags, vf;
922bbe88 2819 int status = 0;
abb93951 2820
4c876616
SP
2821 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2822 BE_IF_FLAGS_MULTICAST;
abb93951 2823
4c876616 2824 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
2825 if (!BE3_chip(adapter)) {
2826 status = be_cmd_get_profile_config(adapter, &res,
2827 vf + 1);
2828 if (!status)
2829 cap_flags = res.if_cap_flags;
2830 }
4c876616
SP
2831
2832 /* If a FW profile exists, then cap_flags are updated */
2833 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2834 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2835 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2836 &vf_cfg->if_handle, vf + 1);
2837 if (status)
2838 goto err;
2839 }
2840err:
2841 return status;
abb93951
PR
2842}
2843
39f1d94d 2844static int be_vf_setup_init(struct be_adapter *adapter)
30128031 2845{
11ac75ed 2846 struct be_vf_cfg *vf_cfg;
30128031
SP
2847 int vf;
2848
39f1d94d
SP
2849 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2850 GFP_KERNEL);
2851 if (!adapter->vf_cfg)
2852 return -ENOMEM;
2853
11ac75ed
SP
2854 for_all_vfs(adapter, vf_cfg, vf) {
2855 vf_cfg->if_handle = -1;
2856 vf_cfg->pmac_id = -1;
30128031 2857 }
39f1d94d 2858 return 0;
30128031
SP
2859}
2860
f9449ab7
SP
2861static int be_vf_setup(struct be_adapter *adapter)
2862{
11ac75ed 2863 struct be_vf_cfg *vf_cfg;
f1f3ee1b 2864 u16 def_vlan, lnk_speed;
4c876616
SP
2865 int status, old_vfs, vf;
2866 struct device *dev = &adapter->pdev->dev;
04a06028 2867 u32 privileges;
39f1d94d 2868
257a3feb 2869 old_vfs = pci_num_vf(adapter->pdev);
4c876616
SP
2870 if (old_vfs) {
2871 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2872 if (old_vfs != num_vfs)
2873 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2874 adapter->num_vfs = old_vfs;
39f1d94d 2875 } else {
92bf14ab 2876 if (num_vfs > be_max_vfs(adapter))
4c876616 2877 dev_info(dev, "Device supports %d VFs and not %d\n",
92bf14ab
SP
2878 be_max_vfs(adapter), num_vfs);
2879 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
b4c1df93 2880 if (!adapter->num_vfs)
4c876616 2881 return 0;
39f1d94d
SP
2882 }
2883
2884 status = be_vf_setup_init(adapter);
2885 if (status)
2886 goto err;
30128031 2887
4c876616
SP
2888 if (old_vfs) {
2889 for_all_vfs(adapter, vf_cfg, vf) {
2890 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2891 if (status)
2892 goto err;
2893 }
2894 } else {
2895 status = be_vfs_if_create(adapter);
f9449ab7
SP
2896 if (status)
2897 goto err;
f9449ab7
SP
2898 }
2899
4c876616
SP
2900 if (old_vfs) {
2901 status = be_vfs_mac_query(adapter);
2902 if (status)
2903 goto err;
2904 } else {
39f1d94d
SP
2905 status = be_vf_eth_addr_config(adapter);
2906 if (status)
2907 goto err;
2908 }
f9449ab7 2909
11ac75ed 2910 for_all_vfs(adapter, vf_cfg, vf) {
04a06028
SP
 2911 /* Allow VFs to program MAC/VLAN filters */
2912 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
2913 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
2914 status = be_cmd_set_fn_privileges(adapter,
2915 privileges |
2916 BE_PRIV_FILTMGMT,
2917 vf + 1);
2918 if (!status)
2919 dev_info(dev, "VF%d has FILTMGMT privilege\n",
2920 vf);
2921 }
2922
4c876616
SP
 2923 /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
2924 * Allow full available bandwidth
2925 */
2926 if (BE3_chip(adapter) && !old_vfs)
2927 be_cmd_set_qos(adapter, 1000, vf+1);
2928
2929 status = be_cmd_link_status_query(adapter, &lnk_speed,
2930 NULL, vf + 1);
2931 if (!status)
2932 vf_cfg->tx_rate = lnk_speed;
f1f3ee1b
AK
2933
2934 status = be_cmd_get_hsw_config(adapter, &def_vlan,
a77dcb8c 2935 vf + 1, vf_cfg->if_handle, NULL);
f1f3ee1b
AK
2936 if (status)
2937 goto err;
2938 vf_cfg->def_vid = def_vlan;
dcf7ebba
PR
2939
2940 be_cmd_enable_vf(adapter, vf + 1);
f9449ab7 2941 }
b4c1df93
SP
2942
2943 if (!old_vfs) {
2944 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2945 if (status) {
2946 dev_err(dev, "SRIOV enable failed\n");
2947 adapter->num_vfs = 0;
2948 goto err;
2949 }
2950 }
f9449ab7
SP
2951 return 0;
2952err:
4c876616
SP
2953 dev_err(dev, "VF setup failed\n");
2954 be_vf_clear(adapter);
f9449ab7
SP
2955 return status;
2956}
2957
92bf14ab
SP
 2958 /* On BE2/BE3, FW does not suggest the supported limits */
2959static void BEx_get_resources(struct be_adapter *adapter,
2960 struct be_resources *res)
2961{
2962 struct pci_dev *pdev = adapter->pdev;
2963 bool use_sriov = false;
2964
2965 if (BE3_chip(adapter) && be_physfn(adapter)) {
2966 int max_vfs;
2967
2968 max_vfs = pci_sriov_get_totalvfs(pdev);
2969 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
2970 use_sriov = res->max_vfs && num_vfs;
2971 }
2972
2973 if (be_physfn(adapter))
2974 res->max_uc_mac = BE_UC_PMAC_COUNT;
2975 else
2976 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
2977
2978 if (adapter->function_mode & FLEX10_MODE)
2979 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
1aa9673c
AK
2980 else if (adapter->function_mode & UMC_ENABLED)
2981 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
92bf14ab
SP
2982 else
2983 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
2984 res->max_mcast_mac = BE_MAX_MC;
2985
2986 if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
2987 !be_physfn(adapter))
2988 res->max_tx_qs = 1;
2989 else
2990 res->max_tx_qs = BE3_MAX_TX_QS;
2991
2992 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2993 !use_sriov && be_physfn(adapter))
2994 res->max_rss_qs = (adapter->be3_native) ?
2995 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2996 res->max_rx_qs = res->max_rss_qs + 1;
2997
68d7bdcb 2998 res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;
92bf14ab
SP
2999
3000 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3001 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3002 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3003}
3004
30128031
SP
3005static void be_setup_init(struct be_adapter *adapter)
3006{
3007 adapter->vlan_prio_bmap = 0xff;
42f11cf2 3008 adapter->phy.link_speed = -1;
30128031
SP
3009 adapter->if_handle = -1;
3010 adapter->be3_native = false;
3011 adapter->promiscuous = false;
f25b119c
PR
3012 if (be_physfn(adapter))
3013 adapter->cmd_privileges = MAX_PRIVILEGES;
3014 else
3015 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
3016}
3017
92bf14ab 3018static int be_get_resources(struct be_adapter *adapter)
abb93951 3019{
92bf14ab
SP
3020 struct device *dev = &adapter->pdev->dev;
3021 struct be_resources res = {0};
3022 int status;
abb93951 3023
92bf14ab
SP
3024 if (BEx_chip(adapter)) {
3025 BEx_get_resources(adapter, &res);
3026 adapter->res = res;
abb93951
PR
3027 }
3028
92bf14ab
SP
3029 /* For BE3 only check if FW suggests a different max-txqs value */
3030 if (BE3_chip(adapter)) {
3031 status = be_cmd_get_profile_config(adapter, &res, 0);
3032 if (!status && res.max_tx_qs)
3033 adapter->res.max_tx_qs =
3034 min(adapter->res.max_tx_qs, res.max_tx_qs);
3035 }
abb93951 3036
92bf14ab
SP
 3037 /* For Lancer, SH etc. read per-function resource limits from FW.
 3038 * GET_FUNC_CONFIG returns per-function guaranteed limits.
 3039 * GET_PROFILE_CONFIG returns PCI-E related (PF-pool) limits.
3040 */
3041 if (!BEx_chip(adapter)) {
3042 status = be_cmd_get_func_config(adapter, &res);
3043 if (status)
3044 return status;
abb93951 3045
92bf14ab
SP
3046 /* If RoCE may be enabled stash away half the EQs for RoCE */
3047 if (be_roce_supported(adapter))
3048 res.max_evt_qs /= 2;
3049 adapter->res = res;
abb93951 3050
92bf14ab
SP
3051 if (be_physfn(adapter)) {
3052 status = be_cmd_get_profile_config(adapter, &res, 0);
3053 if (status)
3054 return status;
3055 adapter->res.max_vfs = res.max_vfs;
3056 }
abb93951 3057
92bf14ab
SP
3058 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3059 be_max_txqs(adapter), be_max_rxqs(adapter),
3060 be_max_rss(adapter), be_max_eqs(adapter),
3061 be_max_vfs(adapter));
3062 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3063 be_max_uc(adapter), be_max_mc(adapter),
3064 be_max_vlans(adapter));
abb93951 3065 }
4c876616 3066
92bf14ab 3067 return 0;
abb93951
PR
3068}
3069
39f1d94d
SP
 3070 /* Routine to query per-function resource limits */
3071static int be_get_config(struct be_adapter *adapter)
3072{
4c876616 3073 int status;
39f1d94d 3074
abb93951
PR
3075 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3076 &adapter->function_mode,
0ad3157e
VV
3077 &adapter->function_caps,
3078 &adapter->asic_rev);
abb93951 3079 if (status)
92bf14ab 3080 return status;
abb93951 3081
92bf14ab
SP
3082 status = be_get_resources(adapter);
3083 if (status)
3084 return status;
abb93951
PR
3085
3086 /* primary mac needs 1 pmac entry */
92bf14ab
SP
3087 adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3088 GFP_KERNEL);
3089 if (!adapter->pmac_id)
3090 return -ENOMEM;
abb93951 3091
92bf14ab
SP
3092 /* Sanitize cfg_num_qs based on HW and platform limits */
3093 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3094
3095 return 0;
39f1d94d
SP
3096}
3097
95046b92
SP
3098static int be_mac_setup(struct be_adapter *adapter)
3099{
3100 u8 mac[ETH_ALEN];
3101 int status;
3102
3103 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3104 status = be_cmd_get_perm_mac(adapter, mac);
3105 if (status)
3106 return status;
3107
3108 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3109 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3110 } else {
3111 /* Maybe the HW was reset; dev_addr must be re-programmed */
3112 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3113 }
3114
3115 /* On BE3 VFs this cmd may fail due to lack of privilege.
3116 * Ignore the failure as in this case pmac_id is fetched
3117 * in the IFACE_CREATE cmd.
3118 */
3119 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3120 &adapter->pmac_id[0], 0);
3121 return 0;
3122}
3123
68d7bdcb
SP
3124static void be_schedule_worker(struct be_adapter *adapter)
3125{
3126 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3127 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3128}
3129
7707133c 3130static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 3131{
68d7bdcb 3132 struct net_device *netdev = adapter->netdev;
10ef9ab4 3133 int status;
ba343c77 3134
7707133c 3135 status = be_evt_queues_create(adapter);
abb93951
PR
3136 if (status)
3137 goto err;
73d540f2 3138
7707133c 3139 status = be_tx_qs_create(adapter);
c2bba3df
SK
3140 if (status)
3141 goto err;
10ef9ab4 3142
7707133c 3143 status = be_rx_cqs_create(adapter);
10ef9ab4 3144 if (status)
a54769f5 3145 goto err;
6b7c5b94 3146
7707133c 3147 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
3148 if (status)
3149 goto err;
3150
68d7bdcb
SP
3151 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3152 if (status)
3153 goto err;
3154
3155 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3156 if (status)
3157 goto err;
3158
7707133c
SP
3159 return 0;
3160err:
3161 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3162 return status;
3163}
3164
68d7bdcb
SP
3165int be_update_queues(struct be_adapter *adapter)
3166{
3167 struct net_device *netdev = adapter->netdev;
3168 int status;
3169
3170 if (netif_running(netdev))
3171 be_close(netdev);
3172
3173 be_cancel_worker(adapter);
3174
3175 /* If any vectors have been shared with RoCE we cannot re-program
3176 * the MSIx table.
3177 */
3178 if (!adapter->num_msix_roce_vec)
3179 be_msix_disable(adapter);
3180
3181 be_clear_queues(adapter);
3182
3183 if (!msix_enabled(adapter)) {
3184 status = be_msix_enable(adapter);
3185 if (status)
3186 return status;
3187 }
3188
3189 status = be_setup_queues(adapter);
3190 if (status)
3191 return status;
3192
3193 be_schedule_worker(adapter);
3194
3195 if (netif_running(netdev))
3196 status = be_open(netdev);
3197
3198 return status;
3199}
3200
7707133c
SP
3201static int be_setup(struct be_adapter *adapter)
3202{
3203 struct device *dev = &adapter->pdev->dev;
3204 u32 tx_fc, rx_fc, en_flags;
3205 int status;
3206
3207 be_setup_init(adapter);
3208
3209 if (!lancer_chip(adapter))
3210 be_cmd_req_native_mode(adapter);
3211
3212 status = be_get_config(adapter);
10ef9ab4 3213 if (status)
a54769f5 3214 goto err;
6b7c5b94 3215
7707133c 3216 status = be_msix_enable(adapter);
10ef9ab4 3217 if (status)
a54769f5 3218 goto err;
6b7c5b94 3219
f9449ab7 3220 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
7707133c 3221 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
abb93951 3222 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
f9449ab7 3223 en_flags |= BE_IF_FLAGS_RSS;
92bf14ab
SP
3224 en_flags = en_flags & be_if_cap_flags(adapter);
3225 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
1578e777 3226 &adapter->if_handle, 0);
7707133c 3227 if (status)
a54769f5 3228 goto err;
6b7c5b94 3229
68d7bdcb
SP
3230 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3231 rtnl_lock();
7707133c 3232 status = be_setup_queues(adapter);
68d7bdcb 3233 rtnl_unlock();
95046b92 3234 if (status)
1578e777
PR
3235 goto err;
3236
7707133c
SP
3237 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
 3238 /* In UMC mode FW does not return the right privileges.
3239 * Override with correct privilege equivalent to PF.
3240 */
3241 if (be_is_mc(adapter))
3242 adapter->cmd_privileges = MAX_PRIVILEGES;
3243
3244 status = be_mac_setup(adapter);
10ef9ab4
SP
3245 if (status)
3246 goto err;
3247
eeb65ced 3248 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
5a56eb10 3249
e9e2a904
SK
3250 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3251 dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
3252 adapter->fw_ver);
3253 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3254 }
3255
1d1e9a46 3256 if (adapter->vlans_added)
10329df8 3257 be_vid_config(adapter);
7ab8b0b4 3258
a54769f5 3259 be_set_rx_mode(adapter->netdev);
5fb379ee 3260
ddc3f5cb 3261 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 3262
ddc3f5cb
AK
3263 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3264 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 3265 adapter->rx_fc);
2dc1deb6 3266
92bf14ab
SP
3267 if (be_physfn(adapter) && num_vfs) {
3268 if (be_max_vfs(adapter))
39f1d94d
SP
3269 be_vf_setup(adapter);
3270 else
3271 dev_warn(dev, "device doesn't support SRIOV\n");
f9449ab7
SP
3272 }
3273
f25b119c
PR
3274 status = be_cmd_get_phy_info(adapter);
3275 if (!status && be_pause_supported(adapter))
42f11cf2
AK
3276 adapter->phy.fc_autoneg = 1;
3277
68d7bdcb 3278 be_schedule_worker(adapter);
f9449ab7 3279 return 0;
a54769f5
SP
3280err:
3281 be_clear(adapter);
3282 return status;
3283}
6b7c5b94 3284
66268739
IV
3285#ifdef CONFIG_NET_POLL_CONTROLLER
3286static void be_netpoll(struct net_device *netdev)
3287{
3288 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3289 struct be_eq_obj *eqo;
66268739
IV
3290 int i;
3291
e49cc34f
SP
3292 for_all_evt_queues(adapter, eqo, i) {
3293 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3294 napi_schedule(&eqo->napi);
3295 }
10ef9ab4
SP
3296
3297 return;
66268739
IV
3298}
3299#endif
3300
84517482 3301#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
4188e7df 3302static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
c165541e 3303
fa9a6fed 3304static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
3305 const u8 *p, u32 img_start, int image_size,
3306 int hdr_size)
fa9a6fed
SB
3307{
3308 u32 crc_offset;
3309 u8 flashed_crc[4];
3310 int status;
3f0d4560
AK
3311
3312 crc_offset = hdr_size + img_start + image_size - 4;
3313
fa9a6fed 3314 p += crc_offset;
3f0d4560
AK
3315
3316 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 3317 (image_size - 4));
fa9a6fed
SB
3318 if (status) {
3319 dev_err(&adapter->pdev->dev,
3320 "could not get crc from flash, not flashing redboot\n");
3321 return false;
3322 }
3323
 3324 /* Update redboot only if the CRC does not match */
3325 if (!memcmp(flashed_crc, p, 4))
3326 return false;
3327 else
3328 return true;
fa9a6fed
SB
3329}
3330
306f1348
SP
3331static bool phy_flashing_required(struct be_adapter *adapter)
3332{
42f11cf2
AK
3333 return (adapter->phy.phy_type == TN_8022 &&
3334 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3335}
3336
c165541e
PR
3337static bool is_comp_in_ufi(struct be_adapter *adapter,
3338 struct flash_section_info *fsec, int type)
3339{
3340 int i = 0, img_type = 0;
3341 struct flash_section_info_g2 *fsec_g2 = NULL;
3342
ca34fe38 3343 if (BE2_chip(adapter))
c165541e
PR
3344 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3345
3346 for (i = 0; i < MAX_FLASH_COMP; i++) {
3347 if (fsec_g2)
3348 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3349 else
3350 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3351
3352 if (img_type == type)
3353 return true;
3354 }
3355 return false;
3356
3357}
3358
4188e7df 3359static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
c165541e
PR
3360 int header_size,
3361 const struct firmware *fw)
3362{
3363 struct flash_section_info *fsec = NULL;
3364 const u8 *p = fw->data;
3365
3366 p += header_size;
3367 while (p < (fw->data + fw->size)) {
3368 fsec = (struct flash_section_info *)p;
3369 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3370 return fsec;
3371 p += 32;
3372 }
3373 return NULL;
3374}
3375
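/* Write one image to the flash ROM in 32KB chunks: intermediate chunks
 * are sent with a SAVE opcode and the final chunk with a FLASH opcode;
 * PHY firmware uses its own pair of opcodes.
 */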
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32 * 1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (status) {
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}

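/* Flash a BE2/BE3 UFI: walk a per-generation table of known image
 * components, skip entries that are absent from the UFI or not
 * applicable to this adapter, and flash each remaining section.
 */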
/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd,
			int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}

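/* Flash a Skyhawk UFI: rather than a fixed component table, iterate the
 * image entries listed in the flash section header and map each entry
 * type to its flash opcode.
 */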
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_offset, img_size, img_optype, redboot;
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	const u8 *p = fw->data;
	struct flash_section_info *fsec = NULL;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);

		switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
		case IMAGE_FIRMWARE_iSCSI:
			img_optype = OPTYPE_ISCSI_ACTIVE;
			break;
		case IMAGE_BOOT_CODE:
			img_optype = OPTYPE_REDBOOT;
			break;
		case IMAGE_OPTION_ROM_ISCSI:
			img_optype = OPTYPE_BIOS;
			break;
		case IMAGE_OPTION_ROM_PXE:
			img_optype = OPTYPE_PXE_BIOS;
			break;
		case IMAGE_OPTION_ROM_FCoE:
			img_optype = OPTYPE_FCOE_BIOS;
			break;
		case IMAGE_FIRMWARE_BACKUP_iSCSI:
			img_optype = OPTYPE_ISCSI_BACKUP;
			break;
		case IMAGE_NCSI:
			img_optype = OPTYPE_NCSI_FW;
			break;
		default:
			continue;
		}

		if (img_optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
					img_offset, img_size,
					filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				le32_to_cpu(fsec->fsec_entry[i].type));
			return status;
		}
	}
	return 0;
}

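/* Lancer firmware is downloaded with WRITE_OBJECT commands: the image is
 * streamed to the "/prg" object in 32KB chunks and a final zero-length
 * write commits it. The returned change_status says whether a FW reset
 * or a full reboot is needed for the new image to take effect.
 */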
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW image not properly aligned; length must be 4-byte aligned\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. Status code: 0x%x Additional status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset; new FW will not be active\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

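/* Map the UFI file's build identifier to the chip family it targets;
 * BE3-R cards (asic_type_rev 0x10) need the newer TYPE3R UFI.
 */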
#define UFI_TYPE2	2
#define UFI_TYPE3	3
#define UFI_TYPE3R	10
#define UFI_TYPE4	4
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (fhdr == NULL)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return UFI_TYPE4;
	else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
		if (fhdr->asic_type_rev == 0x10)
			return UFI_TYPE3R;
		else
			return UFI_TYPE3;
	} else if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}

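/* Dispatch a non-Lancer firmware image: parse the UFI header, pick the
 * flashing routine for the chip generation, and refuse mismatched UFIs
 * (e.g. a plain BE3 UFI on a BE3-R card).
 */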
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter, adapter->fw_ver,
				  adapter->fw_on_flash);

fw_exit:
	release_firmware(fw);
	return status;
}

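/* ndo_bridge_setlink: parse the IFLA_AF_SPEC attributes and program the
 * e-switch port forwarding mode (VEB or VEPA) via the HSW config command.
 */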
static int be_ndo_bridge_setlink(struct net_device *dev,
				 struct nlmsghdr *nlh)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}

static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev,
				 u32 filter_mask)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

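/* Lancer chips and virtual functions expose the doorbell region in
 * BAR 0; on other functions it lives in BAR 4.
 */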
static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
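	/* The mailbox is over-allocated by 16 bytes so the region handed
	 * to the hardware can be aligned to a 16-byte boundary.
	 */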
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else
		/* BE3 and Skyhawk */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);

	cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				      GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	return 0;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

bool be_is_wol_supported(struct be_adapter *adapter)
{
	return ((adapter->wol_cap & BE_WOL_CAP) &&
		!be_is_wol_excluded(adapter)) ? true : false;
}

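/* Query the UART trace level from the controller's extended FAT
 * capabilities; Lancer is skipped and reports the default level 0.
 */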
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}

static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}

static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_info(dev, "Error recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Error recovery failed\n");

	return status;
}

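/* Periodic Lancer error watchdog: detach the netdev while recovery runs,
 * reattach on success, and reschedule unless recovery failed with a
 * non-retryable error.
 */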
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

/* If any VFs are already enabled don't FLR the PF */
static bool be_reset_required(struct be_adapter *adapter)
{
	return pci_num_vf(adapter->pdev) ? false : true;
}

static char *mc_name(struct be_adapter *adapter)
{
	if (adapter->function_mode & FLEX10_MODE)
		return "FLEX10";
	else if (adapter->function_mode & VNIC_MODE)
		return "vNIC";
	else if (adapter->function_mode & UMC_ENABLED)
		return "UMC";
	else
		return "";
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

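	/* Prefer a 64-bit DMA mask; fall back to 32-bit if the platform
	 * cannot satisfy it.
	 */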
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (status < 0) {
			dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
			goto free_netdev;
		}
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (!status)
			status = dma_set_coherent_mask(&pdev->dev,
						       DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_info(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

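/* PCI error (EEH) handlers: on a detected error the driver detaches the
 * netdev and tears down resources; slot_reset re-enables the device and
 * waits for firmware; resume rebuilds the function and reattaches.
 */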
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);