be2net: fix tx completion cleanup
drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}

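/* Host interrupts for the whole function are gated by the HOSTINTR bit
 * in the MEMBAR control register; be_intr_set() below writes the
 * register only when the requested state differs from the current one,
 * and backs off entirely once an EEH error has been flagged.
 */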
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

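/* The EQ/CQ doorbells below pack several fields into one 32-bit write:
 * the ring id (plus an extension field for ids that overflow the base
 * mask), a re-arm bit, an optional clear-interrupt bit, and the number
 * of entries popped, which returns credits to the hardware.
 */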
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

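/* The MAC is replaced add-before-delete: the new address is programmed
 * as an additional pmac first, and only then is the old pmac-id removed,
 * so a valid unicast filter is present on the interface at all times.
 */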
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				       MAC_ADDRESS_TYPE_NETWORK, false,
				       adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

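/* Worked example for accumulate_16bit_val(): if *acc holds 0x0001FFFE
 * (one prior wrap, low word 0xFFFE) and the hardware now reports
 * val == 0x0003, then val < lo(*acc) flags a wrap, and the result is
 * hi(0x0001FFFE) + 0x0003 + 65536 == 0x00020003.
 */
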
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}

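/* The u64_stats_fetch_begin_bh()/retry_bh() loops below re-read each
 * queue's packet and byte counters until a consistent snapshot is
 * obtained, which keeps 64-bit counters tear-free on 32-bit hosts
 * without taking a lock in the hot path.
 */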
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

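/* Worked example for wrb_cnt_for_skb(): linear data plus two frags
 * costs 1 (hdr) + 1 (linear) + 2 (frags) = 4 WRBs, already even, so no
 * dummy WRB is needed. With a single frag the count would be 3, and a
 * dummy WRB is appended to keep it even; Lancer chips skip this rule.
 */
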
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

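/* Note on the dma_err path in make_tx_wrbs() above: txq->head is
 * rewound to map_head and every WRB already posted is unmapped again.
 * Only the first WRB can hold a dma_map_single() mapping (the linear
 * part of the skb), which is why map_single is cleared after the first
 * unmap iteration.
 */
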
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	if (vf) {
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					    1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

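/* be_set_rx_mode() resolves the filter mode in priority order:
 * IFF_PROMISC first, then multicast-promiscuous when IFF_ALLMULTI is
 * set or the list exceeds BE_MAX_MC, and only otherwise programs the
 * exact multicast list into the hardware.
 */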
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

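/* Adaptive interrupt coalescing: be_eqd_update() samples the rx packet
 * rate at most once a second and derives a new EQ delay from it
 * (rx_pps / 110000, scaled by 8), clamped to [min_eqd, max_eqd], with
 * values under 10 rounded down to 0, i.e. no delay at low rates.
 */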
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

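/* Rx data placement: only up to BE_HDR_LEN bytes of the first fragment
 * are copied into the skb linear area below; the remainder of the frame
 * stays in the posted receive pages and is attached as page fragments,
 * so large frames avoid a full copy.
 */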
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	/* read the hash from the completion descriptor, not from rxcp */
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	/* read the hash from the completion descriptor, not from rxcp */
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

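/* Note on be_post_rx_frags() above: one "big page" is carved into
 * rx_frag_size chunks, with get_page() taken for every chunk after the
 * first so each fragment holds its own reference; last_page_user marks
 * the final chunk of a page, which is where get_rx_page_info() later
 * unmaps the DMA mapping.
 */
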
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}

static int event_handle(struct be_eq_obj *eqo)
{
	bool rearm = false;
	int num = events_get(eqo);

	/* Deal with any spurious interrupts that come without events */
	if (!num)
		rearm = true;

	be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eqo->napi);

	return num;
}

1532
10ef9ab4
SP
1533/* Leaves the EQ is disarmed state */
1534static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1535{
10ef9ab4 1536 int num = events_get(eqo);
859b1e4e 1537
10ef9ab4 1538 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1539}
1540
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

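/* Tx completion cleanup happens in two phases: first poll every tx
 * queue for up to 200ms, processing completions as they arrive; then,
 * for any WRBs still outstanding, walk the queue and free the posted
 * skbs directly, since their completions will never arrive.
 */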
0ae57bb3 1566static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 1567{
0ae57bb3
SP
1568 struct be_tx_obj *txo;
1569 struct be_queue_info *txq;
a8e9179a 1570 struct be_eth_tx_compl *txcp;
4d586b82 1571 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
1572 struct sk_buff *sent_skb;
1573 bool dummy_wrb;
0ae57bb3 1574 int i, pending_txqs;
a8e9179a
SP
1575
1576 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1577 do {
0ae57bb3
SP
1578 pending_txqs = adapter->num_tx_qs;
1579
1580 for_all_tx_queues(adapter, txo, i) {
1581 txq = &txo->q;
1582 while ((txcp = be_tx_compl_get(&txo->cq))) {
1583 end_idx =
1584 AMAP_GET_BITS(struct amap_eth_tx_compl,
1585 wrb_index, txcp);
1586 num_wrbs += be_tx_compl_process(adapter, txo,
1587 end_idx);
1588 cmpl++;
1589 }
1590 if (cmpl) {
1591 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1592 atomic_sub(num_wrbs, &txq->used);
1593 cmpl = 0;
1594 num_wrbs = 0;
1595 }
1596 if (atomic_read(&txq->used) == 0)
1597 pending_txqs--;
a8e9179a
SP
1598 }
1599
0ae57bb3 1600 if (pending_txqs == 0 || ++timeo > 200)
a8e9179a
SP
1601 break;
1602
1603 mdelay(1);
1604 } while (true);
1605
0ae57bb3
SP
1606 for_all_tx_queues(adapter, txo, i) {
1607 txq = &txo->q;
1608 if (atomic_read(&txq->used))
1609 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1610 atomic_read(&txq->used));
1611
1612 /* free posted tx for which compls will never arrive */
1613 while (atomic_read(&txq->used)) {
1614 sent_skb = txo->sent_skb_list[txq->tail];
1615 end_idx = txq->tail;
1616 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1617 &dummy_wrb);
1618 index_adv(&end_idx, num_wrbs - 1, txq->len);
1619 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1620 atomic_sub(num_wrbs, &txq->used);
1621 }
b03388d6 1622 }
6b7c5b94
SP
1623}
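/* Worked example of the orphan-skb cleanup above (values assumed): if
 * txq->tail == 100 and the skb at that slot was posted with 3 wrbs,
 * index_adv() moves end_idx by (3 - 1) to 102; be_tx_compl_process()
 * then unmaps wrbs 100..102, frees the skb and 3 is subtracted from
 * txq->used.
 */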
1624
10ef9ab4
SP
1625static void be_evt_queues_destroy(struct be_adapter *adapter)
1626{
1627 struct be_eq_obj *eqo;
1628 int i;
1629
1630 for_all_evt_queues(adapter, eqo, i) {
1631 be_eq_clean(eqo);
1632 if (eqo->q.created)
1633 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1634 be_queue_free(adapter, &eqo->q);
1635 }
1636}
1637
1638static int be_evt_queues_create(struct be_adapter *adapter)
1639{
1640 struct be_queue_info *eq;
1641 struct be_eq_obj *eqo;
1642 int i, rc;
1643
1644 adapter->num_evt_qs = num_irqs(adapter);
1645
1646 for_all_evt_queues(adapter, eqo, i) {
1647 eqo->adapter = adapter;
1648 eqo->tx_budget = BE_TX_BUDGET;
1649 eqo->idx = i;
1650 eqo->max_eqd = BE_MAX_EQD;
1651 eqo->enable_aic = true;
1652
1653 eq = &eqo->q;
1654 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1655 sizeof(struct be_eq_entry));
1656 if (rc)
1657 return rc;
1658
1659 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1660 if (rc)
1661 return rc;
1662 }
1663 return rc;
1664}
1665
5fb379ee
SP
1666static void be_mcc_queues_destroy(struct be_adapter *adapter)
1667{
1668 struct be_queue_info *q;
5fb379ee 1669
8788fdc2 1670 q = &adapter->mcc_obj.q;
5fb379ee 1671 if (q->created)
8788fdc2 1672 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1673 be_queue_free(adapter, q);
1674
8788fdc2 1675 q = &adapter->mcc_obj.cq;
5fb379ee 1676 if (q->created)
8788fdc2 1677 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1678 be_queue_free(adapter, q);
1679}
1680
1681/* Must be called only after TX qs are created as MCC shares TX EQ */
1682static int be_mcc_queues_create(struct be_adapter *adapter)
1683{
1684 struct be_queue_info *q, *cq;
5fb379ee 1685
8788fdc2 1686 cq = &adapter->mcc_obj.cq;
5fb379ee 1687 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1688 sizeof(struct be_mcc_compl)))
5fb379ee
SP
1689 goto err;
1690
10ef9ab4
SP
1691 /* Use the default EQ for MCC completions */
1692 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
1693 goto mcc_cq_free;
1694
8788fdc2 1695 q = &adapter->mcc_obj.q;
5fb379ee
SP
1696 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1697 goto mcc_cq_destroy;
1698
8788fdc2 1699 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
1700 goto mcc_q_free;
1701
1702 return 0;
1703
1704mcc_q_free:
1705 be_queue_free(adapter, q);
1706mcc_cq_destroy:
8788fdc2 1707 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
1708mcc_cq_free:
1709 be_queue_free(adapter, cq);
1710err:
1711 return -1;
1712}
1713
6b7c5b94
SP
1714static void be_tx_queues_destroy(struct be_adapter *adapter)
1715{
1716 struct be_queue_info *q;
3c8def97
SP
1717 struct be_tx_obj *txo;
1718 u8 i;
6b7c5b94 1719
3c8def97
SP
1720 for_all_tx_queues(adapter, txo, i) {
1721 q = &txo->q;
1722 if (q->created)
1723 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1724 be_queue_free(adapter, q);
6b7c5b94 1725
3c8def97
SP
1726 q = &txo->cq;
1727 if (q->created)
1728 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1729 be_queue_free(adapter, q);
1730 }
6b7c5b94
SP
1731}
1732
dafc0fe3
SP
1733static int be_num_txqs_want(struct be_adapter *adapter)
1734{
11ac75ed 1735 if (sriov_enabled(adapter) || be_is_mc(adapter) ||
dafc0fe3
SP
1736 lancer_chip(adapter) || !be_physfn(adapter) ||
1737 adapter->generation == BE_GEN2)
1738 return 1;
1739 else
1740 return MAX_TX_QS;
1741}
1742
10ef9ab4 1743static int be_tx_cqs_create(struct be_adapter *adapter)
6b7c5b94 1744{
10ef9ab4
SP
1745 struct be_queue_info *cq, *eq;
1746 int status;
3c8def97
SP
1747 struct be_tx_obj *txo;
1748 u8 i;
6b7c5b94 1749
dafc0fe3 1750 adapter->num_tx_qs = be_num_txqs_want(adapter);
3bb62f4f
PR
1751 if (adapter->num_tx_qs != MAX_TX_QS) {
1752 rtnl_lock();
dafc0fe3
SP
1753 netif_set_real_num_tx_queues(adapter->netdev,
1754 adapter->num_tx_qs);
3bb62f4f
PR
1755 rtnl_unlock();
1756 }
dafc0fe3 1757
10ef9ab4
SP
1758 for_all_tx_queues(adapter, txo, i) {
1759 cq = &txo->cq;
1760 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1761 sizeof(struct be_eth_tx_compl));
1762 if (status)
1763 return status;
3c8def97 1764
10ef9ab4
SP
1765 /* If num_evt_qs is less than num_tx_qs, then more than
1766 * one txq shares an eq (see the mapping example below)
1767 */
1768 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1769 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1770 if (status)
1771 return status;
1772 }
1773 return 0;
1774}
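/* Worked example of the cq-to-eq mapping above (counts assumed): with
 * num_tx_qs == 8 and num_evt_qs == 4, txq0 and txq4 map to eq0
 * (0 % 4 == 4 % 4 == 0), txq1 and txq5 to eq1, and so on - every EQ
 * then services two TX completion queues.
 */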
6b7c5b94 1775
10ef9ab4
SP
1776static int be_tx_qs_create(struct be_adapter *adapter)
1777{
1778 struct be_tx_obj *txo;
1779 int i, status;
fe6d2a38 1780
3c8def97 1781 for_all_tx_queues(adapter, txo, i) {
10ef9ab4
SP
1782 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1783 sizeof(struct be_eth_wrb));
1784 if (status)
1785 return status;
6b7c5b94 1786
10ef9ab4
SP
1787 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1788 if (status)
1789 return status;
3c8def97 1790 }
6b7c5b94 1791
10ef9ab4 1792 return 0;
6b7c5b94
SP
1793}
1794
10ef9ab4 1795static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
1796{
1797 struct be_queue_info *q;
3abcdeda
SP
1798 struct be_rx_obj *rxo;
1799 int i;
1800
1801 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
1802 q = &rxo->cq;
1803 if (q->created)
1804 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1805 be_queue_free(adapter, q);
ac6a0c4a
SP
1806 }
1807}
1808
10ef9ab4 1809static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 1810{
10ef9ab4 1811 struct be_queue_info *eq, *cq;
3abcdeda
SP
1812 struct be_rx_obj *rxo;
1813 int rc, i;
6b7c5b94 1814
10ef9ab4
SP
1815 /* We'll create as many RSS rings as there are irqs, plus one default
1816 * ring. But when there's only one irq there's no use creating RSS
1817 * rings (see the sizing example below) */
1818 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1819 num_irqs(adapter) + 1 : 1;
ac6a0c4a 1820
6b7c5b94 1821 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
1822 for_all_rx_queues(adapter, rxo, i) {
1823 rxo->adapter = adapter;
3abcdeda
SP
1824 cq = &rxo->cq;
1825 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1826 sizeof(struct be_eth_rx_compl));
1827 if (rc)
10ef9ab4 1828 return rc;
3abcdeda 1829
10ef9ab4
SP
1830 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1831 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 1832 if (rc)
10ef9ab4 1833 return rc;
3abcdeda 1834 }
6b7c5b94 1835
10ef9ab4
SP
1836 if (adapter->num_rx_qs != MAX_RX_QS)
1837 dev_info(&adapter->pdev->dev,
1838 "Created only %d receive queues", adapter->num_rx_qs);
6b7c5b94 1839
10ef9ab4 1840 return 0;
b628bde2
SP
1841}
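/* Sizing example for the RXQ count above (counts assumed): with 4
 * MSI-X vectors, num_rx_qs = 4 + 1 = 5 (four RSS rings plus the
 * default non-RSS ring); with a single vector only the default ring
 * is created.
 */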
1842
6b7c5b94
SP
1843static irqreturn_t be_intx(int irq, void *dev)
1844{
1845 struct be_adapter *adapter = dev;
10ef9ab4 1846 int num_evts;
6b7c5b94 1847
10ef9ab4
SP
1848 /* With INTx only one EQ is used */
1849 num_evts = event_handle(&adapter->eq_obj[0]);
1850 if (num_evts)
1851 return IRQ_HANDLED;
1852 else
1853 return IRQ_NONE;
6b7c5b94
SP
1854}
1855
10ef9ab4 1856static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 1857{
10ef9ab4 1858 struct be_eq_obj *eqo = dev;
6b7c5b94 1859
10ef9ab4 1860 event_handle(eqo);
6b7c5b94
SP
1861 return IRQ_HANDLED;
1862}
1863
2e588f84 1864static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 1865{
2e588f84 1866 return rxcp->tcpf && !rxcp->err;
6b7c5b94
SP
1867}
1868
10ef9ab4
SP
1869static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
1870 int budget)
6b7c5b94 1871{
3abcdeda
SP
1872 struct be_adapter *adapter = rxo->adapter;
1873 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1874 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
1875 u32 work_done;
1876
1877 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 1878 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
1879 if (!rxcp)
1880 break;
1881
12004ae9
SP
1882 /* Is it a flush compl that has no data */
1883 if (unlikely(rxcp->num_rcvd == 0))
1884 goto loop_continue;
1885
1886 /* Discard compl with partial DMA Lancer B0 */
1887 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 1888 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
1889 goto loop_continue;
1890 }
1891
1892 /* On BE drop pkts that arrive due to imperfect filtering in
1893 * promiscuous mode on some SKUs
1894 */
1895 if (unlikely(rxcp->port != adapter->port_num &&
1896 !lancer_chip(adapter))) {
10ef9ab4 1897 be_rx_compl_discard(rxo, rxcp);
12004ae9 1898 goto loop_continue;
64642811 1899 }
009dd872 1900
12004ae9 1901 if (do_gro(rxcp))
10ef9ab4 1902 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 1903 else
10ef9ab4 1904 be_rx_compl_process(rxo, rxcp);
12004ae9 1905loop_continue:
2e588f84 1906 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
1907 }
1908
10ef9ab4
SP
1909 if (work_done) {
1910 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 1911
10ef9ab4
SP
1912 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1913 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 1914 }
10ef9ab4 1915
6b7c5b94
SP
1916 return work_done;
1917}
1918
10ef9ab4
SP
1919static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
1920 int budget, int idx)
6b7c5b94 1921{
6b7c5b94 1922 struct be_eth_tx_compl *txcp;
10ef9ab4 1923 int num_wrbs = 0, work_done;
3c8def97 1924
10ef9ab4
SP
1925 for (work_done = 0; work_done < budget; work_done++) {
1926 txcp = be_tx_compl_get(&txo->cq);
1927 if (!txcp)
1928 break;
1929 num_wrbs += be_tx_compl_process(adapter, txo,
3c8def97
SP
1930 AMAP_GET_BITS(struct amap_eth_tx_compl,
1931 wrb_index, txcp));
10ef9ab4 1932 }
6b7c5b94 1933
10ef9ab4
SP
1934 if (work_done) {
1935 be_cq_notify(adapter, txo->cq.id, true, work_done);
1936 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 1937
10ef9ab4
SP
1938 /* As Tx wrbs have been freed up, wake up netdev queue
1939 * if it was stopped due to lack of tx wrbs. */
1940 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
1941 atomic_read(&txo->q.used) < txo->q.len / 2) {
1942 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 1943 }
10ef9ab4
SP
1944
1945 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
1946 tx_stats(txo)->tx_compl += work_done;
1947 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 1948 }
10ef9ab4
SP
1949 return (work_done < budget); /* Done */
1950}
6b7c5b94 1951
10ef9ab4
SP
1952int be_poll(struct napi_struct *napi, int budget)
1953{
1954 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
1955 struct be_adapter *adapter = eqo->adapter;
1956 int max_work = 0, work, i;
1957 bool tx_done;
f31e50a8 1958
10ef9ab4
SP
1959 /* Process all TXQs serviced by this EQ */
1960 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
1961 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
1962 eqo->tx_budget, i);
1963 if (!tx_done)
1964 max_work = budget;
f31e50a8
SP
1965 }
1966
10ef9ab4
SP
1967 /* This loop will iterate twice for EQ0 in which
1968 * completions of the last RXQ (default one) are also processed.
1969 * For other EQs the loop iterates only once (see the example below)
1970 */
1971 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
1972 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
1973 max_work = max(work, max_work);
1974 }
6b7c5b94 1975
10ef9ab4
SP
1976 if (is_mcc_eqo(eqo))
1977 be_process_mcc(adapter);
93c86700 1978
10ef9ab4
SP
1979 if (max_work < budget) {
1980 napi_complete(napi);
1981 be_eq_notify(adapter, eqo->q.id, true, false, 0);
1982 } else {
1983 /* As we'll continue in polling mode, count and clear events */
1984 be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
93c86700 1985 }
10ef9ab4 1986 return max_work;
6b7c5b94
SP
1987}
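/* Worked example of the queue striding in be_poll() (counts assumed):
 * with num_evt_qs == 4 and num_rx_qs == 5, eq0 (idx 0) polls rxq0 and
 * rxq4 - the two iterations noted above - while eq1..eq3 each poll
 * exactly one RXQ; the TX loop strides the same way.
 */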
1988
d053de91 1989void be_detect_dump_ue(struct be_adapter *adapter)
7c185276 1990{
e1cfb67a
PR
1991 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
1992 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276
AK
1993 u32 i;
1994
72f02485
SP
1995 if (adapter->eeh_err || adapter->ue_detected)
1996 return;
1997
e1cfb67a
PR
1998 if (lancer_chip(adapter)) {
1999 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2000 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2001 sliport_err1 = ioread32(adapter->db +
2002 SLIPORT_ERROR1_OFFSET);
2003 sliport_err2 = ioread32(adapter->db +
2004 SLIPORT_ERROR2_OFFSET);
2005 }
2006 } else {
2007 pci_read_config_dword(adapter->pdev,
2008 PCICFG_UE_STATUS_LOW, &ue_lo);
2009 pci_read_config_dword(adapter->pdev,
2010 PCICFG_UE_STATUS_HIGH, &ue_hi);
2011 pci_read_config_dword(adapter->pdev,
2012 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2013 pci_read_config_dword(adapter->pdev,
2014 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2015
2016 ue_lo = (ue_lo & (~ue_lo_mask));
2017 ue_hi = (ue_hi & (~ue_hi_mask));
2018 }
7c185276 2019
e1cfb67a
PR
2020 if (ue_lo || ue_hi ||
2021 sliport_status & SLIPORT_STATUS_ERR_MASK) {
d053de91 2022 adapter->ue_detected = true;
7acc2087 2023 adapter->eeh_err = true;
434b3648
SP
2024 dev_err(&adapter->pdev->dev,
2025 "Unrecoverable error in the card\n");
d053de91
AK
2026 }
2027
e1cfb67a
PR
2028 if (ue_lo) {
2029 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2030 if (ue_lo & 1)
7c185276
AK
2031 dev_err(&adapter->pdev->dev,
2032 "UE: %s bit set\n", ue_status_low_desc[i]);
2033 }
2034 }
e1cfb67a
PR
2035 if (ue_hi) {
2036 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2037 if (ue_hi & 1)
7c185276
AK
2038 dev_err(&adapter->pdev->dev,
2039 "UE: %s bit set\n", ue_status_hi_desc[i]);
2040 }
2041 }
2042
e1cfb67a
PR
2043 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2044 dev_err(&adapter->pdev->dev,
2045 "sliport status 0x%x\n", sliport_status);
2046 dev_err(&adapter->pdev->dev,
2047 "sliport error1 0x%x\n", sliport_err1);
2048 dev_err(&adapter->pdev->dev,
2049 "sliport error2 0x%x\n", sliport_err2);
2050 }
7c185276
AK
2051}
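/* Decode example for the UE dump above (value assumed): a masked
 * ue_lo of 0x5 has bits 0 and 2 set, so the loop logs the block names
 * at indexes 0 and 2 of ue_status_low_desc[]; ue_hi is decoded the
 * same way against ue_status_hi_desc[].
 */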
2052
8d56ff11
SP
2053static void be_msix_disable(struct be_adapter *adapter)
2054{
ac6a0c4a 2055 if (msix_enabled(adapter)) {
8d56ff11 2056 pci_disable_msix(adapter->pdev);
ac6a0c4a 2057 adapter->num_msix_vec = 0;
3abcdeda
SP
2058 }
2059}
2060
10ef9ab4
SP
2061static uint be_num_rss_want(struct be_adapter *adapter)
2062{
2063 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2064 adapter->num_vfs == 0 && be_physfn(adapter) &&
2065 !be_is_mc(adapter))
2066 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2067 else
2068 return 0;
2069}
2070
6b7c5b94
SP
2071static void be_msix_enable(struct be_adapter *adapter)
2072{
10ef9ab4 2073#define BE_MIN_MSIX_VECTORS 1
ac6a0c4a 2074 int i, status, num_vec;
6b7c5b94 2075
10ef9ab4
SP
2076 /* If RSS queues are not used, need a vec for default RX Q */
2077 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2078 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
3abcdeda 2079
ac6a0c4a 2080 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2081 adapter->msix_entries[i].entry = i;
2082
ac6a0c4a 2083 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
3abcdeda
SP
2084 if (status == 0) {
2085 goto done;
2086 } else if (status >= BE_MIN_MSIX_VECTORS) {
ac6a0c4a 2087 num_vec = status;
3abcdeda 2088 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
ac6a0c4a 2089 num_vec) == 0)
3abcdeda 2090 goto done;
3abcdeda
SP
2091 }
2092 return;
2093done:
ac6a0c4a
SP
2094 adapter->num_msix_vec = num_vec;
2095 return;
6b7c5b94
SP
2096}
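/* Fallback example for the MSI-X setup above (counts assumed): if
 * num_vec == 8 but only 4 vectors are available, pci_enable_msix()
 * returns 4 (a positive value is the vector count on offer); the code
 * retries with num_vec = 4 and records adapter->num_msix_vec = 4.
 */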
2097
f9449ab7 2098static int be_sriov_enable(struct be_adapter *adapter)
ba343c77 2099{
344dbf10 2100 be_check_sriov_fn_type(adapter);
11ac75ed 2101
6dedec81 2102#ifdef CONFIG_PCI_IOV
ba343c77 2103 if (be_physfn(adapter) && num_vfs) {
81be8f0a 2104 int status, pos;
11ac75ed 2105 u16 dev_vfs;
81be8f0a
AK
2106
2107 pos = pci_find_ext_capability(adapter->pdev,
2108 PCI_EXT_CAP_ID_SRIOV);
2109 pci_read_config_word(adapter->pdev,
11ac75ed 2110 pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);
81be8f0a 2111
11ac75ed
SP
2112 adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
2113 if (adapter->num_vfs != num_vfs)
81be8f0a 2114 dev_info(&adapter->pdev->dev,
11ac75ed
SP
2115 "Device supports %d VFs and not %d\n",
2116 adapter->num_vfs, num_vfs);
6dedec81 2117
11ac75ed
SP
2118 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2119 if (status)
2120 adapter->num_vfs = 0;
f9449ab7 2121
11ac75ed 2122 if (adapter->num_vfs) {
f9449ab7
SP
2123 adapter->vf_cfg = kcalloc(num_vfs,
2124 sizeof(struct be_vf_cfg),
2125 GFP_KERNEL);
2126 if (!adapter->vf_cfg)
2127 return -ENOMEM;
2128 }
ba343c77
SB
2129 }
2130#endif
f9449ab7 2131 return 0;
ba343c77
SB
2132}
2133
2134static void be_sriov_disable(struct be_adapter *adapter)
2135{
2136#ifdef CONFIG_PCI_IOV
11ac75ed 2137 if (sriov_enabled(adapter)) {
ba343c77 2138 pci_disable_sriov(adapter->pdev);
f9449ab7 2139 kfree(adapter->vf_cfg);
11ac75ed 2140 adapter->num_vfs = 0;
ba343c77
SB
2141 }
2142#endif
2143}
2144
fe6d2a38 2145static inline int be_msix_vec_get(struct be_adapter *adapter,
10ef9ab4 2146 struct be_eq_obj *eqo)
b628bde2 2147{
10ef9ab4 2148 return adapter->msix_entries[eqo->idx].vector;
b628bde2 2149}
6b7c5b94 2150
b628bde2
SP
2151static int be_msix_register(struct be_adapter *adapter)
2152{
10ef9ab4
SP
2153 struct net_device *netdev = adapter->netdev;
2154 struct be_eq_obj *eqo;
2155 int status, i, vec;
6b7c5b94 2156
10ef9ab4
SP
2157 for_all_evt_queues(adapter, eqo, i) {
2158 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2159 vec = be_msix_vec_get(adapter, eqo);
2160 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2161 if (status)
2162 goto err_msix;
2163 }
b628bde2 2164
6b7c5b94 2165 return 0;
3abcdeda 2166err_msix:
10ef9ab4
SP
2167 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2168 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2169 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2170 status);
ac6a0c4a 2171 be_msix_disable(adapter);
6b7c5b94
SP
2172 return status;
2173}
2174
2175static int be_irq_register(struct be_adapter *adapter)
2176{
2177 struct net_device *netdev = adapter->netdev;
2178 int status;
2179
ac6a0c4a 2180 if (msix_enabled(adapter)) {
6b7c5b94
SP
2181 status = be_msix_register(adapter);
2182 if (status == 0)
2183 goto done;
ba343c77
SB
2184 /* INTx is not supported for VF */
2185 if (!be_physfn(adapter))
2186 return status;
6b7c5b94
SP
2187 }
2188
2189 /* INTx */
2190 netdev->irq = adapter->pdev->irq;
2191 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2192 adapter);
2193 if (status) {
2194 dev_err(&adapter->pdev->dev,
2195 "INTx request IRQ failed - err %d\n", status);
2196 return status;
2197 }
2198done:
2199 adapter->isr_registered = true;
2200 return 0;
2201}
2202
2203static void be_irq_unregister(struct be_adapter *adapter)
2204{
2205 struct net_device *netdev = adapter->netdev;
10ef9ab4 2206 struct be_eq_obj *eqo;
3abcdeda 2207 int i;
6b7c5b94
SP
2208
2209 if (!adapter->isr_registered)
2210 return;
2211
2212 /* INTx */
ac6a0c4a 2213 if (!msix_enabled(adapter)) {
6b7c5b94
SP
2214 free_irq(netdev->irq, adapter);
2215 goto done;
2216 }
2217
2218 /* MSIx */
10ef9ab4
SP
2219 for_all_evt_queues(adapter, eqo, i)
2220 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2221
6b7c5b94
SP
2222done:
2223 adapter->isr_registered = false;
6b7c5b94
SP
2224}
2225
10ef9ab4 2226static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2227{
2228 struct be_queue_info *q;
2229 struct be_rx_obj *rxo;
2230 int i;
2231
2232 for_all_rx_queues(adapter, rxo, i) {
2233 q = &rxo->q;
2234 if (q->created) {
2235 be_cmd_rxq_destroy(adapter, q);
2236 /* After the rxq is invalidated, wait for a grace time
2237 * of 1ms for all dma to end and the flush compl to
2238 * arrive
2239 */
2240 mdelay(1);
10ef9ab4 2241 be_rx_cq_clean(rxo);
482c9e79 2242 }
10ef9ab4 2243 be_queue_free(adapter, q);
482c9e79
SP
2244 }
2245}
2246
889cd4b2
SP
2247static int be_close(struct net_device *netdev)
2248{
2249 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2250 struct be_eq_obj *eqo;
2251 int i;
889cd4b2 2252
889cd4b2
SP
2253 be_async_mcc_disable(adapter);
2254
fe6d2a38
SP
2255 if (!lancer_chip(adapter))
2256 be_intr_set(adapter, false);
889cd4b2 2257
10ef9ab4
SP
2258 for_all_evt_queues(adapter, eqo, i) {
2259 napi_disable(&eqo->napi);
2260 if (msix_enabled(adapter))
2261 synchronize_irq(be_msix_vec_get(adapter, eqo));
2262 else
2263 synchronize_irq(netdev->irq);
2264 be_eq_clean(eqo);
63fcb27f
PR
2265 }
2266
889cd4b2
SP
2267 be_irq_unregister(adapter);
2268
889cd4b2
SP
2269 /* Wait for all pending tx completions to arrive so that
2270 * all tx skbs are freed.
2271 */
0ae57bb3 2272 be_tx_compl_clean(adapter);
889cd4b2 2273
10ef9ab4 2274 be_rx_qs_destroy(adapter);
482c9e79
SP
2275 return 0;
2276}
2277
10ef9ab4 2278static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2279{
2280 struct be_rx_obj *rxo;
e9008ee9
PR
2281 int rc, i, j;
2282 u8 rsstable[128];
482c9e79
SP
2283
2284 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2285 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2286 sizeof(struct be_eth_rx_d));
2287 if (rc)
2288 return rc;
2289 }
2290
2291 /* The FW would like the default RXQ to be created first */
2292 rxo = default_rxo(adapter);
2293 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2294 adapter->if_handle, false, &rxo->rss_id);
2295 if (rc)
2296 return rc;
2297
2298 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2299 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2300 rx_frag_size, adapter->if_handle,
2301 true, &rxo->rss_id);
482c9e79
SP
2302 if (rc)
2303 return rc;
2304 }
2305
2306 if (be_multi_rxq(adapter)) {
e9008ee9
PR
2307 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2308 for_all_rss_queues(adapter, rxo, i) {
2309 if ((j + i) >= 128)
2310 break;
2311 rsstable[j + i] = rxo->rss_id;
2312 }
2313 }
2314 rc = be_cmd_rss_config(adapter, rsstable, 128);
482c9e79
SP
2315 if (rc)
2316 return rc;
2317 }
2318
2319 /* First time posting */
10ef9ab4 2320 for_all_rx_queues(adapter, rxo, i)
482c9e79 2321 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2322 return 0;
2323}
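/* Fill example for the RSS indirection table above (counts assumed):
 * with num_rx_qs == 5 there are 4 RSS rings, j steps by 4, and each
 * inner pass writes their four rss_ids into consecutive slots, so
 * rsstable[] repeats the pattern id0,id1,id2,id3 across all 128
 * hash buckets.
 */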
2324
6b7c5b94
SP
2325static int be_open(struct net_device *netdev)
2326{
2327 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2328 struct be_eq_obj *eqo;
3abcdeda 2329 struct be_rx_obj *rxo;
10ef9ab4 2330 struct be_tx_obj *txo;
b236916a 2331 u8 link_status;
3abcdeda 2332 int status, i;
5fb379ee 2333
10ef9ab4 2334 status = be_rx_qs_create(adapter);
482c9e79
SP
2335 if (status)
2336 goto err;
2337
5fb379ee
SP
2338 be_irq_register(adapter);
2339
fe6d2a38
SP
2340 if (!lancer_chip(adapter))
2341 be_intr_set(adapter, true);
5fb379ee 2342
10ef9ab4 2343 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2344 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2345
10ef9ab4
SP
2346 for_all_tx_queues(adapter, txo, i)
2347 be_cq_notify(adapter, txo->cq.id, true, 0);
2348
7a1e9b20
SP
2349 be_async_mcc_enable(adapter);
2350
10ef9ab4
SP
2351 for_all_evt_queues(adapter, eqo, i) {
2352 napi_enable(&eqo->napi);
2353 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2354 }
2355
b236916a
AK
2356 status = be_cmd_link_status_query(adapter, NULL, NULL,
2357 &link_status, 0);
2358 if (!status)
2359 be_link_status_update(adapter, link_status);
2360
889cd4b2
SP
2361 return 0;
2362err:
2363 be_close(adapter->netdev);
2364 return -EIO;
5fb379ee
SP
2365}
2366
71d8d1b5
AK
2367static int be_setup_wol(struct be_adapter *adapter, bool enable)
2368{
2369 struct be_dma_mem cmd;
2370 int status = 0;
2371 u8 mac[ETH_ALEN];
2372
2373 memset(mac, 0, ETH_ALEN);
2374
2375 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2b7bcebf
IV
2376 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2377 GFP_KERNEL);
71d8d1b5
AK
2378 if (cmd.va == NULL)
2379 return -1;
2380 memset(cmd.va, 0, cmd.size);
2381
2382 if (enable) {
2383 status = pci_write_config_dword(adapter->pdev,
2384 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2385 if (status) {
2386 dev_err(&adapter->pdev->dev,
2381a55c 2387 "Could not enable Wake-on-LAN\n");
2b7bcebf
IV
2388 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2389 cmd.dma);
71d8d1b5
AK
2390 return status;
2391 }
2392 status = be_cmd_enable_magic_wol(adapter,
2393 adapter->netdev->dev_addr, &cmd);
2394 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2395 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2396 } else {
2397 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2398 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2399 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2400 }
2401
2b7bcebf 2402 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2403 return status;
2404}
2405
6d87f5c3
AK
2406/*
2407 * Generate a seed MAC address from the PF MAC Address using jhash.
2408 * MAC addresses for VFs are assigned incrementally starting from the seed.
2409 * These addresses are programmed in the ASIC by the PF and the VF driver
2410 * queries for the MAC address during its probe.
2411 */
2412static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2413{
f9449ab7 2414 u32 vf;
3abcdeda 2415 int status = 0;
6d87f5c3 2416 u8 mac[ETH_ALEN];
11ac75ed 2417 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2418
2419 be_vf_eth_addr_generate(adapter, mac);
2420
11ac75ed 2421 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2422 if (lancer_chip(adapter)) {
2423 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2424 } else {
2425 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
2426 vf_cfg->if_handle,
2427 &vf_cfg->pmac_id, vf + 1);
590c391d
PR
2428 }
2429
6d87f5c3
AK
2430 if (status)
2431 dev_err(&adapter->pdev->dev,
590c391d 2432 "MAC address assignment failed for VF %d\n", vf);
6d87f5c3 2433 else
11ac75ed 2434 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
2435
2436 mac[5] += 1;
2437 }
2438 return status;
2439}
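/* Assignment example for the VF MACs above (seed assumed): for a
 * generated seed of 02:aa:bb:cc:dd:10, VF0 is given ...:10, VF1
 * ...:11 and VF2 ...:12; only mac[5] is bumped, so the increment
 * wraps within the last octet and never carries into mac[4].
 */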
2440
f9449ab7 2441static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 2442{
11ac75ed 2443 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2444 u32 vf;
2445
11ac75ed 2446 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2447 if (lancer_chip(adapter))
2448 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2449 else
11ac75ed
SP
2450 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2451 vf_cfg->pmac_id, vf + 1);
f9449ab7 2452
11ac75ed
SP
2453 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2454 }
6d87f5c3
AK
2455}
2456
a54769f5
SP
2457static int be_clear(struct be_adapter *adapter)
2458{
191eb756
SP
2459 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2460 cancel_delayed_work_sync(&adapter->work);
2461 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2462 }
2463
11ac75ed 2464 if (sriov_enabled(adapter))
f9449ab7
SP
2465 be_vf_clear(adapter);
2466
2467 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5
SP
2468
2469 be_mcc_queues_destroy(adapter);
10ef9ab4 2470 be_rx_cqs_destroy(adapter);
a54769f5 2471 be_tx_queues_destroy(adapter);
10ef9ab4 2472 be_evt_queues_destroy(adapter);
a54769f5
SP
2473
2474 /* tell fw we're done with firing cmds */
2475 be_cmd_fw_clean(adapter);
10ef9ab4
SP
2476
2477 be_msix_disable(adapter);
a54769f5
SP
2478 return 0;
2479}
2480
30128031
SP
2481static void be_vf_setup_init(struct be_adapter *adapter)
2482{
11ac75ed 2483 struct be_vf_cfg *vf_cfg;
30128031
SP
2484 int vf;
2485
11ac75ed
SP
2486 for_all_vfs(adapter, vf_cfg, vf) {
2487 vf_cfg->if_handle = -1;
2488 vf_cfg->pmac_id = -1;
30128031
SP
2489 }
2490}
2491
f9449ab7
SP
2492static int be_vf_setup(struct be_adapter *adapter)
2493{
11ac75ed 2494 struct be_vf_cfg *vf_cfg;
f9449ab7
SP
2495 u32 cap_flags, en_flags, vf;
2496 u16 lnk_speed;
2497 int status;
2498
30128031
SP
2499 be_vf_setup_init(adapter);
2500
590c391d
PR
2501 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2502 BE_IF_FLAGS_MULTICAST;
11ac75ed 2503 for_all_vfs(adapter, vf_cfg, vf) {
f9449ab7 2504 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
11ac75ed 2505 &vf_cfg->if_handle, NULL, vf + 1);
f9449ab7
SP
2506 if (status)
2507 goto err;
f9449ab7
SP
2508 }
2509
590c391d
PR
2510 status = be_vf_eth_addr_config(adapter);
2511 if (status)
2512 goto err;
f9449ab7 2513
11ac75ed 2514 for_all_vfs(adapter, vf_cfg, vf) {
f9449ab7 2515 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
b236916a 2516 NULL, vf + 1);
f9449ab7
SP
2517 if (status)
2518 goto err;
11ac75ed 2519 vf_cfg->tx_rate = lnk_speed * 10;
f9449ab7
SP
2520 }
2521 return 0;
2522err:
2523 return status;
2524}
2525
30128031
SP
2526static void be_setup_init(struct be_adapter *adapter)
2527{
2528 adapter->vlan_prio_bmap = 0xff;
2529 adapter->link_speed = -1;
2530 adapter->if_handle = -1;
2531 adapter->be3_native = false;
2532 adapter->promiscuous = false;
2533 adapter->eq_next_idx = 0;
2534}
2535
e5e1ee89 2536static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
590c391d
PR
2537{
2538 u32 pmac_id;
e5e1ee89
PR
2539 int status;
2540 bool pmac_id_active;
2541
2542 status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
2543 &pmac_id, mac);
590c391d
PR
2544 if (status != 0)
2545 goto do_none;
e5e1ee89
PR
2546
2547 if (pmac_id_active) {
2548 status = be_cmd_mac_addr_query(adapter, mac,
2549 MAC_ADDRESS_TYPE_NETWORK,
2550 false, adapter->if_handle, pmac_id);
2551
2552 if (!status)
2553 adapter->pmac_id = pmac_id;
2554 } else {
2555 status = be_cmd_pmac_add(adapter, mac,
2556 adapter->if_handle, &adapter->pmac_id, 0);
2557 }
590c391d
PR
2558do_none:
2559 return status;
2560}
2561
5fb379ee
SP
2562static int be_setup(struct be_adapter *adapter)
2563{
5fb379ee 2564 struct net_device *netdev = adapter->netdev;
f9449ab7 2565 u32 cap_flags, en_flags;
a54769f5 2566 u32 tx_fc, rx_fc;
10ef9ab4 2567 int status;
ba343c77
SB
2568 u8 mac[ETH_ALEN];
2569
30128031 2570 be_setup_init(adapter);
6b7c5b94 2571
f9449ab7 2572 be_cmd_req_native_mode(adapter);
73d540f2 2573
10ef9ab4
SP
2574 be_msix_enable(adapter);
2575
2576 status = be_evt_queues_create(adapter);
2577 if (status)
a54769f5 2578 goto err;
6b7c5b94 2579
10ef9ab4
SP
2580 status = be_tx_cqs_create(adapter);
2581 if (status)
2582 goto err;
2583
2584 status = be_rx_cqs_create(adapter);
2585 if (status)
a54769f5 2586 goto err;
6b7c5b94 2587
f9449ab7 2588 status = be_mcc_queues_create(adapter);
10ef9ab4 2589 if (status)
a54769f5 2590 goto err;
6b7c5b94 2591
f9449ab7
SP
2592 memset(mac, 0, ETH_ALEN);
2593 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
590c391d 2594 true /*permanent */, 0, 0);
f9449ab7
SP
2595 if (status)
2596 return status;
2597 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2598 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2903dd65 2599
f9449ab7
SP
2600 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2601 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2602 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
5d5adb93
PR
2603 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2604
f9449ab7
SP
2605 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2606 cap_flags |= BE_IF_FLAGS_RSS;
2607 en_flags |= BE_IF_FLAGS_RSS;
2608 }
2609 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2610 netdev->dev_addr, &adapter->if_handle,
2611 &adapter->pmac_id, 0);
5fb379ee 2612 if (status != 0)
a54769f5 2613 goto err;
6b7c5b94 2614
590c391d
PR
2615 /* The VF's permanent mac queried from card is incorrect.
2616 * For BEx: Query the mac configured by the PF using if_handle
2617 * For Lancer: Get and use mac_list to obtain mac address.
2618 */
2619 if (!be_physfn(adapter)) {
2620 if (lancer_chip(adapter))
e5e1ee89 2621 status = be_add_mac_from_list(adapter, mac);
590c391d
PR
2622 else
2623 status = be_cmd_mac_addr_query(adapter, mac,
2624 MAC_ADDRESS_TYPE_NETWORK, false,
2625 adapter->if_handle, 0);
f9449ab7
SP
2626 if (!status) {
2627 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2628 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2629 }
2630 }
0dffc83e 2631
10ef9ab4
SP
2632 status = be_tx_qs_create(adapter);
2633 if (status)
2634 goto err;
2635
04b71175 2636 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
5a56eb10 2637
a54769f5
SP
2638 status = be_vid_config(adapter, false, 0);
2639 if (status)
2640 goto err;
7ab8b0b4 2641
a54769f5 2642 be_set_rx_mode(adapter->netdev);
5fb379ee 2643
a54769f5 2644 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d
PR
2645 /* For Lancer: It is legal for this cmd to fail on VF */
2646 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
a54769f5 2647 goto err;
590c391d 2648
a54769f5
SP
2649 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2650 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2651 adapter->rx_fc);
590c391d
PR
2652 /* For Lancer: It is legal for this cmd to fail on VF */
2653 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
a54769f5
SP
2654 goto err;
2655 }
2dc1deb6 2656
a54769f5 2657 pcie_set_readrq(adapter->pdev, 4096);
5fb379ee 2658
11ac75ed 2659 if (sriov_enabled(adapter)) {
f9449ab7
SP
2660 status = be_vf_setup(adapter);
2661 if (status)
2662 goto err;
2663 }
2664
191eb756
SP
2665 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2666 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2667
f9449ab7 2668 return 0;
a54769f5
SP
2669err:
2670 be_clear(adapter);
2671 return status;
2672}
6b7c5b94 2673
66268739
IV
2674#ifdef CONFIG_NET_POLL_CONTROLLER
2675static void be_netpoll(struct net_device *netdev)
2676{
2677 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2678 struct be_eq_obj *eqo;
66268739
IV
2679 int i;
2680
10ef9ab4
SP
2681 for_all_evt_queues(adapter, eqo, i)
2682 event_handle(eqo);
2683
2684 return;
66268739
IV
2685}
2686#endif
2687
84517482 2688#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
fa9a6fed 2689static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
2690 const u8 *p, u32 img_start, int image_size,
2691 int hdr_size)
fa9a6fed
SB
2692{
2693 u32 crc_offset;
2694 u8 flashed_crc[4];
2695 int status;
3f0d4560
AK
2696
2697 crc_offset = hdr_size + img_start + image_size - 4;
2698
fa9a6fed 2699 p += crc_offset;
3f0d4560
AK
2700
2701 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 2702 (image_size - 4));
fa9a6fed
SB
2703 if (status) {
2704 dev_err(&adapter->pdev->dev,
2705 "could not get crc from flash, not flashing redboot\n");
2706 return false;
2707 }
2708
2709 /* update redboot only if crc does not match */
2710 if (!memcmp(flashed_crc, p, 4))
2711 return false;
2712 else
2713 return true;
fa9a6fed
SB
2714}
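/* CRC-check example for the redboot logic above: the last 4 bytes of
 * the redboot region inside the UFI file (at hdr_size + img_start +
 * image_size - 4) are compared with the CRC read back from flash, and
 * the section is reflashed only when the two differ.
 */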
2715
306f1348
SP
2716static bool phy_flashing_required(struct be_adapter *adapter)
2717{
2718 int status = 0;
2719 struct be_phy_info phy_info;
2720
2721 status = be_cmd_get_phy_info(adapter, &phy_info);
2722 if (status)
2723 return false;
2724 if ((phy_info.phy_type == TN_8022) &&
2725 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2726 return true;
2727 }
2728 return false;
2729}
2730
3f0d4560 2731static int be_flash_data(struct be_adapter *adapter,
84517482 2732 const struct firmware *fw,
3f0d4560
AK
2733 struct be_dma_mem *flash_cmd, int num_of_images)
2734
84517482 2735{
3f0d4560
AK
2736 int status = 0, i, filehdr_size = 0;
2737 u32 total_bytes = 0, flash_op;
84517482
AK
2738 int num_bytes;
2739 const u8 *p = fw->data;
2740 struct be_cmd_write_flashrom *req = flash_cmd->va;
215faf9c 2741 const struct flash_comp *pflashcomp;
9fe96934 2742 int num_comp;
3f0d4560 2743
306f1348 2744 static const struct flash_comp gen3_flash_types[10] = {
3f0d4560
AK
2745 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2746 FLASH_IMAGE_MAX_SIZE_g3},
2747 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2748 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2749 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2750 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2751 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2752 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2753 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2754 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2755 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2756 FLASH_IMAGE_MAX_SIZE_g3},
2757 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2758 FLASH_IMAGE_MAX_SIZE_g3},
2759 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
9fe96934
SB
2760 FLASH_IMAGE_MAX_SIZE_g3},
2761 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
306f1348
SP
2762 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2763 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2764 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
3f0d4560 2765 };
215faf9c 2766 static const struct flash_comp gen2_flash_types[8] = {
3f0d4560
AK
2767 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2768 FLASH_IMAGE_MAX_SIZE_g2},
2769 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2770 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2771 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2772 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2773 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2774 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2775 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2776 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2777 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2778 FLASH_IMAGE_MAX_SIZE_g2},
2779 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2780 FLASH_IMAGE_MAX_SIZE_g2},
2781 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2782 FLASH_IMAGE_MAX_SIZE_g2}
2783 };
2784
2785 if (adapter->generation == BE_GEN3) {
2786 pflashcomp = gen3_flash_types;
2787 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 2788 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
2789 } else {
2790 pflashcomp = gen2_flash_types;
2791 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 2792 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 2793 }
9fe96934
SB
2794 for (i = 0; i < num_comp; i++) {
2795 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2796 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2797 continue;
306f1348
SP
2798 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2799 if (!phy_flashing_required(adapter))
2800 continue;
2801 }
3f0d4560
AK
2802 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2803 (!be_flash_redboot(adapter, fw->data,
fae21a4d
AK
2804 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2805 (num_of_images * sizeof(struct image_hdr)))))
3f0d4560
AK
2806 continue;
2807 p = fw->data;
2808 p += filehdr_size + pflashcomp[i].offset
2809 + (num_of_images * sizeof(struct image_hdr));
306f1348
SP
2810 if (p + pflashcomp[i].size > fw->data + fw->size)
2811 return -1;
2812 total_bytes = pflashcomp[i].size;
3f0d4560
AK
2813 while (total_bytes) {
2814 if (total_bytes > 32*1024)
2815 num_bytes = 32*1024;
2816 else
2817 num_bytes = total_bytes;
2818 total_bytes -= num_bytes;
306f1348
SP
2819 if (!total_bytes) {
2820 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2821 flash_op = FLASHROM_OPER_PHY_FLASH;
2822 else
2823 flash_op = FLASHROM_OPER_FLASH;
2824 } else {
2825 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2826 flash_op = FLASHROM_OPER_PHY_SAVE;
2827 else
2828 flash_op = FLASHROM_OPER_SAVE;
2829 }
3f0d4560
AK
2830 memcpy(req->params.data_buf, p, num_bytes);
2831 p += num_bytes;
2832 status = be_cmd_write_flashrom(adapter, flash_cmd,
2833 pflashcomp[i].optype, flash_op, num_bytes);
2834 if (status) {
306f1348
SP
2835 if ((status == ILLEGAL_IOCTL_REQ) &&
2836 (pflashcomp[i].optype ==
2837 IMG_TYPE_PHY_FW))
2838 break;
3f0d4560
AK
2839 dev_err(&adapter->pdev->dev,
2840 "cmd to write to flash rom failed.\n");
2841 return -1;
2842 }
84517482 2843 }
84517482 2844 }
84517482
AK
2845 return 0;
2846}
2847
3f0d4560
AK
2848static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2849{
2850 if (fhdr == NULL)
2851 return 0;
2852 if (fhdr->build[0] == '3')
2853 return BE_GEN3;
2854 else if (fhdr->build[0] == '2')
2855 return BE_GEN2;
2856 else
2857 return 0;
2858}
2859
485bf569
SN
2860static int lancer_fw_download(struct be_adapter *adapter,
2861 const struct firmware *fw)
84517482 2862{
485bf569
SN
2863#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2864#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 2865 struct be_dma_mem flash_cmd;
485bf569
SN
2866 const u8 *data_ptr = NULL;
2867 u8 *dest_image_ptr = NULL;
2868 size_t image_size = 0;
2869 u32 chunk_size = 0;
2870 u32 data_written = 0;
2871 u32 offset = 0;
2872 int status = 0;
2873 u8 add_status = 0;
84517482 2874
485bf569 2875 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 2876 dev_err(&adapter->pdev->dev,
485bf569
SN
2877 "FW Image not properly aligned. "
2878 "Length must be 4 byte aligned.\n");
2879 status = -EINVAL;
2880 goto lancer_fw_exit;
d9efd2af
SB
2881 }
2882
485bf569
SN
2883 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2884 + LANCER_FW_DOWNLOAD_CHUNK;
2885 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2886 &flash_cmd.dma, GFP_KERNEL);
2887 if (!flash_cmd.va) {
2888 status = -ENOMEM;
2889 dev_err(&adapter->pdev->dev,
2890 "Memory allocation failure while flashing\n");
2891 goto lancer_fw_exit;
2892 }
84517482 2893
485bf569
SN
2894 dest_image_ptr = flash_cmd.va +
2895 sizeof(struct lancer_cmd_req_write_object);
2896 image_size = fw->size;
2897 data_ptr = fw->data;
2898
2899 while (image_size) {
2900 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2901
2902 /* Copy the image chunk content. */
2903 memcpy(dest_image_ptr, data_ptr, chunk_size);
2904
2905 status = lancer_cmd_write_object(adapter, &flash_cmd,
2906 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2907 &data_written, &add_status);
2908
2909 if (status)
2910 break;
2911
2912 offset += data_written;
2913 data_ptr += data_written;
2914 image_size -= data_written;
2915 }
2916
2917 if (!status) {
2918 /* Commit the FW written */
2919 status = lancer_cmd_write_object(adapter, &flash_cmd,
2920 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2921 &data_written, &add_status);
2922 }
2923
2924 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2925 flash_cmd.dma);
2926 if (status) {
2927 dev_err(&adapter->pdev->dev,
2928 "Firmware load error. "
2929 "Status code: 0x%x Additional Status: 0x%x\n",
2930 status, add_status);
2931 goto lancer_fw_exit;
2932 }
2933
2934 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2935lancer_fw_exit:
2936 return status;
2937}
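/* Chunking example for the download loop above (size assumed, and
 * assuming each call writes a full chunk): a 100 KB image goes out as
 * three 32 KB chunks plus a 4 KB tail at offsets 0, 32K, 64K and 96K;
 * the final zero-length lancer_cmd_write_object() call commits the
 * staged image.
 */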
2938
2939static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2940{
2941 struct flash_file_hdr_g2 *fhdr;
2942 struct flash_file_hdr_g3 *fhdr3;
2943 struct image_hdr *img_hdr_ptr = NULL;
2944 struct be_dma_mem flash_cmd;
2945 const u8 *p;
2946 int status = 0, i = 0, num_imgs = 0;
84517482
AK
2947
2948 p = fw->data;
3f0d4560 2949 fhdr = (struct flash_file_hdr_g2 *) p;
84517482 2950
84517482 2951 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2b7bcebf
IV
2952 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2953 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
2954 if (!flash_cmd.va) {
2955 status = -ENOMEM;
2956 dev_err(&adapter->pdev->dev,
2957 "Memory allocation failure while flashing\n");
485bf569 2958 goto be_fw_exit;
84517482
AK
2959 }
2960
3f0d4560
AK
2961 if ((adapter->generation == BE_GEN3) &&
2962 (get_ufigen_type(fhdr) == BE_GEN3)) {
2963 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
8b93b710
AK
2964 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2965 for (i = 0; i < num_imgs; i++) {
3f0d4560
AK
2966 img_hdr_ptr = (struct image_hdr *) (fw->data +
2967 (sizeof(struct flash_file_hdr_g3) +
8b93b710
AK
2968 i * sizeof(struct image_hdr)));
2969 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2970 status = be_flash_data(adapter, fw, &flash_cmd,
2971 num_imgs);
3f0d4560
AK
2972 }
2973 } else if ((adapter->generation == BE_GEN2) &&
2974 (get_ufigen_type(fhdr) == BE_GEN2)) {
2975 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2976 } else {
2977 dev_err(&adapter->pdev->dev,
2978 "UFI and Interface are not compatible for flashing\n");
2979 status = -1;
84517482
AK
2980 }
2981
2b7bcebf
IV
2982 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2983 flash_cmd.dma);
84517482
AK
2984 if (status) {
2985 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 2986 goto be_fw_exit;
84517482
AK
2987 }
2988
af901ca1 2989 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 2990
485bf569
SN
2991be_fw_exit:
2992 return status;
2993}
2994
2995int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2996{
2997 const struct firmware *fw;
2998 int status;
2999
3000 if (!netif_running(adapter->netdev)) {
3001 dev_err(&adapter->pdev->dev,
3002 "Firmware load not allowed (interface is down)\n");
3003 return -1;
3004 }
3005
3006 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3007 if (status)
3008 goto fw_exit;
3009
3010 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3011
3012 if (lancer_chip(adapter))
3013 status = lancer_fw_download(adapter, fw);
3014 else
3015 status = be_fw_download(adapter, fw);
3016
84517482
AK
3017fw_exit:
3018 release_firmware(fw);
3019 return status;
3020}
3021
e5686ad8 3022static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
3023 .ndo_open = be_open,
3024 .ndo_stop = be_close,
3025 .ndo_start_xmit = be_xmit,
a54769f5 3026 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
3027 .ndo_set_mac_address = be_mac_addr_set,
3028 .ndo_change_mtu = be_change_mtu,
ab1594e9 3029 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 3030 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
3031 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3032 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 3033 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 3034 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 3035 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
66268739
IV
3036 .ndo_get_vf_config = be_get_vf_config,
3037#ifdef CONFIG_NET_POLL_CONTROLLER
3038 .ndo_poll_controller = be_netpoll,
3039#endif
6b7c5b94
SP
3040};
3041
3042static void be_netdev_init(struct net_device *netdev)
3043{
3044 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3045 struct be_eq_obj *eqo;
3abcdeda 3046 int i;
6b7c5b94 3047
6332c8d3 3048 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68
MM
3049 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3050 NETIF_F_HW_VLAN_TX;
3051 if (be_multi_rxq(adapter))
3052 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
3053
3054 netdev->features |= netdev->hw_features |
8b8ddc68 3055 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4b972914 3056
eb8a50d9 3057 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 3058 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 3059
6b7c5b94
SP
3060 netdev->flags |= IFF_MULTICAST;
3061
c190e3c8
AK
3062 netif_set_gso_max_size(netdev, 65535);
3063
10ef9ab4 3064 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94
SP
3065
3066 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3067
10ef9ab4
SP
3068 for_all_evt_queues(adapter, eqo, i)
3069 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
6b7c5b94
SP
3070}
3071
3072static void be_unmap_pci_bars(struct be_adapter *adapter)
3073{
8788fdc2
SP
3074 if (adapter->csr)
3075 iounmap(adapter->csr);
3076 if (adapter->db)
3077 iounmap(adapter->db);
6b7c5b94
SP
3078}
3079
3080static int be_map_pci_bars(struct be_adapter *adapter)
3081{
3082 u8 __iomem *addr;
db3ea781 3083 int db_reg;
6b7c5b94 3084
fe6d2a38
SP
3085 if (lancer_chip(adapter)) {
3086 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3087 pci_resource_len(adapter->pdev, 0));
3088 if (addr == NULL)
3089 return -ENOMEM;
3090 adapter->db = addr;
3091 return 0;
3092 }
3093
ba343c77
SB
3094 if (be_physfn(adapter)) {
3095 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3096 pci_resource_len(adapter->pdev, 2));
3097 if (addr == NULL)
3098 return -ENOMEM;
3099 adapter->csr = addr;
3100 }
6b7c5b94 3101
ba343c77 3102 if (adapter->generation == BE_GEN2) {
ba343c77
SB
3103 db_reg = 4;
3104 } else {
ba343c77
SB
3105 if (be_physfn(adapter))
3106 db_reg = 4;
3107 else
3108 db_reg = 0;
3109 }
3110 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3111 pci_resource_len(adapter->pdev, db_reg));
6b7c5b94
SP
3112 if (addr == NULL)
3113 goto pci_map_err;
ba343c77
SB
3114 adapter->db = addr;
3115
6b7c5b94
SP
3116 return 0;
3117pci_map_err:
3118 be_unmap_pci_bars(adapter);
3119 return -ENOMEM;
3120}
3121
3122
3123static void be_ctrl_cleanup(struct be_adapter *adapter)
3124{
8788fdc2 3125 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
3126
3127 be_unmap_pci_bars(adapter);
3128
3129 if (mem->va)
2b7bcebf
IV
3130 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3131 mem->dma);
e7b909a6 3132
5b8821b7 3133 mem = &adapter->rx_filter;
e7b909a6 3134 if (mem->va)
2b7bcebf
IV
3135 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3136 mem->dma);
6b7c5b94
SP
3137}
3138
6b7c5b94
SP
3139static int be_ctrl_init(struct be_adapter *adapter)
3140{
8788fdc2
SP
3141 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3142 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 3143 struct be_dma_mem *rx_filter = &adapter->rx_filter;
6b7c5b94 3144 int status;
6b7c5b94
SP
3145
3146 status = be_map_pci_bars(adapter);
3147 if (status)
e7b909a6 3148 goto done;
6b7c5b94
SP
3149
3150 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
3151 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3152 mbox_mem_alloc->size,
3153 &mbox_mem_alloc->dma,
3154 GFP_KERNEL);
6b7c5b94 3155 if (!mbox_mem_alloc->va) {
e7b909a6
SP
3156 status = -ENOMEM;
3157 goto unmap_pci_bars;
6b7c5b94
SP
3158 }
3159 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3160 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3161 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3162 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 3163
5b8821b7
SP
3164 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3165 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3166 &rx_filter->dma, GFP_KERNEL);
3167 if (rx_filter->va == NULL) {
e7b909a6
SP
3168 status = -ENOMEM;
3169 goto free_mbox;
3170 }
5b8821b7 3171 memset(rx_filter->va, 0, rx_filter->size);
e7b909a6 3172
2984961c 3173 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
3174 spin_lock_init(&adapter->mcc_lock);
3175 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 3176
dd131e76 3177 init_completion(&adapter->flash_compl);
cf588477 3178 pci_save_state(adapter->pdev);
6b7c5b94 3179 return 0;
e7b909a6
SP
3180
3181free_mbox:
2b7bcebf
IV
3182 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3183 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
3184
3185unmap_pci_bars:
3186 be_unmap_pci_bars(adapter);
3187
3188done:
3189 return status;
6b7c5b94
SP
3190}
3191
3192static void be_stats_cleanup(struct be_adapter *adapter)
3193{
3abcdeda 3194 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
3195
3196 if (cmd->va)
2b7bcebf
IV
3197 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3198 cmd->va, cmd->dma);
6b7c5b94
SP
3199}
3200
3201static int be_stats_init(struct be_adapter *adapter)
3202{
3abcdeda 3203 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 3204
005d5696 3205 if (adapter->generation == BE_GEN2) {
89a88ab8 3206 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
005d5696
SX
3207 } else {
3208 if (lancer_chip(adapter))
3209 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3210 else
3211 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3212 }
2b7bcebf
IV
3213 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3214 GFP_KERNEL);
6b7c5b94
SP
3215 if (cmd->va == NULL)
3216 return -1;
d291b9af 3217 memset(cmd->va, 0, cmd->size);
6b7c5b94
SP
3218 return 0;
3219}
3220
3221static void __devexit be_remove(struct pci_dev *pdev)
3222{
3223 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 3224
6b7c5b94
SP
3225 if (!adapter)
3226 return;
3227
3228 unregister_netdev(adapter->netdev);
3229
5fb379ee
SP
3230 be_clear(adapter);
3231
6b7c5b94
SP
3232 be_stats_cleanup(adapter);
3233
3234 be_ctrl_cleanup(adapter);
3235
ba343c77
SB
3236 be_sriov_disable(adapter);
3237
6b7c5b94
SP
3238 pci_set_drvdata(pdev, NULL);
3239 pci_release_regions(pdev);
3240 pci_disable_device(pdev);
3241
3242 free_netdev(adapter->netdev);
3243}
3244
2243e2e9 3245static int be_get_config(struct be_adapter *adapter)
6b7c5b94 3246{
6b7c5b94
SP
3247 int status;
3248
3abcdeda
SP
3249 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3250 &adapter->function_mode, &adapter->function_caps);
43a04fdc
SP
3251 if (status)
3252 return status;
3253
752961a1 3254 if (adapter->function_mode & FLEX10_MODE)
82903e4b
AK
3255 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3256 else
3257 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3258
9e1453c5
AK
3259 status = be_cmd_get_cntl_attributes(adapter);
3260 if (status)
3261 return status;
3262
2243e2e9 3263 return 0;
6b7c5b94
SP
3264}
3265
fe6d2a38
SP
3266static int be_dev_family_check(struct be_adapter *adapter)
3267{
3268 struct pci_dev *pdev = adapter->pdev;
3269 u32 sli_intf = 0, if_type;
3270
3271 switch (pdev->device) {
3272 case BE_DEVICE_ID1:
3273 case OC_DEVICE_ID1:
3274 adapter->generation = BE_GEN2;
3275 break;
3276 case BE_DEVICE_ID2:
3277 case OC_DEVICE_ID2:
ecedb6ae 3278 case OC_DEVICE_ID5:
fe6d2a38
SP
3279 adapter->generation = BE_GEN3;
3280 break;
3281 case OC_DEVICE_ID3:
12f4d0a8 3282 case OC_DEVICE_ID4:
fe6d2a38
SP
3283 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3284 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3285 SLI_INTF_IF_TYPE_SHIFT;
3286
3287 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3288 if_type != 0x02) {
3289 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3290 return -EINVAL;
3291 }
fe6d2a38
SP
3292 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3293 SLI_INTF_FAMILY_SHIFT);
3294 adapter->generation = BE_GEN3;
3295 break;
3296 default:
3297 adapter->generation = 0;
3298 }
3299 return 0;
3300}
3301
37eed1cb
PR
3302static int lancer_wait_ready(struct be_adapter *adapter)
3303{
d8110f62 3304#define SLIPORT_READY_TIMEOUT 30
37eed1cb
PR
3305 u32 sliport_status;
3306 int status = 0, i;
3307
3308 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3309 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3310 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3311 break;
3312
d8110f62 3313 msleep(1000);
37eed1cb
PR
3314 }
3315
3316 if (i == SLIPORT_READY_TIMEOUT)
3317 status = -1;
3318
3319 return status;
3320}
3321
3322static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3323{
3324 int status;
3325 u32 sliport_status, err, reset_needed;
3326 status = lancer_wait_ready(adapter);
3327 if (!status) {
3328 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3329 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3330 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3331 if (err && reset_needed) {
3332 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3333 adapter->db + SLIPORT_CONTROL_OFFSET);
3334
3335 /* check adapter has corrected the error */
3336 status = lancer_wait_ready(adapter);
3337 sliport_status = ioread32(adapter->db +
3338 SLIPORT_STATUS_OFFSET);
3339 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3340 SLIPORT_STATUS_RN_MASK);
3341 if (status || sliport_status)
3342 status = -1;
3343 } else if (err || reset_needed) {
3344 status = -1;
3345 }
3346 }
3347 return status;
3348}
3349
d8110f62
PR
3350static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
3351{
3352 int status;
3353 u32 sliport_status;
3354
3355 if (adapter->eeh_err || adapter->ue_detected)
3356 return;
3357
3358 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3359
3360 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3361 dev_err(&adapter->pdev->dev,
3362 "Adapter in error state."
3363 "Trying to recover.\n");
3364
3365 status = lancer_test_and_set_rdy_state(adapter);
3366 if (status)
3367 goto err;
3368
3369 netif_device_detach(adapter->netdev);
3370
3371 if (netif_running(adapter->netdev))
3372 be_close(adapter->netdev);
3373
3374 be_clear(adapter);
3375
3376 adapter->fw_timeout = false;
3377
3378 status = be_setup(adapter);
3379 if (status)
3380 goto err;
3381
3382 if (netif_running(adapter->netdev)) {
3383 status = be_open(adapter->netdev);
3384 if (status)
3385 goto err;
3386 }
3387
3388 netif_device_attach(adapter->netdev);
3389
3390 dev_err(&adapter->pdev->dev,
3391 "Adapter error recovery succeeded\n");
3392 }
3393 return;
3394err:
3395 dev_err(&adapter->pdev->dev,
3396 "Adapter error recovery failed\n");
3397}
3398
3399static void be_worker(struct work_struct *work)
3400{
3401 struct be_adapter *adapter =
3402 container_of(work, struct be_adapter, work.work);
3403 struct be_rx_obj *rxo;
10ef9ab4 3404 struct be_eq_obj *eqo;
d8110f62
PR
3405 int i;
3406
3407 if (lancer_chip(adapter))
3408 lancer_test_and_recover_fn_err(adapter);
3409
3410 be_detect_dump_ue(adapter);
3411
 3412 /* When interrupts are not yet enabled, just reap any pending
 3413 * MCC completions */
3414 if (!netif_running(adapter->netdev)) {
10ef9ab4 3415 be_process_mcc(adapter);
d8110f62
PR
3416 goto reschedule;
3417 }
3418
3419 if (!adapter->stats_cmd_sent) {
3420 if (lancer_chip(adapter))
3421 lancer_cmd_get_pport_stats(adapter,
3422 &adapter->stats_cmd);
3423 else
3424 be_cmd_get_stats(adapter, &adapter->stats_cmd);
3425 }
3426
3427 for_all_rx_queues(adapter, rxo, i) {
d8110f62
PR
3428 if (rxo->rx_post_starved) {
3429 rxo->rx_post_starved = false;
3430 be_post_rx_frags(rxo, GFP_KERNEL);
3431 }
3432 }
3433
10ef9ab4
SP
3434 for_all_evt_queues(adapter, eqo, i)
3435 be_eqd_update(adapter, eqo);
3436
d8110f62
PR
3437reschedule:
3438 adapter->work_counter++;
3439 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3440}
3441
6b7c5b94
SP
3442static int __devinit be_probe(struct pci_dev *pdev,
3443 const struct pci_device_id *pdev_id)
3444{
3445 int status = 0;
3446 struct be_adapter *adapter;
3447 struct net_device *netdev;
6b7c5b94
SP
3448
3449 status = pci_enable_device(pdev);
3450 if (status)
3451 goto do_none;
3452
3453 status = pci_request_regions(pdev, DRV_NAME);
3454 if (status)
3455 goto disable_dev;
3456 pci_set_master(pdev);
3457
3c8def97 3458 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
6b7c5b94
SP
 3459 if (!netdev) {
3460 status = -ENOMEM;
3461 goto rel_reg;
3462 }
3463 adapter = netdev_priv(netdev);
3464 adapter->pdev = pdev;
3465 pci_set_drvdata(pdev, adapter);
fe6d2a38
SP
3466
3467 status = be_dev_family_check(adapter);
63657b9c 3468 if (status)
fe6d2a38
SP
3469 goto free_netdev;
3470
6b7c5b94 3471 adapter->netdev = netdev;
2243e2e9 3472 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 3473
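/* Prefer 64-bit DMA addressing (and advertise NETIF_F_HIGHDMA);
 * fall back to a 32-bit mask if the host cannot support it.
 */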
2b7bcebf 3474 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
3475 if (!status) {
3476 netdev->features |= NETIF_F_HIGHDMA;
3477 } else {
2b7bcebf 3478 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
3479 if (status) {
3480 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3481 goto free_netdev;
3482 }
3483 }
3484
f9449ab7
SP
3485 status = be_sriov_enable(adapter);
3486 if (status)
3487 goto free_netdev;
ba343c77 3488
6b7c5b94
SP
3489 status = be_ctrl_init(adapter);
3490 if (status)
f9449ab7 3491 goto disable_sriov;
6b7c5b94 3492
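/* Lancer must reach the SLI ready state before mailbox commands can be
 * issued; once it is ready, reset the port and wait for it to come back
 * up, bailing out of the probe if it never does.
 */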
37eed1cb 3493 if (lancer_chip(adapter)) {
d8110f62
PR
3494 status = lancer_wait_ready(adapter);
3495 if (!status) {
3496 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3497 adapter->db + SLIPORT_CONTROL_OFFSET);
3498 status = lancer_test_and_set_rdy_state(adapter);
3499 }
37eed1cb
PR
3500 if (status) {
3501 dev_err(&pdev->dev, "Adapter in non recoverable error\n");
48f5a191 3502 goto ctrl_clean;
37eed1cb
PR
3503 }
3504 }
3505
2243e2e9 3506 /* sync up with fw's ready state */
ba343c77
SB
3507 if (be_physfn(adapter)) {
3508 status = be_cmd_POST(adapter);
3509 if (status)
3510 goto ctrl_clean;
ba343c77 3511 }
6b7c5b94 3512
2243e2e9
SP
3513 /* tell fw we're ready to fire cmds */
3514 status = be_cmd_fw_init(adapter);
6b7c5b94 3515 if (status)
2243e2e9
SP
3516 goto ctrl_clean;
3517
a4b4dfab
AK
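/* Reset the function to clear any queue and interface state left over
 * from a previous driver instance (see the kdump note below).
 */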
3518 status = be_cmd_reset_function(adapter);
3519 if (status)
3520 goto ctrl_clean;
556ae191 3521
10ef9ab4
SP
3522 /* The INTR bit may be set in the card when probed by a kdump kernel
3523 * after a crash.
3524 */
3525 if (!lancer_chip(adapter))
3526 be_intr_set(adapter, false);
3527
2243e2e9
SP
3528 status = be_stats_init(adapter);
3529 if (status)
3530 goto ctrl_clean;
3531
3532 status = be_get_config(adapter);
6b7c5b94
SP
3533 if (status)
3534 goto stats_clean;
6b7c5b94
SP
3535
3536 INIT_DELAYED_WORK(&adapter->work, be_worker);
a54769f5 3537 adapter->rx_fc = adapter->tx_fc = true;
6b7c5b94 3538
5fb379ee
SP
3539 status = be_setup(adapter);
3540 if (status)
3abcdeda 3541 goto msix_disable;
2243e2e9 3542
3abcdeda 3543 be_netdev_init(netdev);
6b7c5b94
SP
3544 status = register_netdev(netdev);
3545 if (status != 0)
5fb379ee 3546 goto unsetup;
6b7c5b94 3547
10ef9ab4
SP
3548 dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
3549 adapter->port_num);
34b1ef04 3550
6b7c5b94
SP
3551 return 0;
3552
5fb379ee
SP
3553unsetup:
3554 be_clear(adapter);
3abcdeda
SP
3555msix_disable:
3556 be_msix_disable(adapter);
6b7c5b94
SP
3557stats_clean:
3558 be_stats_cleanup(adapter);
3559ctrl_clean:
3560 be_ctrl_cleanup(adapter);
f9449ab7 3561disable_sriov:
ba343c77 3562 be_sriov_disable(adapter);
f9449ab7 3563free_netdev:
fe6d2a38 3564 free_netdev(netdev);
8d56ff11 3565 pci_set_drvdata(pdev, NULL);
6b7c5b94
SP
3566rel_reg:
3567 pci_release_regions(pdev);
3568disable_dev:
3569 pci_disable_device(pdev);
3570do_none:
c4ca2374 3571 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
3572 return status;
3573}
3574
3575static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3576{
3577 struct be_adapter *adapter = pci_get_drvdata(pdev);
3578 struct net_device *netdev = adapter->netdev;
3579
71d8d1b5
AK
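/* Program wake-on-LAN in the firmware before the device is quiesced */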
3580 if (adapter->wol)
3581 be_setup_wol(adapter, true);
3582
6b7c5b94
SP
3583 netif_device_detach(netdev);
3584 if (netif_running(netdev)) {
3585 rtnl_lock();
3586 be_close(netdev);
3587 rtnl_unlock();
3588 }
9b0365f1 3589 be_clear(adapter);
6b7c5b94
SP
3590
3591 pci_save_state(pdev);
3592 pci_disable_device(pdev);
3593 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3594 return 0;
3595}
3596
3597static int be_resume(struct pci_dev *pdev)
3598{
3599 int status = 0;
3600 struct be_adapter *adapter = pci_get_drvdata(pdev);
3601 struct net_device *netdev = adapter->netdev;
3602
3603 netif_device_detach(netdev);
3604
3605 status = pci_enable_device(pdev);
3606 if (status)
3607 return status;
3608
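/* Return the device to full power and restore the PCI config space
 * saved in be_suspend()
 */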
 3609 pci_set_power_state(pdev, PCI_D0);
3610 pci_restore_state(pdev);
3611
2243e2e9
SP
3612 /* tell fw we're ready to fire cmds */
3613 status = be_cmd_fw_init(adapter);
3614 if (status)
3615 return status;
3616
9b0365f1 3617 be_setup(adapter);
6b7c5b94
SP
3618 if (netif_running(netdev)) {
3619 rtnl_lock();
3620 be_open(netdev);
3621 rtnl_unlock();
3622 }
3623 netif_device_attach(netdev);
71d8d1b5
AK
3624
3625 if (adapter->wol)
3626 be_setup_wol(adapter, false);
a4ca055f 3627
6b7c5b94
SP
3628 return 0;
3629}
3630
82456b03
SP
3631/*
3632 * An FLR will stop BE from DMAing any data.
3633 */
3634static void be_shutdown(struct pci_dev *pdev)
3635{
3636 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 3637
2d5d4154
AK
3638 if (!adapter)
3639 return;
82456b03 3640
0f4a6828 3641 cancel_delayed_work_sync(&adapter->work);
a4ca055f 3642
2d5d4154 3643 netif_device_detach(adapter->netdev);
82456b03 3644
82456b03
SP
3645 if (adapter->wol)
3646 be_setup_wol(adapter, true);
3647
57841869
AK
3648 be_cmd_reset_function(adapter);
3649
82456b03 3650 pci_disable_device(pdev);
82456b03
SP
3651}
3652
cf588477
SP
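/* PCI error recovery (EEH) callbacks: error_detected() quiesces and
 * tears down the function, slot_reset() re-enables the device after the
 * platform resets the slot, and resume() rebuilds the function and
 * re-attaches the netdev.
 */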
3653static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3654 pci_channel_state_t state)
3655{
3656 struct be_adapter *adapter = pci_get_drvdata(pdev);
3657 struct net_device *netdev = adapter->netdev;
3658
3659 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3660
3661 adapter->eeh_err = true;
3662
3663 netif_device_detach(netdev);
3664
3665 if (netif_running(netdev)) {
3666 rtnl_lock();
3667 be_close(netdev);
3668 rtnl_unlock();
3669 }
3670 be_clear(adapter);
3671
3672 if (state == pci_channel_io_perm_failure)
3673 return PCI_ERS_RESULT_DISCONNECT;
3674
3675 pci_disable_device(pdev);
3676
3677 return PCI_ERS_RESULT_NEED_RESET;
3678}
3679
3680static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3681{
3682 struct be_adapter *adapter = pci_get_drvdata(pdev);
3683 int status;
3684
3685 dev_info(&adapter->pdev->dev, "EEH reset\n");
3686 adapter->eeh_err = false;
6589ade0
SP
3687 adapter->ue_detected = false;
3688 adapter->fw_timeout = false;
cf588477
SP
3689
3690 status = pci_enable_device(pdev);
3691 if (status)
3692 return PCI_ERS_RESULT_DISCONNECT;
3693
3694 pci_set_master(pdev);
 3695 pci_set_power_state(pdev, PCI_D0);
3696 pci_restore_state(pdev);
3697
3698 /* Check if card is ok and fw is ready */
3699 status = be_cmd_POST(adapter);
3700 if (status)
3701 return PCI_ERS_RESULT_DISCONNECT;
3702
3703 return PCI_ERS_RESULT_RECOVERED;
3704}
3705
3706static void be_eeh_resume(struct pci_dev *pdev)
3707{
3708 int status = 0;
3709 struct be_adapter *adapter = pci_get_drvdata(pdev);
3710 struct net_device *netdev = adapter->netdev;
3711
3712 dev_info(&adapter->pdev->dev, "EEH resume\n");
3713
3714 pci_save_state(pdev);
3715
3716 /* tell fw we're ready to fire cmds */
3717 status = be_cmd_fw_init(adapter);
3718 if (status)
3719 goto err;
3720
3721 status = be_setup(adapter);
3722 if (status)
3723 goto err;
3724
3725 if (netif_running(netdev)) {
3726 status = be_open(netdev);
3727 if (status)
3728 goto err;
3729 }
3730 netif_device_attach(netdev);
3731 return;
3732err:
3733 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
3734}
3735
3736static struct pci_error_handlers be_eeh_handlers = {
3737 .error_detected = be_eeh_err_detected,
3738 .slot_reset = be_eeh_reset,
3739 .resume = be_eeh_resume,
3740};
3741
6b7c5b94
SP
3742static struct pci_driver be_driver = {
3743 .name = DRV_NAME,
3744 .id_table = be_dev_ids,
3745 .probe = be_probe,
3746 .remove = be_remove,
3747 .suspend = be_suspend,
cf588477 3748 .resume = be_resume,
82456b03 3749 .shutdown = be_shutdown,
cf588477 3750 .err_handler = &be_eeh_handlers
6b7c5b94
SP
3751};
3752
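/* Only 2048, 4096 and 8192 byte RX fragments are supported; coerce any
 * other module-param value back to the 2048 default.
 */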
3753static int __init be_init_module(void)
3754{
8e95a202
JP
3755 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3756 rx_frag_size != 2048) {
6b7c5b94
SP
3757 printk(KERN_WARNING DRV_NAME
3758 " : Module param rx_frag_size must be 2048/4096/8192."
3759 " Using 2048\n");
3760 rx_frag_size = 2048;
3761 }
6b7c5b94
SP
3762
3763 return pci_register_driver(&be_driver);
3764}
3765module_init(be_init_module);
3766
3767static void __exit be_exit_module(void)
3768{
3769 pci_unregister_driver(&be_driver);
3770}
3771module_exit(be_exit_module);